repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
pytorch-kaldi-gan | pytorch-kaldi-gan-master/parallel_dataset.py | import configparser
import sox
import logging
import torch
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
import os
import random
import shutil
import subprocess
import shlex
import sys
import math
def validate_dir(dir_list):
    """Return the directory list with entries containing a '.' removed.

    The original implementation removed items from ``dir_list`` while
    iterating over it, which skips the element that follows each removal
    (two consecutive dotted entries left the second one in the list).
    This version filters via slice assignment so the passed-in list is
    still mutated in place for callers that rely on that.
    """
    dir_list[:] = [d for d in dir_list if '.' not in d]
    return dir_list
def get_utterances(dir_list):
    """Separate utterance files from transcription (.txt) files.

    Returns a tuple ``(transcriptions, utterances)`` where
    ``transcriptions`` is the last ``.txt`` entry found (``''`` if none)
    and ``utterances`` is ``dir_list`` with every ``.txt`` entry removed.

    The original removed elements while iterating, which could leave a
    ``.txt`` file in the utterance list when two appeared back to back;
    this version records the transcription first and then filters the
    list in place.
    """
    transcriptions = ''
    for entry in dir_list:
        if '.txt' in entry:
            transcriptions = entry
    dir_list[:] = [d for d in dir_list if '.txt' not in d]
    return transcriptions, dir_list
def plot_waveform(waveform_tensor):
    """Display the given waveform tensor as a matplotlib line plot.

    The tensor is transposed, detached, moved to the CPU, and converted
    to NumPy before plotting; no gradients are tracked.
    """
    with torch.no_grad():
        plt.figure()
        samples = waveform_tensor.t().detach().to("cpu").numpy()
        plt.plot(samples)
        plt.show()
def normalize_tensor(tensor):
    """Scale the tensor so its values lie within [-1, 1].

    Divides by the largest absolute value present in the tensor
    (equivalent to the original max(|max|, |min|) comparison).
    """
    peak = torch.max(torch.abs(tensor))
    return tensor / peak
def get_average_power(clip):
    """Return the RMS (root-mean-square) power of the clip."""
    mean_square = torch.mean(clip * clip)
    return torch.sqrt(mean_square)
def add_noise(clean_wav, noise_wav, signal_to_noise, rate_of_repeat = 0, sample_rate = 16000):
    """Mix a noise waveform into a clean waveform at a target SNR.

    Parameters
    ----------
    clean_wav : 1-D tensor of clean speech samples.
    noise_wav : 1-D tensor of noise samples.
    signal_to_noise : SNR value used in ``10 ** (snr / 20)`` for the mixing
        factor. NOTE(review): the caller in this file passes an already
        linearized SNR (``10 ** (snr_db / 10)``), so the unit here is
        ambiguous -- confirm the intended scale.
    rate_of_repeat : seconds of silence appended after the noise clip
        before mixing (0 disables the gap).
    sample_rate : sample rate used to convert ``rate_of_repeat`` into samples.

    Returns
    -------
    Tensor of shape (1, len(clean_wav)) holding the normalized noisy mix.

    The power/factor/mix computation was previously duplicated in all
    three length branches; it is now computed once after the branches
    agree on ``new_noise``. The branch logic and RNG consumption are
    unchanged.
    """
    output_len = len(clean_wav)
    noise_len = len(noise_wav)
    if rate_of_repeat > 0:
        # Pad the noise with trailing silence so repeats get gaps between copies.
        padded = torch.zeros(noise_len + sample_rate * rate_of_repeat)
        padded[0:noise_len] = noise_wav
        noise_wav = padded
        noise_len = len(noise_wav)
    if output_len < noise_len:
        # Noise is longer: pick a random window out of the noise file.
        if (noise_len - output_len - 1) > 10:
            rnd = random.randrange(0, (noise_len - output_len - 1))
        else:
            rnd = 1
        new_noise = noise_wav[rnd:(rnd + output_len)]
    elif output_len > noise_len:
        # Noise is shorter: tile it until it covers the clean signal.
        n_repeat = int(output_len / noise_len)
        n_remain = output_len - (n_repeat * noise_len)
        new_noise = torch.zeros(output_len)
        for i in range(n_repeat):
            new_noise[i * noise_len:(i + 1) * noise_len] = noise_wav
        new_noise[n_repeat * noise_len:] = noise_wav[:n_remain]
    else:
        # Lengths already match; use the noise as-is.
        new_noise = noise_wav
    # Scale the noise so the clean/noise power ratio hits the target SNR,
    # then mix and normalize to [-1, 1].
    clean_power = get_average_power(clean_wav)
    noisy_power = get_average_power(new_noise)
    factor = (clean_power / noisy_power) / (10 ** (signal_to_noise / 20.0))  # noise coefficient for target SNR
    combined_signal = torch.add(clean_wav, torch.mul(new_noise, torch.sqrt(factor)))
    combined_signal = normalize_tensor(combined_signal)
    return combined_signal.view(1, -1)
def convolve_impulse(clean_wav, noise_wav, signal_to_noise):
    """Convolve the clean waveform with an impulse response at a target SNR.

    The impulse is amplitude-scaled so the clean/impulse power ratio
    matches ``signal_to_noise`` before a same-length convolution is
    applied. Returns a tensor of shape (1, len(clean_wav)).
    """
    output_len = len(clean_wav)
    noise_len = len(noise_wav)
    clean_power = get_average_power(clean_wav)
    noisy_power = get_average_power(noise_wav)
    # Amplitude coefficient that yields the requested SNR.
    factor = math.sqrt((clean_power / noisy_power) / (10 ** (signal_to_noise / 20.0)))
    if output_len < noise_len:
        # Impulse response is longer than the clip: take a random slice
        # (one tenth of the clip length) out of it.
        start = random.randrange(0, (noise_len - output_len - 1))
        new_noise = noise_wav[start:(start + int(output_len / 10))]
    else:
        # Impulse response is equal to or shorter than the clip.
        new_noise = noise_wav
    scaled_noise = torch.mul(new_noise, factor)
    convolution = np.convolve(clean_wav.numpy(), scaled_noise.numpy(), 'same')
    return torch.from_numpy(convolution).view(1, -1)
def get_random_noise_file(data_dir):
    """Load and return the waveform of a randomly chosen file in ``data_dir``."""
    candidates = os.listdir(data_dir)
    # randrange (not random.choice) keeps the RNG stream identical.
    chosen = candidates[random.randrange(0, len(candidates))]
    noise_tensor, _sample_rate = torchaudio.load(os.path.join(data_dir, chosen))
    return noise_tensor
def backup_and_replace(chapter_file, clean_word, noisy_word):
    """Back up a metadata file and replace a word throughout it.

    Copies ``chapter_file`` to a ``*_BACKUP.TXT`` sibling, then rewrites
    the original with every occurrence of ``clean_word`` replaced by
    ``noisy_word``. Does nothing when the file does not exist.

    The original implementation left both file handles open on error
    (and only closed the read handle after rewriting); this version uses
    context managers so handles are always released.
    """
    if not os.path.exists(chapter_file):
        return
    with open(chapter_file, "r") as src:
        new_lines = [line.replace(clean_word, noisy_word) for line in src]
    # Keep a pristine copy before overwriting the original.
    shutil.copyfile(chapter_file, chapter_file.replace(".TXT", "_BACKUP.TXT"))
    with open(chapter_file, "w") as dst:
        dst.writelines(new_lines)
def update_metadata_files(root_folder):
    """Apply the clean->noisy word replacement to the LibriSpeech metadata files.

    Reads the word pair from the module-level ``config`` and rewrites
    CHAPTERS.TXT and SPEAKERS.TXT (each with a backup) under ``root_folder``.
    """
    clean_word = config["data"]["clean_word"]
    noisy_word = config["data"]["noisy_word"]
    for name in ("CHAPTERS.TXT", "SPEAKERS.TXT"):
        backup_and_replace(os.path.join(root_folder, name), clean_word, noisy_word)
def run_shell(cmd):
    """Run ``cmd`` in a shell and return its decoded stdout.

    stderr is captured but discarded, matching the original behavior.
    ``communicate()`` already waits for termination, so the original's
    extra ``p.wait()`` (and its redundant function-local
    ``import subprocess``) have been removed.
    """
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, _err = p.communicate()
    return output.decode('utf-8')
def invoke_process_popen_poll_live(command, shellType=False, stdoutType=subprocess.PIPE):
    """Run ``command`` with Popen/poll so its stdout is echoed live.

    ``command`` is tokenized with ``shlex.split``. Returns the process
    return code, or None if the process could not be started.

    The bare ``except:`` was narrowed to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        process = subprocess.Popen(
            shlex.split(command), shell=shellType, stdout=stdoutType)
    except Exception:
        print("ERROR {} while running {}".format(sys.exc_info()[1], command))
        return None
    while True:
        output = process.stdout.readline()
        # poll() returns the exit code once the process has finished.
        # NOTE(review): if the final readline returns data just as the
        # process exits, that last line is dropped here -- preserved from
        # the original implementation.
        if process.poll() is not None:
            break
        if output:
            print(output.strip().decode())
    return process.poll()
def invoke_process_silent(command, shellType=False, stdoutType=subprocess.PIPE):
    """Run ``command`` while suppressing its stdout content.

    Like ``invoke_process_popen_poll_live`` but prints only blank lines
    as a minimal progress indicator. Returns the process return code, or
    None if the process could not be started.

    The bare ``except:`` was narrowed to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        process = subprocess.Popen(
            shlex.split(command), shell=shellType, stdout=stdoutType)
    except Exception:
        print("ERROR {} while running {}".format(sys.exc_info()[1], command))
        return None
    while True:
        output = process.stdout.readline()
        if process.poll() is not None:
            break
        if output:
            # Deliberately discard the content; just emit a newline.
            print()
    return process.poll()
logging.getLogger('sox').setLevel(logging.ERROR)
# Reading global cfg file (first argument-mandatory file)
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Set seed for noise adding
random.seed(int(config["noise"]["seed"]))
torch.manual_seed(int(config["noise"]["seed"]))
# Output folder creation
out_folder = config["data"]["out_folder"]
if not os.path.exists(out_folder):
os.makedirs(out_folder)
data_folder = config["data"]["data_folder"]
# Read cfg file options
snr_db = int(config["noise"]["snr_db"])
snr = math.pow(10, snr_db / 10)
snr_array = np.array(list(map(int, config["impulse"]["snrs"].split(","))))
snr_list = 10 ** (snr_array / 10)
print("- Reading config file......OK!")
if config["data"]["dataset"] == "librispeech":
update_metadata_files(config["data"]["root_folder"])
speaker_lst = os.listdir(data_folder)
speaker_lst = validate_dir(speaker_lst)
# Create parallel dataset
print("\n- Starting dataset parallelization.\n")
speaker_count = 1
for speaker in speaker_lst:
print(" Speaker {} / {} ".format(speaker_count, len(speaker_lst)).center(40, "="))
speaker_count += 1
speaker_dir = os.path.join(data_folder, speaker)
# Get chapters by speaker
chapter_lst = os.listdir(speaker_dir)
chapter_lst = validate_dir(chapter_lst)
chapter_count = 1
for chap in chapter_lst:
print("Chapter {} / {} \r".format(chapter_count, len(chapter_lst)), end = '')
chapter_count += 1
chapter_dir = os.path.join(speaker_dir, chap)
# Get utterances by speaker per chapter
utterance_lst = os.listdir(chapter_dir)
utt_transcripitons, utterance_lst = get_utterances(utterance_lst)
output_dir = os.path.join(out_folder, speaker, chap)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
transcription_from_dir = os.path.join(chapter_dir, utt_transcripitons)
transcription_to_dir = os.path.join(output_dir, utt_transcripitons)
shutil.copyfile(transcription_from_dir, transcription_to_dir)
for utt in utterance_lst:
utterance_dir = os.path.join(chapter_dir, utt)
utt_save_dir = os.path.join(output_dir, utt)
if config["styles"]["change_speed"] == "True":
random_number = random.randint(0, 1)
if random_number == 1:
vol_fraction = 1 + float(config["impulse"]["volume_change"])
else:
vol_fraction = 1 - float(config["impulse"]["volume_change"])
else:
vol_fraction = 1
if config["styles"]["change_volume"] == "True":
random_number = random.randint(0, 1)
if random_number == 1:
speed_fraction = 1 + float(config["impulse"]["speed_change"])
else:
speed_fraction = 1 - float(config["impulse"]["speed_change"])
else:
speed_fraction = 1
if config["styles"]["change_speed"] == "True" or config["styles"]["change_volume"] == "True":
# create a transformer
tfm = sox.Transformer()
tfm.tempo(speed_fraction, 's')
tfm.vol(vol_fraction)
tfm.build_file(
input_filepath = utterance_dir, sample_rate_in = int(config["impulse"]["sample_rate"]),
output_filepath = utt_save_dir
)
utterance_dir = utt_save_dir
if config["styles"]["additive_noise"] == "True":
recording, sample_rate = torchaudio.load(utterance_dir)
noise = get_random_noise_file(config["noise"]["noise_dir"])
recording = normalize_tensor(recording)
recording = add_noise(recording[0], noise[0], snr)
torchaudio.save(utt_save_dir, recording, sample_rate = sample_rate)
utterance_dir = utt_save_dir
if config["styles"]["add_impulse"] == "True":
recording, sample_rate = torchaudio.load(utterance_dir)
noise = get_random_noise_file(config["impulse"]["impulse_dir"])
recording = normalize_tensor(recording)
random_snr_value = random.randrange(len(snr_list))
recording = convolve_impulse(recording[0], noise[0], snr_list[random_snr_value])
recording = normalize_tensor(recording)
torchaudio.save(utt_save_dir, recording, sample_rate = sample_rate)
utterance_dir = utt_save_dir
downsample_clean = False
if config["styles"]["wav49_encode"] == "True":
output_sampling_rate = "16000"
if utterance_dir == utt_save_dir:
encode = "sox -G " + utterance_dir + " -r 8000 -c 1 -e gsm " + utterance_dir.replace(".flac", ".wav")
invoke_process_silent(encode)
removed = "rm " + utterance_dir
invoke_process_silent(removed)
flac_convert = "sox -G " + utterance_dir.replace(".flac", ".wav") + " -r " + output_sampling_rate + " " + utterance_dir
invoke_process_silent(flac_convert)
removed_wav = "rm " + utterance_dir.replace(".flac", ".wav")
invoke_process_silent(removed_wav)
else:
encode = "sox -G " + utterance_dir + " -r 8000 -c 1 -e gsm " + utt_save_dir.replace(".flac", ".wav")
invoke_process_silent(encode)
flac_convert = "sox -G " + utt_save_dir.replace(".flac", ".wav") + " -r " + output_sampling_rate + " " + utt_save_dir
invoke_process_silent(flac_convert)
removed_wav = "rm " + utt_save_dir.replace(".flac", ".wav")
invoke_process_silent(removed_wav)
elif downsample_clean:
encode = "sox -G " + utterance_dir + " -r 8000 -c 1 " + utt_save_dir
invoke_process_silent(encode)
cmd = "kaldi_decoding_scripts/create_parallel_dataset.sh " \
+ os.path.basename(config["data"]["out_folder"]) + " " \
+ os.path.dirname(config["data"]["root_folder"])
invoke_process_popen_poll_live(cmd)
print("\n\nDataset created successfully\n")
elif config["data"]["dataset"] == "swahili":
speaker_lst = os.listdir(data_folder)
speaker_lst = validate_dir(speaker_lst)
# Create parallel dataset
print("\n- Starting dataset parallelization.\n")
speaker_count = 1
for speaker in speaker_lst:
print(" Speaker {} / {} ".format(speaker_count, len(speaker_lst)).center(40, "="))
speaker_count += 1
speaker_dir = os.path.join(data_folder, speaker)
# Get utterances by speaker per chapter
utterance_lst = os.listdir(speaker_dir)
output_dir = os.path.join(out_folder, speaker)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for utt in utterance_lst:
utterance_dir = os.path.join(speaker_dir, utt)
utt_save_dir = os.path.join(output_dir, utt)
if config["styles"]["change_speed"] == "True":
random_number = random.randint(0, 1)
if random_number == 1:
vol_fraction = 1 + float(config["impulse"]["volume_change"])
else:
vol_fraction = 1 - float(config["impulse"]["volume_change"])
else:
vol_fraction = 1
if config["styles"]["change_volume"] == "True":
random_number = random.randint(0, 1)
if random_number == 1:
speed_fraction = 1 + float(config["impulse"]["speed_change"])
else:
speed_fraction = 1 - float(config["impulse"]["speed_change"])
else:
speed_fraction = 1
if config["styles"]["change_speed"] == "True" or config["styles"]["change_volume"] == "True":
# create a transformer
tfm = sox.Transformer()
tfm.tempo(speed_fraction, 's')
tfm.vol(vol_fraction)
tfm.build_file(
input_filepath = utterance_dir, sample_rate_in = int(config["impulse"]["sample_rate"]),
output_filepath = utt_save_dir
)
utterance_dir = utt_save_dir
if config["styles"]["additive_noise"] == "True":
recording, sample_rate = torchaudio.load(utterance_dir)
noise = get_random_noise_file(config["noise"]["noise_dir"])
recording = normalize_tensor(recording)
recording = add_noise(recording[0], noise[0], snr)
torchaudio.save(utt_save_dir, recording, sample_rate = sample_rate)
utterance_dir = utt_save_dir
if config["styles"]["add_impulse"] == "True":
recording, sample_rate = torchaudio.load(utterance_dir)
noise = get_random_noise_file(config["impulse"]["impulse_dir"])
recording = normalize_tensor(recording)
random_snr_value = random.randrange(len(snr_list))
recording = convolve_impulse(recording[0], noise[0], snr_list[random_snr_value])
recording = normalize_tensor(recording)
torchaudio.save(utt_save_dir, recording, sample_rate = sample_rate)
utterance_dir = utt_save_dir
'''cmd = "kaldi_decoding_scripts/create_parallel_dataset.sh " \
+ os.path.basename(config["data"]["out_folder"]) + " " \
+ os.path.dirname(config["data"]["root_folder"])
invoke_process_popen_poll_live(cmd)'''
print("\n\nDataset created successfully\n") | 17,550 | 34.031936 | 143 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/plot_acc_and_loss.py | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import sys
import configparser
import os
from utils import create_curves
# Checking arguments
if len(sys.argv) != 2:
print("ERROR: Please provide only the path of the cfg_file as : python plot_acc_and_loss.py cfg/TIMIT_MLP_mfcc.cfg")
# Checking if the cfg_file exists and loading it
cfg_file = sys.argv[1]
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist !\n" % (cfg_file))
sys.exit(0)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Getting the parameters
# NOTE(review): indentation was lost in extraction; in the original
# script the statements below follow the else-branch config load.
valid_data_lst = config["data_use"]["valid_with"].split(",")
out_folder = config["exp"]["out_folder"]
N_ep = int(config["exp"]["N_epochs_tr"])
# Handling call without running run_exp.py before
# NOTE(review): this concatenates "res.res" directly onto out_folder, so
# it only works when out_folder ends with a path separator -- confirm.
if not (os.path.exists(out_folder + "res.res")):
sys.stderr.write("ERROR: Please run the experiment in order to get results to plot first !\n")
sys.exit(0)
# Creating files and curves
create_curves(out_folder, N_ep, valid_data_lst)
| 1,203 | 30.684211 | 120 | py |
pytorch-kaldi-gan | pytorch-kaldi-gan-master/save_raw_fea.py | ##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
#
# Description: This script generates kaldi ark files containing raw features.
# The file list must be a file containing "snt_id file.wav".
# Note that only wav files are supported here (sphere or other format are not supported)
##########################################################
import scipy.io.wavfile
import math
import numpy as np
import os
from data_io import read_vec_int_ark, write_mat
# Run it for all the data chunks (e.g., train, dev, test) => uncomment
lab_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test"
lab_opts = "ali-to-pdf"
out_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test"
wav_lst = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst"
scp_file_out = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp"
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/dev'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp'
# scp_file_out='quick_test/data/dev/feats_raw.scp'
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/test'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp'
# scp_file_out='quick_test/data/test/feats_raw.scp'
sig_fs = 16000 # Hz
sig_wlen = 200 # ms
lab_fs = 16000 # Hz
lab_wlen = 25 # ms
lab_wshift = 10 # ms
sig_wlen_samp = int((sig_fs * sig_wlen) / 1000)
lab_wlen_samp = int((lab_fs * lab_wlen) / 1000)
lab_wshift_samp = int((lab_fs * lab_wshift) / 1000)
# Create the output folder
try:
os.stat(out_folder)
except:
os.makedirs(out_folder)
# Creare the scp file
scp_file = open(scp_file_out, "w")
# reading the labels
lab = {
k: v
for k, v in read_vec_int_ark(
"gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|", out_folder
)
}
# reading the list file
with open(wav_lst) as f:
sig_lst = f.readlines()
sig_lst = [x.strip() for x in sig_lst]
for sig_file in sig_lst:
sig_id = sig_file.split(" ")[0]
sig_path = sig_file.split(" ")[1]
[fs, signal] = scipy.io.wavfile.read(sig_path)
signal = signal.astype(float) / 32768
signal = signal / np.max(np.abs(signal))
cnt_fr = 0
beg_samp = 0
frame_all = []
while beg_samp + lab_wlen_samp < signal.shape[0]:
sample_fr = np.zeros(sig_wlen_samp)
central_sample_lab = int(((beg_samp + lab_wlen_samp / 2) - 1))
central_fr_index = int(((sig_wlen_samp / 2) - 1))
beg_signal_fr = int(central_sample_lab - (sig_wlen_samp / 2))
end_signal_fr = int(central_sample_lab + (sig_wlen_samp / 2))
if beg_signal_fr >= 0 and end_signal_fr <= signal.shape[0]:
sample_fr = signal[beg_signal_fr:end_signal_fr]
else:
if beg_signal_fr < 0:
n_left_samples = central_sample_lab
sample_fr[central_fr_index - n_left_samples + 1 :] = signal[0:end_signal_fr]
if end_signal_fr > signal.shape[0]:
n_right_samples = signal.shape[0] - central_sample_lab
sample_fr[0 : central_fr_index + n_right_samples + 1] = signal[beg_signal_fr:]
frame_all.append(sample_fr)
cnt_fr = cnt_fr + 1
beg_samp = beg_samp + lab_wshift_samp
frame_all = np.asarray(frame_all)
# Save the matrix into a kaldi ark
out_file = out_folder + "/" + sig_id + ".ark"
write_mat(out_folder, out_file, frame_all, key=sig_id)
print(sig_id)
scp_file.write(sig_id + " " + out_folder + "/" + sig_id + ".ark:" + str(len(sig_id) + 1) + "\n")
N_fr_comp = 1 + math.floor((signal.shape[0] - 400) / 160)
# print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr))
scp_file.close()
| 4,010 | 31.877049 | 119 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/setup.py | from setuptools import setup, find_packages
# Distribution metadata for the "pytorchts" package.
setup(
name='pytorchts',
version='0.1.0',
description="PyTorch Probabilistic Time Series Modeling framework",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url='https://github.com/kashif/pytorch-ts',
license='MIT',
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=True,
python_requires=">=3.6",
# Runtime dependencies; torch and pydantic are pinned to exact versions.
install_requires = [
'torch==1.4.0',
'holidays',
'numpy',
'pandas',
'scipy',
'tqdm',
'pydantic==1.4.0',
'matplotlib',
'python-rapidjson',
'tensorboard',
],
test_suite='tests',
tests_require = [
'flake8',
'pytest'
],
)
#pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git
| 871 | 21.947368 | 74 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/training.py | import time
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from pathlib import Path
import logging
import os
from pts.model import Predictor
from pts.model.deepar import DeepAREstimator
from pts.modules import TweedieOutput
from pts.trainer import Trainer
from pts.core.logging import get_log_path, set_logger
from load_dataset import make_m5_dataset
from pts.feature.time_feature import *
logger = logging.getLogger("mofl").getChild("training")
prediction_length = 28
def get_rolled_deepAR_estimator(stat_cat_cardinalities, device, log_path):
batch_size = 64
num_batches_per_epoch = 30490 // batch_size + 1
return DeepAREstimator(
input_size=102,
num_cells=120,
prediction_length=prediction_length,
dropout_rate=0.1,
freq="D",
time_features=[DayOfWeek(), DayOfMonth(), MonthOfYear(), WeekOfYear(), Year()],
distr_output = TweedieOutput(1.2),
lags_seq=[1],
moving_avg_windows=[7, 28],
scaling=False,
use_feat_dynamic_real=True,
use_feat_static_cat=True,
use_feat_dynamic_cat=True,
cardinality=stat_cat_cardinalities,
dc_cardinality=[5, 5, 31, 31], #event_type1,2 / event_name1,2
dc_embedding_dimension=[2, 2, 15, 2],
pick_incomplete=True,
trainer=Trainer(
learning_rate=1e-3,
epochs=300,
num_batches_per_epoch=num_batches_per_epoch,
betas=(0.9, 0.98),
use_lr_scheduler=True,
lr_warmup_period=num_batches_per_epoch*5,
batch_size=batch_size,
device=device,
log_path=log_path,
num_workers=4,
)
)
def get_estimator(model_name, stat_cat_cardinalities, device, base_log_path, full_log_path):
    """Look up and invoke the ``get_<model_name>_estimator`` factory in this module."""
    factory = globals()["get_" + model_name + "_estimator"]
    return factory(stat_cat_cardinalities, device, full_log_path)
def main(args):
# parameters
comment = args.comment
model_name = args.model
data_path = args.data_path
trial = args.trial
# set default config
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
full_log_path, base_log_path, trial_path = get_log_path(f"m5_submission/{model_name}", comment, trial)
# set logger
set_logger(full_log_path)
# make dataset
train_ds, val_ds, stat_cat_cardinalities = make_m5_dataset(m5_input_path=data_path, exclude_no_sales=True)
# get estimator
logger.info(f"Using {model_name} model...")
estimator = get_estimator(model_name, stat_cat_cardinalities, device, base_log_path, full_log_path)
# path for trained model
model_path = Path(full_log_path+"/trained_model")
model_path.mkdir()
# prediction
predictor = estimator.train(train_ds, validation_period=10)
# save model
if args.save_model:
logger.info(f"Save {model_name} model...")
model_path = Path(full_log_path+"/predictor")
model_path.mkdir()
predictor.serialize(model_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_path",
default='/data/m5'
)
parser.add_argument(
"--comment",
type=str,
default='drop1'
)
parser.add_argument(
"--trial",
type=str,
default='t0'
)
args = parser.parse_args()
args.save_model = True # always save model
args.model = 'rolled_deepAR'
main(args) | 3,592 | 27.744 | 110 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/make_predictions.py |
import numpy as np
import torch
import os
from tqdm import tqdm
from pathlib import Path
import logging
from pts.core.logging import get_log_path
from pts.model import Predictor
from load_dataset import make_m5_dataset
from pts.evaluation.backtest import make_evaluation_predictions, make_validation_data
#test_start : Start index of the final possible data chunk. For example, for M5 dataset, correct value is 1942
TEST_START = 1942
PREDICTION_LEN = 28
def make_predictions(predictor, ds, num_samples=30, n_iter = 15, start_offset=0, log_path=None, show_progress=True):
    """Generate rolling predictions over successive validation windows.

    Each iteration moves the validation start back by PREDICTION_LEN
    samples from TEST_START. The per-series mean of the forecast sample
    paths is saved as ``prediction_<start>.npy`` when ``log_path`` is
    given. Returns the prediction of the last window processed.
    """
    for i in tqdm(range(start_offset, n_iter + start_offset), disable=not show_progress):
        window_start = TEST_START - PREDICTION_LEN * i
        # Run the predictor on this validation window; the final window
        # (start index 1942) uses 100 sample paths instead of num_samples.
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=make_validation_data(ds, val_start=window_start, val_start_final=TEST_START - PREDICTION_LEN),
            predictor=predictor,
            num_samples=100 if window_start == 1942 else num_samples)
        forecasts = list(forecast_it)
        # Collapse the sample paths to a point forecast per series.
        prediction = np.zeros((len(forecasts), PREDICTION_LEN))
        for n in range(len(forecasts)):
            prediction[n] = np.mean(forecasts[n].samples, axis=0)
        if log_path is not None:
            np.save(log_path / f'prediction_{window_start}.npy', prediction)
    return prediction  # prediction of the last window
def run_prediction(args, trial_path, model_idx, ds, predictor):
    """Load one training checkpoint into ``predictor`` and run CV predictions.

    Results are written under ``<trial_path>/CV/<model_idx>``.
    """
    cv_log_path = Path(os.path.join(trial_path, 'CV', model_idx))
    cv_log_path.mkdir(parents=True, exist_ok=True)
    # Restore the network weights for this checkpoint.
    checkpoint_dir = Path(os.path.join(trial_path, 'trained_model'))
    predictor.prediction_net.load_state_dict(torch.load(checkpoint_dir / model_idx))
    if args.bs is not None:
        predictor.batch_size = args.bs
    predictor.prediction_net.num_parallel_samples = args.n_par
    make_predictions(predictor, ds, num_samples=args.n_samples, log_path=cv_log_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
###[Important argument]
parser.add_argument(
"--data_path",
default='/data/m5'
)
parser.add_argument(
"--comment",
type=str,
default='drop1'
)
parser.add_argument(
"--trial",
type=str,
default='t0'
)
###[Important argument]
parser.add_argument(
"--bs",
type=int,
default=6400
)
parser.add_argument(
"--n_par",
default=30
)
parser.add_argument(
"--n_samples",
default=30
)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load model
model_name = 'rolled_deepAR'
trial_path, _, _ = get_log_path(f"m5_submission/{model_name}", log_comment=args.comment, trial=args.trial, mkdir=False)
print(f"Make predictions for {trial_path}")
pretrained_model_path = Path(os.path.join(trial_path, 'predictor'))
if not pretrained_model_path.exists():
assert False, "Error: Pretrained model not exist!"
predictor = Predictor.deserialize(pretrained_model_path, device)
# load data
test_ds = make_m5_dataset(m5_input_path=args.data_path, exclude_no_sales=False, ds_split=False, prediction_start=1942)
# generate predictions
for epoch in range(200,300,10):
model_idx = f"train_net_{epoch}"
run_prediction(args, trial_path, model_idx, test_ds, predictor) | 3,587 | 30.2 | 123 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/trainer.py | import time
from typing import List, Optional, Tuple
import logging
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from pts.core.component import validated
import os
logger = logging.getLogger("mofl").getChild("trainer")
def count_parameters(model):
    """Total number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
class Trainer:
@validated()
def __init__(
self,
epochs: int = 100,
batch_size: int = 32,
num_batches_per_epoch: int = 50,
num_workers: int = 0, # TODO worker>0 causes performace drop if uses iterable dataset
pin_memory: bool = False,
learning_rate: float = 1e-3,
weight_decay: float = 1e-6,
betas: Tuple[float, float] = (0.9, 0.999),
device: Optional[torch.device] = None,
log_path: Optional[str] = None,
use_lr_scheduler: bool = False,
lr_warmup_period: int = 0, # num of iterations for warmup
) -> None:
self.epochs = epochs
self.batch_size = batch_size
self.num_batches_per_epoch = num_batches_per_epoch
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.betas = betas
self.device = device
self.num_workers = num_workers
self.pin_memory = pin_memory
self.log_path = log_path
self.use_lr_scheduler = use_lr_scheduler
self.lr_warmup_period = lr_warmup_period
self.roll_mat_csr = None
def __call__(
self, net: nn.Module, input_names: List[str], training_data_loader: DataLoader, validation_period: int = 1
) -> None:
# loggin model size
net_name = type(net).__name__
num_model_param = count_parameters(net)
logger.info(
f"Number of parameters in {net_name}: {num_model_param}"
)
if torch.cuda.device_count() > 1:
logger.info("Training with %d gpus..." % (torch.cuda.device_count()))
net = nn.DataParallel(net)
optimizer = torch.optim.Adam(
net.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay, betas=self.betas, #eps=1e-9,
)
if self.use_lr_scheduler:
total_iter = self.epochs * self.num_batches_per_epoch
scheduler_cos = CosineAnnealingLR(optimizer, total_iter, eta_min=1e-6)
scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=self.lr_warmup_period, after_scheduler=scheduler_cos)
writer = SummaryWriter(log_dir=self.log_path)
#writer.add_graph(net)
def loop(
epoch_no, data_loader, is_training: bool = True
) -> float:
tic = time.time()
avg_epoch_loss = 0.0
cumlated_sqerr = 0.0
total_seq = 0
errors = []
if is_training:
net.train()
else:
net.eval()
with tqdm(data_loader, total=float("inf"),disable=os.environ.get("DISABLE_TQDM", False)) as it:
for batch_no, data_entry in enumerate(it, start=1):
optimizer.zero_grad()
inputs = [data_entry[k].to(self.device) for k in input_names]
output = net(*inputs)
if isinstance(output, (list, tuple)):
loss = output[0]
error = output[1]
cumlated_sqerr += (error ** 2).sum()
total_seq += len(inputs[0])
if not is_training:
errors.append(error)
else:
loss = output
loss = loss.sum()
avg_epoch_loss += loss.item()
lr = optimizer.param_groups[0]['lr']
it.set_postfix(
ordered_dict={
"lr": lr,
("" if is_training else "validation_")
+ "avg_epoch_loss": avg_epoch_loss / batch_no,
"epoch": epoch_no,
},
refresh=False,
)
n_iter = epoch_no*self.num_batches_per_epoch + batch_no
if n_iter % 20 == 0:
if is_training:
writer.add_scalar('Loss/train', loss.item(), n_iter)
writer.add_scalar('Learning rate', lr, n_iter)
else:
writer.add_scalar('Loss/validation', loss.item(), n_iter)
if is_training:
loss.backward()
optimizer.step()
if self.use_lr_scheduler:
scheduler.step()
if self.num_batches_per_epoch == batch_no:
#for name, param in net.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)
break
# mark epoch end time and log time cost of current epoch
toc = time.time()
# logging
'''logger.info(
"Epoch[%d] Elapsed time %.3f seconds",
epoch_no,
(toc - tic),
)'''
lv = avg_epoch_loss / batch_no
logger.info(
"Epoch[%d] Evaluation metric '%s'=%f",
epoch_no,
("" if is_training else "validation_") + "epoch_loss",
lv,
)
writer.add_scalar('Loss_epoch/' + ("train" if is_training else "validation") , lv, epoch_no)
if total_seq != 0:
writer.add_scalar('MSE_epoch/' + ("train" if is_training else "validation") , cumlated_sqerr / total_seq, epoch_no)
return lv
for epoch_no in range(self.epochs):
# training
epoch_loss = loop(epoch_no, training_data_loader)
if epoch_no % validation_period == 0 and epoch_no != 0:
# save model
torch.save(net.state_dict(), f"{self.log_path}/trained_model/train_net_{epoch_no}")
writer.close()
| 6,578 | 35.55 | 137 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/scaler.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
import torch.nn as nn
class Scaler(ABC, nn.Module):
    """Base class for modules that rescale a batch of time series.

    Subclasses implement ``compute_scale``; ``forward`` divides the data
    by that scale and returns both the scaled data and the scale.
    """

    def __init__(self, keepdim: bool = False):
        super().__init__()
        # When True, the returned scale keeps a singleton time axis: (N, 1, C).
        self.keepdim = keepdim

    @abstractmethod
    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        """Return a per-item scale of shape (N, C)."""

    def forward(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``, with 1 marking
            observed points and 0 marking missing points.

        Returns
        -------
        Tensor
            The "scaled" data, shape (N, T, C).
        Tensor
            The scale: shape (N, C) if ``keepdim == False``, else (N, 1, C).
        """
        scale = self.compute_scale(data, observed_indicator)
        if self.keepdim:
            scale = scale.unsqueeze(1)
            return data / scale, scale
        return data / scale.unsqueeze(1), scale
class MeanScaler(Scaler):
    """
    The ``MeanScaler`` computes a per-item scale as the average absolute
    value over time, taken only over the observed entries (as flagged by the
    indicator). Items with no observed data — or only zeros — fall back to a
    scale computed over the whole batch.

    Parameters
    ----------
    minimum_scale
        Lower bound for the scale, used when a series contains only zeros.
    """

    def __init__(self, minimum_scale: float = 1e-10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.register_buffer("minimum_scale", torch.tensor(minimum_scale))

    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        # Per-item statistics over the time axis; shapes (N, C).
        observed_count = observed_indicator.sum(dim=1)
        abs_sum = (data.abs() * observed_indicator).sum(dim=1)

        # Batch-level fallback scale, one value per dimension (C,).
        batch_count = torch.max(
            observed_count.sum(dim=0), torch.ones_like(observed_count.sum(dim=0))
        )
        batch_scale = abs_sum.sum(dim=0) / batch_count

        # Per-item scale; clamp the denominator to avoid division by zero.
        item_scale = abs_sum / torch.max(
            observed_count, torch.ones_like(observed_count)
        )

        # Use the batch-level scale for items that are all-missing or all-zero.
        item_scale = torch.where(
            abs_sum > torch.zeros_like(abs_sum),
            item_scale,
            batch_scale * torch.ones_like(observed_count),
        )
        return torch.max(item_scale, self.minimum_scale).detach()
class NOPScaler(Scaler):
    """
    No-op scaler: assigns every item a scale of one, so the data passes
    through :meth:`Scaler.forward` unchanged.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        # Mean over time of an all-ones tensor yields ones of shape (N, C)
        # with the dtype/device of ``data``.
        ones = torch.ones_like(data)
        return ones.mean(dim=1)
| 3,528 | 31.376147 | 91 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/lambda_layer.py | import torch.nn as nn
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable so it can be composed as an ``nn.Module``."""

    def __init__(self, function):
        super().__init__()
        self._func = function

    def forward(self, x, *args):
        # delegate straight to the wrapped callable
        return self._func(x, *args)
| 215 | 18.636364 | 35 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution_output.py | from abc import ABC, abstractmethod
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import (
Distribution,
Beta,
NegativeBinomial,
StudentT,
Normal,
Independent,
LowRankMultivariateNormal,
MultivariateNormal,
TransformedDistribution,
AffineTransform,
Poisson
)
from pts.core.component import validated
from .lambda_layer import LambdaLayer
from .distribution import ConstantDistribution, Tweedie
class ArgProj(nn.Module):
    """Projects network features onto one tensor per distribution argument.

    One linear layer is created per entry of ``args_dim``; the raw
    projections are then constrained via ``domain_map``. If
    ``in_features == 0`` no projection is learned and the input is passed
    to ``domain_map`` unchanged.
    """

    def __init__(
        self,
        in_features: int,
        args_dim: Dict[str, int],
        domain_map: Callable[..., Tuple[torch.Tensor]],
        dtype: np.dtype = np.float32,
        prefix: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.dtype = dtype
        self.projection = in_features != 0
        if self.projection:
            self.proj = nn.ModuleList(
                nn.Linear(in_features, dim) for dim in args_dim.values()
            )
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        if not self.projection:
            # pass-through mode: the caller's features are the raw arguments
            return self.domain_map(x)
        raw_args = [layer(x) for layer in self.proj]
        return self.domain_map(*raw_args)
class Output(ABC):
in_features: int
args_dim: Dict[str, int]
_dtype: np.dtype = np.float32
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, dtype: np.dtype):
self._dtype = dtype
def get_args_proj(self, in_features: int, prefix: Optional[str] = None) -> ArgProj:
return ArgProj(
in_features=in_features,
args_dim=self.args_dim,
domain_map=LambdaLayer(self.domain_map),
prefix=prefix,
dtype=self.dtype,
)
@abstractmethod
def domain_map(self, *args: torch.Tensor):
pass
class DistributionOutput(Output, ABC):
    """Output whose ``domain_map`` arguments parameterize ``distr_cls``."""

    distr_cls: type

    @validated()
    def __init__(self) -> None:
        pass

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Instantiate ``distr_cls`` from ``distr_args``; when ``scale`` is
        given, wrap the result in the affine transform ``x -> scale * x``."""
        distr = self.distr_cls(*distr_args)
        if scale is None:
            return distr
        return TransformedDistribution(distr, [AffineTransform(loc=0, scale=scale)])
class NormalOutput(DistributionOutput):
    """Gaussian output: ``loc`` (mean) and ``scale`` (std)."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distr_cls: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # the std must be positive; softplus is smooth near zero
        return loc.squeeze(-1), F.softplus(scale).squeeze(-1)

    @property
    def event_shape(self) -> Tuple:
        # univariate
        return ()
class BetaOutput(DistributionOutput):
    """Beta output: both concentration parameters strictly positive."""

    args_dim: Dict[str, int] = {"concentration1": 1, "concentration0": 1}
    distr_cls: type = Beta

    @classmethod
    def domain_map(cls, concentration1, concentration0):
        # softplus keeps both shape parameters positive; the epsilon guards
        # against an exact zero
        return (
            (F.softplus(concentration1) + 1e-8).squeeze(-1),
            (F.softplus(concentration0) + 1e-8).squeeze(-1),
        )

    @property
    def event_shape(self) -> Tuple:
        # univariate
        return ()
class TweedieOutput(DistributionOutput):
    """Tweedie output with a fixed variance power ``rho`` in (1, 2).

    ``rho`` is not learned: ``domain_map`` overwrites the network's rho
    projection with the constant ``tweedie_power``.
    """

    args_dim: Dict[str, int] = {"log_mu": 1, "rho": 1}  # , "dispersion": 1} TODO: add dispersion

    @validated()
    def __init__(self, tweedie_power=1.5) -> None:
        # rho : tweedie variance power (1 ~ 2)
        self.tweedie_power = tweedie_power

    def domain_map(self, log_mu, rho):
        # pin rho to the configured constant, broadcast to log_mu's shape
        rho = self.tweedie_power * torch.ones_like(log_mu)
        return log_mu.squeeze(-1), rho.squeeze(-1)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the Tweedie distribution; ``scale`` multiplies the mean,
        which is additive in log space."""
        log_mu, rho = distr_args
        if scale is not None:
            # Out-of-place add: the previous ``log_mu += torch.log(scale)``
            # mutated the caller's tensor in place (an error on leaf tensors
            # requiring grad, silent corruption of distr_args otherwise).
            log_mu = log_mu + torch.log(scale)
            # TODO : rho scaling
        return Tweedie(log_mu, rho)

    @property
    def event_shape(self) -> Tuple:
        # univariate
        return ()
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial output parameterized by the mean ``mu`` and
    ``alpha`` (the inverse of the count parameter: alpha = 1/r)."""

    args_dim: Dict[str, int] = {"mu": 1, "alpha": 1}

    @classmethod
    def domain_map(cls, mu, alpha):
        # both parameters must be strictly positive
        mu = F.softplus(mu) + 1e-8
        alpha = F.softplus(alpha) + 1e-8  # alpha = 1/r
        return mu.squeeze(-1), alpha.squeeze(-1)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the distribution; ``scale`` rescales the mean only
        (GluonTS convention)."""
        mu, alpha = distr_args
        if scale is not None:
            # Out-of-place multiply: ``mu *= scale`` mutated the caller's
            # tensor. The previous ``alpha /= scale`` (self-flagged FIXME)
            # is dropped: scaling the heterogeneity by 1/scale has no sound
            # basis and could go negative via the commented alternative;
            # GluonTS scales only the mean and leaves alpha untouched.
            mu = mu * scale
        r = 1.0 / alpha
        p = mu * alpha / (1.0 + mu * alpha)  # success prob: p = mu / (r + mu)
        return NegativeBinomial(total_count=r, probs=p)

    @property
    def event_shape(self) -> Tuple:
        # univariate
        return ()
class StudentTOutput(DistributionOutput):
    """Student's t output: degrees of freedom ``df`` (> 2), ``loc``, ``scale``."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distr_cls: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # df > 2 keeps the variance finite; scale must be positive
        df = 2.0 + F.softplus(df)
        scale = F.softplus(scale)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)

    @property
    def event_shape(self) -> Tuple:
        # univariate
        return ()
class LowRankMultivariateNormalOutput(DistributionOutput):
    """Multivariate normal with low-rank-plus-diagonal covariance
    ``cov_factor @ cov_factor.T + diag(cov_diag)``."""

    def __init__(
        self, dim: int, rank: int, sigma_init: float = 1.0, sigma_minimum: float = 1e-3,
    ) -> None:
        self.distr_cls = LowRankMultivariateNormal
        self.dim = dim
        self.rank = rank
        self.sigma_init = sigma_init
        self.sigma_minimum = sigma_minimum
        self.args_dim = {"loc": dim, "cov_factor": dim * rank, "cov_diag": dim}

    def domain_map(self, loc, cov_factor, cov_diag):
        # bias chosen so the initial diagonal variance is roughly sigma_init**2
        if self.sigma_init > 0.0:
            diag_bias = self.inv_softplus(self.sigma_init ** 2)
        else:
            diag_bias = 0.0
        # unflatten the factor: (..., dim * rank) -> (..., dim, rank)
        factor_shape = cov_factor.shape[:-1] + (self.dim, self.rank)
        cov_factor = cov_factor.reshape(factor_shape)
        cov_diag = F.softplus(cov_diag + diag_bias) + self.sigma_minimum ** 2
        return loc, cov_factor, cov_diag

    def inv_softplus(self, y):
        # softplus(x) ~= x for large x; skip the exp to avoid overflow
        return np.log(np.exp(y) - 1.0) if y < 20.0 else y

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
class IndependentNormalOutput(DistributionOutput):
    """Diagonal multivariate normal built from ``dim`` independent normals."""

    def __init__(self, dim: int) -> None:
        self.dim = dim
        self.args_dim = {"loc": self.dim, "scale": self.dim}

    def domain_map(self, loc, scale):
        # per-dimension stds must be positive
        return loc, F.softplus(scale)

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the distribution; ``scale`` wraps it in ``x -> scale * x``."""
        base = Independent(Normal(*distr_args), 1)
        if scale is None:
            return base
        return TransformedDistribution(base, [AffineTransform(loc=0, scale=scale)])
class MultivariateNormalOutput(DistributionOutput):
    """Full multivariate normal parameterized via a Cholesky factor
    ``scale_tril`` (positive diagonal, free strictly-lower triangle)."""

    def __init__(self, dim: int) -> None:
        self.args_dim = {"loc": dim, "scale_tril": dim * dim}
        self.dim = dim

    def domain_map(self, loc, scale):
        d = self.dim
        device = scale.device
        # unflatten (..., d*d) -> (..., d, d)
        scale = scale.reshape(scale.shape[:-1] + (d, d))
        eye = torch.eye(d, device=device)
        # positive diagonal via softplus; strictly-lower part passes through
        diag_part = F.softplus(scale * eye) * eye
        lower_mask = torch.tril(torch.ones_like(scale), diagonal=-1)
        return loc, scale * lower_mask + diag_part

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the distribution; ``scale`` wraps it in ``x -> scale * x``."""
        loc, scale_tri = distr_args
        base = MultivariateNormal(loc=loc, scale_tril=scale_tri)
        if scale is None:
            return base
        return TransformedDistribution(base, [AffineTransform(loc=0, scale=scale)])

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
class FlowOutput(DistributionOutput):
    """Adapter exposing a conditional normalizing flow as a distribution
    output; the single projected argument is the conditioning vector."""

    def __init__(self, flow, input_size, cond_size):
        self.args_dim = {"cond": cond_size}
        self.flow = flow
        self.dim = input_size

    def domain_map(self, cond):
        # the flow consumes the conditioning vector unconstrained
        return (cond,)

    def distribution(self, distr_args, scale=None):
        (cond,) = distr_args
        # stash scale/conditioning on the flow object itself
        if scale is not None:
            self.flow.scale = scale
        self.flow.cond = cond
        return self.flow

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
| 8,743 | 25.904615 | 95 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/flows.py | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
def create_masks(
    input_size, hidden_size, n_hidden, input_order="sequential", input_degrees=None
):
    """Build MADE connectivity masks (MADE paper, sec. 4).

    Each unit is assigned a degree; a connection from a unit of degree
    ``d_in`` to one of degree ``d_out`` is kept only when ``d_out >= d_in``,
    which makes the network autoregressive in the chosen variable order.

    Parameters
    ----------
    input_order
        "sequential" or "random" ordering of the input variables.
    input_degrees
        When stacking MADEs, the (flipped) order of the previous layer;
        overrides the strategy above for the input layer.

    Returns
    -------
    masks
        One float mask per weight matrix, ``masks[i]`` of shape
        (fan_out, fan_in) between layer i and i+1.
    input_degrees
        The degrees assigned to the input layer.
    """
    degrees = []
    if input_order == "sequential":
        degrees.append(
            torch.arange(input_size) if input_degrees is None else input_degrees
        )
        # hidden degrees cycle 0 .. input_size-2 so every hidden unit can
        # feed at least one output
        for _ in range(n_hidden + 1):
            degrees.append(torch.arange(hidden_size) % (input_size - 1))
        degrees.append(
            torch.arange(input_size) % input_size - 1
            if input_degrees is None
            else input_degrees % input_size - 1
        )
    elif input_order == "random":
        degrees.append(
            torch.randperm(input_size) if input_degrees is None else input_degrees
        )
        for _ in range(n_hidden + 1):
            lo = min(degrees[-1].min().item(), input_size - 1)
            degrees.append(torch.randint(lo, input_size, (hidden_size,)))
        lo = min(degrees[-1].min().item(), input_size - 1)
        degrees.append(
            torch.randint(lo, input_size, (input_size,)) - 1
            if input_degrees is None
            else input_degrees - 1
        )

    # mask[i, j] = 1 iff degree(out unit i) >= degree(in unit j)
    masks = [
        (d_out.unsqueeze(-1) >= d_in.unsqueeze(0)).float()
        for d_in, d_out in zip(degrees[:-1], degrees[1:])
    ]
    return masks, degrees[0]
class FlowSequential(nn.Sequential):
    """Container chaining flow layers; accumulates log|det J| across them."""

    def forward(self, x, y):
        total_log_det = 0
        for layer in self:
            x, log_det = layer(x, y)
            total_log_det = total_log_det + log_det
        return x, total_log_det

    def inverse(self, u, y):
        # invert the stack back-to-front
        total_log_det = 0
        for layer in reversed(self):
            u, log_det = layer.inverse(u, y)
            total_log_det = total_log_det + log_det
        return u, total_log_det
class BatchNorm(nn.Module):
    """RealNVP BatchNorm layer, used as an invertible flow step.

    In training mode, normalization uses the current batch statistics (and
    updates the exponential running statistics); in eval mode it uses the
    running statistics. ``inverse`` in training mode reuses the batch
    statistics captured by the most recent ``forward`` call.
    """
    def __init__(self, input_size, momentum=0.9, eps=1e-5):
        # input_size: number of features (last axis of the inputs)
        # momentum: weight kept by the running stats at each update
        # eps: numerical floor added to the variance before sqrt/log
        super().__init__()
        self.momentum = momentum
        self.eps = eps
        # learnable affine parameters (gamma stored in log space so the
        # effective gain exp(log_gamma) is always positive)
        self.log_gamma = nn.Parameter(torch.zeros(input_size))
        self.beta = nn.Parameter(torch.zeros(input_size))
        self.register_buffer("running_mean", torch.zeros(input_size))
        self.register_buffer("running_var", torch.ones(input_size))
    def forward(self, x, cond_y=None):
        # cond_y is accepted for interface compatibility with the other
        # flow layers but is unused here
        if self.training:
            # flatten all leading axes: statistics are per-feature
            self.batch_mean = x.view(-1, x.shape[-1]).mean(0)
            # note MAF paper uses biased variance estimate; ie x.var(0, unbiased=False)
            self.batch_var = x.view(-1, x.shape[-1]).var(0)
            # update running statistics:
            # running <- momentum * running + (1 - momentum) * batch
            self.running_mean.mul_(self.momentum).add_(
                self.batch_mean.data * (1 - self.momentum)
            )
            self.running_var.mul_(self.momentum).add_(
                self.batch_var.data * (1 - self.momentum)
            )
            mean = self.batch_mean
            var = self.batch_var
        else:
            mean = self.running_mean
            var = self.running_var
        # compute normalized input (cf original batch norm paper algo 1)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        y = self.log_gamma.exp() * x_hat + self.beta
        # compute log_abs_det_jacobian (cf RealNVP paper):
        # log|dy/dx| = log_gamma - 0.5 * log(var + eps), per feature
        log_abs_det_jacobian = self.log_gamma - 0.5 * torch.log(var + self.eps)
        return y, log_abs_det_jacobian.expand_as(x)
    def inverse(self, y, cond_y=None):
        # in training mode this relies on batch_mean/batch_var set by the
        # last forward() call
        if self.training:
            mean = self.batch_mean
            var = self.batch_var
        else:
            mean = self.running_mean
            var = self.running_var
        # undo the affine transform, then the normalization
        x_hat = (y - self.beta) * torch.exp(-self.log_gamma)
        x = x_hat * torch.sqrt(var + self.eps) + mean
        # log|dx/dy| is the negation of forward's log-det
        log_abs_det_jacobian = 0.5 * torch.log(var + self.eps) - self.log_gamma
        return x, log_abs_det_jacobian.expand_as(x)
class LinearMaskedCoupling(nn.Module):
    """Modified RealNVP coupling layer per the MAF paper.

    ``mask`` selects the identity half of the input; the other half is
    transformed by an affine map whose scale/translation are computed (by
    MLPs) from the masked input and, optionally, a conditioning input ``y``.
    """
    def __init__(self, input_size, hidden_size, n_hidden, mask, cond_label_size=None):
        super().__init__()
        self.register_buffer("mask", mask)
        # scale function: MLP with Tanh activations
        s_net = [
            nn.Linear(
                input_size + (cond_label_size if cond_label_size is not None else 0),
                hidden_size,
            )
        ]
        for _ in range(n_hidden):
            s_net += [nn.Tanh(), nn.Linear(hidden_size, hidden_size)]
        s_net += [nn.Tanh(), nn.Linear(hidden_size, input_size)]
        self.s_net = nn.Sequential(*s_net)
        # translation function: same architecture as s_net ...
        self.t_net = copy.deepcopy(self.s_net)
        # ... but replace Tanh with ReLU's per MAF paper
        for i in range(len(self.t_net)):
            if not isinstance(self.t_net[i], nn.Linear):
                self.t_net[i] = nn.ReLU()
    def forward(self, x, y=None):
        # apply mask: the networks only see the untransformed half
        mx = x * self.mask
        # run through model (conditioning input, if any, is concatenated)
        s = self.s_net(mx if y is None else torch.cat([y, mx], dim=-1))
        t = self.t_net(mx if y is None else torch.cat([y, mx], dim=-1)) * (
            1 - self.mask
        )
        # cf RealNVP eq 8 where u corresponds to x (here we're modeling u);
        # tanh bounds the log-scale, and (1 - mask) zeroes it on the
        # identity half so that half passes through unchanged
        log_s = torch.tanh(s) * (1 - self.mask)
        u = x * torch.exp(log_s) + t
        # log det du/dx; cf RealNVP eqs 8 and 6 (summed over features by
        # the caller's log_prob)
        log_abs_det_jacobian = log_s
        return u, log_abs_det_jacobian
    def inverse(self, u, y=None):
        # apply mask (identity half is unchanged, so mu equals mx above)
        mu = u * self.mask
        # run through model: same s/t as forward, then invert the affine map
        s = self.s_net(mu if y is None else torch.cat([y, mu], dim=-1))
        t = self.t_net(mu if y is None else torch.cat([y, mu], dim=-1)) * (
            1 - self.mask
        )
        log_s = torch.tanh(s) * (1 - self.mask)
        x = (u - t) * torch.exp(-log_s)
        # log det dx/du is the negation of forward's log-det
        log_abs_det_jacobian = -log_s
        return x, log_abs_det_jacobian
class MaskedLinear(nn.Linear):
    """MADE building block: a linear layer with an elementwise weight mask,
    plus an optional unmasked conditioning term from label input ``y``."""

    def __init__(self, input_size, n_outputs, mask, cond_label_size=None):
        super().__init__(input_size, n_outputs)
        self.register_buffer("mask", mask)
        self.cond_label_size = cond_label_size
        if cond_label_size is not None:
            # conditioning weights, uniform init scaled by 1/sqrt(fan_in)
            init = torch.rand(n_outputs, cond_label_size) / math.sqrt(cond_label_size)
            self.cond_weight = nn.Parameter(init)

    def forward(self, x, y=None):
        # mask the weights so the layer stays autoregressive
        out = F.linear(x, self.mask * self.weight, self.bias)
        if y is not None:
            out = out + F.linear(y, self.cond_weight)
        return out
class MADE(nn.Module):
    """Masked autoencoder for distribution estimation, used here as one
    autoregressive flow step: forward maps data x to latents u via
    ``u = (x - m(x)) * exp(-loga(x))`` with masked networks producing
    m and loga (MAF eq 4)."""
    def __init__(
        self,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        activation="ReLU",
        input_order="sequential",
        input_degrees=None,
    ):
        """
        Args:
            input_size -- scalar; dim of inputs
            hidden_size -- scalar; dim of hidden layers
            n_hidden -- scalar; number of hidden layers
            cond_label_size -- scalar or None; dim of the conditioning input, if any
            activation -- str; activation function to use ("ReLU" or "Tanh")
            input_order -- str; variable order for creating the autoregressive masks (sequential|random)
            input_degrees -- tensor or None; the order flipped from the previous layer in a stack of MADEs
        """
        super().__init__()
        # base distribution for calculation of log prob under the model
        self.register_buffer("base_dist_mean", torch.zeros(input_size))
        self.register_buffer("base_dist_var", torch.ones(input_size))
        # create masks enforcing the autoregressive structure; keep the
        # input degrees so a stacked MADE can flip the variable order
        masks, self.input_degrees = create_masks(
            input_size, hidden_size, n_hidden, input_order, input_degrees
        )
        # setup activation
        if activation == "ReLU":
            activation_fn = nn.ReLU()
        elif activation == "Tanh":
            activation_fn = nn.Tanh()
        else:
            raise ValueError("Check activation function.")
        # construct model: masked input layer, masked hidden layers, and a
        # final masked layer emitting both m and loga (hence 2 * input_size
        # outputs, with the output mask repeated for the two halves)
        self.net_input = MaskedLinear(
            input_size, hidden_size, masks[0], cond_label_size
        )
        self.net = []
        for m in masks[1:-1]:
            self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
        self.net += [
            activation_fn,
            MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2, 1)),
        ]
        self.net = nn.Sequential(*self.net)
    @property
    def base_dist(self):
        # standard normal over the latent space
        return Normal(self.base_dist_mean, self.base_dist_var)
    def forward(self, x, y=None):
        # MAF eq 4 -- the net returns mean and log std in one tensor
        m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=-1)
        u = (x - m) * torch.exp(-loga)
        # MAF eq 5: log|du/dx| per dimension
        log_abs_det_jacobian = -loga
        return u, log_abs_det_jacobian
    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        # MAF eq 3: sequential inversion, one input dimension per pass in
        # autoregressive (degree) order -- dimension i only depends on the
        # already-filled dimensions of x
        x = torch.zeros_like(u)
        # run through reverse model
        for i in self.input_degrees:
            m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=-1)
            x[..., i] = u[..., i] * torch.exp(loga[..., i]) + m[..., i]
        # loga from the final pass; log|dx/du| per dimension
        log_abs_det_jacobian = loga
        return x, log_abs_det_jacobian
    def log_prob(self, x, y=None):
        # change of variables: log N(u) + log|du/dx|, summed over dims
        u, log_abs_det_jacobian = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=-1)
class Flow(nn.Module):
    """Base class for normalizing flows with an optional data scale.

    Subclasses populate ``self.net`` with a module exposing
    ``forward(x, cond) -> (u, log_det)`` and ``inverse(u, cond)``.
    The base distribution is a standard normal; when ``scale`` is set,
    data is divided by it before entering the flow (and multiplied back
    on sampling), with the log-det adjusted accordingly.
    """

    def __init__(self, input_size):
        super().__init__()
        self.__scale = None
        self.net = None
        # base distribution for calculation of log prob under the model
        self.register_buffer("base_dist_mean", torch.zeros(input_size))
        self.register_buffer("base_dist_var", torch.ones(input_size))

    @property
    def base_dist(self):
        return Normal(self.base_dist_mean, self.base_dist_var)

    @property
    def scale(self):
        return self.__scale

    @scale.setter
    def scale(self, scale):
        self.__scale = scale

    def forward(self, x, cond):
        """Map data x -> latent u; returns (u, log|det du/dx|)."""
        if self.scale is not None:
            # Out-of-place divide: the previous in-place ``x /= self.scale``
            # mutated the caller's tensor (and raises on leaf tensors that
            # require grad).
            x = x / self.scale
        u, log_abs_det_jacobian = self.net(x, cond)
        if self.scale is not None:
            # d(x/s)/dx contributes -log|s| to the log-det
            log_abs_det_jacobian = log_abs_det_jacobian - torch.log(
                torch.abs(self.scale)
            )
        return u, log_abs_det_jacobian

    def inverse(self, u, cond):
        """Map latent u -> data x; returns (x, log|det dx/du|)."""
        x, log_abs_det_jacobian = self.net.inverse(u, cond)
        if self.scale is not None:
            x = x * self.scale
            log_abs_det_jacobian = log_abs_det_jacobian + torch.log(
                torch.abs(self.scale)
            )
        return x, log_abs_det_jacobian

    def log_prob(self, x, cond):
        """Log density of x under the flow (change of variables)."""
        u, sum_log_abs_det_jacobians = self.forward(x, cond)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=-1)

    def sample(self, sample_shape=torch.Size(), cond=None):
        """Draw samples; when ``cond`` is given, one sample per cond row."""
        shape = cond.shape[:-1] if cond is not None else sample_shape
        u = self.base_dist.sample(shape)
        sample, _ = self.inverse(u, cond)
        return sample
class RealNVP(Flow):
    """RealNVP: a stack of masked affine coupling layers with alternating
    binary masks, optionally interleaved with flow BatchNorm layers."""

    def __init__(
        self,
        n_blocks,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        batch_norm=True,
    ):
        super().__init__(input_size)
        # even/odd mask, flipped after every coupling block so all
        # dimensions get transformed
        mask = torch.arange(input_size).float() % 2
        layers = []
        for _ in range(n_blocks):
            layers.append(
                LinearMaskedCoupling(
                    input_size, hidden_size, n_hidden, mask, cond_label_size
                )
            )
            mask = 1 - mask
            if batch_norm:
                layers.append(BatchNorm(input_size))
        self.net = FlowSequential(*layers)
class MAF(Flow):
    """Masked autoregressive flow: a stack of MADE layers whose variable
    order is flipped from one layer to the next, optionally interleaved
    with flow BatchNorm layers."""

    def __init__(
        self,
        n_blocks,
        input_size,
        hidden_size,
        n_hidden,
        cond_label_size=None,
        activation="ReLU",
        input_order="sequential",
        batch_norm=True,
    ):
        super().__init__(input_size)
        layers = []
        self.input_degrees = None
        for _ in range(n_blocks):
            made = MADE(
                input_size,
                hidden_size,
                n_hidden,
                cond_label_size,
                activation,
                input_order,
                self.input_degrees,
            )
            # the next MADE uses this one's variable order reversed
            self.input_degrees = made.input_degrees.flip(0)
            layers.append(made)
            if batch_norm:
                layers.append(BatchNorm(input_size))
        self.net = FlowSequential(*layers)
| 13,996 | 32.646635 | 177 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/feature.py | from typing import List, Optional
import torch
import torch.nn as nn
class FeatureEmbedder(nn.Module):
    """One embedding table per categorical feature; the per-feature
    embeddings are concatenated along the last axis."""

    def __init__(self, cardinalities: List[int], embedding_dims: List[int],) -> None:
        super().__init__()
        assert len(cardinalities) == len(embedding_dims), 'the length of two variables should match'
        self._num_features = len(cardinalities)
        self._embedders = nn.ModuleList(
            nn.Embedding(card, dim)
            for card, dim in zip(cardinalities, embedding_dims)
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Embed ``features`` of shape (..., num_features) into
        (..., sum(embedding_dims))."""
        if self._num_features > 1:
            # slice the last axis into one (..., 1) chunk per feature
            slices = torch.chunk(features, self._num_features, dim=-1)
        else:
            slices = [features]
        embedded = [
            embedder(s.squeeze(-1))
            for embedder, s in zip(self._embedders, slices)
        ]
        return torch.cat(embedded, dim=-1)
class FeatureAssembler(nn.Module):
    """Tiles static features along a time axis of length ``T``, optionally
    embeds the categorical groups, and concatenates everything into one
    (N, T, C) feature tensor."""

    def __init__(
        self,
        T: int,
        embed_static: Optional[FeatureEmbedder] = None,
        embed_dynamic: Optional[FeatureEmbedder] = None,
    ) -> None:
        super().__init__()
        self.T = T
        self.embeddings = nn.ModuleDict(
            {"embed_static": embed_static, "embed_dynamic": embed_dynamic}
        )

    def forward(
        self,
        feat_static_cat: torch.Tensor,
        feat_static_real: torch.Tensor,
        feat_dynamic_cat: torch.Tensor,
        feat_dynamic_real: torch.Tensor,
    ) -> torch.Tensor:
        parts = (
            self.process_static_cat(feat_static_cat),
            self.process_static_real(feat_static_real),
            self.process_dynamic_cat(feat_dynamic_cat),
            self.process_dynamic_real(feat_dynamic_real),
        )
        return torch.cat(parts, dim=-1)

    def process_static_cat(self, feature: torch.Tensor) -> torch.Tensor:
        embedder = self.embeddings["embed_static"]
        if embedder is not None:
            feature = embedder(feature)
        # tile over the time axis: (N, C) -> (N, T, C)
        return feature.unsqueeze(1).expand(-1, self.T, -1).float()

    def process_dynamic_cat(self, feature: torch.Tensor) -> torch.Tensor:
        embedder = self.embeddings["embed_dynamic"]
        return feature.float() if embedder is None else embedder(feature)

    def process_static_real(self, feature: torch.Tensor) -> torch.Tensor:
        # tile over the time axis: (N, C) -> (N, T, C)
        return feature.unsqueeze(1).expand(-1, self.T, -1)

    def process_dynamic_real(self, feature: torch.Tensor) -> torch.Tensor:
        # already (N, T, C); passed through unchanged
        return feature
| 2,938 | 32.397727 | 100 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/activation.py | from typing import Optional, Union, List, Tuple
# Third-party imports
import torch.nn as nn
from torch import Tensor
class Activation(nn.Module):
    """
    Thin wrapper selecting an activation function by name.

    Parameters
    ----------
    activation
        One of 'identity', 'relu', 'sigmoid', 'tanh'.
    """

    def __init__(
        self,
        activation: Optional[str] = "identity",
    ):
        super().__init__()
        # resolve the name once at construction; unknown names raise KeyError
        self.activation = {
            'identity': nn.Identity(),
            'relu': nn.ReLU(),
            'sigmoid': nn.Sigmoid(),
            'tanh': nn.Tanh(),
        }[activation]

    def forward(self, data: Tensor) -> Tensor:
        """
        Apply the selected activation elementwise.

        Parameters
        ----------
        data
            Tensor of any shape.

        Returns
        -------
        Tensor
            activation(data); same shape as the input.
        """
        return self.activation(data)
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/cnn.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Optional, Union, List, Tuple
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.modules.block.activation import Activation
class CausalConv1D(nn.Module):
    """
    1D causal temporal convolution: output[t] does not depend on
    input[t+1:]. This is the basic structure used in WaveNet [ODZ+16]_ and
    Temporal Convolution Networks [BKK18]_.

    The output has the same length as the input; ``nn.Conv1d`` pads BOTH
    sides, so the extra right-side samples are trimmed again in ``forward``.

    Parameters
    ----------
    in_channels
        Number of input channels.
    out_channels
        Number of output channels (filters).
    kernel_size
        Dimension of the convolution window.
    dilation
        Dilation rate for dilated convolution.
    activation
        Activation function name (see ``Activation``).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        dilation: int = 1,
        activation: Optional[str] = "identity",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.dilation = dilation
        self.kernel_size = kernel_size
        # amount of left context needed for causality
        self.padding = dilation * (kernel_size - 1)
        self.conv1d = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=self.padding,
            **kwargs,
        )
        self.activation = Activation(activation)

    def forward(self, data: Tensor) -> Tensor:
        """
        Parameters
        ----------
        data
            Shape (batch_size, num_features, sequence_length) — NCW layout.

        Returns
        -------
        Tensor
            Causal conv1d output, shape
            (batch_size, out_channels, sequence_length).
        """
        ct = self.conv1d(data)
        ct = self.activation(ct)
        # Trim the right-side padding added by nn.Conv1d. The guard must be
        # on ``padding``: the previous ``kernel_size > 0`` check made
        # ``ct[:, :, :-0]`` an EMPTY slice whenever kernel_size == 1
        # (padding == 0), returning a zero-length output.
        if self.padding > 0:
            ct = ct[:, :, : -self.padding]
        return ct
class DilatedCausalGated(nn.Module):
    """
    Gated dilated causal convolution (WaveNet): ``tanh(conv) * sigmoid(conv)``
    followed by a 1x1 output projection.

    Parameters
    ----------
    in_channels
        Dimensionality of the input space.
    inner_channels
        Dimensionality of the intermediate (gated) space.
    out_channels
        Dimensionality of the output space.
    kernel_size
        Dimension of the convolution window.
    dilation
        Dilation rate for the dilated convolutions.
    """

    def __init__(
        self,
        in_channels: int,
        inner_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int], List[int]],
        dilation: Union[int, Tuple[int], List[int]],
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        conv_args = dict(
            in_channels=in_channels,
            out_channels=inner_channels,
            kernel_size=kernel_size,
            dilation=dilation,
        )
        # filter branch (tanh) and gate branch (sigmoid)
        self.conv1 = CausalConv1D(activation="tanh", **conv_args)
        self.conv2 = CausalConv1D(activation="sigmoid", **conv_args)
        self.output_conv = nn.Conv1d(
            in_channels=inner_channels, out_channels=out_channels, kernel_size=1
        )

    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            Input features, shape (batch_size, num_features, sequence_length).

        Returns
        -------
        Tensor
            Output, shape (batch_size, out_channels, sequence_length).
        """
        gated = self.conv1(x) * self.conv2(x)
        return self.output_conv(gated)
class ResidualSequential(nn.Sequential):
    """
    Sequential stack with residual connections: each layer after the first
    receives the running sum of its predecessors' outputs, and the module
    returns the sum of all layer outputs.
    """

    def __init__(self, *args, **kwargs):
        # Forward positional child modules like nn.Sequential; the old
        # ``**kwargs``-only signature could not accept children at all.
        super(ResidualSequential, self).__init__(*args, **kwargs)

    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            Input tensor.

        Returns
        -------
        Tensor
            Sum of all layer outputs.
        """
        outs = []
        # Iterate the container itself: the previous
        # ``self._children.values()`` is an MXNet Gluon attribute that does
        # not exist on PyTorch modules (AttributeError at runtime).
        for i, block in enumerate(self):
            out = block(x)
            outs.append(out)
            # first layer replaces x; later layers add their output to it
            x = out if i == 0 else x + out
        return sum(outs)
| 5,557 | 26.37931 | 83 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/mlp.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.activation import Activation
class MLP(nn.Module):
    """
    Multi-layer perceptron: one Linear + activation pair per entry of
    ``layer_sizes``.

    Parameters
    ----------
    input_size
        Dimension of the input features.
    layer_sizes
        Number of hidden units per layer.
    activation
        Activation function name applied after every linear layer
        (default "relu"; see ``Activation``).
    """

    @validated()
    def __init__(
        self, input_size, layer_sizes: List[int], activation="relu"
    ) -> None:
        super().__init__()
        self.layer_sizes = layer_sizes
        self.layers = nn.Sequential()
        prev_dim = input_size
        for idx, dim in enumerate(layer_sizes):
            self.layers.add_module('linear_%02d' % (idx), nn.Linear(prev_dim, dim))
            self.layers.add_module('%s_%02d' % (activation, idx), Activation(activation))
            prev_dim = dim

    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            Input tensor.

        Returns
        -------
        Tensor
            Output of the MLP given the input tensor.
        """
        return self.layers(x)
| 2,023 | 26.726027 | 94 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/encoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Tuple
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.cnn import CausalConv1D
#from pts.modules.block.mlp import MLP
#from pts.modules.block.rnn import RNN
class Seq2SeqEncoder(nn.Module):
    """
    Abstract class for the encoder. An encoder takes a `target` sequence with
    corresponding covariates and maps it into a static latent and
    a dynamic latent code with the same length as the `target` sequence.
    """
    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    # noinspection PyMethodOverriding
    def forward(
        self,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            static code,
            shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        # abstract: concrete encoders implement the actual encoding
        raise NotImplementedError
    @staticmethod
    def _assemble_inputs(
        target: Tensor, static_features: Tensor, dynamic_features: Tensor
    ) -> Tensor:
        """
        Assemble features from target, static features, and the dynamic
        features.
        Parameters
        ----------
        target
            target time series,
            shape (batch_size, sequence_length)
        static_features
            static features,
            shape (batch_size, num_static_features)
        dynamic_features
            dynamic_features,
            shape (batch_size, sequence_length, num_dynamic_features)
        Returns
        -------
        Tensor
            combined features,
            shape (batch_size, sequence_length,
                   num_static_features + num_dynamic_features + 1)
        """
        target = target.unsqueeze(dim=-1)  # (N, T, 1)
        helper_ones = torch.ones_like(target)  # Ones of (N, T, 1)
        # batched matmul of (N, T, 1) x (N, 1, C): multiplying by ones
        # replicates the static features at every time step
        tiled_static_features = torch.einsum('bnm, bmk -> bnk',
                                             helper_ones, static_features.unsqueeze(1)
                                             )  # (N, T, C)
        # concatenate along the feature axis
        inputs = torch.cat(
            [target, tiled_static_features, dynamic_features], dim=2
        )  # (N, T, C)
        return inputs
    @property
    def out_channels(self) -> int:
        """
        the size of output channel
        """
        # abstract: concrete encoders report their output width here
        raise NotImplementedError
class HierarchicalCausalConv1DEncoder(Seq2SeqEncoder):
    """
    Encoder built from a stack of (optionally dilated) causal 1D
    convolutions, in the spirit of WaveNet:

    1. Van Den Oord, A., Dieleman, S., Zen, H., Simonyan, K., Vinyals, O.,
       Graves, A., Kalchbrenner, N., Senior, A.W. and Kavukcuoglu, K., 2016.
       WaveNet: A generative model for raw audio. In SSW (p. 125).

    Parameters
    ----------
    input_size
        number of channels fed to the first convolution.
    dilation_seq
        dilation for each convolution in the stack.
    kernel_size_seq
        kernel size for each convolution in the stack.
    channels_seq
        number of channels for each convolution in the stack.
    use_residual
        if True, the raw target is concatenated to the encoder output.
    use_covariates
        if True, covariates are assembled into the encoder input;
        otherwise only the target is used.
    """

    @validated()
    def __init__(
        self,
        input_size: int,
        dilation_seq: List[int],
        kernel_size_seq: List[int],
        channels_seq: List[int],
        use_residual: bool = False,
        use_covariates: bool = False,
        **kwargs,
    ) -> None:
        assert all(
            [x > 0 for x in dilation_seq]
        ), "`dilation_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in kernel_size_seq]
        ), "`kernel_size_seq` values must be greater than zero"
        assert all(
            [x > 0 for x in channels_seq]
        ), "`channel_dim_seq` values must be greater than zero"
        super().__init__(**kwargs)
        self.use_residual = use_residual
        self.use_covariates = use_covariates
        self.CNNs = nn.Sequential()
        self.last_out_channel = channels_seq[-1]
        prev_channels = input_size
        for layer_no, (out_channels, kernel_size, dilation) in enumerate(
            zip(channels_seq, kernel_size_seq, dilation_seq)
        ):
            # Module names become state-dict keys; keep the zero-padded
            # 'conv_XX' scheme stable.
            self.CNNs.add_module(
                'conv_%02d' % (layer_no),
                CausalConv1D(
                    in_channels=prev_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    activation="relu",
                ),
            )
            prev_channels = out_channels

    def forward(
        self,
        target: Tensor,
        static_features: Tensor,
        dynamic_features: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Parameters
        ----------
        target
            target time series, shape (batch_size, sequence_length)
        static_features
            static features, shape (batch_size, num_static_features)
        dynamic_features
            dynamic features,
            shape (batch_size, sequence_length, num_dynamic_features)

        Returns
        -------
        Tensor
            static code, shape (batch_size, num_static_features)
        Tensor
            dynamic code,
            shape (batch_size, sequence_length, num_dynamic_features)
        """
        if self.use_covariates:
            net_input = Seq2SeqEncoder._assemble_inputs(
                target=target,
                static_features=static_features,
                dynamic_features=dynamic_features,
            )
        else:
            net_input = target
        # The convolutions expect channels-first (NCT); transpose in and
        # back out to channels-last (NTC).
        dynamic_code = self.CNNs(net_input.transpose(1, 2)).transpose(1, 2)
        if self.use_residual:
            dynamic_code = torch.cat(
                [dynamic_code, target.unsqueeze(-1)], dim=2
            )
        # The last time step serves as the static summary of the sequence.
        static_code = torch.squeeze(dynamic_code[:, -1:, ...], dim=1)
        return static_code, dynamic_code

    @property
    def out_channels(self) -> int:
        """
        Size of the output channel dimension.

        NOTE(review): the ``+ 1`` accounts for the residual target channel
        appended in ``forward``, but is returned even when
        ``use_residual`` is False — confirm against callers.
        """
        return self.last_out_channel + 1
'''
class RNNEncoder(Seq2SeqEncoder):
"""
Defines an RNN as the encoder.
Parameters
----------
mode
type of the RNN. Can be either: rnn_relu (RNN with relu activation),
rnn_tanh, (RNN with tanh activation), lstm or gru.
hidden_size
number of units per hidden layer.
num_layers
number of hidden layers.
bidirectional
toggle use of bi-directional RNN as encoder.
"""
@validated()
def __init__(
self,
mode: str,
hidden_size: int,
num_layers: int,
bidirectional: bool,
**kwargs,
) -> None:
assert num_layers > 0, "`num_layers` value must be greater than zero"
assert hidden_size > 0, "`hidden_size` value must be greater than zero"
super().__init__(**kwargs)
self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
dynamic_code = self.rnn(target)
static_code = dynamic_code[:, -1:, ...]
return static_code, dynamic_code
class MLPEncoder(Seq2SeqEncoder):
"""
Defines a multilayer perceptron used as an encoder.
Parameters
----------
layer_sizes
number of hidden units per layer.
kwargs
"""
@validated()
def __init__(self, layer_sizes: List[int], **kwargs) -> None:
super().__init__(**kwargs)
self.model = MLP(layer_sizes, flatten=True)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
inputs = Seq2SeqEncoder._assemble_inputs(
target, static_features, dynamic_features
)
static_code = self.model(inputs)
dynamic_code = torch.zeros_like(target).unsqueeze(2)
return static_code, dynamic_code
class RNNCovariateEncoder(Seq2SeqEncoder):
"""
Defines RNN encoder that uses covariates and target as input to the RNN.
Parameters
----------
mode
type of the RNN. Can be either: rnn_relu (RNN with relu activation),
rnn_tanh, (RNN with tanh activation), lstm or gru.
hidden_size
number of units per hidden layer.
num_layers
number of hidden layers.
bidirectional
toggle use of bi-directional RNN as encoder.
"""
@validated()
def __init__(
self,
mode: str,
hidden_size: int,
num_layers: int,
bidirectional: bool,
**kwargs,
) -> None:
assert num_layers > 0, "`num_layers` value must be greater than zero"
assert hidden_size > 0, "`hidden_size` value must be greater than zero"
super().__init__(**kwargs)
self.rnn = RNN(mode, hidden_size, num_layers, bidirectional)
def forward(
self,
target: Tensor,
static_features: Tensor,
dynamic_features: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Parameters
----------
target
target time series,
shape (batch_size, sequence_length)
static_features
static features,
shape (batch_size, num_static_features)
dynamic_features
dynamic_features,
shape (batch_size, sequence_length, num_dynamic_features)
Returns
-------
Tensor
static code,
shape (batch_size, num_static_features)
Tensor
dynamic code,
shape (batch_size, sequence_length, num_dynamic_features)
"""
inputs = Seq2SeqEncoder._assemble_inputs(
target, static_features, dynamic_features
)
dynamic_code = self.rnn(inputs)
# using the last state as the static code,
# but not working as well as the concat of all the previous states
static_code = torch.squeeze(dynamic_code[:, -1:, ...], dim=1)
return static_code, dynamic_code
''' | 12,696 | 26.188437 | 118 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/enc2dec.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Tuple
# Third-party imports
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
class Seq2SeqEnc2Dec(nn.Module):
    """
    Abstract class for any module that passes encoder outputs to the
    decoder, such as an attention network.
    """

    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # noinspection PyMethodOverriding
    def forward(
        self,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)

        Returns
        -------
        Tensor
            shape (batch_size, num_features) or (N, C)
        Tensor
            shape (batch_size, prediction_length, num_features) or (N, T, C)
        Tensor
            shape (batch_size, sequence_length, num_features) or (N, T, C)
        """
        # Consistency fix: the original body was `pass`, so a subclass
        # that forgot to override would silently return None and only
        # fail later during tuple unpacking. The sibling abstract base
        # (Seq2SeqEncoder.forward) raises NotImplementedError instead.
        raise NotImplementedError
class PassThroughEnc2Dec(Seq2SeqEnc2Dec):
    """
    Identity bridge between encoder and decoder: hands every input
    through unchanged.
    """

    def forward(
        self,
        encoder_output_static: Tensor,
        encoder_output_dynamic: Tensor,
        future_features: Tensor,
    ) -> Tuple[Tensor, Tensor, Tensor]:
        """
        Parameters
        ----------
        encoder_output_static
            shape (batch_size, num_features) or (N, C)
        encoder_output_dynamic
            shape (batch_size, context_length, num_features) or (N, T, C)
        future_features
            shape (batch_size, prediction_length, num_features) or (N, T, C)

        Returns
        -------
        Tuple[Tensor, Tensor, Tensor]
            the three inputs, returned unchanged and in the same order.
        """
        passthrough = (
            encoder_output_static,
            encoder_output_dynamic,
            future_features,
        )
        return passthrough
| 2,929 | 25.636364 | 77 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/decoder.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List
# Third-party imports
import torch
import torch.nn as nn
from torch import Tensor
# First-party imports
from pts.core.component import validated
from pts.modules.block.mlp import MLP
class Seq2SeqDecoder(nn.Module):
    """
    Abstract class for the Decoder block in sequence-to-sequence models.
    """
    @validated()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    # noinspection PyMethodOverriding
    # NOTE(review): `OneShotDecoder.forward` takes (static_input,
    # dynamic_input) — the opposite order of this base signature; confirm
    # call sites before relying on positional arguments.
    def forward(
        self, dynamic_input: Tensor, static_input: Tensor
    ) -> None:
        """
        Abstract function definition of the forward.
        Parameters
        ----------
        dynamic_input
            dynamic_features, shape (batch_size, sequence_length, num_features)
            or (N, T, C)
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        """
        pass  # subclasses must override; the base silently returns None
'''
class ForkingMLPDecoder(Seq2SeqDecoder):
"""
Multilayer perceptron decoder for sequence-to-sequence models.
See [WTN+17]_ for details.
Parameters
----------
dec_len
length of the decoder (usually the number of forecasted time steps).
final_dim
dimensionality of the output per time step (number of predicted
quantiles).
hidden_dimension_sequence
number of hidden units for each MLP layer.
"""
@validated()
def __init__(
self,
dec_len: int,
final_dim: int,
hidden_dimension_sequence: List[int] = list([]),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dec_len = dec_len
self.final_dims = final_dim
self.model = nn.Sequential()
for layer_no, layer_dim in enumerate(hidden_dimension_sequence):
layer = nn.Linear(
dec_len * layer_dim,
flatten=False,
activation="relu",
prefix=f"mlp_{layer_no:#02d}'_",
)
self.model.add(layer)
layer = nn.Linear(
dec_len * final_dim,
flatten=False,
activation="softrelu",
prefix=f"mlp_{len(hidden_dimension_sequence):#02d}'_",
)
self.model.add(layer)
def forward(
self, dynamic_input: Tensor, static_input: Tensor = None
) -> Tensor:
"""
ForkingMLPDecoder forward call.
Parameters
----------
dynamic_input
dynamic_features, shape (batch_size, sequence_length, num_features)
or (N, T, C).
static_input
not used in this decoder.
Returns
-------
Tensor
mlp output, shape (0, 0, dec_len, final_dims).
"""
mlp_output = self.model(dynamic_input)
mlp_output = mlp_output.reshape(
shape=(0, 0, self.dec_len, self.final_dims)
)
return mlp_output
'''
class OneShotDecoder(Seq2SeqDecoder):
    """
    Decoder that expands the static code once and predicts every one of
    the ``decoder_length`` time steps in a single shot through an MLP.

    Parameters
    ----------
    input_size
        dimensionality of the static input code.
    decoder_length
        length of the decoder (number of time steps).
    layer_sizes
        dimensions of the MLP hidden layers.
    static_outputs_per_time_step
        number of static-derived channels per decoded time step.
    """

    @validated()
    def __init__(
        self,
        input_size: int,
        decoder_length: int,
        layer_sizes: List[int],
        static_outputs_per_time_step: int,
    ) -> None:
        super().__init__()
        self.decoder_length = decoder_length
        self.static_outputs_per_time_step = static_outputs_per_time_step
        # Maps the static code to `static_outputs_per_time_step` channels
        # for each decoded time step.
        self.expander = nn.Linear(
            input_size,
            decoder_length * static_outputs_per_time_step
        )
        # TODO: the `4` hard-codes the covariate dimension (original TODO).
        mlp_input_size = 4 + static_outputs_per_time_step
        self.mlp = MLP(mlp_input_size, layer_sizes)

    def forward(
        self,
        static_input: Tensor,  # (batch_size, static_input_dim)
        dynamic_input: Tensor,
    ) -> Tensor:
        """
        OneShotDecoder forward call.

        NOTE(review): argument order (static first) is flipped relative to
        the ``Seq2SeqDecoder`` base signature — confirm call sites.

        Parameters
        ----------
        static_input
            static features, shape (batch_size, num_features) or (N, C)
        dynamic_input
            dynamic features, shape (batch_size, sequence_length,
            num_features) or (N, T, C)

        Returns
        -------
        Tensor
            mlp output, shape (batch_size, dec_len, size of last layer)
        """
        expanded_static = self.expander(static_input).reshape(
            (-1, self.decoder_length, self.static_outputs_per_time_step)
        )
        features = torch.cat([dynamic_input, expanded_static], dim=2)
        return self.mlp(features)  # (N, T, layer_sizes[-1])
| 5,368 | 25.979899 | 100 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/block/quantile_output.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# Third-party imports
# First-party imports
from pts.core.component import validated
class QuantileLoss(nn.Module):
    @validated()
    def __init__(
        self,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
        **kwargs,
    ) -> None:
        """
        Represents the quantile loss used to fit decoders that learn quantiles.
        Parameters
        ----------
        quantiles
            list of quantiles to compute loss over.
        quantile_weights
            weights of the quantiles; defaults to uniform weights.
        """
        super().__init__(**kwargs)
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        # Bug fix: keep the weights as a tensor. `forward` indexes them and
        # calls `.detach()`, which raised AttributeError when the caller
        # passed a plain Python list of floats.
        self.quantile_weights = (
            torch.ones(self.num_quantiles) / self.num_quantiles
            if not quantile_weights
            else torch.tensor(quantile_weights)
        )
    # noinspection PyMethodOverriding
    def forward(
        self, y_true: Tensor, y_pred: Tensor, sample_weight=None
    ):
        """
        Compute the weighted average of quantile losses.
        Parameters
        ----------
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1))
        y_pred
            predicted target, shape (N1 x N2 x ... x Nk x num_quantiles)
        sample_weight
            sample weights
        Returns
        -------
        Tensor
            weighted average of the quantile losses, shape N1 x N2 x ... Nk
        """
        # One prediction slice per quantile along the last axis.
        y_pred_all = y_pred.chunk(self.num_quantiles, dim=-1)
        qt_loss = []
        for i, y_pred_q in enumerate(y_pred_all):
            q = self.quantiles[i]
            weighted_qt = (
                self.compute_quantile_loss(y_true, y_pred_q.squeeze(-1), q)
                * self.quantile_weights[i].detach()
            )
            qt_loss.append(weighted_qt)
        stacked_qt_losses = torch.stack(qt_loss, dim=-1)
        sum_qt_loss = torch.mean(
            stacked_qt_losses, dim=-1
        )  # avg across quantiles
        if sample_weight is not None:
            return sample_weight * sum_qt_loss
        else:
            return sum_qt_loss
    @staticmethod
    def compute_quantile_loss(
        y_true: Tensor, y_pred_p: Tensor, p: float
    ) -> Tensor:
        """
        Compute the quantile loss of the given quantile.
        Parameters
        ----------
        y_true
            true target, shape (N1 x N2 x ... x Nk x dimension of time series
            (normally 1)).
        y_pred_p
            predicted target quantile, shape (N1 x N2 x ... x Nk x 1).
        p
            quantile error to compute the loss.
        Returns
        -------
        Tensor
            quantile loss, shape: (N1 x N2 x ... x Nk x 1)
        """
        # Pinball loss, scaled by 2 so that p = 0.5 matches the absolute
        # error.
        under_bias = p * F.relu(y_true - y_pred_p)
        over_bias = (1 - p) * F.relu(y_pred_p - y_true)
        qt_loss = 2 * (under_bias + over_bias)
        return qt_loss
class ProjectParams(nn.Module):
    """
    Defines a dense layer to compute the projection weights into the quantile
    space.
    Parameters
    ----------
    input_size
        number of input features of the dense layer.
    num_quantiles
        number of quantiles to compute the projection.
    """
    @validated()
    def __init__(self, input_size, num_quantiles, **kwargs):
        super().__init__(**kwargs)
        # One output unit per target quantile.
        self.projection = nn.Linear(input_size, num_quantiles)
    # noinspection PyMethodOverriding,PyPep8Naming
    def forward(self, x: Tensor) -> Tensor:
        """
        Parameters
        ----------
        x
            input tensor
        Returns
        -------
        Tensor
            output of the projection layer (last dimension = num_quantiles)
        """
        return self.projection(x)
class QuantileOutput:
    """
    Output layer using a quantile loss and projection layer to connect the
    quantile output to the network.
    Parameters
    ----------
    input_size
        number of input features of the projection layer.
    quantiles
        list of quantiles to compute loss over.
    quantile_weights
        weights of the quantiles.
    """
    @validated()
    def __init__(
        self,
        input_size: int,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
    ) -> None:
        self.input_size = input_size
        self.quantiles = quantiles
        self.quantile_weights = quantile_weights
    def get_loss(self) -> nn.Module:
        """
        Returns
        -------
        nn.Module
            constructs quantile loss object.
        """
        return QuantileLoss(
            quantiles=self.quantiles, quantile_weights=self.quantile_weights
        )
    def get_quantile_proj(self, **kwargs) -> nn.Module:
        """
        Returns
        -------
        nn.Module
            constructs projection parameter object.
        """
        return ProjectParams(self.input_size, len(self.quantiles), **kwargs)
| 5,592 | 25.258216 | 79 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution/constant.py | import torch
from torch.distributions.distribution import Distribution
class ConstantDistribution(Distribution):
    r"""
    Deterministic (zero-variance) distribution: all mass sits at ``mu``.

    Args:
        loss_type: "L1" or "L2"; selects the penalty used as the
            negative log-probability.
        mu (Tensor): mean
    """

    def __init__(self, loss_type, mu, validate_args=None):
        self.loss_type = loss_type
        self.mu = mu
        super().__init__(mu.size(), validate_args=validate_args)

    @property
    def mean(self):
        return self.mu

    @property
    def variance(self):
        # Point mass => zero variance everywhere.
        return torch.zeros_like(self.mu)

    def sample(self, sample_shape=torch.Size()):
        # NOTE(review): `sample_shape` is ignored; a single draw shaped
        # like `mu` is always returned — confirm callers expect this.
        return self.mu * torch.ones_like(self.mu)

    def log_prob(self, y_true):
        if self.loss_type == "L1":
            penalty = torch.abs(y_true - self.mu)
        elif self.loss_type == "L2":
            penalty = (y_true - self.mu) ** 2
        else:
            raise NotImplementedError
        # The negative penalty plays the role of a log-density.
        return -penalty
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution/tweedie.py | import torch
import numpy as np
from torch.distributions.distribution import Distribution
def est_lambda(mu, p):
    """Poisson rate of the compound representation: mu**(2-p) / (2-p)."""
    exponent = 2 - p
    return mu ** exponent / exponent
def est_alpha(p):
    """Gamma shape parameter implied by the Tweedie power ``p``."""
    numerator = 2 - p
    denominator = p - 1
    return numerator / denominator
def est_beta(mu, p):
    """Gamma rate parameter implied by mean ``mu`` and power ``p``."""
    scale_term = mu ** (1 - p)
    return scale_term / (p - 1)
class Tweedie(Distribution):
r"""
Creates a Tweedie distribution, i.e. distribution
Args:
log_mu (Tensor): log(mean)
rho (Tensor): tweedie_variance_power (1 ~ 2)
"""
    def __init__(self, log_mu, rho, validate_args=None):
        """
        Parameters
        ----------
        log_mu
            log of the distribution mean, one entry per batch element.
        rho
            Tweedie variance power, expected in (1, 2).
        """
        self.log_mu = log_mu
        self.rho = rho
        # Batch shape follows the parameter tensor.
        batch_shape = log_mu.size()
        super(Tweedie, self).__init__(batch_shape, validate_args=validate_args)
    @property
    def mean(self):
        # The mean is parameterized on the log scale.
        return torch.exp(self.log_mu)
@property
def variance(self):
return torch.ones_line(self.log_mu) #TODO need to be assigned
    def sample(self, sample_shape=torch.Size()):
        """Draw samples via the compound Poisson-gamma representation.

        For power ``rho`` in (1, 2) a Tweedie variable is a Poisson sum of
        iid Gamma terms; the sum of ``N`` Gamma(alpha, beta) variables is
        itself Gamma(N * alpha, beta), sampled here in one shot. Elements
        with ``N == 0`` are exactly zero.
        """
        shape = self._extended_shape(sample_shape)  # NOTE(review): computed but unused
        mu = self.mean
        p = self.rho
        phi = 1 #TODO dispersion is hard-coded to 1
        rate = est_lambda(mu, p) / phi #rate for poisson
        alpha = est_alpha(p) #alpha for Gamma distribution
        beta = est_beta(mu, p) / phi #beta for Gamma distribution
        N = torch.poisson(rate)
        gamma = torch.distributions.gamma.Gamma(N*alpha, beta)
        samples = gamma.sample()
        samples[N==0] = 0
        return samples
def log_prob(self, y_true):
rho = self.rho
y_pred = self.log_mu
a = y_true * torch.exp((1 - rho) * y_pred) / (1 - rho)
b = torch.exp((2 - rho) * y_pred) / (2 - rho)
return a - b | 1,660 | 24.166667 | 79 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/serde.py | import itertools
import json
import math
import textwrap
from functools import singledispatch
from pydoc import locate
from typing import Any, Optional, cast, NamedTuple
import numpy as np
import torch
from pts.core import fqname_for
bad_type_msg = textwrap.dedent(
"""
Cannot serialize type {}. See the documentation of the `encode` and
`validate` functions at
http://gluon-ts.mxnet.io/api/gluonts/gluonts.html
and the Python documentation of the `__getnewargs_ex__` magic method at
https://docs.python.org/3/library/pickle.html#object.__getnewargs_ex__
for more information how to make this type serializable.
"""
).lstrip()
def dump_code(o: Any) -> str:
    """
    Serializes an object to a Python code string.
    Parameters
    ----------
    o
        The object to serialize.
    Returns
    -------
    str
        A string representing the object as Python code.
    See Also
    --------
    load_code
        Inverse function.
    """
    def _dump_code(x: Any) -> str:
        # r = { 'class': ..., 'args': ... }
        # r = { 'class': ..., 'kwargs': ... }
        if type(x) == dict and x.get("__kind__") == kind_inst:
            args = x.get("args", [])
            kwargs = x.get("kwargs", {})
            fqname = x["class"]
            bindings = ", ".join(
                itertools.chain(
                    map(_dump_code, args),
                    [f"{k}={_dump_code(v)}" for k, v in kwargs.items()],
                )
            )
            return f"{fqname}({bindings})"
        if type(x) == dict and x.get("__kind__") == kind_type:
            return x["class"]
        if isinstance(x, dict):
            inner = ", ".join(
                f"{_dump_code(k)}: {_dump_code(v)}" for k, v in x.items()
            )
            return f"{{{inner}}}"
        if isinstance(x, list):
            # NOTE(review): recurses through `dump_code` (re-encoding each
            # element) while the dict branch uses `_dump_code`; harmless
            # since `encode` is idempotent on encoded data, but inconsistent.
            inner = ", ".join(list(map(dump_code, x)))
            return f"[{inner}]"
        if isinstance(x, tuple):
            inner = ", ".join(list(map(dump_code, x)))
            # account for the extra `,` in `(x,)`
            if len(x) == 1:
                inner += ","
            return f"({inner})"
        if isinstance(x, str):
            # json.dumps escapes the string
            return json.dumps(x)
        if isinstance(x, float) or np.issubdtype(type(x), np.inexact):
            if math.isfinite(x):
                return str(x)
            else:
                # Bug fix: the f-prefix was missing, so every non-finite
                # float serialized to the literal text 'float("{x}")'
                # instead of e.g. 'float("nan")'.
                return f'float("{x}")'
        if isinstance(x, int) or np.issubdtype(type(x), np.integer):
            return str(x)
        if x is None:
            return str(x)
        raise RuntimeError(
            f"Unexpected element type {fqname_for(x.__class__)}"
        )
    return _dump_code(encode(o))
# JSON Serialization/Deserialization
# ----------------------------------
# The canonical way to do this is to define and `default` and `object_hook`
# parameters to the json.dumps and json.loads methods. Unfortunately, due
# to https://bugs.python.org/issue12657 this is not possible at the moment,
# as support for custom NamedTuple serialization is broken.
#
# To circumvent the issue, we pass the input value through custom encode
# and decode functions that map nested object terms to JSON-serializable
# data structures with explicit recursion.
def dump_json(o: Any, indent: Optional[int] = None) -> str:
    """
    Serializes an object to a JSON string.
    Parameters
    ----------
    o
        The object to serialize.
    indent
        An optional number of spaces to use as an indent.
    Returns
    -------
    str
        A string representing the object in JSON format, with keys sorted.
    See Also
    --------
    load_json
        Inverse function.
    """
    encoded = encode(o)
    return json.dumps(encoded, indent=indent, sort_keys=True)
def load_json(s: str) -> Any:
    """
    Deserializes an object from a JSON string.
    Parameters
    ----------
    s
        A string representing the object in JSON format.
    Returns
    -------
    Any
        The deserialized object.
    See Also
    --------
    dump_json
        Inverse function.
    """
    parsed = json.loads(s)
    return decode(parsed)
# Structural encoding/decoding
# ----------------------------
# Tag values stored under the "__kind__" key of encoded dictionaries:
# "type" marks a serialized class object, "instance" a serialized instance.
kind_type = "type"
kind_inst = "instance"
@singledispatch
def encode(v: Any) -> Any:
    """
    Transforms a value `v` as a serializable intermediate representation (for
    example, named tuples are encoded as dictionaries). The intermediate
    representation is then recursively traversed and serialized either as
    Python code or as JSON string.
    This function is decorated with :func:`~functools.singledispatch` and can
    be specialized by clients for families of types that are not supported by
    the basic implementation (explained below).
    Examples
    --------
    The conversion logic implemented by the basic implementation is used
    as a fallback and is best explained by a series of examples.
    Lists (as lists).
    >>> encode([1, 2.0, '3'])
    [1, 2.0, '3']
    Tuples (as lists).
    >>> encode((1, 2.0, '3'))
    [1, 2.0, '3']
    Dictionaries (as dictionaries).
    >>> encode({'a': 1, 'b': 2.0, 'c': '3'})
    {'a': 1, 'b': 2.0, 'c': '3'}
    Named tuples (as dictionaries with a ``'__kind__': 'instance'`` member).
    >>> from pprint import pprint
    >>> from typing import NamedTuple
    >>> class ComplexNumber(NamedTuple):
    ...     x: float = 0.0
    ...     y: float = 0.0
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Classes with a :func:`~gluonts.core.component.validated` initializer (as
    dictionaries with a ``'__kind__': 'instance'`` member).
    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Classes with a ``__getnewargs_ex__`` magic method (as dictionaries with a
    ``'__kind__': 'instance'`` member).
    >>> from gluonts.core.component import validated
    >>> class ComplexNumber:
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    ...     def __getnewargs_ex__(self):
    ...         return [], {'x': self.x, 'y': self.y}
    >>> pprint(encode(ComplexNumber(4.0, 2.0)))
    {'__kind__': 'instance',
     'args': [],
     'class': 'gluonts.core.serde.ComplexNumber',
     'kwargs': {'x': 4.0, 'y': 2.0}}
    Types (as dictionaries with a ``'__kind__': 'type' member``).
    >>> encode(ComplexNumber)
    {'__kind__': 'type', 'class': 'gluonts.core.serde.ComplexNumber'}
    Parameters
    ----------
    v
        The value to be encoded.
    Returns
    -------
    Any
        An encoding of ``v`` that can be serialized to Python code or
        JSON string.
    See Also
    --------
    decode
        Inverse function.
    dump_json
        Serializes an object to a JSON string.
    dump_code
        Serializes an object to a Python code string.
    """
    # Branch order matters: more specific checks must come first.
    if isinstance(v, type(None)):
        return None
    if isinstance(v, (float, int, str)):
        return v
    # Numpy scalars fall back to plain Python numbers.
    if np.issubdtype(type(v), np.inexact):
        return float(v)
    if np.issubdtype(type(v), np.integer):
        return int(v)
    # we have to check for namedtuples first, to encode them not as plain
    # tuples (which would become lists)
    if isinstance(v, tuple) and hasattr(v, "_asdict"):
        v = cast(NamedTuple, v)
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "kwargs": encode(v._asdict()),
        }
    # Sets and tuples both become lists (lossy: `decode` yields lists back).
    if isinstance(v, (list, set, tuple)):
        return list(map(encode, v))
    if isinstance(v, dict):
        return {k: encode(v) for k, v in v.items()}
    if isinstance(v, type):
        return {"__kind__": kind_type, "class": fqname_for(v)}
    if hasattr(v, "__getnewargs_ex__"):
        args, kwargs = v.__getnewargs_ex__()  # mypy: ignore
        return {
            "__kind__": kind_inst,
            "class": fqname_for(v.__class__),
            "args": encode(args),
            "kwargs": encode(kwargs),
        }
    # Devices serialize to None — presumably to keep serialized configs
    # device-agnostic; NOTE(review): confirm against decode-side callers.
    if isinstance(v, torch.device):
        return None
    raise RuntimeError(bad_type_msg.format(fqname_for(v.__class__)))
def decode(r: Any) -> Any:
    """
    Decodes a value from an intermediate representation `r`.
    Parameters
    ----------
    r
        An intermediate representation to be decoded.
    Returns
    -------
    Any
        A Python data structure corresponding to the decoded version of ``r``.
    See Also
    --------
    encode
        Inverse function.
    """
    # Structural recursion over the possible shapes of r. Exact `type`
    # checks (rather than isinstance) mirror what `encode` emits.
    kind = r.get("__kind__") if type(r) == dict else None

    # { '__kind__': 'instance', 'class': ..., 'args'/'kwargs': ... }
    if kind == kind_inst:
        cls = cast(Any, locate(r["class"]))
        args = decode(r["args"]) if "args" in r else []
        kwargs = decode(r["kwargs"]) if "kwargs" in r else {}
        return cls(*args, **kwargs)

    # { '__kind__': 'type', 'class': ... }
    if kind == kind_type:
        return locate(r["class"])

    if type(r) == dict:
        return {k: decode(v) for k, v in r.items()}
    if type(r) == tuple:
        return tuple(decode(y) for y in r)
    if type(r) == list:
        return [decode(y) for y in r]
    if type(r) == set:
        return {decode(y) for y in r}

    # Plain scalars (and anything else) pass through untouched.
    return r
| 10,036 | 26.49863 | 78 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/core/component.py | import functools
import inspect
from collections import OrderedDict
from typing import Any
import torch
from pydantic import BaseConfig, BaseModel, create_model
from pts.core.serde import dump_code
class BaseValidatedInitializerModel(BaseModel):
    """
    Base Pydantic model for components with :func:`validated` initializers.
    See Also
    --------
    validated
        Decorates an initializer methods with argument validation logic.
    """
    class Config(BaseConfig):
        """
        `Config <https://pydantic-docs.helpmanual.io/#model-config>`_ for the
        Pydantic model inherited by all :func:`validated` initializers.
        Allows the use of arbitrary type annotations in initializer parameters.
        """
        # Required so initializer parameters may be torch modules, tensors,
        # and other types Pydantic cannot validate natively.
        arbitrary_types_allowed = True
def validated(base_model=None):
    """
    Decorates an ``__init__`` method with typed parameters with validation
    and auto-conversion logic.
    >>> class ComplexNumber:
    ...     @validated()
    ...     def __init__(self, x: float = 0.0, y: float = 0.0) -> None:
    ...         self.x = x
    ...         self.y = y
    Classes with decorated initializers can be instantiated using arguments of
    another type (e.g. an ``y`` argument of type ``str`` ). The decorator
    handles the type conversion logic.
    >>> c = ComplexNumber(y='42')
    >>> (c.x, c.y)
    (0.0, 42.0)
    If the bound argument cannot be converted, the decorator throws an error.
    >>> c = ComplexNumber(y=None)
    Traceback (most recent call last):
        ...
    pydantic.error_wrappers.ValidationError: 1 validation error for ComplexNumberModel
    y
      none is not an allowed value (type=type_error.none.not_allowed)
    Internally, the decorator delegates all validation and conversion logic to
    `a Pydantic model <https://pydantic-docs.helpmanual.io/>`_, which can be
    accessed through the ``Model`` attribute of the decorated initializer.
    >>> ComplexNumber.__init__.Model
    <class 'ComplexNumberModel'>
    The Pydantic model is synthesized automatically from the parameter
    names and types of the decorated initializer. In the ``ComplexNumber``
    example, the synthesized Pydantic model corresponds to the following
    definition.
    >>> class ComplexNumberModel(BaseValidatedInitializerModel):
    ...     x: float = 0.0
    ...     y: float = 0.0
    Clients can optionally customize the base class of the synthesized
    Pydantic model using the ``base_model`` decorator parameter. The default
    behavior uses :class:`BaseValidatedInitializerModel` and its
    `model config <https://pydantic-docs.helpmanual.io/#config>`_.
    See Also
    --------
    BaseValidatedInitializerModel
        Default base class for all synthesized Pydantic models.
    """
    def validator(init):
        init_qualname = dict(inspect.getmembers(init))["__qualname__"]
        init_clsnme = init_qualname.split(".")[0]
        init_params = inspect.signature(init).parameters
        # Build (annotation, default) pairs for every positional-or-keyword
        # parameter except `self`; missing annotations become `Any` and
        # missing defaults become the "required" Pydantic sentinel `...`.
        init_fields = {
            param.name: (
                param.annotation
                if param.annotation != inspect.Parameter.empty
                else Any,
                param.default
                if param.default != inspect.Parameter.empty
                else ...,
            )
            for param in init_params.values()
            if param.name != "self"
            and param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        }
        if base_model is None:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __config__=BaseValidatedInitializerModel.Config,
                **init_fields,
            )
        else:
            PydanticModel = create_model(
                model_name=f"{init_clsnme}Model",
                __base__=base_model,
                **init_fields,
            )
        def validated_repr(self) -> str:
            return dump_code(self)
        def validated_getnewargs_ex(self):
            return (), self.__init_args__
        @functools.wraps(init)
        def init_wrapper(*args, **kwargs):
            self, *args = args
            nmargs = {
                name: arg
                for (name, param), arg in zip(
                    list(init_params.items()), [self] + args
                )
                if name != "self"
            }
            # Pydantic performs the validation/conversion of all arguments.
            model = PydanticModel(**{**nmargs, **kwargs})
            # merge nmargs, kwargs, and the model fields into a single dict
            all_args = {**nmargs, **kwargs, **model.__dict__}
            # save the merged dictionary for Representable use, but only if
            # __init_args__ is not already set in order to avoid overriding a
            # value set by a subclass initializer in super().__init__ calls
            if not getattr(self, "__init_args__", {}):
                self.__init_args__ = OrderedDict(
                    {
                        name: arg
                        for name, arg in sorted(all_args.items())
                        if type(arg) != torch.nn.ParameterDict
                    }
                )
                self.__class__.__getnewargs_ex__ = validated_getnewargs_ex
                self.__class__.__repr__ = validated_repr
            return init(self, **all_args)
        # attach the Pydantic model as the attribute of the initializer wrapper
        setattr(init_wrapper, "Model", PydanticModel)
        return init_wrapper
    # Bug fix: this final line was fused with stray text ("| 5,487 | ...")
    # that made it a syntax error; restored the intended return.
    return validator
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/transformed_iterable_dataset.py | import itertools
from typing import Dict, Iterable, Iterator, Optional
import numpy as np
import torch
from pts.transform.transform import Transformation
from .common import DataEntry, Dataset
class TransformedIterableDataset(torch.utils.data.IterableDataset):
    """
    An ``IterableDataset`` that lazily applies a ``Transformation`` to the
    entries of an underlying ``Dataset``.

    Only ``np.ndarray``-valued fields of each transformed entry are yielded,
    with float arrays down-cast to ``float32``.

    Parameters
    ----------
    dataset
        The dataset whose entries are transformed.
    is_train
        Flag passed to the transformation (training vs. inference mode).
    transform
        Transformation applied to every data entry.
    is_forever
        If True, cycle over the dataset indefinitely (state survives
        repeated ``__iter__`` calls); if False, iterate over the dataset
        exactly once per ``__iter__`` call.
    """

    def __init__(
        self, dataset: Dataset, is_train: bool, transform: Transformation, is_forever: bool = True,
    ) -> None:
        super().__init__()
        self.dataset = dataset
        self.transform = transform
        self.is_train = is_train
        self._cur_iter: Optional[Iterator] = None
        self.is_forever = is_forever

    def _iterate_forever(self, collection: Iterable[DataEntry]) -> Iterator[DataEntry]:
        # Iterate forever over the collection; the collection must be
        # non-empty and re-iterable (each `for` pass starts a fresh
        # iteration) -- the initial `next` only probes for emptiness.
        while True:
            try:
                first = next(iter(collection))
            except StopIteration:
                raise Exception("empty dataset")
            else:
                for x in collection:
                    yield x

    def __iter__(self) -> Iterator[Dict[str, np.ndarray]]:
        if self.is_forever:
            if self._cur_iter is None:
                # Create the transformed stream only once so cycling state
                # is preserved across repeated __iter__ calls.
                self._cur_iter = self.transform(
                    self._iterate_forever(self.dataset), is_train=self.is_train
                )
        else:
            # Restart from the beginning on every __iter__ call.
            self._cur_iter = self.transform(
                self.dataset, is_train=self.is_train
            )
        assert self._cur_iter is not None
        while True:
            try:
                data_entry = next(self._cur_iter)
            except StopIteration:
                return
            # Keep only array-valued fields; down-cast floats to float32.
            # (Was `isinstance(...) == True` -- redundant comparison.)
            yield {
                k: (v.astype(np.float32) if v.dtype.kind == "f" else v)
                for k, v in data_entry.items()
                if isinstance(v, np.ndarray)
            }
# def __len__(self) -> int:
# return len(self.dataset)
class TransformedListDataset(torch.utils.data.Dataset):
    """
    A map-style dataset that applies a ``Transformation`` on demand to the
    entry at a given index.

    Only ``np.ndarray``-valued fields of the transformed entry are returned,
    with float arrays down-cast to ``float32``.

    Parameters
    ----------
    dataset
        List of data entries to transform.
    is_train
        Flag passed to the transformation (training vs. inference mode).
    transform
        Transformation applied to the requested entry.
    """

    def __init__(
        self, dataset: list, is_train: bool, transform: Transformation,
    ) -> None:
        super().__init__()
        self.dataset = dataset
        self.transform = transform
        self.is_train = is_train

    def __getitem__(self, idx):
        # Transformations consume an iterable of entries, so wrap the single
        # requested entry in a one-element list and take the first result.
        data_item = self.transform(
            [self.dataset[idx]], is_train=self.is_train
        )
        data_entry = next(data_item)
        # Keep only array-valued fields; down-cast floats to float32.
        # (Was `isinstance(...) == True` -- redundant comparison.)
        return {
            k: (v.astype(np.float32) if v.dtype.kind == "f" else v)
            for k, v in data_entry.items()
            if isinstance(v, np.ndarray)
        }

    def __len__(self) -> int:
        return len(self.dataset)
| 2,707 | 30.488372 | 99 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/loader.py | import itertools
from collections import defaultdict
from typing import Any, Dict, Iterable, Iterator, List, Optional # noqa: F401
import numpy as np
# Third-party imports
import torch
from pts.transform.transform import Transformation
# First-party imports
from .common import DataEntry, Dataset
DataBatch = Dict[str, Any]


class BatchBuffer:
    """
    Accumulates data entries field-by-field and emits them as stacked
    batches of tensors.

    Parameters
    ----------
    batch_size
        Maximum number of entries per emitted batch.
    device
        Device onto which emitted tensors are moved.
    dtype
        Floating point dtype used for float-valued numpy fields.
    """

    def __init__(
        self, batch_size: int, device: torch.device, dtype: np.dtype = np.float32
    ) -> None:
        self._buffers: Dict[Any, List[Any]] = defaultdict(list)
        self.batch_size = batch_size
        self._size = 0
        self.device = device
        self.dtype = dtype

    def add(self, d: Dict[str, List[np.ndarray]]):
        """Append one data entry; all entries must share the same keys."""
        if self._buffers:
            assert self._buffers.keys() == d.keys()
        for k, v in d.items():
            self._buffers[k].append(v)
        self._size += 1

    def __len__(self):
        return self._size

    def next_batch(self) -> DataBatch:
        """Pop up to ``batch_size`` buffered entries and stack them per field."""
        assert self._size > 0
        n = min(self._size, self.batch_size)
        batch = {k: self.stack(v[:n]) for k, v in self._buffers.items()}
        for key in self._buffers.keys():
            self._buffers[key] = self._buffers[key][n:]
        self._size -= n
        return batch

    def stack(self, xs):
        """Stack a list of homogeneous values along a new first axis."""
        if isinstance(xs[0], np.ndarray):
            data = np.asarray(xs)
            if data.dtype.kind == "f":
                data = data.astype(self.dtype)
            return torch.from_numpy(data).to(device=self.device, non_blocking=True)
        elif isinstance(xs[0], torch.Tensor):
            # BUGFIX: `torch.stack(*xs)` unpacked the list, so the second
            # tensor was passed as the `dim` argument and raised a
            # TypeError; stack the list itself instead.
            return torch.stack(xs)
        else:
            return xs  # stack all other types as list

    def shuffle(self):
        """Apply one random permutation consistently across all fields."""
        perm = np.random.permutation(self._size)
        for key in self._buffers.keys():
            li = self._buffers[key]
            self._buffers[key] = [li[i] for i in perm]
class DataLoader(Iterable[DataEntry]):
    """
    Base class for iterating over a dataset in fixed-size batches, applying
    a transformation to every entry along the way.

    Parameters
    ----------
    dataset
        Source of data entries.
    transform
        Transformation applied to each entry.
    batch_size
        Number of entries per emitted batch.
    device
        Device on which batch tensors are placed.
    dtype
        Floating point type used for float-valued fields.
    """

    def __init__(
        self,
        dataset: Dataset,
        transform: Transformation,
        batch_size: int,
        device: torch.device,
        dtype: np.dtype = np.float32,
    ) -> None:
        # Data source and per-entry transformation.
        self.dataset = dataset
        self.transform = transform
        # Batching configuration.
        self.batch_size = batch_size
        self.device = device
        self.dtype = dtype
class TrainDataLoader(DataLoader):
    """
    An Iterable type for iterating and transforming a dataset, in batches of a
    prescribed size, until a given number of batches is reached.
    The transformation are applied with in training mode, i.e. with the flag
    `is_train = True`.
    Parameters
    ----------
    dataset
        The dataset from which to load data.
    transform
        A transformation to apply to each entry in the dataset.
    batch_size
        The size of the batches to emit.
    device
        device to use to store data on.
    num_batches_per_epoch
        Number of batches to return in one complete iteration over this object.
    dtype
        Floating point type to use.
    """
    def __init__(
        self,
        dataset: Dataset,
        transform: Transformation,
        batch_size: int,
        device: torch.device,
        num_batches_per_epoch: int,
        dtype: np.dtype = np.float32,
        shuffle_for_training: bool = True,
        num_batches_for_shuffling: int = 10,
    ) -> None:
        super().__init__(dataset, transform, batch_size, device, dtype)
        self.num_batches_per_epoch = num_batches_per_epoch
        self.shuffle_for_training = shuffle_for_training
        # Buffer up to `num_batches_for_shuffling` batches worth of entries
        # before emitting, so shuffling mixes entries across batches; without
        # shuffling, one batch worth of buffering suffices.
        self._num_buffered_batches = (
            num_batches_for_shuffling if shuffle_for_training else 1
        )
        # The transformed stream is created lazily on first __iter__ and then
        # kept, so a new epoch resumes where the previous one stopped.
        self._cur_iter: Optional[Iterator] = None
        self._buffer = BatchBuffer(self.batch_size, device, dtype)
    def _emit_batches_while_buffer_larger_than(self, thresh) -> Iterator[DataBatch]:
        # Optionally shuffle the buffered entries, then drain full batches
        # until at most `thresh` entries remain buffered.
        if self.shuffle_for_training:
            self._buffer.shuffle()
        while len(self._buffer) > thresh:
            yield self._buffer.next_batch()
    def _iterate_forever(self, collection: Iterable[DataEntry]) -> Iterator[DataEntry]:
        # iterate forever over the collection, the collection must be non empty
        while True:
            try:
                first = next(iter(collection))
            except StopIteration:
                raise Exception("empty dataset")
            else:
                # NOTE(review): `first` is taken from a throwaway iterator,
                # so for a re-iterable collection the chain below yields it
                # twice per pass -- confirm this is intended.
                for x in itertools.chain([first], collection):
                    yield x
    def __len__(self) -> int:
        return self.num_batches_per_epoch
    def __iter__(self) -> Iterator[DataBatch]:
        batch_count = 0
        if self._cur_iter is None:
            self._cur_iter = self.transform(
                self._iterate_forever(self.dataset), is_train=True
            )
        assert self._cur_iter is not None
        while True:
            data_entry = next(self._cur_iter)
            self._buffer.add(data_entry)
            # Only emit once enough entries are buffered for shuffling.
            if len(self._buffer) >= self._num_buffered_batches * self.batch_size:
                for batch in self._emit_batches_while_buffer_larger_than(
                    self.batch_size - 1
                ):
                    yield batch
                    batch_count += 1
                    if batch_count >= self.num_batches_per_epoch:
                        return
class ValidationDataLoader(DataLoader):
    """
    Iterates a dataset exactly once, emitting batches of ``batch_size``
    entries and applying the transformation in training mode
    (``is_train=True``). The final batch may be smaller than
    ``batch_size``. Constructor parameters are those of :class:`DataLoader`.
    """

    def __iter__(self) -> Iterator[DataBatch]:
        staging = BatchBuffer(self.batch_size, self.device, self.dtype)
        entries = self.transform(iter(self.dataset), is_train=True)
        for entry in entries:
            staging.add(entry)
            if len(staging) >= self.batch_size:
                yield staging.next_batch()
        # Emit whatever is left as a final, possibly smaller batch.
        if len(staging) > 0:
            yield staging.next_batch()
class InferenceDataLoader(DataLoader):
    """
    Iterates a dataset exactly once, emitting batches of ``batch_size``
    entries and applying the transformation in inference mode
    (``is_train=False``). The final batch may be smaller than
    ``batch_size``. Constructor parameters are those of :class:`DataLoader`.
    """

    def __iter__(self) -> Iterator[DataBatch]:
        pending = BatchBuffer(self.batch_size, self.device, self.dtype)
        for entry in self.transform(iter(self.dataset), is_train=False):
            pending.add(entry)
            if len(pending) >= self.batch_size:
                yield pending.next_batch()
        # Flush the remainder as a final, possibly smaller batch.
        if len(pending) > 0:
            yield pending.next_batch()
| 7,684 | 30.756198 | 87 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/list_dataset.py | import random
import torch
from typing import Iterable
from .common import DataEntry, Dataset, SourceContext
from .process import ProcessDataEntry
class ListDataset(Dataset):
    """
    In-memory dataset backed by a list of data entries.

    Every entry from ``data_iter`` is preprocessed once at construction time
    via ``ProcessDataEntry``. Iteration attaches a ``SourceContext`` (source
    name and 1-based row number) to each entry. When ``shuffle`` is set, the
    entries are shuffled up front and re-shuffled after every full pass.
    """

    def __init__(
        self,
        data_iter: Iterable[DataEntry],
        freq: str,
        one_dim_target: bool = True,
        shuffle: bool = False,
    ) -> None:
        preprocess = ProcessDataEntry(freq, one_dim_target)
        self.list_data = [preprocess(entry) for entry in data_iter]
        self.shuffle = shuffle
        if self.shuffle:
            random.shuffle(self.list_data)

    def __iter__(self):
        row = 0
        for entry in self.list_data:
            row += 1
            entry["source"] = SourceContext(source="list_data", row=row)
            yield entry
        # Reorder for the next pass so epochs see different orderings.
        if self.shuffle:
            random.shuffle(self.list_data)

    def __len__(self):
        return len(self.list_data)
| 945 | 26.028571 | 78 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/dataset/repository/_util.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Any
import numpy as np
def to_dict(
    target_values: np.ndarray,
    start: str,
    cat: Optional[List[int]] = None,
    item_id: Optional[Any] = None,
):
    """
    Convert a target series into a JSON-serializable dictionary.

    NaN values are encoded as the string "NaN"; all other values are
    rounded to six decimal places. ``feat_static_cat`` and ``item_id``
    are only included when provided.
    """

    def encode(value):
        # JSON cannot represent NaN portably, so encode it as a string.
        if np.isnan(value):
            return "NaN"
        return float("{0:.6f}".format(float(value)))

    result = {
        "start": str(start),
        "target": [encode(v) for v in target_values],
    }
    if cat is not None:
        result["feat_static_cat"] = cat
    if item_id is not None:
        result["item_id"] = item_id
    return result
def save_to_file(path: Path, data: List[Dict]):
    """
    Write ``data`` as JSON-lines (one UTF-8 encoded JSON object per line)
    to ``path``, creating parent directories as needed.

    Parameters
    ----------
    path
        Destination file path.
    data
        List of JSON-serializable dictionaries.
    """
    print(f"saving time-series into {path}")
    path_dir = os.path.dirname(path)
    # BUGFIX: os.makedirs("") raises FileNotFoundError, so only create
    # directories when the path actually has a parent component.
    if path_dir:
        os.makedirs(path_dir, exist_ok=True)
    with open(path, "wb") as fp:
        for d in data:
            fp.write(json.dumps(d).encode("utf-8"))
            fp.write("\n".encode("utf-8"))
def get_download_path() -> Path:
    """
    Return the default directory for downloaded datasets:
    ``~/.pytorch/pytorch-ts``.
    """
    # The original round-tripped through str(); building the Path directly
    # yields an equal Path object.
    return Path.home() / ".pytorch" / "pytorch-ts"
def metadata(cardinality: int, freq: str, prediction_length: int):
    """
    Build a dataset metadata dictionary with a single static categorical
    feature whose cardinality is stored as a string.
    """
    static_cat = {"name": "feat_static_cat", "cardinality": str(cardinality)}
    return {
        "freq": freq,
        "prediction_length": prediction_length,
        "feat_static_cat": [static_cat],
    }
| 2,063 | 25.126582 | 75 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/transform/convert.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Iterator, List, Tuple, Optional
import numpy as np
import torch
from scipy.special import erf, erfinv
from pts.core.component import validated
from pts.dataset import DataEntry
from pts.exception import assert_pts
from .transform import (
SimpleTransformation,
MapTransformation,
FlatMapTransformation,
)
class AsNumpyArray(SimpleTransformation):
    """
    Converts the value of a field into a numpy array.
    Parameters
    ----------
    field
        Name of the field to convert.
    expected_ndim
        Expected number of dimensions. Throws an exception if the number of
        dimensions does not match.
    dtype
        numpy dtype to use.
    """
    @validated()
    def __init__(
        self, field: str, expected_ndim: int, dtype: np.dtype = np.float32
    ) -> None:
        self.field = field
        self.expected_ndim = expected_ndim
        self.dtype = dtype
    def transform(self, data: DataEntry) -> DataEntry:
        value = data[self.field]
        if not isinstance(value, float):
            # this lines produces "ValueError: setting an array element with a
            # sequence" on our test
            # value = np.asarray(value, dtype=np.float32)
            # see https://stackoverflow.com/questions/43863748/
            value = np.asarray(list(value), dtype=self.dtype)
        else:
            # ugly: required as list conversion will fail in the case of a
            # float
            value = np.asarray(value, dtype=self.dtype)
        # NOTE(review): the messages below are plain strings containing
        # "{...}" placeholders, not f-strings -- presumably assert_pts
        # applies str.format with the keyword arguments passed here
        # (value=..., self=...); confirm against pts.exception.assert_pts
        # before "fixing" them into f-strings.
        assert_pts(
            value.ndim >= self.expected_ndim,
            'Input for field "{self.field}" does not have the required'
            "dimension (field: {self.field}, ndim observed: {value.ndim}, "
            "expected ndim: {self.expected_ndim})",
            value=value,
            self=self,
        )
        data[self.field] = value
        return data
class ExpandDimArray(SimpleTransformation):
    """
    Insert a length-one axis into a field via ``np.expand_dims``; a no-op
    when ``axis`` is ``None``.

    Parameters
    ----------
    field
        Name of the field to reshape.
    axis
        Position of the new axis (``None`` leaves the field untouched).
    """

    @validated()
    def __init__(self, field: str, axis: Optional[int] = None) -> None:
        self.field = field
        self.axis = axis

    def transform(self, data: DataEntry) -> DataEntry:
        if self.axis is None:
            return data
        data[self.field] = np.expand_dims(data[self.field], axis=self.axis)
        return data
class VstackFeatures(SimpleTransformation):
    """
    Vertically stack several input fields into one output field using
    ``np.vstack``; fields whose value is ``None`` are skipped.

    Parameters
    ----------
    output_field
        Name of the field receiving the stacked result.
    input_fields
        Names of the fields to stack together.
    drop_inputs
        When True, remove the input fields (except the output field itself)
        from the entry afterwards.
    """

    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            self.cols_to_drop = [
                name for name in self.input_fields if name != output_field
            ]
        else:
            self.cols_to_drop = []

    def transform(self, data: DataEntry) -> DataEntry:
        stackable = [
            data[name] for name in self.input_fields if data[name] is not None
        ]
        data[self.output_field] = np.vstack(stackable)
        for name in self.cols_to_drop:
            del data[name]
        return data
class ConcatFeatures(SimpleTransformation):
    """
    Concatenate several input fields into one output field using
    ``np.concatenate``; fields whose value is ``None`` are skipped.

    Parameters
    ----------
    output_field
        Name of the field receiving the concatenated result.
    input_fields
        Names of the fields to concatenate.
    drop_inputs
        When True, remove the input fields (except the output field itself)
        from the entry afterwards.
    """

    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            self.cols_to_drop = [
                name for name in self.input_fields if name != output_field
            ]
        else:
            self.cols_to_drop = []

    def transform(self, data: DataEntry) -> DataEntry:
        parts = [
            data[name] for name in self.input_fields if data[name] is not None
        ]
        data[self.output_field] = np.concatenate(parts)
        for name in self.cols_to_drop:
            del data[name]
        return data
class SwapAxes(SimpleTransformation):
    """
    Apply ``np.swapaxes`` to the listed fields; lists of arrays are handled
    recursively, element by element.

    Parameters
    ----------
    input_fields
        Names of the fields to transform.
    axes
        Pair of axes to exchange.
    """

    @validated()
    def __init__(self, input_fields: List[str], axes: Tuple[int, int]) -> None:
        self.input_fields = input_fields
        self.axis1, self.axis2 = axes

    def transform(self, data: DataEntry) -> DataEntry:
        for name in self.input_fields:
            data[name] = self.swap(data[name])
        return data

    def swap(self, v):
        if isinstance(v, np.ndarray):
            return np.swapaxes(v, self.axis1, self.axis2)
        if isinstance(v, list):
            return [self.swap(item) for item in v]
        raise ValueError(
            f"Unexpected field type {type(v).__name__}, expected "
            f"np.ndarray or list[np.ndarray]"
        )
class ListFeatures(SimpleTransformation):
    """
    Collect several input fields into a single list-valued output field.

    Parameters
    ----------
    output_field
        Name of the field receiving the list of values.
    input_fields
        Names of the fields to collect, in order.
    drop_inputs
        When True, remove the input fields (except the output field itself)
        from the entry afterwards.
    """

    @validated()
    def __init__(
        self, output_field: str, input_fields: List[str], drop_inputs: bool = True,
    ) -> None:
        self.output_field = output_field
        self.input_fields = input_fields
        if drop_inputs:
            self.cols_to_drop = [
                name for name in self.input_fields if name != output_field
            ]
        else:
            self.cols_to_drop = []

    def transform(self, data: DataEntry) -> DataEntry:
        data[self.output_field] = [data[name] for name in self.input_fields]
        for name in self.cols_to_drop:
            del data[name]
        return data
class TargetDimIndicator(SimpleTransformation):
    """
    Attach a 0..N-1 label encoding of the target dimensions, where N is the
    length of the target field's first axis.
    """

    @validated()
    def __init__(self, field_name: str, target_field: str) -> None:
        self.field_name = field_name
        self.target_field = target_field

    def transform(self, data: DataEntry) -> DataEntry:
        num_dims = data[self.target_field].shape[0]
        data[self.field_name] = np.arange(0, num_dims)
        return data
class SampleTargetDim(FlatMapTransformation):
    """
    Samples random dimensions from the target at training time.
    """
    @validated()
    def __init__(
        self,
        field_name: str,
        target_field: str,
        observed_values_field: str,
        num_samples: int,
        shuffle: bool = True,
    ) -> None:
        # field_name: entry key holding the per-dimension index array
        # (presumably produced by TargetDimIndicator -- verify in pipeline).
        self.field_name = field_name
        self.target_field = target_field
        self.observed_values_field = observed_values_field
        # Number of target dimensions kept per training example.
        self.num_samples = num_samples
        self.shuffle = shuffle
    def flatmap_transform(
        self, data: DataEntry, is_train: bool, slice_future_target: bool = True
    ) -> Iterator[DataEntry]:
        # At inference time the entry is passed through unchanged.
        if not is_train:
            yield data
        else:
            # (target_dim,)
            target_dimensions = data[self.field_name]
            if self.shuffle:
                # NOTE: shuffles the index array stored in `data` in place.
                np.random.shuffle(target_dimensions)
            target_dimensions = target_dimensions[: self.num_samples]
            data[self.field_name] = target_dimensions
            # (seq_len, target_dim) -> (seq_len, num_samples)
            for field in [
                f"past_{self.target_field}",
                f"future_{self.target_field}",
                f"past_{self.observed_values_field}",
                f"future_{self.observed_values_field}",
            ]:
                data[field] = data[field][:, target_dimensions]
            yield data
class CDFtoGaussianTransform(MapTransformation):
    """
    Marginal transformation that transforms the target via an empirical CDF
    to a standard gaussian as described here: https://arxiv.org/abs/1910.03002
    To be used in conjunction with a multivariate gaussian to from a copula.
    Note that this transformation is currently intended for multivariate
    targets only.
    """
    @validated()
    def __init__(
        self,
        target_dim: int,
        target_field: str,
        observed_values_field: str,
        cdf_suffix="_cdf",
        max_context_length: Optional[int] = None,
    ) -> None:
        """
        Constructor for CDFtoGaussianTransform.
        Parameters
        ----------
        target_dim
            Dimensionality of the target.
        target_field
            Field that will be transformed.
        observed_values_field
            Field that indicates observed values.
        cdf_suffix
            Suffix to mark the field with the transformed target.
        max_context_length
            Sets the maximum context length for the empirical CDF.
        """
        self.target_field = target_field
        self.past_target_field = "past_" + self.target_field
        self.future_target_field = "future_" + self.target_field
        self.past_observed_field = f"past_{observed_values_field}"
        # Auxiliary fields populated here and consumed later by
        # cdf_to_gaussian_forward_transform for the inverse mapping.
        self.sort_target_field = f"past_{target_field}_sorted"
        self.slopes_field = "slopes"
        self.intercepts_field = "intercepts"
        self.cdf_suffix = cdf_suffix
        self.max_context_length = max_context_length
        self.target_dim = target_dim
    def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
        # 1) sort/normalize the past target, 2) fit the piece-wise linear
        # empirical CDF, then 3) map past and future targets through
        # empirical CDF -> standard-normal PPF into "<field><cdf_suffix>".
        self._preprocess_data(data, is_train=is_train)
        self._calc_pw_linear_params(data)
        for target_field in [self.past_target_field, self.future_target_field]:
            data[target_field + self.cdf_suffix] = self.standard_gaussian_ppf(
                self._empirical_cdf_forward_transform(
                    data[self.sort_target_field],
                    data[target_field],
                    data[self.slopes_field],
                    data[self.intercepts_field],
                )
            )
        return data
    def _preprocess_data(self, data: DataEntry, is_train: bool):
        """
        Performs several preprocess operations for computing the empirical CDF.
        1) Reshaping the data.
        2) Normalizing the target length.
        3) Adding noise to avoid zero slopes (training only)
        4) Sorting the target to compute the empirical CDF
        Parameters
        ----------
        data
            DataEntry with input data.
        is_train
            if is_train is True, this function adds noise to the target to
            avoid zero slopes in the piece-wise linear function.
        Returns
        -------
        """
        # (target_length, target_dim)
        past_target_vec = data[self.past_target_field].copy()
        # pick only observed values
        target_length, target_dim = past_target_vec.shape
        # (target_length, target_dim)
        # A time step counts as observed only if it is observed in every
        # dimension and is not padding.
        past_observed = (data[self.past_observed_field] > 0) * (
            data["past_is_pad"].reshape((-1, 1)) == 0
        )
        assert past_observed.ndim == 2
        assert target_dim == self.target_dim
        past_target_vec = past_target_vec[past_observed.min(axis=1)]
        assert past_target_vec.ndim == 2
        assert past_target_vec.shape[1] == self.target_dim
        expected_length = (
            target_length
            if self.max_context_length is None
            else self.max_context_length
        )
        if target_length != expected_length:
            # Fills values in the case where past_target_vec.shape[-1] <
            # target_length
            # as dataset.loader.BatchBuffer does not support varying shapes
            past_target_vec = CDFtoGaussianTransform._fill(
                past_target_vec, expected_length
            )
        # sorts along the time dimension to compute empirical CDF of each
        # dimension
        if is_train:
            past_target_vec = self._add_noise(past_target_vec)
        past_target_vec.sort(axis=0)
        assert past_target_vec.shape == (expected_length, self.target_dim)
        data[self.sort_target_field] = past_target_vec
    def _calc_pw_linear_params(self, data: DataEntry):
        """
        Calculates the piece-wise linear parameters to interpolate between
        the observed values in the empirical CDF.
        Once current limitation is that we use a zero slope line as the last
        piece. Thus, we cannot forecast anything higher than the highest
        observed value.
        Parameters
        ----------
        data
            Input data entry containing a sorted target field.
        Returns
        -------
        """
        sorted_target = data[self.sort_target_field]
        sorted_target_length, target_dim = sorted_target.shape
        # Quantile levels k/n for each dimension, shape
        # (sorted_target_length, target_dim).
        quantiles = np.stack(
            [np.arange(sorted_target_length) for _ in range(target_dim)], axis=1,
        ) / float(sorted_target_length)
        x_diff = np.diff(sorted_target, axis=0)
        y_diff = np.diff(quantiles, axis=0)
        # Calculate slopes of the pw-linear pieces.
        # Ties (x_diff == 0) get slope 0 to avoid division by zero.
        slopes = np.where(x_diff == 0.0, np.zeros_like(x_diff), y_diff / x_diff)
        zeroes = np.zeros_like(np.expand_dims(slopes[0, :], axis=0))
        slopes = np.append(slopes, zeroes, axis=0)
        # Calculate intercepts of the pw-linear pieces.
        intercepts = quantiles - slopes * sorted_target
        # Populate new fields with the piece-wise linear parameters.
        data[self.slopes_field] = slopes
        data[self.intercepts_field] = intercepts
    def _empirical_cdf_forward_transform(
        self,
        sorted_values: np.ndarray,
        values: np.ndarray,
        slopes: np.ndarray,
        intercepts: np.ndarray,
    ) -> np.ndarray:
        """
        Applies the empirical CDF forward transformation.
        Parameters
        ----------
        sorted_values
            Sorted target vector.
        values
            Values (real valued) that will be transformed to empirical CDF
            values.
        slopes
            Slopes of the piece-wise linear function.
        intercepts
            Intercepts of the piece-wise linear function.
        Returns
        -------
        quantiles
            Empirical CDF quantiles in [0, 1] interval with winzorized cutoff.
        """
        m = sorted_values.shape[0]
        quantiles = self._forward_transform(sorted_values, values, slopes, intercepts)
        # Truncate away from 0 and 1 (winsorization) to keep the later
        # Gaussian PPF finite.
        quantiles = np.clip(
            quantiles, self.winsorized_cutoff(m), 1 - self.winsorized_cutoff(m)
        )
        return quantiles
    @staticmethod
    def _add_noise(x: np.array) -> np.array:
        # Jitter proportional to the per-row std so ties are broken and the
        # piece-wise linear CDF has no zero slopes during training.
        scale_noise = 0.2
        std = np.sqrt(
            (np.square(x - x.mean(axis=1, keepdims=True))).mean(axis=1, keepdims=True)
        )
        noise = np.random.normal(
            loc=np.zeros_like(x), scale=np.ones_like(x) * std * scale_noise
        )
        x = x + noise
        return x
    @staticmethod
    def _search_sorted(sorted_vec: np.array, to_insert_vec: np.array) -> np.array:
        """
        Finds the indices of the active piece-wise linear function.
        Parameters
        ----------
        sorted_vec
            Sorted target vector.
        to_insert_vec
            Vector for which the indicies of the active linear functions
            will be computed
        Returns
        -------
        indices
            Indices mapping to the active linear function.
        """
        indices_left = np.searchsorted(sorted_vec, to_insert_vec, side="left")
        indices_right = np.searchsorted(sorted_vec, to_insert_vec, side="right")
        # Midpoint of the left/right insertion points handles ties; shift by
        # one to select the segment to the left, then clamp into range.
        indices = indices_left + (indices_right - indices_left) // 2
        indices = indices - 1
        indices = np.minimum(indices, len(sorted_vec) - 1)
        indices[indices < 0] = 0
        return indices
    def _forward_transform(
        self,
        sorted_vec: np.array,
        target: np.array,
        slopes: np.array,
        intercepts: np.array,
    ) -> np.array:
        """
        Applies the forward transformation to the marginals of the multivariate
        target. Target (real valued) -> empirical cdf [0, 1]
        Parameters
        ----------
        sorted_vec
            Sorted (past) target vector.
        target
            Target that will be transformed.
        slopes
            Slopes of the piece-wise linear function.
        intercepts
            Intercepts of the piece-wise linear function
        Returns
        -------
        transformed_target
            Transformed target vector.
        """
        # Process one target dimension (column) at a time.
        transformed = list()
        for sorted, t, slope, intercept in zip(
            sorted_vec.transpose(),
            target.transpose(),
            slopes.transpose(),
            intercepts.transpose(),
        ):
            indices = self._search_sorted(sorted, t)
            transformed_value = slope[indices] * t + intercept[indices]
            transformed.append(transformed_value)
        return np.array(transformed).transpose()
    @staticmethod
    def standard_gaussian_cdf(x: np.array) -> np.array:
        u = x / (np.sqrt(2.0))
        return (erf(u) + 1.0) / 2.0
    @staticmethod
    def standard_gaussian_ppf(y: np.array) -> np.array:
        # Clip to keep erfinv (and thus the PPF) finite at the extremes.
        y_clipped = np.clip(y, a_min=1.0e-6, a_max=1.0 - 1.0e-6)
        return np.sqrt(2.0) * erfinv(2.0 * y_clipped - 1.0)
    @staticmethod
    def winsorized_cutoff(m: np.array) -> np.array:
        """
        Apply truncation to the empirical CDF estimator to reduce variance as
        described here: https://arxiv.org/abs/0903.0649
        Parameters
        ----------
        m
            Input array with empirical CDF values.
        Returns
        -------
        res
            Truncated empirical CDf values.
        """
        # NOTE(review): 3.14 approximates pi here; the assert below assumes
        # m is a scalar (it is called with a shape[0] above).
        res = 1 / (4 * m ** 0.25 * np.sqrt(3.14 * np.log(m)))
        assert 0 < res < 1
        return res
    @staticmethod
    def _fill(target: np.ndarray, expected_length: int) -> np.ndarray:
        """
        Makes sure target has at least expected_length time-units by repeating
        it or using zeros.
        Parameters
        ----------
        target : shape (seq_len, dim)
        expected_length
        Returns
        -------
        array of shape (target_length, dim)
        """
        current_length, target_dim = target.shape
        if current_length == 0:
            # todo handle the case with no observation better,
            # we could use dataset statistics but for now we use zeros
            filled_target = np.zeros((expected_length, target_dim))
        elif current_length < expected_length:
            # Tile the series until it is long enough, then truncate.
            filled_target = np.vstack(
                [target for _ in range(expected_length // current_length + 1)]
            )
            filled_target = filled_target[:expected_length]
        elif current_length > expected_length:
            # Keep only the most recent observations.
            filled_target = target[-expected_length:]
        else:
            filled_target = target
        assert filled_target.shape == (expected_length, target_dim)
        return filled_target
def cdf_to_gaussian_forward_transform(
    input_batch: DataEntry, outputs: torch.Tensor
) -> np.ndarray:
    """
    Forward transformation of the CDFtoGaussianTransform.

    Maps Gaussian-space predictions back to the original data scale by
    applying the standard Gaussian CDF followed by the inverse of the
    piece-wise linear empirical CDF stored in ``input_batch``.

    Parameters
    ----------
    input_batch
        Input data to the predictor; must contain the "past_target_sorted",
        "slopes" and "intercepts" fields produced by CDFtoGaussianTransform.
    outputs
        Predictor outputs of shape (batch, samples, time, target_dim).

    Returns
    -------
    outputs
        Forward transformed outputs.
    """

    def _empirical_cdf_inverse_transform(
        batch_target_sorted: torch.Tensor,
        batch_predictions: torch.Tensor,
        slopes: torch.Tensor,
        intercepts: torch.Tensor,
    ) -> np.ndarray:
        """
        Apply the inverse of the piece-wise linear empirical CDF.

        Parameters
        ----------
        batch_target_sorted
            Sorted targets of the input batch.
        batch_predictions
            Predictions of the underlying probability distribution
        slopes
            Slopes of the piece-wise linear function.
        intercepts
            Intercepts of the piece-wise linear function.

        Returns
        -------
        outputs
            Forward transformed outputs.
        """
        slopes = slopes.cpu().numpy()
        intercepts = intercepts.cpu().numpy()
        batch_target_sorted = batch_target_sorted.cpu().numpy()
        _, num_timesteps, _ = batch_target_sorted.shape
        indices = np.floor(batch_predictions * num_timesteps)
        # indices = indices - 1
        # for now project into [0, 1]
        indices = np.clip(indices, 0, num_timesteps - 1)
        # BUGFIX: np.int (a deprecated alias of the builtin int) was removed
        # in NumPy 1.24; use an explicit integer dtype for indexing.
        indices = indices.astype(np.int64)
        # Invert q = slope * x + intercept where the slope is non-zero;
        # fall back to the sorted observed value on zero-slope segments.
        transformed = np.where(
            np.take_along_axis(slopes, indices, axis=1) != 0.0,
            (batch_predictions - np.take_along_axis(intercepts, indices, axis=1))
            / np.take_along_axis(slopes, indices, axis=1),
            np.take_along_axis(batch_target_sorted, indices, axis=1),
        )
        return transformed

    # applies inverse cdf to all outputs, one sample slice at a time
    _, samples, _, _ = outputs.shape
    for sample_index in range(0, samples):
        outputs[:, sample_index, :, :] = _empirical_cdf_inverse_transform(
            input_batch["past_target_sorted"],
            CDFtoGaussianTransform.standard_gaussian_cdf(
                outputs[:, sample_index, :, :]
            ),
            input_batch["slopes"],
            input_batch["intercepts"],
        )
    return outputs
| 22,563 | 30.602241 | 86 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/predictor.py | import json
from abc import ABC, abstractmethod
from pathlib import Path
from pydoc import locate
from typing import Iterator, Callable, Optional
import numpy as np
import torch
import torch.nn as nn
import pts
from pts.core.serde import dump_json, fqname_for, load_json
from pts.dataset import Dataset, DataEntry, InferenceDataLoader
from pts.transform import Transformation
from .forecast import Forecast
from .forecast_generator import ForecastGenerator, SampleForecastGenerator
from .utils import get_module_forward_input_names
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor(ABC):
    """
    Base class for all predictors: produces forecasts for a dataset and
    supports (de)serialization to a directory.

    Parameters
    ----------
    prediction_length
        Number of time steps each forecast covers.
    freq
        Frequency string of the time series (e.g. "H", "D").
    """

    __version__: str = pts.__version__

    def __init__(self, prediction_length: int, freq: str) -> None:
        self.prediction_length = prediction_length
        self.freq = freq

    @abstractmethod
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        """Compute one forecast per entry of ``dataset``."""
        pass

    def serialize(self, path: Path) -> None:
        """Write the predictor's fully-qualified type and version info to ``path``."""
        # serialize Predictor type
        with (path / "type.txt").open("w") as fp:
            fp.write(fqname_for(self.__class__))
        with (path / "version.json").open("w") as fp:
            json.dump(
                {"model": self.__version__, "pts": pts.__version__}, fp
            )

    @classmethod
    def deserialize(
        cls, path: Path, device: Optional[torch.device] = None
    ) -> "Predictor":
        """
        Load a serialized predictor from the given path

        Parameters
        ----------
        path
            Path to the serialized files predictor.
        device
            Optional pytorch to be used with the predictor.
            If nothing is passed will use the GPU if available and CPU otherwise.
        """
        # deserialize Predictor type (strip tolerates a trailing newline)
        with (path / "type.txt").open("r") as fp:
            tpe = locate(fp.readline().strip())
        # BUGFIX: locate() returns None (or a non-class object) for an
        # unresolvable name, which previously made issubclass() raise an
        # opaque TypeError; fail with a clear IOError instead.
        if not (isinstance(tpe, type) and issubclass(tpe, Predictor)):
            name = fqname_for(tpe) if isinstance(tpe, type) else repr(tpe)
            raise IOError(
                f"Class {name} is not "
                f"a subclass of {fqname_for(Predictor)}"
            )
        # call deserialize() for the concrete Predictor type
        return tpe.deserialize(path, device)
class PTSPredictor(Predictor):
    """
    Predictor backed by a trained PyTorch network plus an input
    `Transformation`; forecasts are produced by a `ForecastGenerator`.
    """
    def __init__(
        self,
        prediction_net: nn.Module,
        batch_size: int,
        prediction_length: int,
        freq: str,
        device: torch.device,
        input_transform: Transformation,
        # NOTE: default instance is shared across predictors; safe because
        # SampleForecastGenerator holds no state.
        forecast_generator: ForecastGenerator = SampleForecastGenerator(),
        output_transform: Optional[OutputTransform] = None,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__(prediction_length, freq)
        # forward() argument names are introspected once, so that batches can
        # be unpacked positionally at prediction time.
        self.input_names = get_module_forward_input_names(prediction_net)
        self.prediction_net = prediction_net
        self.batch_size = batch_size
        self.input_transform = input_transform
        self.forecast_generator = forecast_generator
        self.output_transform = output_transform
        self.device = device
        self.dtype = dtype
    def predict(
        self, dataset: Dataset, num_samples: Optional[int] = None
    ) -> Iterator[Forecast]:
        """Yield forecasts for `dataset`, running the network in eval mode."""
        inference_data_loader = InferenceDataLoader(
            dataset,
            self.input_transform,
            self.batch_size,
            device=self.device,
            dtype=self.dtype,
        )
        self.prediction_net.eval()
        # no_grad: inference only, no autograd bookkeeping needed
        with torch.no_grad():
            yield from self.forecast_generator(
                inference_data_loader=inference_data_loader,
                prediction_net=self.prediction_net,
                input_names=self.input_names,
                freq=self.freq,
                output_transform=self.output_transform,
                num_samples=num_samples,
            )
    def serialize(self, path: Path) -> None:
        """Write network weights, transforms and constructor args under `path`."""
        super().serialize(path)
        # serialize network: architecture as JSON, weights via torch.save
        model_name = 'prediction_net'
        with (path / f"{model_name}-network.json").open("w") as fp:
            print(dump_json(self.prediction_net), file=fp)
        torch.save(self.prediction_net.state_dict(), path / "prediction_net")
        # serialize input transformation chain
        with (path / "input_transform.json").open("w") as fp:
            print(dump_json(self.input_transform), file=fp)
        # serialize output transformation chain (written even when None)
        with (path / "output_transform.json").open("w") as fp:
            print(dump_json(self.output_transform), file=fp)
        # serialize all remaining constructor parameters
        with (path / "parameters.json").open("w") as fp:
            parameters = dict(
                batch_size=self.batch_size,
                prediction_length=self.prediction_length,
                freq=self.freq,
                dtype=self.dtype,
                forecast_generator=self.forecast_generator,
                input_names=self.input_names,
            )
            print(dump_json(parameters), file=fp)
    @classmethod
    def deserialize(
        cls, path: Path, device: Optional[torch.device] = None
    ) -> "PTSPredictor":
        """Rebuild a PTSPredictor previously written by `serialize`."""
        # NOTE(review): output_transform.json is written by serialize() but
        # never read back here — the restored predictor always gets the
        # constructor default output_transform=None. Confirm this is intended.
        # deserialize constructor parameters
        with (path / "parameters.json").open("r") as fp:
            parameters = load_json(fp.read())
        # deserialize transformation chain
        with (path / "input_transform.json").open("r") as fp:
            transformation = load_json(fp.read())
        # deserialize prediction network (architecture, then weights)
        model_name = 'prediction_net'
        with (path / f"{model_name}-network.json").open("r") as fp:
            prediction_net = load_json(fp.read())
            prediction_net.load_state_dict(torch.load(path / "prediction_net"))
        # input_names is derived from the prediction_net in __init__, so the
        # serialized copy must not be forwarded as a constructor argument
        if "input_names" in parameters:
            del parameters["input_names"]
        parameters["device"] = device
        return PTSPredictor(
            input_transform=transformation,
            prediction_net=prediction_net.to(device),
            **parameters
        )
| 6,040 | 33.129944 | 81 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/forecast_generator.py | from abc import ABC, abstractmethod
from typing import Any, Callable, Iterator, List, Optional
import numpy as np
import torch
import torch.nn as nn
from pts.core.component import validated
from pts.dataset import InferenceDataLoader, DataEntry, FieldName
from pts.modules import DistributionOutput
from .forecast import Forecast, DistributionForecast, QuantileForecast, SampleForecast
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
def _extract_instances(x: Any) -> Any:
    """
    Yield the individual instances contained in a batched network output.
    For a tensor `a`:
        _extract_instances(a) -> a[0], a[1], ...
    For (nested) tuples/lists of tensors `(a, (b, c))`:
        _extract_instances((a, (b, c))) -> (a[0], (b[0], c[0])), ...
    A ``None`` input yields ``None`` indefinitely, so it can be zipped
    against sibling batched outputs of any length.
    """
    if isinstance(x, (np.ndarray, torch.Tensor)):
        batch_size = x.shape[0]
        for position in range(batch_size):
            # yield x[position: position + 1]
            yield x[position]
    elif isinstance(x, tuple):
        for instance_parts in zip(*(_extract_instances(member) for member in x)):
            yield tuple(instance_parts)
    elif isinstance(x, list):
        for instance_parts in zip(*(_extract_instances(member) for member in x)):
            yield list(instance_parts)
    elif x is None:
        while True:
            yield None
    else:
        assert False
class ForecastGenerator(ABC):
    """
    Strategy interface that converts raw network outputs (read from an
    `InferenceDataLoader`) into `Forecast` objects.
    """
    @abstractmethod
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        # Implementations iterate the loader, run prediction_net on the
        # fields named in input_names, and yield one Forecast per entry.
        pass
class DistributionForecastGenerator(ForecastGenerator):
    """Wraps raw distribution parameters into `DistributionForecast`s."""
    def __init__(self, distr_output: DistributionOutput) -> None:
        # distr_output knows how to build a torch Distribution from the
        # parameter tensors emitted by the network.
        self.distr_output = distr_output
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],  # unused: a distribution is returned, not samples
        **kwargs
    ) -> Iterator[DistributionForecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            # outputs stay as tensors: the distribution is built directly
            # from the network's parameter tensors (no .cpu().numpy() here)
            outputs = prediction_net(*inputs)
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            # one Distribution per batch entry
            distributions = [
                self.distr_output.distribution(*u) for u in _extract_instances(outputs)
            ]
            # sentinel so the assert below also holds for an empty batch
            i = -1
            for i, distr in enumerate(distributions):
                yield DistributionForecast(
                    distr,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                )
            # sanity check: exactly one forecast yielded per batch entry
            assert i + 1 == len(batch["forecast_start"])
class QuantileForecastGenerator(ForecastGenerator):
    """Wraps per-quantile network outputs into `QuantileForecast`s."""
    def __init__(self, quantiles: List[str]) -> None:
        # quantiles: keys (e.g. "0.1", "0.9") matching the first axis of
        # each per-entry output array.
        self.quantiles = quantiles
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],  # unused: the network already emits quantiles
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs).cpu().numpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            # sentinel so the assert below also holds for an empty batch
            i = -1
            for i, output in enumerate(outputs):
                yield QuantileForecast(
                    output,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                    forecast_keys=self.quantiles,
                )
            # sanity check: exactly one forecast yielded per batch entry
            assert i + 1 == len(batch["forecast_start"])
class SampleForecastGenerator(ForecastGenerator):
    """Wraps sample-path network outputs into `SampleForecast`s."""
    @validated()
    def __init__(self):
        pass
    def __call__(
        self,
        inference_data_loader: InferenceDataLoader,
        prediction_net: nn.Module,
        input_names: List[str],
        freq: str,
        output_transform: Optional[OutputTransform],
        num_samples: Optional[int],
        **kwargs
    ) -> Iterator[Forecast]:
        for batch in inference_data_loader:
            inputs = [batch[k] for k in input_names]
            outputs = prediction_net(*inputs).cpu().numpy()
            if output_transform is not None:
                outputs = output_transform(batch, outputs)
            if num_samples:
                # The network may emit fewer samples per call than requested;
                # keep calling it until num_samples are collected, then trim.
                num_collected_samples = outputs[0].shape[0]
                collected_samples = [outputs]
                while num_collected_samples < num_samples:  # [Note] some nets sample per batch-chunk for faster inference (e.g. DeepAR)
                    outputs = prediction_net(*inputs).cpu().numpy()
                    if output_transform is not None:
                        outputs = output_transform(batch, outputs)
                    collected_samples.append(outputs)
                    num_collected_samples += outputs[0].shape[0]
                # concatenate per-entry and truncate to exactly num_samples
                outputs = [
                    np.concatenate(s)[:num_samples] for s in zip(*collected_samples)
                ]
                assert len(outputs[0]) == num_samples
            # sentinel so the assert below also holds for an empty batch
            i = -1
            for i, output in enumerate(outputs):
                yield SampleForecast(
                    output,
                    start_date=batch["forecast_start"][i],
                    freq=freq,
                    item_id=batch[FieldName.ITEM_ID][i]
                    if FieldName.ITEM_ID in batch
                    else None,
                    info=batch["info"][i] if "info" in batch else None,
                )
            # sanity check: exactly one forecast yielded per batch entry
            assert i + 1 == len(batch["forecast_start"])
| 6,330 | 33.785714 | 125 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/utils.py | import inspect
from typing import Optional
import torch
import torch.nn as nn
def get_module_forward_input_names(module: nn.Module):
    """Return the parameter names of ``module.forward`` in declaration order."""
    forward_signature = inspect.signature(module.forward)
    return [name for name in forward_signature.parameters]
def copy_parameters(net_source: nn.Module, net_dest: nn.Module) -> None:
    """Copy every parameter and buffer of *net_source* into *net_dest* in place."""
    source_state = net_source.state_dict()
    net_dest.load_state_dict(source_state)
def weighted_average(
    tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None
):
    """
    Average *tensor*, optionally weighted by *weights*, along *dim*
    (or over all elements when *dim* is None).

    When weights are given, the divisor is the weight sum clamped below at 1,
    so an all-zero weight vector yields 0 instead of a division by zero.
    """
    if weights is None:
        # Unweighted path: plain arithmetic mean.
        return torch.mean(tensor, dim=dim) if dim is not None else tensor.mean()
    weighted = tensor * weights
    if dim is not None:
        total_weight = torch.sum(weights, dim)
        weighted_sum = torch.sum(weighted, dim)
    else:
        total_weight = weights.sum()
        weighted_sum = weighted.sum()
    # Clamp the divisor at 1 to avoid dividing by zero when all weights are 0.
    total_weight = torch.max(torch.ones_like(total_weight), total_weight)
    return weighted_sum / total_weight
| 1,032 | 26.918919 | 74 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/forecast.py | from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, List, Optional, Set, Union, Callable
import numpy as np
import pandas as pd
import torch
from pydantic import BaseModel, Field
from torch.distributions import Distribution
from .quantile import Quantile
class OutputType(str, Enum):
    """Kinds of output a `Forecast.as_json_dict` call can include."""
    mean = "mean"
    samples = "samples"
    quantiles = "quantiles"
class Config(BaseModel):
    """Pydantic settings controlling what `Forecast.as_json_dict` emits."""
    # accepted under either name thanks to the alias + population flag below
    num_samples: int = Field(100, alias="num_eval_samples")
    output_types: Set[OutputType] = {"quantiles", "mean"}
    # FIXME: validate list elements
    quantiles: List[str] = ["0.1", "0.5", "0.9"]
    class Config:
        # allow construction with field names as well as aliases
        allow_population_by_field_name = True
        # store additional fields
        extra = "allow"
class Forecast(ABC):
    """
    Abstract base for probabilistic forecasts over a fixed horizon.
    Subclasses must implement `quantile`; everything else (median, plotting,
    time index, JSON export) is derived from it.
    """
    start_date: pd.Timestamp
    freq: str
    item_id: Optional[str]
    info: Optional[Dict]
    prediction_length: int
    mean: np.ndarray
    # lazily-built cache for the `index` property
    _index = None
    @abstractmethod
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        """
        Computes a quantile from the predicted distribution.
        Parameters
        ----------
        q
            Quantile to compute.
        Returns
        -------
        numpy.ndarray
            Value of the quantile across the prediction range.
        """
        pass
    def quantile_ts(self, q: Union[float, str]) -> pd.Series:
        """Quantile `q` as a pandas Series indexed by the forecast dates."""
        return pd.Series(data=self.quantile(q), index=self.index)
    @property
    def median(self) -> np.ndarray:
        """Median of the forecast (the 0.5 quantile)."""
        return self.quantile(0.5)
    def plot(
        self,
        prediction_intervals=(50.0, 90.0),
        show_mean=False,
        color="b",
        label=None,
        output_file=None,
        *args,
        **kwargs,
    ):
        """
        Plots the median of the forecast as well as confidence bounds.
        (requires matplotlib and pandas).
        Parameters
        ----------
        prediction_intervals : float or list of floats in [0, 100]
            Confidence interval size(s). If a list, it will stack the error
            plots for each confidence interval. Only relevant for error styles
            with "ci" in the name.
        show_mean : boolean
            Whether to also show the mean of the forecast.
        color : matplotlib color name or dictionary
            The color used for plotting the forecast.
        label : string
            A label (prefix) that is used for the forecast
        output_file : str or None, default None
            Output path for the plot file. If None, plot is not saved to file.
        args :
            Other arguments are passed to main plot() call
        kwargs :
            Other keyword arguments are passed to main plot() call
        """
        # matplotlib==2.0.* gives errors in Brazil builds and has to be
        # imported locally
        import matplotlib.pyplot as plt
        label_prefix = "" if label is None else label + "-"
        for c in prediction_intervals:
            assert 0.0 <= c <= 100.0
        # turn each interval c into its two bounding percentiles 50 +/- c/2,
        # with the median (50) always included
        ps = [50.0] + [
            50.0 + f * c / 2.0 for c in prediction_intervals for f in [-1.0, +1.0]
        ]
        percentiles_sorted = sorted(set(ps))
        def alpha_for_percentile(p):
            # lower percentiles (wider intervals) are drawn more transparent
            return (p / 100.0) ** 0.5
        ps_data = [self.quantile(p / 100.0) for p in percentiles_sorted]
        # by construction the median sits exactly in the middle of the list
        i_p50 = len(percentiles_sorted) // 2
        p50_data = ps_data[i_p50]
        p50_series = pd.Series(data=p50_data, index=self.index)
        p50_series.plot(color=color, ls="-", label=f"{label_prefix}median")
        if show_mean:
            # NOTE(review): relies on self._sorted_samples, which only
            # SampleForecast defines — show_mean=True would fail on other
            # subclasses. Confirm intended.
            mean_data = np.mean(self._sorted_samples, axis=0)
            pd.Series(data=mean_data, index=self.index).plot(
                color=color, ls=":", label=f"{label_prefix}mean", *args, **kwargs,
            )
        # shade each interval between its symmetric lower/upper percentiles
        for i in range(len(percentiles_sorted) // 2):
            ptile = percentiles_sorted[i]
            alpha = alpha_for_percentile(ptile)
            plt.fill_between(
                self.index,
                ps_data[i],
                ps_data[-i - 1],
                facecolor=color,
                alpha=alpha,
                interpolate=True,
                *args,
                **kwargs,
            )
            # Hack to create labels for the error intervals.
            # Doesn't actually plot anything, because we only pass a single data point
            pd.Series(data=p50_data[:1], index=self.index[:1]).plot(
                color=color,
                alpha=alpha,
                linewidth=10,
                label=f"{label_prefix}{100 - ptile * 2}%",
                *args,
                **kwargs,
            )
        if output_file:
            plt.savefig(output_file)
    @property
    def index(self) -> pd.DatetimeIndex:
        """Date index covering the prediction horizon (cached after first use)."""
        if self._index is None:
            self._index = pd.date_range(
                self.start_date, periods=self.prediction_length, freq=self.freq
            )
        return self._index
    def as_json_dict(self, config: "Config") -> dict:
        """Export the forecast as a JSON-serializable dict per `config`."""
        result = {}
        if OutputType.mean in config.output_types:
            result["mean"] = self.mean.tolist()
        if OutputType.quantiles in config.output_types:
            quantiles = map(Quantile.parse, config.quantiles)
            result["quantiles"] = {
                quantile.name: self.quantile(quantile.value).tolist()
                for quantile in quantiles
            }
        if OutputType.samples in config.output_types:
            # base class has no samples; SampleForecast overrides to fill this
            result["samples"] = []
        return result
class SampleForecast(Forecast):
    """
    A `Forecast` object, where the predicted distribution is represented
    internally as samples.
    Parameters
    ----------
    samples
        Array of size (num_samples, prediction_length) for a univariate
        target, or (num_samples, prediction_length, target_dim) for a
        multivariate one.
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        samples: Union[torch.Tensor, np.ndarray],
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        assert isinstance(
            samples, (np.ndarray, torch.Tensor)
        ), "samples should be either a numpy array or an torch tensor"
        assert (
            len(np.shape(samples)) == 2 or len(np.shape(samples)) == 3
        ), "samples should be a 2-dimensional or 3-dimensional array. Dimensions found: {}".format(
            len(np.shape(samples))
        )
        # always keep samples as a numpy array; torch tensors are moved to CPU
        self.samples = (
            samples if (isinstance(samples, np.ndarray)) else samples.cpu().numpy()
        )
        # lazy caches (filled by _sorted_samples / dim when first needed)
        self._sorted_samples_value = None
        self._mean = None
        self._dim = None
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
    @property
    def _sorted_samples(self):
        # sorted along the sample axis; computed once and cached, since
        # quantile() needs it repeatedly
        if self._sorted_samples_value is None:
            self._sorted_samples_value = np.sort(self.samples, axis=0)
        return self._sorted_samples_value
    @property
    def num_samples(self):
        """
        The number of samples representing the forecast.
        """
        return self.samples.shape[0]
    @property
    def prediction_length(self):
        """
        Time length of the forecast.
        """
        return self.samples.shape[1]
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean.
        """
        if self._mean is not None:
            return self._mean
        else:
            # recomputed on each access while no cached value has been set
            return np.mean(self.samples, axis=0)
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        return pd.Series(data=self.mean, index=self.index)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        # nearest-rank estimate: pick the sample at rank round((n-1)*q)
        # of the sorted samples
        q = Quantile.parse(q).value
        sample_idx = int(np.round((self.num_samples - 1) * q))
        return self._sorted_samples[sample_idx, :]
    def copy_dim(self, dim: int) -> "SampleForecast":
        """
        Returns a new Forecast object with only the selected sub-dimension.
        Parameters
        ----------
        dim
            The returned forecast object will only represent this dimension.
        """
        if len(self.samples.shape) == 2:
            # already univariate; nothing to select
            samples = self.samples
        else:
            target_dim = self.samples.shape[2]
            assert dim < target_dim, (
                f"must set 0 <= dim < target_dim, but got dim={dim},"
                f" target_dim={target_dim}"
            )
            samples = self.samples[:, :, dim]
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def copy_aggregate(self, agg_fun: Callable) -> "SampleForecast":
        """
        Returns a new Forecast object with a time series aggregated over the
        dimension axis.
        Parameters
        ----------
        agg_fun
            Aggregation function that defines the aggregation operation
            (typically mean or sum).
        """
        if len(self.samples.shape) == 2:
            samples = self.samples
        else:
            # Aggregate over target dimension axis
            samples = agg_fun(self.samples, axis=2)
        return SampleForecast(
            samples=samples,
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        if self._dim is not None:
            return self._dim
        else:
            if len(self.samples.shape) == 2:
                # univariate target
                # shape: (num_samples, prediction_length)
                return 1
            else:
                # multivariate target
                # shape: (num_samples, prediction_length, target_dim)
                return self.samples.shape[2]
    def as_json_dict(self, config: "Config") -> dict:
        """Like the base export, but also fills in the raw samples if requested."""
        result = super().as_json_dict(config)
        if OutputType.samples in config.output_types:
            result["samples"] = self.samples.tolist()
        return result
    def __repr__(self):
        return ", ".join(
            [
                f"SampleForecast({self.samples!r})",
                f"{self.start_date!r}",
                f"{self.freq!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class QuantileForecast(Forecast):
    """
    A Forecast that contains arrays (i.e. time series) for quantiles and mean
    Parameters
    ----------
    forecast_arrays
        An array of forecasts
    start_date
        start of the forecast
    freq
        forecast frequency
    forecast_keys
        A list of quantiles of the form '0.1', '0.9', etc.,
        and potentially 'mean'. Each entry corresponds to one array in
        forecast_arrays.
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        forecast_arrays: np.ndarray,
        start_date: pd.Timestamp,
        freq: str,
        forecast_keys: List[str],
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.forecast_array = forecast_arrays
        self.start_date = pd.Timestamp(start_date, freq=freq)
        self.freq = freq
        # normalize keys so lookups in quantile() match Quantile.parse() names
        self.forecast_keys = [
            Quantile.from_str(key).name if key != "mean" else key
            for key in forecast_keys
        ]
        self.item_id = item_id
        self.info = info
        self._dim = None
        shape = self.forecast_array.shape
        # one array row per forecast key
        assert shape[0] == len(self.forecast_keys), (
            f"The forecast_array (shape={shape} should have the same "
            f"length as the forecast_keys (len={len(self.forecast_keys)})."
        )
        self.prediction_length = shape[-1]
        # key -> per-quantile (or mean) time series
        self._forecast_dict = {
            k: self.forecast_array[i] for i, k in enumerate(self.forecast_keys)
        }
        # fallback returned for keys that were not forecast
        self._nan_out = np.array([np.nan] * self.prediction_length)
    def quantile(self, q: Union[float, str]) -> np.ndarray:
        q_str = Quantile.parse(q).name
        # We return nan here such that evaluation runs through
        return self._forecast_dict.get(q_str, self._nan_out)
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean (NaNs when no 'mean' key was forecast).
        """
        return self._forecast_dict.get("mean", self._nan_out)
    def dim(self) -> int:
        """
        Returns the dimensionality of the forecast object.
        """
        if self._dim is not None:
            return self._dim
        else:
            if (
                len(self.forecast_array.shape) == 2
            ): # 1D target. shape: (num_samples, prediction_length)
                return 1
            else:
                return self.forecast_array.shape[
                    1
                ] # 2D target. shape: (num_samples, target_dim, prediction_length)
    def __repr__(self):
        return ", ".join(
            [
                f"QuantileForecast({self.forecast_array!r})",
                f"start_date={self.start_date!r}",
                f"freq={self.freq!r}",
                f"forecast_keys={self.forecast_keys!r}",
                f"item_id={self.item_id!r}",
                f"info={self.info!r})",
            ]
        )
class DistributionForecast(Forecast):
    """
    A `Forecast` object that uses a distribution directly.
    This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible, e.g. when using MultiVariateGaussian).
    Parameters
    ----------
    distribution
        Distribution object. This should represent the entire prediction
        length, i.e., if we draw `num_samples` samples from the distribution,
        the sample shape should be
        samples = trans_dist.sample(num_samples)
        samples.shape -> (num_samples, prediction_length)
    start_date
        start of the forecast
    freq
        forecast frequency
    info
        additional information that the forecaster may provide e.g. estimated
        parameters, number of iterations ran etc.
    """
    def __init__(
        self,
        distribution: Distribution,
        start_date: pd.Timestamp,
        freq: str,
        item_id: Optional[str] = None,
        info: Optional[Dict] = None,
    ) -> None:
        self.distribution = distribution
        # combined sample shape; its first entry is the prediction horizon
        self.shape = self.distribution.batch_shape + self.distribution.event_shape
        self.prediction_length = self.shape[0]
        self.item_id = item_id
        self.info = info
        assert isinstance(
            start_date, pd.Timestamp
        ), "start_date should be a pandas Timestamp object"
        self.start_date = start_date
        assert isinstance(freq, str), "freq should be a string"
        self.freq = freq
        # lazy cache for the mean property
        self._mean = None
    @property
    def mean(self) -> np.ndarray:
        """
        Forecast mean (computed once from the distribution, then cached).
        """
        if self._mean is not None:
            return self._mean
        else:
            self._mean = self.distribution.mean.cpu().numpy()
            return self._mean
    @property
    def mean_ts(self) -> pd.Series:
        """
        Forecast mean, as a pandas.Series object.
        """
        return pd.Series(data=self.mean, index=self.index)
    def quantile(self, level: Union[float, str]) -> np.ndarray:
        """Exact quantile via the distribution's inverse CDF."""
        level = Quantile.parse(level).value
        q = self.distribution.icdf(torch.tensor([level])).cpu().numpy()
        return q
    def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
        """Materialize this forecast as `num_samples` sampled trajectories."""
        return SampleForecast(
            samples=self.distribution.sample((num_samples,)),
            start_date=self.start_date,
            freq=self.freq,
            item_id=self.item_id,
            info=self.info,
        )
| 16,436 | 29.495362 | 99 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/estimator.py | from abc import ABC, abstractmethod
from typing import NamedTuple, Optional
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from pts.core.component import validated
from pts import Trainer
from pts.dataset import Dataset, TransformedIterableDataset, TransformedListDataset
from pts.transform import Transformation
from .predictor import Predictor
from .utils import get_module_forward_input_names
class Estimator(ABC):
    """
    An abstract class representing a trainable model.
    The underlying model is trained by calling the `train` method with
    a training `Dataset`, producing a `Predictor` object.
    """
    # horizon (number of time steps) that trained predictors will forecast
    prediction_length: int
    # pandas frequency string of the target series (e.g. "D", "H")
    freq: str
    @abstractmethod
    def train(
        self, training_data: Dataset,
    ) -> Predictor:
        """
        Train the estimator on the given data.
        Parameters
        ----------
        training_data
            Dataset to train the model on.
        Returns
        -------
        Predictor
            The predictor containing the trained model.
        """
        pass
class DummyEstimator(Estimator):
    """
    An `Estimator` that, upon training, simply returns a pre-constructed
    `Predictor`.
    Parameters
    ----------
    predictor_cls
        `Predictor` class to instantiate.
    **kwargs
        Keyword arguments to pass to the predictor constructor.
    """
    @validated()
    def __init__(self, predictor_cls: type, **kwargs) -> None:
        # the predictor is built eagerly here; train() only hands it back
        self.predictor = predictor_cls(**kwargs)
    def train(
        self,
        training_data: Dataset,
    ) -> Predictor:
        # training_data is intentionally ignored — no training takes place
        return self.predictor
class TrainOutput(NamedTuple):
    """Everything produced by `PTSEstimator.train_model`."""
    # fitted transformation chain applied to every dataset entry
    transformation: Transformation
    # the network after training (still usable for further inspection)
    trained_net: nn.Module
    # inference-ready predictor wrapping the trained network
    predictor: Predictor
class PTSEstimator(Estimator):
    """
    Base class for PyTorch-backed estimators. Subclasses supply the data
    transformation, the training network and the predictor factory; this
    class wires them together with the `Trainer`.
    """
    def __init__(self, trainer: Trainer, dtype: np.dtype = np.float32) -> None:
        self.trainer = trainer
        self.dtype = dtype
    @abstractmethod
    def create_transformation(self) -> Transformation:
        """
        Create and return the transformation needed for training and inference.
        Returns
        -------
        Transformation
            The transformation that will be applied entry-wise to datasets,
            at training and inference time.
        """
        pass
    @abstractmethod
    def create_training_network(self, device: torch.device) -> nn.Module:
        """
        Create and return the network used for training (i.e., computing the
        loss).
        Returns
        -------
        nn.Module
            The network that computes the loss given input data.
        """
        pass
    @abstractmethod
    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: nn.Module,
        device: torch.device,
    ) -> Predictor:
        """
        Create and return a predictor object.
        Returns
        -------
        Predictor
            A predictor wrapping a `nn.Module` used for inference.
        """
        pass
    def train_model(
        self, training_data: Dataset, validation_period: int = 1
    ) -> TrainOutput:
        """Fit the transformation, train the network, and build the predictor."""
        transformation = self.create_transformation()
        # let data-dependent transformations fit themselves on the data first
        transformation.estimate(iter(training_data))
        training_iter_dataset = TransformedListDataset(
            dataset=training_data.list_data,
            is_train=True,
            transform=transformation
        )
        training_data_loader = DataLoader(
            training_iter_dataset,
            batch_size=self.trainer.batch_size,
            num_workers=self.trainer.num_workers,
            pin_memory=self.trainer.pin_memory,
            drop_last=True,
            shuffle=True
        )
        # ensure that the training network is created on the same device
        trained_net = self.create_training_network(self.trainer.device)
        # the Trainer is callable and runs the full optimization loop in place
        self.trainer(
            net=trained_net,
            input_names=get_module_forward_input_names(trained_net),
            training_data_loader=training_data_loader,
            validation_period=validation_period,
        )
        return TrainOutput(
            transformation=transformation,
            trained_net=trained_net,
            predictor=self.create_predictor(
                transformation, trained_net, self.trainer.device
            ),
        )
    def train(
        self, training_data: Dataset, validation_period: int = 1
    ) -> Predictor:
        """Convenience wrapper around `train_model` returning only the predictor."""
        return self.train_model(training_data, validation_period).predictor
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/deepar/deepar_network.py | from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Distribution
from pts.core.component import validated
from pts.model import weighted_average
from pts.modules import DistributionOutput, MeanScaler, NOPScaler, FeatureEmbedder
def prod(xs):
    """Return the product of the elements of *xs*; 1 for an empty iterable."""
    result = 1
    for factor in xs:
        result *= factor
    return result
class DeepARNetwork(nn.Module):
    @validated()
    def __init__(
        self,
        input_size: int,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        cardinality: List[int],
        embedding_dimension: List[int],
        dc_cardinality: List[int],
        dc_embedding_dimension: List[int],
        lags_seq: List[int],
        moving_avg_windows: List[int],
        scaling: bool = True,
        dtype: np.dtype = np.float32,
    ) -> None:
        """
        Build the DeepAR network: an RNN over lagged targets (plus moving
        averages and embedded categorical features) that parameterizes
        `distr_output`'s distribution at every step.
        """
        super().__init__()
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.history_length = history_length
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.dropout_rate = dropout_rate
        self.cardinality = cardinality
        self.embedding_dimension = embedding_dimension
        self.dc_cardinality = dc_cardinality
        self.dc_embedding_dimension = dc_embedding_dimension
        self.num_cat = len(cardinality)
        self.scaling = scaling
        self.dtype = dtype
        self.lags_seq = lags_seq
        self.moving_avg_windows = moving_avg_windows
        self.distr_output = distr_output
        # cell_type must be "LSTM" or "GRU"; anything else raises KeyError here
        rnn = {"LSTM": nn.LSTM, "GRU": nn.GRU}[self.cell_type]
        self.rnn = rnn(
            input_size=input_size,
            hidden_size=num_cells,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )
        # initialize LSTM forget gate bias to be 1 as recommanded by http://proceedings.mlr.press/v37/jozefowicz15.pdf
        # (left disabled; the string below is a no-op placeholder, not executed)
        '''for names in self.rnn._all_weights:
            for name in filter(lambda n: "bias" in n, names):
                bias = getattr(self.rnn, name)
                n = bias.size(0)
                start, end = n // 4, n // 2
                bias.data[start:end].fill_(1.)'''
        self.target_shape = distr_output.event_shape
        # projects RNN hidden state to the distribution's parameters
        self.proj_distr_args = distr_output.get_args_proj(num_cells)
        # embeddings for static and dynamic categorical features, respectively
        self.embedder = FeatureEmbedder(
            cardinalities=cardinality, embedding_dims=embedding_dimension
        )
        self.dc_embedder = FeatureEmbedder(
            cardinalities=dc_cardinality, embedding_dims=dc_embedding_dimension
        )
        if scaling:
            self.scaler = MeanScaler(keepdim=True)
        else:
            self.scaler = NOPScaler(keepdim=True)
    @staticmethod
    def get_lagged_subsequences(
        sequence: torch.Tensor,
        sequence_length: int,
        indices: List[int],
        subsequences_length: int = 1,
    ) -> torch.Tensor:
        """
        Returns lagged subsequences of a given sequence.
        Parameters
        ----------
        sequence : Tensor
            the sequence from which lagged subsequences should be extracted.
            Shape: (N, T, C).
        sequence_length : int
            length of sequence in the T (time) dimension (axis = 1).
        indices : List[int]
            list of lag indices to be used.
        subsequences_length : int
            length of the subsequences to be extracted.
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, S, C, I), where S = subsequences_length and
            I = len(indices), containing lagged subsequences. Specifically,
            lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
        """
        assert max(indices) + subsequences_length <= sequence_length, (
            f"lags cannot go further than history length, found lag {max(indices)} "
            f"while history length is only {sequence_length}"
        )
        assert all(lag_index >= 0 for lag_index in indices)
        lagged_values = []
        for lag_index in indices:
            # slice [-lag-S : -lag] from the end; for lag 0 the end bound
            # must be None (":-0" would yield an empty slice)
            begin_index = -lag_index - subsequences_length
            end_index = -lag_index if lag_index > 0 else None
            lagged_values.append(sequence[:, begin_index:end_index, ...])
        # stack lags along a new trailing axis -> (N, S, C, I)
        return torch.stack(lagged_values, dim=-1)
    @staticmethod
    def get_moving_average(
        accumlated_sum: torch.Tensor,
        moving_avg_windows: List[int],
    ) -> torch.Tensor:
        """
        Returns lagged moving average of a given sequence.
        Parameters
        ----------
        accumlated_sum : Tensor
            the accumulated sum of target sequence.
            Shape: (N, T, C)
        moving_avg_windows: List[int]
            list of window size for averaging
        Returns
        --------
        lagged : Tensor
            a tensor of shape (N, T, C, I), where I = len(moving_avg_windows), containing moving_average sequences.
        """
        averaged_seqs = []
        for w in moving_avg_windows:
            moving_avg = torch.zeros_like(accumlated_sum)
            # positions with fewer than w preceding values have no full
            # window — they are marked NaN rather than partially averaged
            moving_avg[...] = np.nan
            # window sum via difference of cumulative sums, then normalize
            moving_avg[:, w:, ...] = accumlated_sum[:, w:, ...] - accumlated_sum[:, :-w, ...]
            moving_avg /= w
            averaged_seqs.append(moving_avg)
        # one moving-average channel per window size -> (N, T, C, I)
        return torch.stack(averaged_seqs, dim=-1)
def unroll_encoder(
self,
feat_static_cat: torch.Tensor, # (batch_size, num_features)
feat_static_real: torch.Tensor, # (batch_size, num_features)
past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)
past_target: torch.Tensor, # (batch_size, history_length, *target_shape)
past_accumulated_target: torch.Tensor, # (batch_size, history_length, *target_shape)
past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)
past_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
past_feat_dynamic_past: torch.Tensor, # (batch_size, history_length, *target_shape)
future_feat_dynamic_past: torch.Tensor, # (batch_size, history_length, *target_shape)
future_feat_dynamic_cat: torch.Tensor, # (batch_size, history_length, *target_shape)
future_time_feat: Optional[
torch.Tensor
] = None, # (batch_size, prediction_length, num_features)
future_target: Optional[
torch.Tensor
] = None, # (batch_size, prediction_length, *target_shape)
future_accumulated_target: Optional[torch.Tensor] = None, # (batch_size, prediction_length, *target_shape)
) -> Tuple[torch.Tensor, Union[torch.Tensor, List], torch.Tensor, torch.Tensor]:
if future_time_feat is None or future_target is None:
time_feat = past_time_feat[
:, self.history_length - self.context_length :, ...
]
feat_dynamic_cat = past_feat_dynamic_cat[
:, self.history_length - self.context_length :, ...
]
feat_dynamic_past = past_feat_dynamic_past
accumlated_sequence = past_accumulated_target
sequence = past_target
sequence_length = self.history_length
subsequences_length = self.context_length
else:
time_feat = torch.cat(
(
past_time_feat[:, self.history_length - self.context_length :, ...],
future_time_feat,
),
dim=1
)
feat_dynamic_cat = torch.cat(
(
past_feat_dynamic_cat[:, self.history_length - self.context_length :, ...],
future_feat_dynamic_cat,
),
dim=1
)
feat_dynamic_past = torch.cat((past_feat_dynamic_past, future_feat_dynamic_past), dim=1)
accumlated_sequence = torch.cat((past_accumulated_target, future_accumulated_target), dim=1)
sequence = torch.cat((past_target, future_target), dim=1)
sequence_length = self.history_length + self.prediction_length
subsequences_length = self.context_length + self.prediction_length
#apply lag to feat_dynamic_past for aligning with target-lag
feat_dynamic_past_lags = self.get_lagged_subsequences(
sequence=feat_dynamic_past,
sequence_length=sequence_length,
indices=[min(self.lags_seq)],
subsequences_length=subsequences_length
).squeeze(-1) # (batch_size, subsequences_length, num_features)
# moving average
if len(self.moving_avg_windows) == 0:
merged_sequence = sequence
else:
moving_avg = self.get_moving_average(
accumlated_sum=accumlated_sequence,
moving_avg_windows=self.moving_avg_windows
)
merged_sequence = torch.cat((sequence.unsqueeze(-1) if len(self.target_shape) == 0 else sequence, moving_avg), dim=-1)
# apply lags
lags = self.get_lagged_subsequences(
sequence=merged_sequence,
sequence_length=sequence_length,
indices=self.lags_seq,
subsequences_length=subsequences_length
)
# from (batch_size, sub_seq_len, *target_shape, num_lags)
# to (batch_size, sub_seq_len, prod(target_shape) * num_lags)
input_lags = lags.reshape(
(-1, subsequences_length, len(self.lags_seq) * (1 + len(self.moving_avg_windows)) * prod(self.target_shape))
) # [Note] 모든 lags에 대한 sequence를 생성
# embdding dynamic category features
embedded_dynamic_cat = self.dc_embedder(feat_dynamic_cat)
# scale is computed on the context length last units of the past target
# scale shape is (batch_size, 1, *target_shape)
_, scale = self.scaler(
past_target[:, -self.context_length :, ...],
past_observed_values[:, -self.context_length :, ...],
)
# (batch_size, num_features)
embedded_cat = self.embedder(feat_static_cat)
# (batch_size, num_features + prod(target_shape))
static_feat = torch.cat( (embedded_cat, feat_static_real), dim=1)
if not self.scaling:
# use the log scale as it can help prediction
static_feat = torch.cat(
(
static_feat,
scale.log() if len(self.target_shape) == 0 else scale.squeeze(1).log(),
),
dim=1,
)
# (batch_size, subsequences_length, num_features + 1)
repeated_static_feat = static_feat.unsqueeze(1).expand(
-1, subsequences_length, -1
)
# (batch_size, sub_seq_len, *target_shape, num_lags)
input_lags = input_lags / scale.unsqueeze(-1)
# (batch_size, sub_seq_len, input_dim)
inputs = torch.cat((input_lags, time_feat, embedded_dynamic_cat, repeated_static_feat, feat_dynamic_past_lags), dim=-1)
# unroll encoder
self.rnn.flatten_parameters() # resovle warining on multi-gpu training
outputs, state = self.rnn(inputs) # [Note] (batch, seq_len, input_size) 순서 batch_first = True로 설정되어 있음
# outputs: (batch_size, seq_len, num_cells)
# state: list of (num_layers, batch_size, num_cells) tensors
# scale: (batch_size, 1, *target_shape)
# static_feat: (batch_size, num_features + prod(target_shape))
return outputs, state, scale, static_feat
class RolledDeepARTrainingNetwork(DeepARNetwork):
    """Training network that computes the loss with *rolled* decoding.

    Unlike teacher-forced DeepAR training, each of the ``prediction_length``
    future steps is decoded one at a time, conditioned on samples drawn from
    the model's own predictive distribution at the previous steps (same
    procedure as the prediction network, but accumulating a log-likelihood
    loss against the true future targets).
    """

    def forward(
        self,
        feat_static_cat: torch.Tensor,
        feat_static_real: torch.Tensor,
        past_time_feat: torch.Tensor,
        past_target: torch.Tensor,
        past_accumulated_target: torch.Tensor,
        past_observed_values: torch.Tensor,
        past_feat_dynamic_cat: torch.Tensor,
        past_feat_dynamic_past: torch.Tensor,
        future_feat_dynamic_past: torch.Tensor,
        future_feat_dynamic_cat: torch.Tensor,
        future_time_feat: torch.Tensor,
        future_target: torch.Tensor,
        future_accumulated_target: torch.Tensor,
        future_observed_values: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute the rolled-decoding training loss.

        Returns
        -------
        weighted_loss : torch.Tensor
            Negative log-likelihood of the future targets, averaged with
            ``future_observed_values`` as weights.
        error : torch.Tensor
            Detached per-step error ``future_target - predicted_mean``,
            for monitoring only.
        """
        # Encode the conditioning range only (all `future_*` args are None,
        # so unroll_encoder uses just the context window).
        rnn_outputs, state, scale, static_feat = self.unroll_encoder(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_observed_values=past_observed_values,
            past_feat_dynamic_cat=past_feat_dynamic_cat,
            past_feat_dynamic_past=past_feat_dynamic_past,
            future_feat_dynamic_past=None,
            future_feat_dynamic_cat=None,
            future_time_feat=None,
            future_target=None,
            future_accumulated_target=None,
        )
        #distr_args = self.proj_distr_args(rnn_outputs)
        #distr = self.distr_output.distribution(distr_args)
        target_scaled = future_target / scale # use normalized target for training
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        # NOTE(review): here no repeat_interleave is applied (unlike the
        # prediction network), so "repeated_*" tensors are just aliases and
        # effectively one sample path per batch element is rolled.
        repeated_past_target = past_target
        repeated_past_accumulated_target = past_accumulated_target
        repeated_feat_dynamic_past = past_feat_dynamic_past
        embedded_dynamic_cat = self.dc_embedder(future_feat_dynamic_cat)
        repeated_embedded_dynamic_cat = embedded_dynamic_cat
        repeated_time_feat = future_time_feat
        repeated_static_feat = static_feat.unsqueeze(1)
        repeated_scale = scale
        repeated_states = state
        future_samples = []
        losses = []
        # [Note] shift lags forward by one step for prediction (so lags start at 0:
        # at the first decoding step a lag of one is the last observed target)
        self.shifted_lags = [l - 1 for l in self.lags_seq]
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # lagged zero-sale-period feature, aligned with the target lags
            feat_dynamic_past_lags = self.get_lagged_subsequences(
                sequence=repeated_feat_dynamic_past,
                sequence_length=self.history_length + k,
                indices=[min(self.shifted_lags)],
                subsequences_length=1
            ).squeeze(-1)  # (batch_size, 1, num_features)
            if len(self.moving_avg_windows) == 0:
                merged_repeated_past_target = repeated_past_target
            else:
                # moving average features derived from the accumulated sums
                repeated_moving_avg = self.get_moving_average(
                    accumlated_sum=repeated_past_accumulated_target,
                    moving_avg_windows=self.moving_avg_windows
                )
                merged_repeated_past_target = torch.cat((repeated_past_target.unsqueeze(-1) if len(self.target_shape) == 0 else repeated_past_target, repeated_moving_avg), dim=-1)
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                sequence=merged_repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = lags.reshape(
                (-1, 1, prod(self.target_shape) * len(self.lags_seq) * (1 + len(self.moving_avg_windows)))
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            input_lags = input_lags / repeated_scale.unsqueeze(-1)
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = torch.cat(
                (input_lags, repeated_time_feat[:, k : k + 1, :], repeated_embedded_dynamic_cat[:, k : k + 1, :], repeated_static_feat, feat_dynamic_past_lags),
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(distr_args, scale=repeated_scale)
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1) # [Note] rolling prediction
            # the monitored prediction is the distribution mean (not the sample)
            future_samples.append(distr.mean)
            # rolling feat_dynamic_past (zero-sale period): increment the
            # counter, reset it wherever a (near) non-zero sale was sampled
            future_feat_dynamic_past = repeated_feat_dynamic_past[:,[-1],:] + 1
            future_feat_dynamic_past[new_samples > 0.5] = 0
            repeated_feat_dynamic_past = torch.cat((repeated_feat_dynamic_past, future_feat_dynamic_past), dim=1)
            # rolling accumulated target
            future_accumulated_target = repeated_past_accumulated_target[:,[-1],...] + new_samples
            repeated_past_accumulated_target = torch.cat((repeated_past_accumulated_target, future_accumulated_target), dim=1)
            # per-step negative log-likelihood of the (scaled) true target
            losses.append(-distr.log_prob(target_scaled[:,k:k+1]))
        loss = torch.cat(losses, dim=1)
        # mask the loss at one time step iff one or more observations is missing in the target dimensions
        # (batch_size, seq_len)
        loss_weights = (
            future_observed_values
            if (len(self.target_shape) == 0)
            else future_observed_values.min(dim=-1, keepdim=False)[0]
        )
        weighted_loss = weighted_average(loss, weights=loss_weights)
        # for mornitoring
        predicted = torch.cat(future_samples, dim=1)
        true_label = future_target # first true_label (assumed lag-1)
        error = true_label - predicted
        return weighted_loss, error.detach()
class DeepARPredictionNetwork(DeepARNetwork):
    """Inference network: encodes the conditioning range once, then draws
    ``num_parallel_samples`` sample paths autoregressively per series."""

    @validated()
    def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_parallel_samples = num_parallel_samples
        # for decoding the lags are shifted by one, at the first time-step
        # of the decoder a lag of one corresponds to the last target value
        self.shifted_lags = [l - 1 for l in self.lags_seq]  # [Note] shift lags by one step for prediction (lags start at 0)

    def sampling_decoder(
        self,
        static_feat: torch.Tensor,
        past_target: torch.Tensor,
        past_accumulated_target: torch.Tensor,
        past_feat_dynamic_past: torch.Tensor,
        time_feat: torch.Tensor,
        dynamic_cat_feat: torch.Tensor,
        scale: torch.Tensor,
        begin_states: Union[torch.Tensor, List[torch.Tensor]],
    ) -> torch.Tensor:
        """
        Computes sample paths by unrolling the RNN starting with a initial
        input and state.
        Parameters
        ----------
        static_feat : Tensor
            static features. Shape: (batch_size, num_static_features).
        past_target : Tensor
            target history. Shape: (batch_size, history_length).
        past_accumulated_target : Tensor
            running cumulative sums of the target, used for moving averages.
        past_feat_dynamic_past : Tensor
            rolling "zero-sale period" counter feature.
        time_feat : Tensor
            time features. Shape: (batch_size, prediction_length, num_time_features).
        dynamic_cat_feat : Tensor
            dynamic categorical features for the prediction range.
        scale : Tensor
            tensor containing the scale of each element in the batch. Shape: (batch_size, 1, 1).
        begin_states : List or Tensor
            list of initial states for the LSTM layers or tensor for GRU.
            the shape of each tensor of the list should be (num_layers, batch_size, num_cells)
        Returns
        --------
        Tensor
            A tensor containing sampled paths.
            Shape: (batch_size, num_sample_paths, prediction_length).
        """
        # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism
        repeated_past_target = past_target.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_past_accumulated_target = past_accumulated_target.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_feat_dynamic_past = past_feat_dynamic_past.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_embedded_dynamic_cat = self.dc_embedder(dynamic_cat_feat).repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        ) if False else None  # placeholder removed below
        # (embedding computed once, then repeated per sample path)
        embedded_dynamic_cat = self.dc_embedder(dynamic_cat_feat)
        repeated_embedded_dynamic_cat = embedded_dynamic_cat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_time_feat = time_feat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        repeated_static_feat = static_feat.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        ).unsqueeze(1)
        repeated_scale = scale.repeat_interleave(
            repeats=self.num_parallel_samples, dim=0
        )
        # LSTM state is a (h, c) pair of tensors; GRU state is a single tensor
        if self.cell_type == "LSTM":
            repeated_states = [
                s.repeat_interleave(repeats=self.num_parallel_samples, dim=1)
                for s in begin_states
            ]
        else:
            repeated_states = begin_states.repeat_interleave(
                repeats=self.num_parallel_samples, dim=1
            )
        future_samples = []
        # for each future time-units we draw new samples for this time-unit and update the state
        for k in range(self.prediction_length):
            # lagged zero-sale-period feature, aligned with the target lags
            feat_dynamic_past_lags = self.get_lagged_subsequences(
                sequence=repeated_feat_dynamic_past,
                sequence_length=self.history_length + k,
                indices=[min(self.shifted_lags)],
                subsequences_length=1
            ).squeeze(-1)  # (batch_size, 1, num_features)
            if len(self.moving_avg_windows) == 0:
                merged_repeated_past_target = repeated_past_target
            else:
                # moving average features derived from the accumulated sums
                repeated_moving_avg = self.get_moving_average(
                    accumlated_sum=repeated_past_accumulated_target,
                    moving_avg_windows=self.moving_avg_windows
                )
                merged_repeated_past_target = torch.cat((repeated_past_target.unsqueeze(-1) if len(self.target_shape) == 0 else repeated_past_target, repeated_moving_avg), dim=-1)
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            lags = self.get_lagged_subsequences(
                sequence=merged_repeated_past_target,
                sequence_length=self.history_length + k,
                indices=self.shifted_lags,
                subsequences_length=1,
            )
            # from (batch_size * num_samples, 1, *target_shape, num_lags)
            # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
            input_lags = lags.reshape(
                (-1, 1, prod(self.target_shape) * len(self.lags_seq) * (1 + len(self.moving_avg_windows)))
            )
            # (batch_size * num_samples, 1, *target_shape, num_lags)
            input_lags = input_lags / repeated_scale.unsqueeze(-1)
            # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)
            decoder_input = torch.cat(
                (input_lags, repeated_time_feat[:, k : k + 1, :], repeated_embedded_dynamic_cat[:, k : k + 1, :], repeated_static_feat, feat_dynamic_past_lags),
                dim=-1,
            )
            # output shape: (batch_size * num_samples, 1, num_cells)
            # state shape: (batch_size * num_samples, num_cells)
            rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)
            distr_args = self.proj_distr_args(rnn_outputs)
            # compute likelihood of target given the predicted parameters
            distr = self.distr_output.distribution(distr_args, scale=repeated_scale)
            # (batch_size * num_samples, 1, *target_shape)
            new_samples = distr.sample()  # [Note] draw one sample per path
            # (batch_size * num_samples, seq_len, *target_shape)
            repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1) # [Note] rolling prediction
            future_samples.append(new_samples)
            # rolling feat_dynamic_past (zero-sale period): increment the
            # counter, reset it wherever a (near) non-zero sale was sampled
            future_feat_dynamic_past = repeated_feat_dynamic_past[:,[-1],:] + 1
            future_feat_dynamic_past[new_samples > 0.5] = 0
            repeated_feat_dynamic_past = torch.cat((repeated_feat_dynamic_past, future_feat_dynamic_past), dim=1)
            # rolling accumulated target
            future_accumulated_target = repeated_past_accumulated_target[:,[-1],...] + new_samples
            repeated_past_accumulated_target = torch.cat((repeated_past_accumulated_target, future_accumulated_target), dim=1)
        # (batch_size * num_samples, prediction_length, *target_shape)
        samples = torch.cat(future_samples, dim=1)
        # (batch_size, num_samples, prediction_length, *target_shape)
        return samples.reshape(
            (
                (-1, self.num_parallel_samples)
                + (self.prediction_length,)
                + self.target_shape
            )
        )

    # noinspection PyMethodOverriding,PyPep8Naming
    def forward(
        self,
        feat_static_cat: torch.Tensor,  # (batch_size, num_features)
        feat_static_real: torch.Tensor,  # (batch_size, num_features)
        past_time_feat: torch.Tensor,  # (batch_size, history_length, num_features)
        past_target: torch.Tensor,  # (batch_size, history_length, *target_shape)
        past_accumulated_target: torch.Tensor,  # (batch_size, history_length, *target_shape)
        past_observed_values: torch.Tensor,  # (batch_size, history_length, *target_shape)
        past_feat_dynamic_cat: torch.Tensor,  # (batch_size, history_length, *target_shape)
        past_feat_dynamic_past: torch.Tensor,  # (batch_size, history_length, *target_shape)
        future_feat_dynamic_cat: torch.Tensor,  # (batch_size, history_length, *target_shape)
        future_time_feat: torch.Tensor,  # (batch_size, prediction_length, num_features)
    ) -> torch.Tensor:
        """
        Predicts samples, all tensors should have NTC layout.
        Parameters
        ----------
        feat_static_cat : (batch_size, num_features)
        feat_static_real : (batch_size, num_features)
        past_time_feat : (batch_size, history_length, num_features)
        past_target : (batch_size, history_length, *target_shape)
        past_observed_values : (batch_size, history_length, *target_shape)
        future_time_feat : (batch_size, prediction_length, num_features)
        Returns
        -------
        Tensor
            Predicted samples
        """
        # unroll the decoder in "prediction mode", i.e. with past data only
        _, state, scale, static_feat = self.unroll_encoder(
            feat_static_cat=feat_static_cat,
            feat_static_real=feat_static_real,
            past_time_feat=past_time_feat,
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_observed_values=past_observed_values,
            past_feat_dynamic_cat=past_feat_dynamic_cat,
            past_feat_dynamic_past=past_feat_dynamic_past,
            future_feat_dynamic_past=None,
            future_feat_dynamic_cat=None,
            future_time_feat=None,
            future_target=None,
            future_accumulated_target=None,
        )
        return self.sampling_decoder(
            past_target=past_target,
            past_accumulated_target=past_accumulated_target,
            past_feat_dynamic_past=past_feat_dynamic_past,
            time_feat=future_time_feat,
            dynamic_cat_feat=future_feat_dynamic_cat,
            static_feat=static_feat,
            scale=scale,
            begin_states=state,
        )
| 28,509 | 41.936747 | 179 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/model/deepar/deepar_estimator.py | from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from pts.core.component import validated
from pts import Trainer
from pts.dataset import FieldName
from pts.feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from pts.model import PTSEstimator, Predictor, PTSPredictor, copy_parameters
from pts.modules import DistributionOutput, StudentTOutput
from pts.transform import (
Transformation,
Chain,
RemoveFields,
SetField,
AsNumpyArray,
AddObservedValuesIndicator,
AddTimeFeatures,
AddAgeFeature,
VstackFeatures,
InstanceSplitter,
#ExpectedNumInstanceSampler,
ExactNumInstanceSampler
)
from .deepar_network import RolledDeepARTrainingNetwork, DeepARPredictionNetwork
class DeepAREstimator(PTSEstimator):
    """DeepAR estimator with rolled (autoregressive) training, optional
    moving-average input features, dynamic categorical features and a
    rolling "zero-sale period" feature.

    The training network is :class:`RolledDeepARTrainingNetwork`; prediction
    uses :class:`DeepARPredictionNetwork`.
    """

    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        input_size: int,
        trainer: Trainer = Trainer(),
        context_length: Optional[int] = None,  # [Note] length of the conditioning (past) window
        num_layers: int = 2,
        num_cells: int = 40,
        cell_type: str = "LSTM",
        dropout_rate: float = 0.1,
        use_feat_dynamic_real: bool = False,
        use_feat_dynamic_cat: bool = False,
        use_feat_static_cat: bool = False,
        use_feat_static_real: bool = False,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        dc_cardinality: Optional[List[int]] = None,
        dc_embedding_dimension: Optional[List[int]] = None,
        distr_output: DistributionOutput = StudentTOutput(),
        scaling: bool = True,
        lags_seq: Optional[List[int]] = None,  # [Note] lag indices, typically the recurring periods of the series
        moving_avg_windows: Optional[List[int]] = None,  # window sizes of the moving-average features
        time_features: Optional[List[TimeFeature]] = None,
        pick_incomplete: bool = True,
        num_parallel_samples: int = 100,
        dtype: np.dtype = np.float32,
    ) -> None:
        super().__init__(trainer=trainer)
        self.freq = freq
        self.context_length = (
            context_length if context_length is not None else prediction_length
        )
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.distr_output.dtype = dtype
        # BUG FIX: `dtype` was accepted but never stored, although
        # create_transformation()/create_training_network()/create_predictor()
        # all read `self.dtype`.
        self.dtype = dtype
        self.input_size = input_size
        self.num_layers = num_layers
        self.num_cells = num_cells
        self.cell_type = cell_type
        self.dropout_rate = dropout_rate
        self.use_feat_dynamic_real = use_feat_dynamic_real
        self.use_feat_dynamic_cat = use_feat_dynamic_cat
        self.use_feat_static_cat = use_feat_static_cat
        self.use_feat_static_real = use_feat_static_real
        self.cardinality = cardinality if cardinality and use_feat_static_cat else [1]
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None
            else [min(50, (cat + 1) // 2) for cat in self.cardinality]
        )
        self.dc_cardinality = dc_cardinality
        self.dc_embedding_dimension = dc_embedding_dimension
        self.scaling = scaling
        self.lags_seq = (
            lags_seq if lags_seq is not None else get_lags_for_frequency(freq_str=freq)
        )
        self.time_features = (
            time_features
            if time_features is not None
            else time_features_from_frequency_str(self.freq)
        )
        # Normalized from None to avoid the shared mutable-default pitfall;
        # behavior is unchanged (no windows by default).
        self.moving_avg_windows = (
            moving_avg_windows if moving_avg_windows is not None else []
        )
        # The history must cover the context window plus the largest lag and
        # the largest moving-average window.
        self.history_length = (
            self.context_length
            + max(self.lags_seq)
            + (max(self.moving_avg_windows) if len(self.moving_avg_windows) > 0 else 0)
        )
        self.num_parallel_samples = num_parallel_samples
        self.pick_incomplete = pick_incomplete

    def create_transformation(self) -> Transformation:
        """Build the data pipeline: drop unused fields, add observed-value
        indicator, time/age features, and split instances into
        past/future windows of length history_length/prediction_length."""
        remove_field_names = []
        if not self.use_feat_static_real:
            remove_field_names.append(FieldName.FEAT_STATIC_REAL)
        if not self.use_feat_dynamic_real:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
        if not self.use_feat_dynamic_cat:
            remove_field_names.append(FieldName.FEAT_DYNAMIC_CAT)
        return Chain(
            [RemoveFields(field_names=remove_field_names)]
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0])]
                if not self.use_feat_static_cat
                else []
            )
            + (
                [SetField(output_field=FieldName.FEAT_STATIC_REAL, value=[0.0])]
                if not self.use_feat_static_real
                else []
            )
            + [
                AsNumpyArray(
                    # np.int64 instead of the removed alias np.long
                    # (identical semantics for embedding indices)
                    field=FieldName.FEAT_STATIC_CAT, expected_ndim=1, dtype=np.int64,
                ),
                AsNumpyArray(
                    field=FieldName.FEAT_STATIC_REAL, expected_ndim=1, dtype=self.dtype,
                ),
                AsNumpyArray(
                    field=FieldName.TARGET,
                    # in the following line, we add 1 for the time dimension
                    expected_ndim=1 + len(self.distr_output.event_shape),
                    dtype=self.dtype,
                ),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
                AddTimeFeatures(
                    start_field=FieldName.START,
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_TIME,
                    time_features=self.time_features,
                    pred_length=self.prediction_length,
                ),
                AddAgeFeature(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.FEAT_AGE,
                    pred_length=self.prediction_length,
                    log_scale=True,  # [Note] age feature on a log scale
                    dtype=self.dtype,
                ),
                VstackFeatures(
                    output_field=FieldName.FEAT_TIME,
                    input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
                    + (
                        [FieldName.FEAT_DYNAMIC_REAL]
                        if self.use_feat_dynamic_real
                        else []
                    )
                ),
                InstanceSplitter(
                    target_field=FieldName.TARGET,
                    is_pad_field=FieldName.IS_PAD,
                    start_field=FieldName.START,
                    forecast_start_field=FieldName.FORECAST_START,
                    train_sampler=ExactNumInstanceSampler(num_instances=1),
                    past_length=self.history_length,
                    future_length=self.prediction_length,
                    time_series_fields=[
                        FieldName.FEAT_TIME,
                        FieldName.ACC_TARGET_SUM,
                        FieldName.FEAT_DYNAMIC_CAT,
                        FieldName.FEAT_DYNAMIC_PAST,
                        FieldName.OBSERVED_VALUES,
                    ],
                    pick_incomplete=self.pick_incomplete,
                ),
            ]
        )

    def create_training_network(self, device: torch.device) -> RolledDeepARTrainingNetwork:
        """Instantiate the rolled-decoding training network on `device`."""
        return RolledDeepARTrainingNetwork(
            input_size=self.input_size,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            dc_cardinality=self.dc_cardinality,
            dc_embedding_dimension=self.dc_embedding_dimension,
            lags_seq=self.lags_seq,
            moving_avg_windows=self.moving_avg_windows,
            scaling=self.scaling,
            dtype=self.dtype,
        ).to(device)

    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: nn.Module,
        device: torch.device,
    ) -> Predictor:
        """Copy the trained weights into a sampling prediction network and
        wrap it in a PTSPredictor."""
        prediction_network = DeepARPredictionNetwork(
            num_parallel_samples=self.num_parallel_samples,
            input_size=self.input_size,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            history_length=self.history_length,
            context_length=self.context_length,
            prediction_length=self.prediction_length,
            distr_output=self.distr_output,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            dc_cardinality=self.dc_cardinality,
            dc_embedding_dimension=self.dc_embedding_dimension,
            lags_seq=self.lags_seq,
            moving_avg_windows=self.moving_avg_windows,
            scaling=self.scaling,
            dtype=self.dtype,
        ).to(device)
        copy_parameters(trained_network, prediction_network)
        return PTSPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            device=device,
            dtype=self.dtype,
            # forecast_generator=SampleForecastGenerator()  # [Note] the default generator draws samples
        )
| 9,762 | 38.686992 | 144 | py |
NM-sparsity | NM-sparsity-main/devkit/core/dist_utils.py | import os
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
__all__ = [
'init_dist', 'broadcast_params','average_gradients']
def init_dist(backend='nccl',
              master_ip='127.0.0.1',
              port=29500):
    """Initialize torch.distributed for one process of a multi-GPU job.

    Reads the process rank and world size from the RANK and WORLD_SIZE
    environment variables (they must be set by the launcher, e.g.
    torch.distributed.launch), sets MASTER_ADDR/MASTER_PORT for the
    rendezvous, pins this process to GPU `rank % num_gpus`, and creates
    the default process group.

    Parameters
    ----------
    backend : str
        torch.distributed backend name ('nccl' by default).
    master_ip : str
        address of the rank-0 host.
    port : int
        rendezvous TCP port.

    Returns
    -------
    (rank, world_size) : tuple of int
    """
    # 'spawn' avoids forking CUDA-initialized processes; only set it once.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    os.environ['MASTER_ADDR'] = master_ip
    os.environ['MASTER_PORT'] = str(port)
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    # one process per GPU, assigned round-robin by rank
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend)
    return rank, world_size
def average_gradients(model):
    """Sum the gradients of all trainable parameters across workers.

    Despite the name, gradients are all-reduced with the default SUM op
    (no division by world size happens here). Parameters that are frozen
    or have no gradient yet are skipped.
    """
    for p in model.parameters():
        if not p.requires_grad:
            continue
        if p.grad is None:
            continue
        dist.all_reduce(p.grad.data)
def broadcast_params(model):
    """Broadcast every tensor in the model's state_dict from rank 0,
    so all workers start from identical parameters and buffers."""
    for tensor in model.state_dict().values():
        dist.broadcast(tensor, 0)
| 945 | 28.5625 | 60 | py |
NM-sparsity | NM-sparsity-main/devkit/core/utils.py | import torch
import os
import shutil
def save_checkpoint(model_dir, state, is_best):
    """Save a training checkpoint and update the 'checkpoint' pointer file.

    Writes `state` to ``model_dir/model.pth-<epoch>``, records that path in
    ``model_dir/checkpoint`` (read back by load_state), and, if `is_best`,
    copies the checkpoint to ``model_dir/model-best.pth``.

    Parameters
    ----------
    model_dir : str
        destination directory (must exist).
    state : dict
        checkpoint payload; must contain an 'epoch' key.
    is_best : bool
        whether this checkpoint is the best so far.
    """
    epoch = state['epoch']
    path = os.path.join(model_dir, 'model.pth-' + str(epoch))
    torch.save(state, path)
    checkpoint_file = os.path.join(model_dir, 'checkpoint')
    # FIX: use a context manager so the pointer file is closed (and flushed)
    # even if the write raises.
    with open(checkpoint_file, 'w+') as checkpoint:
        checkpoint.write('model_checkpoint_path:%s\n' % path)
    if is_best:
        shutil.copyfile(path, os.path.join(model_dir, 'model-best.pth'))
def load_state(model_dir, model, optimizer=None):
    """Restore model (and optionally optimizer) state from `model_dir`.

    Follows the path recorded in ``model_dir/checkpoint`` (written by
    save_checkpoint) and loads it onto the current CUDA device.

    Parameters
    ----------
    model_dir : str
        directory containing the 'checkpoint' pointer file.
    model : torch.nn.Module
        module to load weights into (non-strict: missing keys are printed).
    optimizer : torch.optim.Optimizer, optional
        if given, its state is restored too.

    Returns
    -------
    (best_prec1, start_epoch) : tuple
        (0, 0) when no checkpoint exists. FIX: previously the function
        implicitly returned None when a checkpoint existed but no optimizer
        was passed; it now always returns a tuple.
    """
    checkpoint_file = os.path.join(model_dir, 'checkpoint')
    if not os.path.exists(checkpoint_file):
        print("=> no checkpoint found at '{}', train from scratch".format(model_dir))
        return 0, 0
    # FIX: close the pointer file (original leaked the handle).
    with open(checkpoint_file) as ckpt:
        model_path = ckpt.readlines()[0].split(':')[1].strip('\n')
    checkpoint = torch.load(model_path, map_location='cuda:{}'.format(torch.cuda.current_device()))
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    ckpt_keys = set(checkpoint['state_dict'].keys())
    own_keys = set(model.state_dict().keys())
    for k in own_keys - ckpt_keys:
        print('missing keys from checkpoint {}: {}'.format(model_dir, k))
    print("=> loaded model from checkpoint '{}'".format(model_dir))
    best_prec1 = checkpoint.get('best_prec1', 0)
    start_epoch = checkpoint.get('epoch', 0)
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> also loaded optimizer from checkpoint '{}' (epoch {})"
              .format(model_dir, start_epoch))
    return best_prec1, start_epoch
def load_state_epoch(model_dir, model, epoch):
    """Load the checkpoint saved at a specific epoch into `model`.

    Reads ``model_dir/model.pth-<epoch>`` onto the current CUDA device and
    loads it non-strictly, printing any keys the checkpoint is missing.
    """
    model_path = model_dir + '/model.pth-' + str(epoch)
    device = 'cuda:{}'.format(torch.cuda.current_device())
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    loaded_keys = set(checkpoint['state_dict'].keys())
    expected_keys = set(model.state_dict().keys())
    for key in expected_keys - loaded_keys:
        print('missing keys from checkpoint {}: {}'.format(model_dir, key))
    print("=> loaded model from checkpoint '{}'".format(model_dir))
def load_state_ckpt(model_path, model):
    """Load a checkpoint from an explicit file path into `model`.

    Loads onto the current CUDA device, non-strictly, printing any keys the
    checkpoint is missing relative to the model.
    """
    device = 'cuda:{}'.format(torch.cuda.current_device())
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    loaded_keys = set(checkpoint['state_dict'].keys())
    expected_keys = set(model.state_dict().keys())
    for key in expected_keys - loaded_keys:
        print('missing keys from checkpoint {}: {}'.format(model_path, key))
    print("=> loaded model from checkpoint '{}'".format(model_path))
| 2,861 | 40.478261 | 102 | py |
NM-sparsity | NM-sparsity-main/devkit/dataset/imagenet_dataset.py | from torch.utils.data import Dataset
from PIL import Image
import torch
def pil_loader(filename):
    """Open an image file and return its contents as an RGB PIL image."""
    with Image.open(filename) as img:
        rgb = img.convert('RGB')
    return rgb
class ImagenetDataset(Dataset):
    """ImageNet-style dataset driven by a meta file.

    Each line of `meta_file` is "<relative_path> <class_index>"; images are
    loaded from `root_dir`/<relative_path> and an optional `transform` is
    applied to each image.
    """

    def __init__(self, root_dir, meta_file, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        with open(meta_file) as f:
            lines = f.readlines()
        print("building dataset from %s" % meta_file)
        self.num = len(lines)
        parsed = (line.rstrip().split() for line in lines)
        self.metas = [(path, int(label)) for path, label in parsed]
        print("read meta done")

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        path, cls = self.metas[idx]
        img = pil_loader(self.root_dir + '/' + path)
        # apply the optional user transform
        if self.transform is not None:
            img = self.transform(img)
        return img, cls
class ColorAugmentation(object):
    """PCA-based color jitter ("fancy PCA", as in AlexNet) for CHW tensors.

    Adds to every pixel a random linear combination of the RGB principal
    components, scaled by the corresponding eigenvalues. The built-in
    eigen pairs are the commonly used ImageNet statistics.
    """

    def __init__(self, eig_vec=None, eig_val=None):
        # BUG FIX: the original used `eig_vec == None` / `eig_val == None`,
        # which performs elementwise tensor comparison when a tensor is
        # actually passed; identity comparison with `is None` is correct.
        if eig_vec is None:
            eig_vec = torch.Tensor([
                [0.4009, 0.7192, -0.5675],
                [-0.8140, -0.0045, -0.5808],
                [0.4203, -0.6948, -0.5836],
            ])
        if eig_val is None:
            eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])
        self.eig_val = eig_val  # (1, 3) eigenvalues
        self.eig_vec = eig_vec  # (3, 3) eigenvectors, one per row

    def __call__(self, tensor):
        """Return `tensor` (shape (3, H, W)) with a random color shift added."""
        assert tensor.size(0) == 3
        # per-channel Gaussian coefficients, std 0.1
        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1
        shift = torch.mm(self.eig_val * alpha, self.eig_vec)
        return tensor + shift.view(3, 1, 1)
| 1,758 | 29.327586 | 71 | py |
NM-sparsity | NM-sparsity-main/devkit/sparse_ops/sparse_ops.py | import torch
from torch import autograd, nn
import torch.nn.functional as F
from itertools import repeat
from torch._six import container_abcs
class Sparse(autograd.Function):
    """" Prune the unimprotant weight for the forwards phase but pass the gradient to dense weight using SR-STE in the backwards phase"""

    @staticmethod
    def forward(ctx, weight, N, M, decay=0.0002):
        """Zero out the M-N smallest-magnitude weights of every group of M
        consecutive weights, keeping the N largest."""
        ctx.save_for_backward(weight)
        pruned = weight.clone()
        num_groups = weight.numel() // M
        grouped = weight.detach().abs().reshape(num_groups, M)
        # indices of the M-N smallest magnitudes per group (ascending sort)
        prune_idx = torch.argsort(grouped, dim=1)[:, :int(M - N)]
        mask = torch.ones(grouped.shape, device=grouped.device)
        mask = mask.scatter_(dim=1, index=prune_idx, value=0).reshape(weight.shape)
        ctx.mask = mask
        ctx.decay = decay
        return pruned * mask

    @staticmethod
    def backward(ctx, grad_output):
        """SR-STE: pass the gradient straight through, plus a decay term that
        pushes the currently-pruned weights towards zero."""
        weight, = ctx.saved_tensors
        return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class Sparse_NHWC(autograd.Function):
    """" Prune the unimprotant edges for the forwards phase but pass the gradient to dense weight using SR-STE in the backwards phase"""

    @staticmethod
    def forward(ctx, weight, N, M, decay=0.0002):
        """N:M pruning of a conv weight grouped along the input-channel
        dimension (NCHW weight viewed as NHWC so groups of M run over C)."""
        ctx.save_for_backward(weight)
        pruned = weight.clone()
        num_groups = weight.numel() // M
        # view as NHWC so that each group of M spans the channel axis
        nhwc = weight.detach().abs().permute(0, 2, 3, 1)
        grouped = nhwc.reshape(num_groups, M)
        prune_idx = torch.argsort(grouped, dim=1)[:, :int(M - N)]
        mask = torch.ones(grouped.shape, device=grouped.device)
        mask = mask.scatter_(dim=1, index=prune_idx, value=0).reshape(nhwc.shape)
        # restore NCHW layout before applying
        mask = mask.permute(0, 3, 1, 2)
        ctx.mask = mask
        ctx.decay = decay
        return pruned * mask

    @staticmethod
    def backward(ctx, grad_output):
        """SR-STE: straight-through gradient plus decay on pruned weights."""
        weight, = ctx.saved_tensors
        return grad_output + ctx.decay * (1 - ctx.mask) * weight, None, None
class SparseConv(nn.Conv2d):
    """" implement N:M sparse convolution layer """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', N=2, M=4, **kwargs):
        # N:M pattern: keep N non-zeros in every group of M input channels
        self.N = N
        self.M = M
        super(SparseConv, self).__init__(in_channels, out_channels, kernel_size,
                                         stride, padding, dilation, groups,
                                         bias, padding_mode, **kwargs)

    def get_sparse_weights(self):
        """Return the N:M-pruned view of the dense weight (SR-STE backward)."""
        return Sparse_NHWC.apply(self.weight, self.N, self.M)

    def forward(self, x):
        sparse_weight = self.get_sparse_weights()
        return F.conv2d(x, sparse_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class SparseLinear(nn.Linear):
    """N:M sparse linear layer: the dense weight is pruned on the fly in
    forward, while gradients flow to the dense weight via SR-STE."""

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 N=2, M=2, decay=0.0002, **kwargs):
        self.N = N
        self.M = M
        # BUG FIX: `decay` was accepted but silently dropped; store it and
        # forward it to Sparse so the SR-STE strength is configurable.
        self.decay = decay
        # BUG FIX: `bias=True` was hard-coded, ignoring the `bias` argument.
        super(SparseLinear, self).__init__(in_features, out_features, bias=bias)

    def get_sparse_weights(self):
        """Return the N:M-pruned view of the dense weight."""
        return Sparse.apply(self.weight, self.N, self.M, self.decay)

    def forward(self, x):
        w = self.get_sparse_weights()
        return F.linear(x, w, self.bias)
| 3,245 | 26.982759 | 159 | py |
NM-sparsity | NM-sparsity-main/devkit/sparse_ops/syncbn_layer.py | import torch
from torch.autograd import Function
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.distributed as dist
import torch.nn as nn
class SyncBNFunc(Function):
    """Autograd function implementing cross-GPU synchronized batch norm.

    Batch statistics are all-reduced over the default process group in both
    the forward and backward pass, so every worker normalizes with the
    global-batch mean/variance. CUDA + an initialized process group are
    required; CPU input raises RuntimeError.
    """

    @staticmethod
    def forward(ctx, in_data, scale_data, shift_data, running_mean, running_var, eps, momentum, training):
        if in_data.is_cuda:
            ctx.eps = eps
            N, C, H, W = in_data.size()
            in_data = in_data.view(N, C, -1)
            # per-sample, per-channel statistics over the spatial positions
            mean_in = in_data.mean(-1, keepdim=True)
            var_in = in_data.var(-1, keepdim=True)
            # E[x^2] per sample/channel, used to combine variances across samples
            temp = var_in + mean_in ** 2
            if training:
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                sum_x = mean_bn ** 2 + var_bn
                # average mean and second moment across all workers
                dist.all_reduce(mean_bn)
                mean_bn /= dist.get_world_size()
                dist.all_reduce(sum_x)
                sum_x /= dist.get_world_size()
                var_bn = sum_x - mean_bn ** 2
                # EMA update: running = momentum * running + (1 - momentum) * batch
                running_mean.mul_(momentum)
                running_mean.add_((1 - momentum) * mean_bn.data)
                running_var.mul_(momentum)
                running_var.add_((1 - momentum) * var_bn.data)
            else:
                # eval mode: normalize with the stored running statistics
                mean_bn = torch.autograd.Variable(running_mean)
                var_bn = torch.autograd.Variable(running_var)
            x_hat = (in_data - mean_bn) / (var_bn + ctx.eps).sqrt()
            x_hat = x_hat.view(N, C, H, W)
            out_data = x_hat * scale_data + shift_data
            ctx.save_for_backward(in_data.data, scale_data.data, x_hat.data, mean_bn.data, var_bn.data)
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        return out_data

    @staticmethod
    def backward(ctx, grad_outdata):
        if grad_outdata.is_cuda:
            in_data, scale_data, x_hat, mean_bn, var_bn = ctx.saved_tensors
            N, C, H, W = grad_outdata.size()
            # gradients w.r.t. scale (gamma) and shift (beta), summed over
            # batch and spatial dims, then all-reduced across workers
            scaleDiff = torch.sum(grad_outdata * x_hat, [0, 2, 3], keepdim=True)
            shiftDiff = torch.sum(grad_outdata, [0, 2, 3], keepdim=True)
            dist.all_reduce(scaleDiff)
            dist.all_reduce(shiftDiff)
            # input gradient of BN with the global batch size N*H*W*world_size
            inDiff = scale_data / (var_bn.view(1, C, 1, 1) + ctx.eps).sqrt() * (grad_outdata - 1 / (N * H * W * dist.get_world_size()) * (scaleDiff * x_hat + shiftDiff))
        else:
            raise RuntimeError('SyncBNFunc only support CUDA computation!')
        return inDiff, scaleDiff, shiftDiff, None, None, None, None, None
class SyncBatchNorm2d(Module):
    """BatchNorm2d whose batch statistics are synchronized across workers.

    Buffers are stored with shape (1, C, 1) to match the flattened
    (N, C, H*W) view used inside SyncBNFunc.

    Args:
        num_features: number of channels C.
        eps: numerical-stability constant added to the variance.
        momentum: running-stat decay (running = momentum*running + (1-momentum)*batch).
        last_gamma: if True, initialise the scale to 0 (zero-init of the
            last BN in a residual branch).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.9, last_gamma=False):
        super(SyncBatchNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.last_gamma = last_gamma

        self.weight = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.bias = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
        self.register_buffer('running_var', torch.ones(1, num_features, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Reset running statistics and affine parameters."""
        self.running_mean.zero_()
        # BUG FIX: the running variance must reset to 1 (unit variance), not 0.
        # A zero running variance makes eval-mode normalisation divide by
        # sqrt(eps); this also matches the buffer's torch.ones registration
        # and nn.BatchNorm2d.reset_running_stats.
        self.running_var.fill_(1)
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()

    def __repr__(self):
        # BUG FIX: the old format string referenced a non-existent `affine`
        # attribute via **self.__dict__ and raised KeyError.
        return '{}({}, eps={}, momentum={})'.format(
            self.__class__.__name__, self.num_features, self.eps, self.momentum)

    def forward(self, in_data):
        return SyncBNFunc.apply(
            in_data, self.weight, self.bias, self.running_mean,
            self.running_var, self.eps, self.momentum, self.training)
| 3,824 | 37.25 | 159 | py |
NM-sparsity | NM-sparsity-main/classification/train_imagenet.py | from __future__ import division
import argparse
import os
import time
import torch.distributed as dist
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import yaml
import sys
from tensorboardX import SummaryWriter
import models
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../')))
from devkit.core import (init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler)
from devkit.dataset.imagenet_dataset import ColorAugmentation, ImagenetDataset
# Command-line interface. Note: most hyper-parameters come from the YAML
# config file and are merged into `args` inside main(), overriding these.
parser = argparse.ArgumentParser(
    description='Pytorch Imagenet Training')
parser.add_argument('--config', default='configs/config_resnet50_2:4.yaml')
parser.add_argument("--local_rank", type=int)
parser.add_argument(
    '--port', default=29500, type=int, help='port of server')
parser.add_argument('--world-size', default=1, type=int)
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--model_dir', type=str)
parser.add_argument('--resume_from', default='', help='resume_from')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
args = parser.parse_args()
def main():
    """Entry point: merge the YAML config, set up distributed training,
    build the model and data loaders, then train (or only evaluate).

    Side effects: rebinds the module-level ``args`` (YAML values override
    CLI values) and ``best_prec1``.
    """
    global args, best_prec1
    args = parser.parse_args()

    # Merge every key of the YAML config into `args`.
    with open(args.config) as f:
        # safe_load: the config is pure data, and yaml.load() without an
        # explicit Loader is unsafe and removed in PyYAML >= 6.
        config = yaml.safe_load(f)
    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)

    print('Enabled distributed training.')

    rank, world_size = init_dist(
        backend='nccl', port=args.port)
    args.rank = rank
    args.world_size = world_size

    # create model
    print("=> creating model '{}'".format(args.model))
    model = models.__dict__[args.model](N=args.N, M=args.M)
    model.cuda()
    # Sync initial weights from rank 0 to all workers.
    broadcast_params(model)
    print(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # auto resume from a checkpoint
    model_dir = args.model_dir
    start_epoch = 0
    if args.rank == 0 and not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if args.evaluate:
        load_state_ckpt(args.checkpoint_path, model)
    else:
        best_prec1, start_epoch = load_state(model_dir, model, optimizer=optimizer)

    # Only rank 0 writes TensorBoard summaries.
    if args.rank == 0:
        writer = SummaryWriter(model_dir)
    else:
        writer = None

    cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = ImagenetDataset(
        args.train_root,
        args.train_source,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ColorAugmentation(),
            normalize,
        ]))
    val_dataset = ImagenetDataset(
        args.val_root,
        args.val_source,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    # Per-process batch size is the global batch size divided by world size.
    train_sampler = DistributedSampler(train_dataset)
    val_sampler = DistributedSampler(val_dataset)
    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size//args.world_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=train_sampler)
    val_loader = DataLoader(
        val_dataset, batch_size=args.batch_size//args.world_size, shuffle=False,
        num_workers=args.workers, pin_memory=False, sampler=val_sampler)

    if args.evaluate:
        validate(val_loader, model, criterion, 0, writer)
        return

    niters = len(train_loader)
    lr_scheduler = LRScheduler(optimizer, niters, args)

    for epoch in range(start_epoch, args.epochs):
        # Re-seed the sampler so shuffling differs per epoch.
        train_sampler.set_epoch(epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, writer)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch, writer)

        if rank == 0:
            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(model_dir, {
                'epoch': epoch + 1,
                'model': args.model,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, writer):
    """Run one distributed training epoch.

    Loss/accuracy meters record values averaged (all-reduced) across workers;
    TensorBoard logging and console printing happen on rank 0 only.
    (Removed an unused `SAD` AverageMeter from the original.)
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    world_size = args.world_size
    rank = args.rank

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        lr_scheduler.update(i, epoch)
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input.cuda())
        target_var = torch.autograd.Variable(target)

        # compute output; divide by world_size so all-reduced gradients
        # end up averaged rather than summed.
        output = model(input_var)
        loss = criterion(output, target_var) / world_size

        # measure accuracy and record the globally reduced loss/accuracy
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        reduced_loss = loss.data.clone()
        reduced_prec1 = prec1.clone() / world_size
        reduced_prec5 = prec5.clone() / world_size
        dist.all_reduce_multigpu([reduced_loss])
        dist.all_reduce_multigpu([reduced_prec1])
        dist.all_reduce_multigpu([reduced_prec5])

        losses.update(reduced_loss.item(), input.size(0))
        top1.update(reduced_prec1.item(), input.size(0))
        top5.update(reduced_prec5.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        average_gradients(model)
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0 and rank == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            niter = epoch * len(train_loader) + i
            writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], niter)
            writer.add_scalar('Train/Avg_Loss', losses.avg, niter)
            writer.add_scalar('Train/Avg_Top1', top1.avg / 100.0, niter)
            writer.add_scalar('Train/Avg_Top5', top5.avg / 100.0, niter)
def validate(val_loader, model, criterion, epoch, writer):
    """Evaluate on the validation set; return the top-1 accuracy.

    Fixes vs original: the all-reduced `reduced_prec1/5` values were computed
    but the meters were updated with the rank-local `prec1/5` — now the
    reduced values are used, consistent with train(). Also dropped the
    deprecated `volatile=True` (a no-op under torch.no_grad()).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    world_size = args.world_size
    rank = args.rank

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input.cuda())
            target_var = torch.autograd.Variable(target)

            # compute output
            output = model(input_var)
            loss = criterion(output, target_var) / world_size

            # measure accuracy and record the globally reduced values
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            reduced_loss = loss.data.clone()
            reduced_prec1 = prec1.clone() / world_size
            reduced_prec5 = prec5.clone() / world_size
            dist.all_reduce_multigpu([reduced_loss])
            dist.all_reduce_multigpu([reduced_prec1])
            dist.all_reduce_multigpu([reduced_prec5])

            losses.update(reduced_loss.item(), input.size(0))
            top1.update(reduced_prec1.item(), input.size(0))
            top5.update(reduced_prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0 and rank == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))

        if rank == 0:
            print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
                  .format(top1=top1, top5=top5))
            niter = (epoch + 1)
            writer.add_scalar('Eval/Avg_Loss', losses.avg, niter)
            writer.add_scalar('Eval/Avg_Top1', top1.avg / 100.0, niter)
            writer.add_scalar('Eval/Avg_Top5', top5.avg / 100.0, niter)

    return top1.avg
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val`, observed `n` times, and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, each the percentage (0-100)
        of samples whose true label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # pred: (maxk, batch) indices of the highest-scoring classes.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): the slice is not guaranteed to be
        # contiguous on all PyTorch versions, and view() raises in that case.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Script entry point: launched once per process by the distributed launcher.
if __name__ == '__main__':
    main()
| 10,642 | 33.003195 | 131 | py |
NM-sparsity | NM-sparsity-main/classification/models/resnet.py | import torch.nn as nn
import math
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
#from devkit.ops import SyncBatchNorm2d
import torch
import torch.nn.functional as F
from torch import autograd
from torch.nn.modules.utils import _pair as pair
from torch.nn import init
from devkit.sparse_ops import SparseConv
__all__ = ['ResNetV1', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1, N=2, M=4):
    """3x3 N:M-sparse convolution with padding=1 and no bias."""
    layer = SparseConv(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False, N=N, M=M)
    return layer
class BasicBlock(nn.Module):
    """Two-layer residual block built from N:M sparse 3x3 convolutions."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, N=2, M=4):
        super(BasicBlock, self).__init__()
        # The first conv performs any spatial downsampling via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride, N=N, M=M)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, N=N, M=M)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(F(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with N:M sparse convs."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, N=2, M=4):
        super(Bottleneck, self).__init__()
        self.conv1 = SparseConv(inplanes, planes, kernel_size=1, bias=False, N=N, M=M)
        self.bn1 = nn.BatchNorm2d(planes)
        # Spatial downsampling (if any) happens in the 3x3 conv.
        self.conv2 = SparseConv(planes, planes, kernel_size=3, stride=stride,
                                padding=1, bias=False, N=N, M=M)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = SparseConv(planes, planes * 4, kernel_size=1, bias=False, N=N, M=M)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(F(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNetV1(nn.Module):
    """ResNet backbone whose convolutions use N:M structured sparsity.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks per stage, e.g. [3, 4, 6, 3].
        num_classes: size of the final classifier.
        N, M: keep N non-zero weights out of every group of M.
    """
    def __init__(self, block, layers, num_classes=1000, N=2, M=4):
        super(ResNetV1, self).__init__()
        self.N = N
        self.M = M
        self.inplanes = 64
        self.conv1 = SparseConv(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False, N=self.N, M=self.M)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Note: _make_layer mutates self.inplanes, so call order matters.
        self.layer1 = self._make_layer(block, 64, layers[0], N = self.N, M = self.M)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, N = self.N, M = self.M)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, N = self.N, M = self.M)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, N = self.N, M = self.M)
        # Fixed 7x7 pooling: assumes 224x224 inputs — TODO confirm for other sizes.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # Kaiming-style (fan-out) init for sparse convs; BN layers keep
        # their default initialisation.
        for m in self.modules():
            if isinstance(m, SparseConv):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))

    def _make_layer(self, block, planes, blocks, stride=1, N = 2, M = 4):
        # Projection shortcut when the shape changes (stride or width).
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                SparseConv(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False, N=N, M=M),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        # Only the first block of a stage downsamples/projects.
        layers.append(block(self.inplanes, planes, stride, downsample, N=N, M=M))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, N=N, M=M))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def resnet18(**kwargs):
    """ResNet-18 (BasicBlock x [2, 2, 2, 2]) with N:M sparse convs."""
    return ResNetV1(BasicBlock, [2, 2, 2, 2], **kwargs)


def resnet34(**kwargs):
    """ResNet-34 (BasicBlock x [3, 4, 6, 3]) with N:M sparse convs."""
    return ResNetV1(BasicBlock, [3, 4, 6, 3], **kwargs)


def resnet50(**kwargs):
    """ResNet-50 (Bottleneck x [3, 4, 6, 3]) with N:M sparse convs."""
    return ResNetV1(Bottleneck, [3, 4, 6, 3], **kwargs)


def resnet101(**kwargs):
    """ResNet-101 (Bottleneck x [3, 4, 23, 3]) with N:M sparse convs."""
    return ResNetV1(Bottleneck, [3, 4, 23, 3], **kwargs)


def resnet152(**kwargs):
    """ResNet-152 (Bottleneck x [3, 8, 36, 3]) with N:M sparse convs."""
    return ResNetV1(Bottleneck, [3, 8, 36, 3], **kwargs)
| 5,446 | 28.603261 | 95 | py |
NM-sparsity | NM-sparsity-main/RAFT/evaluate.py | import sys
sys.path.append('core')
from PIL import Image
import argparse
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import datasets
from utils import flow_viz
from utils import frame_utils
from raft import RAFT
from utils.utils import InputPadder, forward_interpolate
@torch.no_grad()
def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
    """Write .flo predictions for the Sintel test split (leaderboard format).

    Args:
        model: RAFT model (already on GPU).
        iters: refinement iterations per frame pair.
        warm_start: if True, initialise each frame's flow from the
            forward-interpolated previous prediction within a sequence.
        output_path: root directory for <dstype>/<sequence>/frameNNNN.flo files.
    """
    model.eval()
    for dstype in ['clean', 'final']:
        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)

        flow_prev, sequence_prev = None, None
        for test_id in range(len(test_dataset)):
            image1, image2, (sequence, frame) = test_dataset[test_id]
            # Reset warm-start state at sequence boundaries.
            if sequence != sequence_prev:
                flow_prev = None

            # Pad to dimensions the network accepts, then unpad the output.
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

            if warm_start:
                flow_prev = forward_interpolate(flow_low[0])[None].cuda()

            output_dir = os.path.join(output_path, dstype, sequence)
            # Sintel frames are 1-indexed on disk.
            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            frame_utils.writeFlow(output_file, flow)
            sequence_prev = sequence
@torch.no_grad()
def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
    """Write flow predictions for the KITTI test split (leaderboard format)."""
    model.eval()
    test_dataset = datasets.KITTI(split='testing', aug_params=None)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for test_id in range(len(test_dataset)):
        image1, image2, (frame_id, ) = test_dataset[test_id]
        # 'kitti' padding mode pads to the image-top rather than symmetrically.
        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())

        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()

        output_filename = os.path.join(output_path, frame_id)
        frame_utils.writeFlowKITTI(output_filename, flow)
@torch.no_grad()
def validate_chairs(model, iters=24):
    """Evaluate average end-point error on the FlyingChairs validation split."""
    model.eval()
    val_dataset = datasets.FlyingChairs(split='validation')

    epe_list = []
    for idx in range(len(val_dataset)):
        image1, image2, flow_gt, _ = val_dataset[idx]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()

        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        # Per-pixel end-point error of the final prediction.
        err = torch.sum((flow_pr[0].cpu() - flow_gt) ** 2, dim=0).sqrt()
        epe_list.append(err.view(-1).numpy())

    epe = np.mean(np.concatenate(epe_list))
    print("Validation Chairs EPE: %f" % epe)
    return {'chairs': epe}
@torch.no_grad()
def validate_sintel(model, iters=32):
    """Perform validation on the Sintel (train) split.

    Returns:
        dict mapping 'clean'/'final' to the mean end-point error (EPE).
    """
    model.eval()
    results = {}
    for dstype in ['clean', 'final']:
        val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
        epe_list = []

        for val_id in range(len(val_dataset)):
            image1, image2, flow_gt, _ = val_dataset[val_id]
            image1 = image1[None].cuda()
            image2 = image2[None].cuda()

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
            flow = padder.unpad(flow_pr[0]).cpu()

            epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
            epe_list.append(epe.view(-1).numpy())

        epe_all = np.concatenate(epe_list)
        epe = np.mean(epe_all)
        px1 = np.mean(epe_all<1)
        px3 = np.mean(epe_all<3)
        px5 = np.mean(epe_all<5)

        print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
        # BUG FIX: previously `np.mean(epe_list)` averaged a ragged list of
        # per-image arrays, which errors on NumPy >= 1.24 and weights images
        # unevenly; use the flat per-pixel mean already computed above.
        results[dstype] = epe

    return results
@torch.no_grad()
def validate_kitti(model, iters=24):
    """ Peform validation using the KITTI-2015 (train) split.

    Returns a dict with the mean EPE over valid pixels and the KITTI F1
    outlier percentage (EPE > 3px and > 5% of the ground-truth magnitude).
    """
    model.eval()
    val_dataset = datasets.KITTI(split='training')

    out_list, epe_list = [], []
    for val_id in range(len(val_dataset)):
        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
        image1 = image1[None].cuda()
        image2 = image2[None].cuda()

        padder = InputPadder(image1.shape, mode='kitti')
        image1, image2 = padder.pad(image1, image2)

        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
        flow = padder.unpad(flow_pr[0]).cpu()

        # Per-pixel end-point error and ground-truth flow magnitude.
        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
        mag = torch.sum(flow_gt**2, dim=0).sqrt()

        epe = epe.view(-1)
        mag = mag.view(-1)
        # Pixels with valid ground truth (KITTI flow is sparse).
        val = valid_gt.view(-1) >= 0.5

        # KITTI outlier criterion: error > 3px AND > 5% of magnitude.
        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
        epe_list.append(epe[val].mean().item())
        out_list.append(out[val].cpu().numpy())

    epe_list = np.array(epe_list)
    out_list = np.concatenate(out_list)

    epe = np.mean(epe_list)
    f1 = 100 * np.mean(out_list)

    print("Validation KITTI: %f, %f" % (epe, f1))
    return {'kitti-epe': epe, 'kitti-f1': f1}
# Stand-alone evaluation entry point: load a checkpoint and run the
# validation routine selected by --dataset.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--dataset', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()

    # Checkpoints are saved from a DataParallel wrapper, so wrap before loading.
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model.cuda()
    model.eval()

    # create_sintel_submission(model.module, warm_start=True)
    # create_kitti_submission(model.module)

    with torch.no_grad():
        if args.dataset == 'chairs':
            validate_chairs(model.module)

        elif args.dataset == 'sintel':
            validate_sintel(model.module)

        elif args.dataset == 'kitti':
            validate_kitti(model.module)
| 6,618 | 32.429293 | 112 | py |
NM-sparsity | NM-sparsity-main/RAFT/demo.py | import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
DEVICE = 'cuda'
def load_image(imfile):
    """Read an image file into a float (1, C, H, W) tensor on DEVICE."""
    arr = np.array(Image.open(imfile)).astype(np.uint8)
    tensor = torch.from_numpy(arr).permute(2, 0, 1).float()
    return tensor[None].to(DEVICE)
def viz(img, flo):
    """Display the first image of the batch stacked above its flow field.

    Blocks until a key is pressed in the OpenCV window.
    """
    img = img[0].permute(1,2,0).cpu().numpy()
    flo = flo[0].permute(1,2,0).cpu().numpy()

    # map flow to rgb image
    flo = flow_viz.flow_to_image(flo)
    img_flo = np.concatenate([img, flo], axis=0)

    # OpenCV expects BGR order, hence the channel reversal; scale to [0, 1].
    cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
    cv2.waitKey()
def demo(args):
    """Run RAFT on each consecutive image pair under args.path and show the flow."""
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
                 glob.glob(os.path.join(args.path, '*.jpg'))

        # Sorted so (frame_i, frame_i+1) pairs follow filename order.
        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)

            # Pad both frames to network-compatible dimensions.
            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
            viz(image1, flow_up)
# CLI entry point for the interactive demo.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--path', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()

    demo(args)
| 2,073 | 26.289474 | 112 | py |
NM-sparsity | NM-sparsity-main/RAFT/train.py | from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from raft import RAFT
from sparse_raft import SparseRAFT
import evaluate
import datasets
from torch.utils.tensorboard import SummaryWriter
from lr_scheduler import OneCycleLR
try:
    from torch.cuda.amp import GradScaler
# Only the missing-AMP case (PyTorch < 1.6) should trigger the fallback;
# a bare `except:` would also hide unrelated import-time errors.
except ImportError:
    # dummy no-op GradScaler for PyTorch < 1.6
    class GradScaler:
        def __init__(self, enabled=None):
            pass
        def scale(self, loss):
            return loss
        def unscale_(self, optimizer):
            pass
        def step(self, optimizer):
            optimizer.step()
        def update(self):
            pass
# Exclude extremely large displacements (in pixels) from the loss.
MAX_FLOW = 400
# Print averaged training metrics every SUM_FREQ steps.
SUM_FREQ = 100
# Validate and checkpoint every VAL_FREQ steps.
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
    """Exponentially weighted L1 loss over a sequence of flow predictions.

    Later predictions receive higher weight (gamma^(n-i-1)); pixels that are
    invalid or whose ground-truth magnitude exceeds `max_flow` are masked out.
    Returns (loss, metrics) where metrics are EPE and 1/3/5-pixel accuracies
    of the final prediction.
    """
    n_predictions = len(flow_preds)

    # Mask: valid ground truth AND displacement below the cutoff.
    mag = torch.sum(flow_gt ** 2, dim=1).sqrt()
    valid = (valid >= 0.5) & (mag < max_flow)

    flow_loss = 0.0
    for idx, pred in enumerate(flow_preds):
        weight = gamma ** (n_predictions - idx - 1)
        l1 = (pred - flow_gt).abs()
        flow_loss = flow_loss + weight * (valid[:, None] * l1).mean()

    # End-point error of the last (most refined) prediction, valid pixels only.
    epe = torch.sum((flow_preds[-1] - flow_gt) ** 2, dim=1).sqrt()
    epe = epe.view(-1)[valid.view(-1)]

    metrics = {
        'epe': epe.mean().item(),
        '1px': (epe < 1).float().mean().item(),
        '3px': (epe < 3).float().mean().item(),
        '5px': (epe < 5).float().mean().item(),
    }

    return flow_loss, metrics
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def fetch_optimizer(args, model):
    """Create the AdamW optimizer and its one-cycle LR scheduler."""
    optimizer = optim.AdamW(
        model.parameters(), lr=args.lr,
        weight_decay=args.wdecay, eps=args.epsilon)

    # +100 steps of head-room so the schedule outlives the training loop.
    scheduler = OneCycleLR(
        optimizer, args.lr, args.num_steps + 100,
        pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')

    return optimizer, scheduler
class Logger:
    """Accumulates per-step metrics and prints averages every SUM_FREQ steps."""

    def __init__(self, model, scheduler):
        self.model = model
        self.scheduler = scheduler
        self.total_steps = 0
        self.running_loss = {}
        self.writer = None

    def _print_training_status(self):
        """Print averaged metrics (sorted by key) and zero the accumulators."""
        keys = sorted(self.running_loss.keys())
        metrics_data = [self.running_loss[k] / SUM_FREQ for k in keys]
        training_str = "[{:6d}, {:10.7f}] ".format(
            self.total_steps + 1, self.scheduler.get_last_lr()[0])
        metrics_str = ("{:10.4f}, " * len(metrics_data)).format(*metrics_data)

        print(training_str + metrics_str)

        for k in self.running_loss:
            self.running_loss[k] = 0.0

    def push(self, metrics):
        """Accumulate one step's metrics; print every SUM_FREQ steps."""
        self.total_steps += 1

        for key, value in metrics.items():
            self.running_loss[key] = self.running_loss.get(key, 0.0) + value

        if self.total_steps % SUM_FREQ == SUM_FREQ - 1:
            self._print_training_status()
            self.running_loss = {}

    def write_dict(self, results):
        """Send validation results to TensorBoard (writer created lazily)."""
        if self.writer is None:
            self.writer = SummaryWriter()

        for key, value in results.items():
            self.writer.add_scalar(key, value, self.total_steps)

    def close(self):
        self.writer.close()
def train(args):
    """Train SparseRAFT; returns the path of the final checkpoint.

    Cleanups vs original: removed a local `VAL_FREQ = 5000` that shadowed the
    module-level constant with the same value, and an unused `add_noise = True`
    local (the real switch is `args.add_noise`).
    """
    model = nn.DataParallel(SparseRAFT(args), device_ids=args.gpus)
    print("Parameter Count: %d" % count_parameters(model))

    if args.restore_ckpt is not None:
        model.load_state_dict(torch.load(args.restore_ckpt), strict=False)

    model.cuda()
    model.train()

    # BN statistics are kept frozen after the initial (chairs) stage.
    if args.stage != 'chairs':
        model.module.freeze_bn()

    train_loader = datasets.fetch_dataloader(args)
    optimizer, scheduler = fetch_optimizer(args, model)

    total_steps = 0
    scaler = GradScaler(enabled=args.mixed_precision)
    logger = Logger(model, scheduler)

    should_keep_training = True
    while should_keep_training:

        for i_batch, data_blob in enumerate(train_loader):
            optimizer.zero_grad()
            image1, image2, flow, valid = [x.cuda() for x in data_blob]

            if args.add_noise:
                # Photometric augmentation: additive Gaussian noise, clamped
                # back to the valid pixel range.
                stdv = np.random.uniform(0.0, 5.0)
                image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
                image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)

            flow_predictions = model(image1, image2, iters=args.iters)

            loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
            scaler.scale(loss).backward()
            # Unscale before clipping so the clip threshold applies to real grads.
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)

            scaler.step(optimizer)
            scheduler.step()
            scaler.update()

            logger.push(metrics)

            if total_steps % VAL_FREQ == VAL_FREQ - 1:
                PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
                torch.save(model.state_dict(), PATH)

                results = {}
                for val_dataset in args.validation:
                    if val_dataset == 'chairs':
                        results.update(evaluate.validate_chairs(model.module))
                    elif val_dataset == 'sintel':
                        results.update(evaluate.validate_sintel(model.module))
                    elif val_dataset == 'kitti':
                        results.update(evaluate.validate_kitti(model.module))

                #logger.write_dict(results)

                # Validation switches to eval mode; restore training state.
                model.train()
                if args.stage != 'chairs':
                    model.module.freeze_bn()

            total_steps += 1

            if total_steps > args.num_steps:
                should_keep_training = False
                break

    #logger.close()
    PATH = 'checkpoints/%s.pth' % args.name
    torch.save(model.state_dict(), PATH)

    return PATH
# CLI entry point: parse hyper-parameters, fix seeds, and launch training.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='raft', help="name your experiment")
    parser.add_argument('--stage', help="determines which dataset to use for training")
    parser.add_argument('--restore_ckpt', help="restore checkpoint")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--validation', type=str, nargs='+')

    parser.add_argument('--lr', type=float, default=0.00002)
    parser.add_argument('--num_steps', type=int, default=100000)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
    parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')

    parser.add_argument('--iters', type=int, default=12)
    parser.add_argument('--wdecay', type=float, default=.00005)
    parser.add_argument('--epsilon', type=float, default=1e-8)
    parser.add_argument('--clip', type=float, default=1.0)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
    parser.add_argument('--add_noise', action='store_true')
    parser.add_argument('--local_rank', default=-1, type=int,
                        help='node rank for distributed training')
    args = parser.parse_args()

    # Deterministic seeds for reproducibility.
    torch.manual_seed(1234)
    np.random.seed(1234)

    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')

    train(args)
| 8,244 | 31.333333 | 103 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/lr_scheduler.py | import types
import math
from torch._six import inf
from functools import wraps
import warnings
import weakref
from collections import Counter
from bisect import bisect_right
#from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1, verbose=False):
# Attach optimizer
#if not isinstance(optimizer, Optimizer):
# raise TypeError('{} is not an Optimizer'.format(
# type(optimizer).__name__))
self.optimizer = optimizer
# Initialize epoch and base learning rates
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `optimizer.step()` has already been replaced, return.
return method
# Keep a weak reference to the optimizer instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step)
self.optimizer._step_count = 0
self._step_count = 0
self.verbose = verbose
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
    def get_last_lr(self):
        """ Return last computed learning rate by current scheduler.
        """
        # _last_lr is refreshed at the end of every step() call (one value
        # per optimizer param group).
        return self._last_lr
    def get_lr(self):
        # Compute learning rate using chainable form of the scheduler
        # Abstract hook: concrete schedulers (e.g. OneCycleLR below) must
        # override this to return one lr per optimizer param group.
        raise NotImplementedError
def print_lr(self, is_verbose, group, lr, epoch=None):
"""Display the current learning rate.
"""
if is_verbose:
if epoch is None:
print('Adjusting learning rate'
' of group {} to {:.4e}.'.format(group, lr))
else:
print('Epoch {:5d}: adjusting learning rate'
' of group {} to {:.4e}.'.format(epoch, group, lr))
    def step(self, epoch=None):
        """Advance the scheduler by one step and update each param group's lr.

        Args:
            epoch (int, optional): deprecated; when given, jumps the schedule
                to this epoch instead of incrementing last_epoch.
        """
        # Raise a warning if old pattern is detected
        # https://github.com/pytorch/pytorch/issues/20124
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
            # Just check if there were two first lr_scheduler.step() calls before optimizer.step()
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1
        # Context manager flagging that get_lr() is being invoked from inside
        # step(); get_lr() warns when called directly by user code.
        class _enable_get_lr_call:
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False
        with _enable_get_lr_call(self):
            if epoch is None:
                self.last_epoch += 1
                values = self.get_lr()
            else:
                # Deprecated path: jump directly to `epoch`, preferring the
                # closed-form lr when the scheduler provides one.
                warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
                self.last_epoch = epoch
                if hasattr(self, "_get_closed_form_lr"):
                    values = self._get_closed_form_lr()
                else:
                    values = self.get_lr()
        # Write the computed lrs back into the optimizer's param groups.
        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr
            self.print_lr(self.verbose, i, lr, epoch)
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
class OneCycleLR(_LRScheduler):
    r"""Sets the learning rate of each parameter group according to the
    1cycle learning rate policy. The 1cycle policy anneals the learning
    rate from an initial learning rate to some maximum learning rate and then
    from that maximum learning rate to some minimum learning rate much lower
    than the initial learning rate.
    This policy was initially described in the paper `Super-Convergence:
    Very Fast Training of Neural Networks Using Large Learning Rates`_.
    The 1cycle learning rate policy changes the learning rate after every batch.
    `step` should be called after a batch has been used for training.
    This scheduler is not chainable.
    Note also that the total number of steps in the cycle can be determined in one
    of two ways (listed in order of precedence):
    #. A value for total_steps is explicitly provided.
    #. A number of epochs (epochs) and a number of steps per epoch
       (steps_per_epoch) are provided.
       In this case, the number of total steps is inferred by
       total_steps = epochs * steps_per_epoch
    You must either provide a value for total_steps or provide a value for both
    epochs and steps_per_epoch.
    The default behaviour of this scheduler follows the fastai implementation of 1cycle, which
    claims that "unpublished work has shown even better results by using only two phases". To
    mimic the behaviour of the original paper instead, set ``three_phase=True``.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int): The total number of steps in the cycle. Note that
            if a value is not provided here, then it must be inferred by providing
            a value for epochs and steps_per_epoch.
            Default: None
        epochs (int): The number of epochs to train for. This is used along
            with steps_per_epoch in order to infer the total number of steps in the cycle
            if a value for total_steps is not provided.
            Default: None
        steps_per_epoch (int): The number of steps per epoch to train for. This is
            used along with epochs in order to infer the total number of steps in the
            cycle if a value for total_steps is not provided.
            Default: None
        pct_start (float): The percentage of the cycle (in number of steps) spent
            increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: "cos" for cosine annealing, "linear" for
            linear annealing.
            Default: 'cos'
        cycle_momentum (bool): If ``True``, momentum is cycled inversely
            to learning rate between 'base_momentum' and 'max_momentum'.
            Default: True
        base_momentum (float or list): Lower momentum boundaries in the cycle
            for each parameter group. Note that momentum is cycled inversely
            to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally,
            it defines the cycle amplitude (max_momentum - base_momentum).
            Note that momentum is cycled inversely
            to learning rate; at the start of a cycle, momentum is 'max_momentum'
            and learning rate is 'base_lr'
            Default: 0.95
        div_factor (float): Determines the initial learning rate via
            initial_lr = max_lr/div_factor
            Default: 25
        final_div_factor (float): Determines the minimum learning rate via
            min_lr = initial_lr/final_div_factor
            Default: 1e4
        three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the
            learning rate according to 'final_div_factor' instead of modifying the second
            phase (the first two phases will be symmetrical about the step indicated by
            'pct_start').
        last_epoch (int): The index of the last batch. This parameter is used when
            resuming a training job. Since `step()` should be invoked after each
            batch instead of after each epoch, this number represents the total
            number of *batches* computed, not the total number of epochs computed.
            When last_epoch=-1, the schedule is started from the beginning.
            Default: -1
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    Example:
        >>> data_loader = torch.utils.data.DataLoader(...)
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10)
        >>> for epoch in range(10):
        >>>     for batch in data_loader:
        >>>         train_batch(...)
        >>>         scheduler.step()
    .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates:
        https://arxiv.org/abs/1708.07120
    """
    def __init__(self,
                 optimizer,
                 max_lr,
                 total_steps=None,
                 epochs=None,
                 steps_per_epoch=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 cycle_momentum=True,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 div_factor=25.,
                 final_div_factor=1e4,
                 three_phase=False,
                 last_epoch=-1,
                 verbose=False):
        # Validate optimizer
        #if not isinstance(optimizer, Optimizer):
        #    raise TypeError('{} is not an Optimizer'.format(
        #        type(optimizer).__name__))
        self.optimizer = optimizer
        # Validate total_steps
        if total_steps is None and epochs is None and steps_per_epoch is None:
            raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)")
        elif total_steps is not None:
            # Explicit total_steps takes precedence over epochs * steps_per_epoch.
            if total_steps <= 0 or not isinstance(total_steps, int):
                raise ValueError("Expected positive integer total_steps, but got {}".format(total_steps))
            self.total_steps = total_steps
        else:
            if epochs <= 0 or not isinstance(epochs, int):
                raise ValueError("Expected positive integer epochs, but got {}".format(epochs))
            if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int):
                raise ValueError("Expected positive integer steps_per_epoch, but got {}".format(steps_per_epoch))
            self.total_steps = epochs * steps_per_epoch
        # Each phase interpolates lr/momentum between the named param-group
        # keys from the step after the previous phase's 'end_step' up to its
        # own 'end_step' (inclusive).
        if three_phase:
            self._schedule_phases = [
                {
                    'end_step': float(pct_start * self.total_steps) - 1,
                    'start_lr': 'initial_lr',
                    'end_lr': 'max_lr',
                    'start_momentum': 'max_momentum',
                    'end_momentum': 'base_momentum',
                },
                {
                    'end_step': float(2 * pct_start * self.total_steps) - 2,
                    'start_lr': 'max_lr',
                    'end_lr': 'initial_lr',
                    'start_momentum': 'base_momentum',
                    'end_momentum': 'max_momentum',
                },
                {
                    'end_step': self.total_steps - 1,
                    'start_lr': 'initial_lr',
                    'end_lr': 'min_lr',
                    'start_momentum': 'max_momentum',
                    'end_momentum': 'max_momentum',
                },
            ]
        else:
            self._schedule_phases = [
                {
                    'end_step': float(pct_start * self.total_steps) - 1,
                    'start_lr': 'initial_lr',
                    'end_lr': 'max_lr',
                    'start_momentum': 'max_momentum',
                    'end_momentum': 'base_momentum',
                },
                {
                    'end_step': self.total_steps - 1,
                    'start_lr': 'max_lr',
                    'end_lr': 'min_lr',
                    'start_momentum': 'base_momentum',
                    'end_momentum': 'max_momentum',
                },
            ]
        # Validate pct_start
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start))
        # Validate anneal_strategy
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy))
        elif anneal_strategy == 'cos':
            self.anneal_func = self._annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = self._annealing_linear
        # Initialize learning rate variables
        max_lrs = self._format_param('max_lr', self.optimizer, max_lr)
        if last_epoch == -1:
            # Fresh run: seed the per-group lr boundaries from max_lr and the
            # two divisor factors.
            for idx, group in enumerate(self.optimizer.param_groups):
                group['initial_lr'] = max_lrs[idx] / div_factor
                group['max_lr'] = max_lrs[idx]
                group['min_lr'] = group['initial_lr'] / final_div_factor
        # Initialize momentum variables
        self.cycle_momentum = cycle_momentum
        if self.cycle_momentum:
            if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults:
                raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled')
            # Adam-style optimizers expose momentum through betas[0].
            self.use_beta1 = 'betas' in self.optimizer.defaults
            max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
            base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
            if last_epoch == -1:
                for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups):
                    if self.use_beta1:
                        _, beta2 = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['max_momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
        super(OneCycleLR, self).__init__(optimizer, last_epoch, verbose)
    def _format_param(self, name, optimizer, param):
        """Return correctly formatted lr/momentum for each param group."""
        if isinstance(param, (list, tuple)):
            if len(param) != len(optimizer.param_groups):
                raise ValueError("expected {} values for {}, got {}".format(
                    len(optimizer.param_groups), name, len(param)))
            return param
        else:
            # A scalar applies uniformly to every param group.
            return [param] * len(optimizer.param_groups)
    def _annealing_cos(self, start, end, pct):
        "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
        cos_out = math.cos(math.pi * pct) + 1
        return end + (start - end) / 2.0 * cos_out
    def _annealing_linear(self, start, end, pct):
        "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
        return (end - start) * pct + start
    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        lrs = []
        step_num = self.last_epoch
        if step_num > self.total_steps:
            raise ValueError("Tried to step {} times. The specified number of total steps is {}"
                             .format(step_num + 1, self.total_steps))
        for group in self.optimizer.param_groups:
            # Locate the phase containing step_num (the last phase also
            # absorbs any trailing step) and interpolate within it.
            start_step = 0
            for i, phase in enumerate(self._schedule_phases):
                end_step = phase['end_step']
                if step_num <= end_step or i == len(self._schedule_phases) - 1:
                    pct = (step_num - start_step) / (end_step - start_step)
                    computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct)
                    if self.cycle_momentum:
                        computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct)
                    break
                start_step = phase['end_step']
            lrs.append(computed_lr)
            if self.cycle_momentum:
                # Momentum is written straight into the param group (it is not
                # part of the returned values).
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (computed_momentum, beta2)
                else:
                    group['momentum'] = computed_momentum
        return lrs
| 19,353 | 43.800926 | 128 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
from devkit.sparse_ops import SparseConv
class FlowHead(nn.Module):
    """Two-layer sparse-conv head mapping the GRU hidden state to a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = SparseConv(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = SparseConv(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class ConvGRU(nn.Module):
    """Convolutional GRU cell (sparse convolutions) operating on 2-D feature maps."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        self.convz = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convr = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convq = SparseConv(hidden_dim+input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        combined = torch.cat([h, x], dim=1)
        update_gate = torch.sigmoid(self.convz(combined))
        reset_gate = torch.sigmoid(self.convr(combined))
        candidate = torch.tanh(self.convq(torch.cat([reset_gate * h, x], dim=1)))
        # Standard GRU blend of previous state and candidate.
        return (1 - update_gate) * h + update_gate * candidate
class SepConvGRU(nn.Module):
    """GRU cell using separable convolutions: a 1x5 (horizontal) pass followed by a 5x1 (vertical) pass."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        self.convz1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = SparseConv(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convz2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = SparseConv(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))

    def _gru_update(self, h, x, convz, convr, convq):
        # One GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        # horizontal pass then vertical pass
        h = self._gru_update(h, x, self.convz1, self.convr1, self.convq1)
        h = self._gru_update(h, x, self.convz2, self.convr2, self.convq2)
        return h
class SmallMotionEncoder(nn.Module):
    """Encodes correlation features and the current flow estimate into motion features (sparse convs)."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        # Number of correlation channels produced by the correlation pyramid.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = SparseConv(cor_planes, 96, 1, padding=0)
        self.convf1 = SparseConv(2, 64, 7, padding=3)
        self.convf2 = SparseConv(64, 32, 3, padding=1)
        self.conv = SparseConv(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers keep direct access to it.
        return torch.cat([fused, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Full-size motion encoder: deeper correlation and flow branches than the small variant (sparse convs)."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        # Number of correlation channels produced by the correlation pyramid.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = SparseConv(cor_planes, 256, 1, padding=0)
        self.convc2 = SparseConv(256, 192, 3, padding=1)
        self.convf1 = SparseConv(2, 128, 7, padding=3)
        self.convf2 = SparseConv(128, 64, 3, padding=1)
        self.conv = SparseConv(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        corr_feat = F.relu(self.convc2(corr_feat))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow: 126 + 2 = 128 output channels.
        return torch.cat([fused, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """Small RAFT update cell: motion encoder -> ConvGRU -> flow head."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        motion_features = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        # The small model has no upsampling mask head, hence the None slot.
        return net, None, delta_flow
class BasicUpdateBlock(nn.Module):
    """Full RAFT update cell: motion encoder, separable ConvGRU, flow head,
    plus a mask head for convex upsampling of the predicted flow."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = nn.Sequential(
            SparseConv(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            SparseConv(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        motion_features = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        # scale mask to balance gradients
        mask = .25 * self.mask(net)
        return net, mask, delta_flow
| 5,385 | 36.402778 | 88 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_raft.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sparse_update import BasicUpdateBlock, SmallUpdateBlock
from sparse_extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
try:
    # Native mixed-precision context manager, available from PyTorch 1.6.
    autocast = torch.cuda.amp.autocast
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
    # `Exception` still covers the expected AttributeError on older PyTorch.
    # dummy autocast for PyTorch < 1.6
    class autocast:
        def __init__(self, enabled):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass
class SparseRAFT(nn.Module):
    """RAFT optical-flow network built from sparse-convolution encoders/update blocks."""
    def __init__(self, args):
        super(SparseRAFT, self).__init__()
        self.args = args
        # Small and full configurations differ in hidden/context widths and
        # correlation lookup radius.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # NOTE(review): `in` on args presumably relies on argparse.Namespace
        # supporting __contains__ via a custom args object — confirm caller.
        if 'dropout' not in self.args:
            self.args.dropout = 0
        if 'alternate_corr' not in self.args:
            self.args.alternate_corr = False
        # feature network, context network, and update block
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
    def freeze_bn(self):
        # Put every BatchNorm layer into eval mode (frozen running stats).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, C, H, W = img.shape
        # Grids live at 1/8 of the input resolution.
        coords0 = coords_grid(N, H//8, W//8, device=img.device)
        coords1 = coords_grid(N, H//8, W//8, device=img.device)
        # optical flow computed as difference: flow = coords1 - coords0
        return coords0, coords1
    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        # mask: per-output-pixel weights over a 3x3 neighborhood (9) for each
        # of the 8x8 sub-pixels; softmax makes the combination convex.
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # Flow values are scaled by 8 to account for the resolution change.
        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)
    def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
        """ Estimate optical flow between pair of frames """
        # Normalize uint8-range images to [-1, 1].
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        # run the feature network
        with autocast(enabled=self.args.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        # run the context network
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            net, inp = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)
        coords0, coords1 = self.initialize_flow(image1)
        if flow_init is not None:
            coords1 = coords1 + flow_init
        flow_predictions = []
        # Iterative refinement: each step looks up correlation at the current
        # estimate and predicts a residual flow update.
        for itr in range(iters):
            coords1 = coords1.detach()
            corr = corr_fn(coords1) # index correlation volume
            flow = coords1 - coords0
            with autocast(enabled=self.args.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow
            # upsample predictions
            if up_mask is None:
                flow_up = upflow8(coords1 - coords0)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            return coords1 - coords0, flow_up
        return flow_predictions
| 4,950 | 33.144828 | 102 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/corr.py | import torch
import torch.nn.functional as F
from utils.utils import bilinear_sampler, coords_grid
try:
    import alt_cuda_corr
except ImportError:
    # alt_cuda_corr is not compiled
    # Was a bare `except:`; narrow to ImportError so genuine errors inside a
    # present-but-broken extension are no longer silently hidden.
    pass
class CorrBlock:
    """All-pairs correlation volume with an average-pooled pyramid; called with
    the current coordinate estimate to sample correlation features."""
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        self.corr_pyramid = []
        # all pairs correlation
        corr = CorrBlock.corr(fmap1, fmap2)
        batch, h1, w1, dim, h2, w2 = corr.shape
        # Flatten the first image's spatial dims into the batch so pooling
        # only shrinks the second image's axes.
        corr = corr.reshape(batch*h1*w1, dim, h2, w2)
        self.corr_pyramid.append(corr)
        for i in range(self.num_levels-1):
            corr = F.avg_pool2d(corr, 2, stride=2)
            self.corr_pyramid.append(corr)
    def __call__(self, coords):
        r = self.radius
        coords = coords.permute(0, 2, 3, 1)
        batch, h1, w1, _ = coords.shape
        out_pyramid = []
        for i in range(self.num_levels):
            corr = self.corr_pyramid[i]
            # (2r+1)x(2r+1) offset window around each (scaled) coordinate.
            dx = torch.linspace(-r, r, 2*r+1, device=coords.device)
            dy = torch.linspace(-r, r, 2*r+1, device=coords.device)
            delta = torch.stack(torch.meshgrid(dy, dx), axis=-1)
            centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
            delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
            coords_lvl = centroid_lvl + delta_lvl
            # Bilinear lookup of the correlation volume at the window.
            corr = bilinear_sampler(corr, coords_lvl)
            corr = corr.view(batch, h1, w1, -1)
            out_pyramid.append(corr)
        out = torch.cat(out_pyramid, dim=-1)
        return out.permute(0, 3, 1, 2).contiguous().float()
    @staticmethod
    def corr(fmap1, fmap2):
        # Dot-product correlation between every pixel pair, normalized by
        # sqrt of the feature dimension.
        batch, dim, ht, wd = fmap1.shape
        fmap1 = fmap1.view(batch, dim, ht*wd)
        fmap2 = fmap2.view(batch, dim, ht*wd)
        corr = torch.matmul(fmap1.transpose(1,2), fmap2)
        corr = corr.view(batch, ht, wd, 1, ht, wd)
        return corr / torch.sqrt(torch.tensor(dim).float())
class AlternateCorrBlock:
    """Memory-efficient correlation lookup backed by the optional
    `alt_cuda_corr` CUDA extension; requires that extension at call time."""
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        # Keep raw feature-map pairs per level instead of a full volume.
        self.pyramid = [(fmap1, fmap2)]
        for i in range(self.num_levels):
            fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
            fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
            self.pyramid.append((fmap1, fmap2))
    def __call__(self, coords):
        coords = coords.permute(0, 2, 3, 1)
        B, H, W, _ = coords.shape
        dim = self.pyramid[0][0].shape[1]
        corr_list = []
        for i in range(self.num_levels):
            r = self.radius
            # First image stays at full resolution; second comes from level i.
            fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
            fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
            coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
            corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
            corr_list.append(corr.squeeze(1))
        corr = torch.stack(corr_list, dim=1)
        corr = corr.reshape(B, -1, H, W)
        # Same sqrt(dim) normalization as CorrBlock.corr.
        return corr / torch.sqrt(torch.tensor(dim).float())
| 3,085 | 32.543478 | 74 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FlowHead(nn.Module):
    """Two-layer conv head mapping the GRU hidden state to a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class ConvGRU(nn.Module):
    """Convolutional GRU cell operating on 2-D feature maps."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        combined = torch.cat([h, x], dim=1)
        update_gate = torch.sigmoid(self.convz(combined))
        reset_gate = torch.sigmoid(self.convr(combined))
        candidate = torch.tanh(self.convq(torch.cat([reset_gate * h, x], dim=1)))
        # Standard GRU blend of previous state and candidate.
        return (1 - update_gate) * h + update_gate * candidate
class SepConvGRU(nn.Module):
    """GRU cell with separable convolutions: a 1x5 (horizontal) pass followed by a 5x1 (vertical) pass."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))

    def _gru_update(self, h, x, convz, convr, convq):
        # One GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        # horizontal pass then vertical pass
        h = self._gru_update(h, x, self.convz1, self.convr1, self.convq1)
        h = self._gru_update(h, x, self.convz2, self.convr2, self.convq2)
        return h
class SmallMotionEncoder(nn.Module):
    """Encodes correlation features and the current flow estimate into motion features."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        # Number of correlation channels produced by the correlation pyramid.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers keep direct access to it.
        return torch.cat([fused, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Full-size motion encoder: deeper correlation and flow branches than the small variant."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        # Number of correlation channels produced by the correlation pyramid.
        cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
        self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
        self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
        self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        corr_feat = F.relu(self.convc2(corr_feat))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow: 126 + 2 = 128 output channels.
        return torch.cat([fused, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """Small RAFT update cell: motion encoder -> ConvGRU -> flow head."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        motion_features = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        # The small model has no upsampling mask head, hence the None slot.
        return net, None, delta_flow
class BasicUpdateBlock(nn.Module):
    """Full RAFT update cell: motion encoder, separable ConvGRU, flow head,
    plus a mask head for convex upsampling of the predicted flow."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        motion_features = self.encoder(flow, corr)
        gru_input = torch.cat([inp, motion_features], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        # scale mask to balance gradients
        mask = .25 * self.mask(net)
        return net, mask, delta_flow
| 5,227 | 36.342857 | 87 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with a residual connection.

    Args:
        in_planes (int): number of input channels.
        planes (int): number of output channels.
        norm_fn (str): one of 'group', 'batch', 'instance' or 'none'.
        stride (int): stride of the first convolution; when != 1 a strided
            1x1 convolution downsamples the identity branch to match.

    Raises:
        ValueError: if ``norm_fn`` is not a recognized normalization name
            (previously this surfaced later as an AttributeError in forward()).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        def make_norm():
            # Build one normalization layer of the requested kind.
            if norm_fn == 'group':
                return nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if norm_fn == 'batch':
                return nn.BatchNorm2d(planes)
            if norm_fn == 'instance':
                return nn.InstanceNorm2d(planes)
            if norm_fn == 'none':
                return nn.Sequential()
            # Fail fast instead of deferring an AttributeError to forward().
            raise ValueError("unknown norm_fn: {}".format(norm_fn))

        self.norm1 = make_norm()
        self.norm2 = make_norm()
        if stride == 1:
            self.downsample = None
        else:
            self.norm3 = make_norm()
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        y = self.relu(self.norm1(self.conv1(x)))
        y = self.relu(self.norm2(self.conv2(y)))
        # Match the identity branch to the residual branch when downsampling.
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(identity + y)
class BottleneckBlock(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel reduction inside).

    Args:
        in_planes (int): number of input channels.
        planes (int): number of output channels (internal width is planes//4).
        norm_fn (str): one of 'group', 'batch', 'instance' or 'none'.
        stride (int): stride of the middle convolution; when != 1 a strided
            1x1 convolution downsamples the identity branch to match.

    Raises:
        ValueError: if ``norm_fn`` is not a recognized normalization name
            (previously this surfaced later as an AttributeError in forward()).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        def make_norm(channels):
            # Build one normalization layer of the requested kind.
            if norm_fn == 'group':
                return nn.GroupNorm(num_groups=num_groups, num_channels=channels)
            if norm_fn == 'batch':
                return nn.BatchNorm2d(channels)
            if norm_fn == 'instance':
                return nn.InstanceNorm2d(channels)
            if norm_fn == 'none':
                return nn.Sequential()
            # Fail fast instead of deferring an AttributeError to forward().
            raise ValueError("unknown norm_fn: {}".format(norm_fn))

        self.norm1 = make_norm(planes//4)
        self.norm2 = make_norm(planes//4)
        self.norm3 = make_norm(planes)
        if stride == 1:
            self.downsample = None
        else:
            self.norm4 = make_norm(planes)
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        y = self.relu(self.norm1(self.conv1(x)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))
        # Match the identity branch to the residual branch when downsampling.
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(identity + y)
class BasicEncoder(nn.Module):
    """Feature/context encoder: 7x7 stem plus three residual stages, producing
    features at 1/8 of the input resolution."""

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)
        # output convolution
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        # Re-initialize all conv / norm parameters after construction.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if module.weight is not None:
                    nn.init.constant_(module.weight, 1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two residual blocks per stage; only the first may downsample.
        first = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        second = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        self.in_planes = dim
        return nn.Sequential(first, second)

    def forward(self, x):
        # if input is a list/tuple of images, process them as one batch
        packed = isinstance(x, (tuple, list))
        if packed:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        for stage in (self.conv1, self.norm1, self.relu1,
                      self.layer1, self.layer2, self.layer3, self.conv2):
            x = stage(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if packed:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class SmallEncoder(nn.Module):
    """Lightweight encoder variant: 32-channel stem and BottleneckBlock stages."""
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        # Stem normalization, chosen by name.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        # _make_layer reads and updates in_planes as stages are built.
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
        # Kaiming init for convs; unit-scale/zero-shift for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        """Build a stage of two BottleneckBlocks; only the first may stride."""
        layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x):
        """Encode an image (or a pair given as list/tuple, batched together)."""
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if is_list:
            # Undo the concatenation: return the two feature maps separately.
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
| 8,847 | 32.014925 | 93 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/datasets.py | # Data loading based on https://github.com/NVIDIA/flownet2-pytorch
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
class FlowDataset(data.Dataset):
    """Base class for optical-flow datasets.

    Subclasses fill image_list (pairs of paths), flow_list, and extra_info.
    Returns (img1, img2, extra_info) in test mode, otherwise
    (img1, img2, flow, valid) as float tensors in CHW layout.
    """
    def __init__(self, aug_params=None, sparse=False):
        self.augmentor = None
        self.sparse = sparse
        if aug_params is not None:
            # Sparse ground truth (e.g. KITTI) needs the valid-mask-aware augmentor.
            if sparse:
                self.augmentor = SparseFlowAugmentor(**aug_params)
            else:
                self.augmentor = FlowAugmentor(**aug_params)
        self.is_test = False
        self.init_seed = False
        self.flow_list = []
        self.image_list = []
        self.extra_info = []
    def __getitem__(self, index):
        if self.is_test:
            # Test mode: no flow available, return raw images + metadata.
            img1 = frame_utils.read_gen(self.image_list[index][0])
            img2 = frame_utils.read_gen(self.image_list[index][1])
            img1 = np.array(img1).astype(np.uint8)[..., :3]
            img2 = np.array(img2).astype(np.uint8)[..., :3]
            img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
            img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
            return img1, img2, self.extra_info[index]
        if not self.init_seed:
            # Give each DataLoader worker its own RNG seed (once per worker).
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                torch.manual_seed(worker_info.id)
                np.random.seed(worker_info.id)
                random.seed(worker_info.id)
                self.init_seed = True
        index = index % len(self.image_list)
        valid = None
        if self.sparse:
            flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
        else:
            flow = frame_utils.read_gen(self.flow_list[index])
        img1 = frame_utils.read_gen(self.image_list[index][0])
        img2 = frame_utils.read_gen(self.image_list[index][1])
        flow = np.array(flow).astype(np.float32)
        img1 = np.array(img1).astype(np.uint8)
        img2 = np.array(img2).astype(np.uint8)
        # grayscale images
        if len(img1.shape) == 2:
            img1 = np.tile(img1[...,None], (1, 1, 3))
            img2 = np.tile(img2[...,None], (1, 1, 3))
        else:
            img1 = img1[..., :3]
            img2 = img2[..., :3]
        if self.augmentor is not None:
            if self.sparse:
                img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
            else:
                img1, img2, flow = self.augmentor(img1, img2, flow)
        img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
        img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
        flow = torch.from_numpy(flow).permute(2, 0, 1).float()
        if valid is not None:
            valid = torch.from_numpy(valid)
        else:
            # Dense ground truth: treat extreme magnitudes as invalid/occluded.
            valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
        return img1, img2, flow, valid.float()
    def __rmul__(self, v):
        # Supports `k * dataset` to oversample a dataset k times (see fetch_dataloader).
        self.flow_list = v * self.flow_list
        self.image_list = v * self.image_list
        return self
    def __len__(self):
        return len(self.image_list)
class MpiSintel(FlowDataset):
    """MPI-Sintel: every adjacent frame pair in each scene becomes one sample."""
    def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
        super(MpiSintel, self).__init__(aug_params)
        flow_root = osp.join(root, split, 'flow')
        image_root = osp.join(root, split, dstype)
        if split == 'test':
            self.is_test = True
        for scene in os.listdir(image_root):
            frames = sorted(glob(osp.join(image_root, scene, '*.png')))
            for idx in range(len(frames) - 1):
                self.image_list.append([frames[idx], frames[idx + 1]])
                self.extra_info.append((scene, idx))  # scene and frame_id
            if split != 'test':
                self.flow_list.extend(sorted(glob(osp.join(flow_root, scene, '*.flo'))))
class FlyingChairs(FlowDataset):
    """FlyingChairs: pairs of .ppm images, one .flo flow field per pair.

    NOTE(review): `root` defaults to the placeholder 'xxxxx/...' and
    'chairs_split.txt' is read from the working directory -- both must be
    adapted to the local setup. Also, the default split='train' matches
    neither 'training' nor 'validation' below, yielding an empty dataset
    unless the caller passes split='training' -- confirm intent.
    """
    def __init__(self, aug_params=None, split='train', root='xxxxx/FlyingChairs_release/data'):
        super(FlyingChairs, self).__init__(aug_params)
        images = sorted(glob(osp.join(root, '*.ppm')))
        flows = sorted(glob(osp.join(root, '*.flo')))
        # Two images per flow field.
        assert (len(images)//2 == len(flows))
        # Split file labels sample i: 1 = training, 2 = validation.
        split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
        for i in range(len(flows)):
            xid = split_list[i]
            if (split=='training' and xid==1) or (split=='validation' and xid==2):
                self.flow_list += [ flows[i] ]
                self.image_list += [ [images[2*i], images[2*i+1]] ]
class FlyingThings3D(FlowDataset):
    """FlyingThings3D: left-camera frame pairs with forward and backward .pfm flows."""
    def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
        super(FlyingThings3D, self).__init__(aug_params)
        for cam in ['left']:
            for direction in ['into_future', 'into_past']:
                image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
                image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
                flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
                flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
                for idir, fdir in zip(image_dirs, flow_dirs):
                    images = sorted(glob(osp.join(idir, '*.png')) )
                    flows = sorted(glob(osp.join(fdir, '*.pfm')) )
                    for i in range(len(flows)-1):
                        # Forward flow maps frame i -> i+1; backward flow maps i+1 -> i.
                        if direction == 'into_future':
                            self.image_list += [ [images[i], images[i+1]] ]
                            self.flow_list += [ flows[i] ]
                        elif direction == 'into_past':
                            self.image_list += [ [images[i+1], images[i]] ]
                            self.flow_list += [ flows[i+1] ]
class KITTI(FlowDataset):
    """KITTI flow dataset (sparse ground truth with a validity mask)."""
    def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
        super(KITTI, self).__init__(aug_params, sparse=True)
        if split == 'testing':
            self.is_test = True
        root = osp.join(root, split)
        # *_10.png is the reference frame, *_11.png the next frame.
        images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
        images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
        for img1, img2 in zip(images1, images2):
            # osp.basename is portable; splitting on '/' breaks on Windows paths.
            frame_id = osp.basename(img1)
            self.extra_info += [ [frame_id] ]
            self.image_list += [ [img1, img2] ]
        if split == 'training':
            self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
class HD1K(FlowDataset):
    """HD1K benchmark: numbered sequences discovered by probing increasing ids."""
    def __init__(self, aug_params=None, root='datasets/HD1k'):
        super(HD1K, self).__init__(aug_params, sparse=True)
        seq_ix = 0
        while True:
            flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
            images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
            # Stop at the first sequence id with no ground truth on disk.
            if not flows:
                break
            for i in range(len(flows) - 1):
                self.flow_list.append(flows[i])
                self.image_list.append([images[i], images[i + 1]])
            seq_ix += 1
def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
    """ Create the data loader for the corresponding training set.

    args.stage selects the curriculum stage ('chairs', 'things', 'sintel',
    'kitti'); args.image_size and args.batch_size configure augmentation
    and batching. Raises ValueError for an unknown stage.
    """
    if args.stage == 'chairs':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
        train_dataset = FlyingChairs(aug_params, split='training')
    elif args.stage == 'things':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
        clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
        train_dataset = clean_dataset + final_dataset
    elif args.stage == 'sintel':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
        things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
        sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
        sintel_final = MpiSintel(aug_params, split='training', dstype='final')
        # Integer multipliers oversample smaller datasets (FlowDataset.__rmul__).
        if TRAIN_DS == 'C+T+K+S+H':
            kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
            hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
            train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
        elif TRAIN_DS == 'C+T+K/S':
            train_dataset = 100*sintel_clean + 100*sintel_final + things
    elif args.stage == 'kitti':
        aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
        train_dataset = KITTI(aug_params, split='training')
    else:
        # Fail fast instead of hitting a NameError on train_dataset below.
        raise ValueError("Unknown training stage: %s" % args.stage)
    train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
        pin_memory=False, shuffle=True, num_workers=4, drop_last=True)
    print('Training with %d image pairs' % len(train_dataset))
    return train_loader
| 9,242 | 38.165254 | 111 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/raft.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from update import BasicUpdateBlock, SmallUpdateBlock
from extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
try:
    # PyTorch >= 1.6 provides native automatic mixed precision.
    autocast = torch.cuda.amp.autocast
except AttributeError:
    # dummy autocast for PyTorch < 1.6: a no-op context manager so the rest
    # of the code can use `with autocast(...)` unconditionally.
    # (A bare `except:` would also swallow KeyboardInterrupt/SystemExit.)
    class autocast:
        def __init__(self, enabled):
            pass
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass
class RAFT(nn.Module):
    """RAFT optical-flow network: feature/context encoders, a correlation
    volume, and a recurrent update block that iteratively refines the flow."""
    def __init__(self, args):
        super(RAFT, self).__init__()
        self.args = args
        # Small model uses smaller hidden/context dims and correlation radius.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # Fill in optional args (argparse.Namespace supports `in`).
        if 'dropout' not in self.args:
            self.args.dropout = 0
        if 'alternate_corr' not in self.args:
            self.args.alternate_corr = False
        # feature network, context network, and update block
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
    def freeze_bn(self):
        """Put all BatchNorm layers in eval mode (freeze running statistics)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, C, H, W = img.shape
        coords0 = coords_grid(N, H//8, W//8, device=img.device)
        coords1 = coords_grid(N, H//8, W//8, device=img.device)
        # optical flow computed as difference: flow = coords1 - coords0
        return coords0, coords1
    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        # mask holds, per output pixel, softmax weights over a 3x3 neighborhood.
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # Flow vectors are scaled by 8 to match the upsampled resolution.
        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)
    def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
        """ Estimate optical flow between pair of frames.

        Images are expected in [0, 255]; returns the list of per-iteration
        upsampled flow predictions, or (low-res flow, final flow) in test mode.
        """
        # Normalize to [-1, 1].
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        # run the feature network
        with autocast(enabled=self.args.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        # run the context network
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            # Split context features into GRU hidden state and input features.
            net, inp = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)
        coords0, coords1 = self.initialize_flow(image1)
        if flow_init is not None:
            coords1 = coords1 + flow_init
        flow_predictions = []
        for itr in range(iters):
            # Detach so gradients don't flow through previous iterations.
            coords1 = coords1.detach()
            corr = corr_fn(coords1) # index correlation volume
            flow = coords1 - coords0
            with autocast(enabled=self.args.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow
            # upsample predictions
            if up_mask is None:
                flow_up = upflow8(coords1 - coords0)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            return coords1 - coords0, flow_up
        return flow_predictions
| 4,924 | 32.965517 | 102 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/sparse_extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os.path as osp
sys.path.append(osp.abspath(osp.join(__file__, '../../../')))
from devkit.sparse_ops import SparseConv
class ResidualBlock(nn.Module):
    """Residual block built from two 3x3 SparseConv layers.

    norm_fn selects 'group' / 'batch' / 'instance' / 'none'; when stride != 1
    the skip path is downsampled by a 1x1 SparseConv (with norm3) so shapes match.
    """
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = SparseConv(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = SparseConv(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1:
                self.norm3 = nn.Sequential()
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                SparseConv(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
    def forward(self, x):
        """conv-norm-relu twice, then add the (possibly downsampled) skip path."""
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x+y)
class BottleneckBlock(nn.Module):
    """Bottleneck residual block (1x1 reduce, 3x3, 1x1 expand) using SparseConv.

    Internal width is planes//4; when stride != 1 the skip path is downsampled
    by a 1x1 SparseConv (with norm4) so shapes match.
    """
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        self.conv1 = SparseConv(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = SparseConv(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = SparseConv(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes//4)
            self.norm2 = nn.BatchNorm2d(planes//4)
            self.norm3 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes//4)
            self.norm2 = nn.InstanceNorm2d(planes//4)
            self.norm3 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            self.norm3 = nn.Sequential()
            if not stride == 1:
                self.norm4 = nn.Sequential()
        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                SparseConv(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
    def forward(self, x):
        """Three conv-norm-relu stages, then add the (possibly downsampled) skip path."""
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x+y)
class BasicEncoder(nn.Module):
    """SparseConv variant of the RAFT BasicEncoder (N:M-sparse convolutions)."""
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        # Stem normalization, chosen by name.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = SparseConv(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        # _make_layer reads and updates in_planes as stages are built.
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)
        # output convolution
        self.conv2 = SparseConv(128, output_dim, kernel_size=1)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        # Kaiming init for SparseConvs; unit-scale/zero-shift for norm layers.
        for m in self.modules():
            if isinstance(m, SparseConv):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        """Build a stage of two ResidualBlocks; only the first may stride."""
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x):
        """Encode an image (or a pair given as list/tuple, batched together)."""
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if is_list:
            # Undo the concatenation: return the two feature maps separately.
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class SmallEncoder(nn.Module):
    """SparseConv variant of the RAFT SmallEncoder (N:M-sparse convolutions)."""
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        # Stem normalization, chosen by name.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = SparseConv(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        # _make_layer reads and updates in_planes as stages are built.
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        self.conv2 = SparseConv(96, output_dim, kernel_size=1)
        # Kaiming init for SparseConvs; unit-scale/zero-shift for norm layers.
        for m in self.modules():
            if isinstance(m, SparseConv):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim, stride=1):
        """Build a stage of two BottleneckBlocks; only the first may stride."""
        layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)
        self.in_planes = dim
        return nn.Sequential(*layers)
    def forward(self, x):
        """Encode an image (or a pair given as list/tuple, batched together)."""
        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if is_list:
            # Undo the concatenation: return the two feature maps separately.
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
| 8,997 | 31.959707 | 94 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/utils.py | import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate
class InputPadder:
    """Pad image tensors so height and width become multiples of 8.

    'sintel' mode pads symmetrically on both axes; any other mode pads
    width symmetrically but puts all height padding at the bottom.
    """

    def __init__(self, dims, mode='sintel'):
        self.ht, self.wd = dims[-2:]
        # Padding needed to reach the next multiple of 8 (0 if already aligned).
        extra_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
        extra_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
        left = extra_wd // 2
        right = extra_wd - left
        if mode == 'sintel':
            top = extra_ht // 2
            self._pad = [left, right, top, extra_ht - top]
        else:
            self._pad = [left, right, 0, extra_ht]

    def pad(self, *inputs):
        """Replicate-pad every input tensor; returns a list."""
        return [F.pad(tensor, self._pad, mode='replicate') for tensor in inputs]

    def unpad(self, x):
        """Crop a padded tensor back to the original spatial size."""
        ht, wd = x.shape[-2:]
        rows = slice(self._pad[2], ht - self._pad[3])
        cols = slice(self._pad[0], wd - self._pad[1])
        return x[..., rows, cols]
def forward_interpolate(flow):
    """Forward-warp a (2, H, W) flow field and fill holes via nearest-neighbor
    interpolation; returns a float CPU tensor of the same shape."""
    flow = flow.detach().cpu().numpy()
    dx, dy = flow[0], flow[1]
    ht, wd = dx.shape
    x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
    # Target coordinates after applying the flow, flattened.
    x1 = (x0 + dx).reshape(-1)
    y1 = (y0 + dy).reshape(-1)
    dx = dx.reshape(-1)
    dy = dy.reshape(-1)
    # Keep only vectors that land strictly inside the image.
    inside = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
    x1, y1 = x1[inside], y1[inside]
    dx, dy = dx[inside], dy[inside]
    flow_x = interpolate.griddata(
        (x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
    flow_y = interpolate.griddata(
        (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
    warped = np.stack([flow_x, flow_y], axis=0)
    return torch.from_numpy(warped).float()
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
    """ Wrapper for grid_sample, uses pixel coordinates.

    coords[..., 0] is x, coords[..., 1] is y, both in pixel units; they are
    normalized to grid_sample's [-1, 1] convention (align_corners=True).
    When mask=True also returns a float mask of strictly-in-bounds samples.
    """
    H, W = img.shape[-2:]
    xgrid, ygrid = coords.split([1, 1], dim=-1)
    # Map pixel coordinates onto the normalized [-1, 1] range.
    xgrid = 2 * xgrid / (W - 1) - 1
    ygrid = 2 * ygrid / (H - 1) - 1
    sampled = F.grid_sample(img, torch.cat([xgrid, ygrid], dim=-1), align_corners=True)
    if not mask:
        return sampled
    in_bounds = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
    return sampled, in_bounds.float()
def coords_grid(batch, ht, wd, device):
    """Return a (batch, 2, ht, wd) grid where channel 0 holds x and channel 1 holds y."""
    ys, xs = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
    grid = torch.stack((xs, ys), dim=0).float()
    return grid.unsqueeze(0).repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
    """Upsample a (N, 2, H, W) flow field 8x spatially and scale its vectors by 8."""
    target = (8 * flow.shape[2], 8 * flow.shape[3])
    upsampled = F.interpolate(flow, size=target, mode=mode, align_corners=True)
    return 8 * upsampled
| 2,489 | 29 | 93 | py |
NM-sparsity | NM-sparsity-main/RAFT/core/utils/augmentor.py | import numpy as np
import random
import math
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from torchvision.transforms import ColorJitter
import torch.nn.functional as F
class FlowAugmentor:
    """Augmentation for dense optical-flow training pairs.

    __call__ applies, in order: color jitter, random rectangular erasing on
    the second image, random scale/stretch, optional flips, and a random
    crop to crop_size. The flow field is rescaled/flipped consistently.
    """
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
        # spatial augmentation params
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.spatial_aug_prob = 0.8
        self.stretch_prob = 0.8
        self.max_stretch = 0.2
        # flip augmentation params
        self.do_flip = do_flip
        self.h_flip_prob = 0.5
        self.v_flip_prob = 0.1
        # photometric augmentation params
        self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
        self.asymmetric_color_aug_prob = 0.2
        self.eraser_aug_prob = 0.5
    def color_transform(self, img1, img2):
        """ Photometric augmentation """
        # asymmetric: jitter each image independently
        if np.random.rand() < self.asymmetric_color_aug_prob:
            img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
            img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
        # symmetric: jitter both images identically by stacking them
        else:
            image_stack = np.concatenate([img1, img2], axis=0)
            image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
            img1, img2 = np.split(image_stack, 2, axis=0)
        return img1, img2
    def eraser_transform(self, img1, img2, bounds=(50, 100)):
        """ Occlusion augmentation: paint 1-2 random rectangles of img2 with its mean color.

        bounds: (min, max) rectangle side length in pixels. The default is a
        tuple rather than a mutable list (classic Python default-arg pitfall).
        """
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(bounds[0], bounds[1])
                dy = np.random.randint(bounds[0], bounds[1])
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color
        return img1, img2
    def spatial_transform(self, img1, img2, flow):
        """Random rescale/stretch, optional flips, then a random crop to crop_size."""
        # randomly sample scale
        ht, wd = img1.shape[:2]
        # never scale below what the crop size (plus margin) requires
        min_scale = np.maximum(
            (self.crop_size[0] + 8) / float(ht),
            (self.crop_size[1] + 8) / float(wd))
        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = scale
        scale_y = scale
        if np.random.rand() < self.stretch_prob:
            scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
            scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
        scale_x = np.clip(scale_x, min_scale, None)
        scale_y = np.clip(scale_y, min_scale, None)
        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images; flow vectors scale with the image
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow = flow * [scale_x, scale_y]
        if self.do_flip:
            if np.random.rand() < self.h_flip_prob: # h-flip negates the x component
                img1 = img1[:, ::-1]
                img2 = img2[:, ::-1]
                flow = flow[:, ::-1] * [-1.0, 1.0]
            if np.random.rand() < self.v_flip_prob: # v-flip negates the y component
                img1 = img1[::-1, :]
                img2 = img2[::-1, :]
                flow = flow[::-1, :] * [1.0, -1.0]
        y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
        x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        return img1, img2, flow
    def __call__(self, img1, img2, flow):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow = self.spatial_transform(img1, img2, flow)
        # ascontiguousarray so torch.from_numpy works after the flip views
        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)
        return img1, img2, flow
class SparseFlowAugmentor:
    """Augmentation for flow datasets with sparse ground truth (e.g. KITTI).

    Like FlowAugmentor, but carries a per-pixel validity mask through the
    spatial transforms, and resizes the flow by scattering valid vectors
    instead of interpolating (interpolation would mix valid/invalid pixels).
    """
    def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
        # spatial augmentation params
        self.crop_size = crop_size
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.spatial_aug_prob = 0.8
        self.stretch_prob = 0.8
        self.max_stretch = 0.2
        # flip augmentation params
        self.do_flip = do_flip
        self.h_flip_prob = 0.5
        self.v_flip_prob = 0.1
        # photometric augmentation params
        self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
        self.asymmetric_color_aug_prob = 0.2
        self.eraser_aug_prob = 0.5
    def color_transform(self, img1, img2):
        """Symmetric color jitter: both images receive identical jitter."""
        image_stack = np.concatenate([img1, img2], axis=0)
        image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
        img1, img2 = np.split(image_stack, 2, axis=0)
        return img1, img2
    def eraser_transform(self, img1, img2):
        """Occlusion augmentation: paint 1-2 random rectangles of img2 with its mean color."""
        ht, wd = img1.shape[:2]
        if np.random.rand() < self.eraser_aug_prob:
            mean_color = np.mean(img2.reshape(-1, 3), axis=0)
            for _ in range(np.random.randint(1, 3)):
                x0 = np.random.randint(0, wd)
                y0 = np.random.randint(0, ht)
                dx = np.random.randint(50, 100)
                dy = np.random.randint(50, 100)
                img2[y0:y0+dy, x0:x0+dx, :] = mean_color
        return img1, img2
    def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
        """Resize a sparse flow map by scattering valid vectors onto the new grid.

        Returns (flow_img, valid_img) at the scaled resolution; positions with
        no scattered vector are zero flow / invalid.
        """
        ht, wd = flow.shape[:2]
        coords = np.meshgrid(np.arange(wd), np.arange(ht))
        coords = np.stack(coords, axis=-1)
        coords = coords.reshape(-1, 2).astype(np.float32)
        flow = flow.reshape(-1, 2).astype(np.float32)
        valid = valid.reshape(-1).astype(np.float32)
        # Keep only pixels that have ground truth.
        coords0 = coords[valid>=1]
        flow0 = flow[valid>=1]
        ht1 = int(round(ht * fy))
        wd1 = int(round(wd * fx))
        # Scale both positions and vectors.
        coords1 = coords0 * [fx, fy]
        flow1 = flow0 * [fx, fy]
        xx = np.round(coords1[:,0]).astype(np.int32)
        yy = np.round(coords1[:,1]).astype(np.int32)
        # Drop vectors that fall outside the resized grid.
        v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
        xx = xx[v]
        yy = yy[v]
        flow1 = flow1[v]
        flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
        valid_img = np.zeros([ht1, wd1], dtype=np.int32)
        flow_img[yy, xx] = flow1
        valid_img[yy, xx] = 1
        return flow_img, valid_img
    def spatial_transform(self, img1, img2, flow, valid):
        """Random rescale, optional h-flip, then a random crop to crop_size."""
        # randomly sample scale
        ht, wd = img1.shape[:2]
        min_scale = np.maximum(
            (self.crop_size[0] + 1) / float(ht),
            (self.crop_size[1] + 1) / float(wd))
        scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
        scale_x = np.clip(scale, min_scale, None)
        scale_y = np.clip(scale, min_scale, None)
        if np.random.rand() < self.spatial_aug_prob:
            # rescale the images
            img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
            flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
        if self.do_flip:
            if np.random.rand() < 0.5: # h-flip negates the x component
                img1 = img1[:, ::-1]
                img2 = img2[:, ::-1]
                flow = flow[:, ::-1] * [-1.0, 1.0]
                valid = valid[:, ::-1]
        # Bias the crop window (margins), then clamp back inside the image.
        margin_y = 20
        margin_x = 50
        y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
        x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
        y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
        x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
        img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        return img1, img2, flow, valid
    def __call__(self, img1, img2, flow, valid):
        img1, img2 = self.color_transform(img1, img2)
        img1, img2 = self.eraser_transform(img1, img2)
        img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
        # ascontiguousarray so torch.from_numpy works after the flip views
        img1 = np.ascontiguousarray(img1)
        img2 = np.ascontiguousarray(img2)
        flow = np.ascontiguousarray(flow)
        valid = np.ascontiguousarray(valid)
        return img1, img2, flow, valid
| 9,108 | 35.878543 | 97 | py |
NM-sparsity | NM-sparsity-main/RAFT/alt_cuda_corr/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Build the `alt_cuda_corr` CUDA extension (alternate correlation-volume kernel).
# Usage: `python setup.py install` from this directory, with CUDA toolchain available.
setup(
    name='correlation',
    ext_modules=[
        CUDAExtension('alt_cuda_corr',
            sources=['correlation.cpp', 'correlation_kernel.cu'],
            extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 381 | 22.875 | 67 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Helper record bundling a term's coefficient with the operators it
    multiplies and the qubit positions they act on (intermediate format)."""

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coeff

    @property
    def operators(self):
        """Operators making up this term."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._pos
class MPOContainer:
    """
    Container for the per-qubit tensors of a matrix product operator.

    Holds one rank-4 tensor of shape (bond_left, bond_right, 2, 2) per
    qubit.  Supports setting entries, growing the bond dimensions
    (mimicking a dynamic array) and SVD-based compression of the MPO.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One (1,1,2,2) tensor per qubit; bond dimensions grow on demand.
        # Fix: `np.complex` was removed in NumPy 1.24 -- use the builtin.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximum left bond dimension over all qubit tensors."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write `add_operator` into the tensor of `qubit`.

        set_at: [i, j] writes a full 2x2 matrix at bond indices (i, j);
                [i, j, k, l] writes a single scalar entry.
        """
        if len(set_at) == 2:
            # Set a full 2x2 matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set one specific value
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of `qubit`'s tensor (dynamic-array style)
        and place `add_operator` in the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if len(update_dir) == 2:
            # Fix: build a new list instead of `+=`, which mutated the
            # caller's list in place.
            update_dir = update_dir + [0, 0]
        elif len(update_dir) != 4:
            raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values, then place the new operator in the last corner.
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions via a forward and a backward SVD
        sweep, discarding singular values below EPS and distributing the
        remaining weights symmetrically to both sides.
        """
        n_qubits = len(self.container)
        # Merge the two physical legs so each tensor is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical leg back into two 2-dim legs.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian.  When the bond dimension would exceed `maxdim`, the
    Hamiltonian is split into a list of MPOs, each individually bounded.
    """

    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian (or a file name) to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: An optional name for the MPO.
            maxdim: maximum bond dimension per MPO before a new one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class --
            # presumably provided elsewhere; confirm before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs, stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).
        """
        def get_pauli_matrix(string):
            # Fix: `np.complex` was removed in NumPy 1.24 -- use the builtin.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty Pauli string).
            # NOTE(review): `first` is only cleared in this branch, so a
            # later identity term is rejected only if the leading term was
            # itself the identity -- kept as-is to preserve behavior.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index `j`, adding terms until all are
        consumed or the bond dimension reaches self.maxdim.

        Returns:
            (mpo, j_next) where j_next is the index of the first term that
            was not absorbed into this MPO.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # First term: plain 2x2 matrices; the coefficient is distributed as
        # an n_qubits-th root over all sites.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # Remaining terms: grow the bond dimensions, compressing periodically.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary tensors only grow along their single inner bond.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # Place the term's operator (or an identity) on this site.
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # Periodic compression keeps intermediate bond dimensions small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Consume all terms, producing a list of MPOs each bounded by maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Contract self.mpo back into a dense tensor of shape (d, d, d, d)
        with d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian
        obtained from tq.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Network layout (contract all bond indices, keep physical legs):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect the bond indices between neighboring sites.
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges: left/right boundary bonds, then the
            # upper and lower physical legs.
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # Fix: with a non-torch backend the result is already an
                # ndarray; the original left H_m undefined in that case.
                H_m = np.asarray(res.tensor)
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer for the SciPy optimizer.

    Evaluates d<H>/d(theta) for every optimized parameter, combining
    circuit-parameter gradients (via tq.grad) with Hamiltonian-parameter
    gradients (via the precomputed Ham_derivatives).  See _EvalContainer
    for the constructor arguments.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the SciPy parameter vector `p`.

        Parameters
        ----------
        p: numpy array:
            flat parameter vector, ordered like self.param_keys.

        Returns
        -------
        numpy.array:
            complex gradient vector, one entry per parameter key.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # Fix: the accumulator must be complex.  The original allocated a
        # float64 array but anticipated complex derivatives (see the final
        # asarray(dtype=complex64)); assigning a complex scalar into a
        # float array raises a TypeError.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # Rebuild the variable dictionary; Hamiltonian-only parameters are
        # promoted to complex (same convention as _EvalContainer).
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the parametrized circuit.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the parametrized Hamiltonian.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Scalars are stored directly; otherwise `derivative` is a tq
            # Objective and has to be evaluated.  Builtin complex is added
            # to the scalar check (it is not callable).
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation- and
    gradient-container objects of OptimizerSciPy so that circuit
    parameters and Hamiltonian parameters can be optimized together.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Format the variables and initial values received in optimizer calls.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string or callable or number:
            initial values for the variables; 'zero' sets all to 0,
            'random' draws uniformly from [0, 2pi), a callable is applied
            per key, a number is broadcast, and an incomplete dict is
            autocompleted with zeroes (with a warning).
        variables: list:
            the variables being optimized over (None -> all).

        Returns
        -------
        tuple:
            active_angles (dict of optimized variables),
            passive_angles (dict of fixed variables),
            variables (formatted list of optimized variables).
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            # default: random initialization between 0 and 2pi
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: 'zero' or 'random'
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module's visible header -- confirm it is in scope.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and `TequilaWarning` are not imported
            # in this module's visible header -- confirm they are in scope.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized and fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <Hamiltonian> over the circuit `unitary`
        using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            the (parametrized) Hamiltonian defining the energy.
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization.
            Will be generated if None.
        gradient: optional:
            information or object used to calculate the gradient.
            Defaults to None: construct analytically.
        hessian: optional:
            information or object used to calculate the hessian.
            Defaults to None: construct analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Collect all variables: Hamiltonian parameters plus circuit angles.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value dictionary into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        # Energy container: the objective SciPy will minimize.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): `objective`, get_qng_combos and _QngContainer
                # are not defined/imported in this module's visible code --
                # this branch would raise a NameError if taken; confirm.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient instruction ('2-point', 'cs', '3-point')
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same undefined-name caveat as the branch above.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytical gradient container (circuit + Hamiltonian parts).
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # one silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        # Callback recording per-iteration energies/angles/gradients; also
        # forwards to a user-supplied callback if one was given.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a local that is never used -- it
        # presumably intended to reset callback.real_iterations; confirm.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: constructs the local optimize_scipy optimizer and passes
    the Hamiltonian/unitary objective construction down to it.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian defining the energy.
    unitary:
        the parametrized circuit.
    gradient: str or dict, optional:
        '2-point', 'cs' or '3-point' for numerical gradients, a dict of
        variables to tequila Objectives for custom gradients, 'qng' for
        the quantum natural gradient, or None for automatic construction.
    hessian: str or dict, optional:
        same options as `gradient` (keys are tuples of variables).
    initial_values: dict, optional:
        initial values per variable; None -> all set via the optimizer's
        default initialization.
    variables: list, optional:
        variables to optimize; None -> all.
    samples: int, optional:
        shots per circuit evaluation (None -> full wavefunction simulation).
    maxiter: int: (Default = 100):
        maximum number of iterations.
    backend: str, optional:
        simulator backend, chosen automatically if None.
    backend_options: dict, optional:
        additional backend options, passed through on every call.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    device: str, optional:
        device the backend should run on.
    method: str: (Default = "BFGS"):
        scipy optimization method.
    tol: float: (Default = 1.e-3):
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: dict, optional:
        variable bounds (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool:
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Normalize gradient/hessian dictionaries keyed by variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # Fix: the second key component was wrapped in a list
            # (assign_variable([k[1]])), which is not a valid variable name.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/code/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper for differentiating Objectives, ExpectationValues, Unitaries
    (including single gates) and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which to differentiate;
        default None computes the total gradient as a dict over all variables.
    :param no_compile: skip the compilation pass if the objective is already compiled.
    return: dictionary of Objectives when variable is None; otherwise the
        derivative Objective (or number for Variables/Transforms).
    '''
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        result = {}
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for var in all_vars:
            assert (var is not None)
            result[var] = grad(objective, var, no_compile=no_compile)
        return result
    variable = assign_variable(variable)

    # Tensor-valued objectives: differentiate element-wise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # Objective does not depend on the variable at all -> zero objective.
    if variable not in objective.extract_variables():
        return Objective()

    # Compile to shift-rule-compatible gates unless told otherwise.
    if no_compile:
        compiled = objective
    else:
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (compiled) objective type.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Differentiate a scalar Objective w.r.t. a single Variable via the chain rule.

    The outer derivative of the transformation is obtained with jax/autograd,
    while the inner derivatives of the arguments are delegated to __grad_inner.
    Inner derivatives of expectation values are cached so that each distinct
    expectation value is differentiated only once.

    :param objective: the Objective to differentiate
    :param variable: the Variable with respect to which it is differentiated
    :return: the accumulated derivative as an Objective
    """
    arguments = objective.args
    trafo = objective.transformation
    # cache for inner derivatives of expectation values (avoid recomputation)
    ev_cache = {}
    accumulated = None
    for pos, argument in enumerate(arguments):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(trafo, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(trafo, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # simple shortcut: the identity transformation has outer derivative 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)
        if hasattr(argument, "U"):
            # expectation value: reuse a previously computed inner derivative
            if argument not in ev_cache:
                ev_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = ev_cache[argument]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)
        if inner == 0.0:
            # don't pile up vanishing expectation values
            continue
        contribution = outer * inner
        accumulated = contribution if accumulated is None else accumulated + contribution
    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Innermost step of the chain rule: differentiate a transform/variable-like
    object all the way down to plain variables.

    :param arg: a Variable, FixedVariable, ExpectationValueImpl or nested
        Objective-like object to be differentiated
    :param variable: the Variable with respect to which arg is differentiated
    :return: 1.0 / 0.0 for a (non-)matching Variable, otherwise the derivative
        delegated to the expectation-value / objective routines
    """
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is one iff they are the same variable
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, recompile
        abstract = arg.abstract_expectationvalue
        dE = __grad_expectationvalue(abstract, variable=variable)
        return compile(dE, **arg._input_args)
    # fall back: treat arg as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value <U' H U> with respect
    to one variable, using the shift rule on every gate carrying it.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which E is differentiated
    :return: the derivative as an Objective (0.0 if E is independent of variable)
    """
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # sum shift-rule contributions over all gates parametrized by this variable
    total = Objective()
    for position, gate in unitary._parameter_map[variable]:
        total += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient contribution of a single directly differentiable gate via its
    shift rule. Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g within unitary
    :param variable: the Variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate parameter w.r.t. the variable
    param_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # evaluate the expectation value with the shifted gate substituted in
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        shifted_weight = weight * param_grad
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += shifted_weight * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.4/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """Lightweight record holding one Pauli-string term in intermediate form.

    Stores the term's scalar coefficient, the list of single-qubit operator
    matrices and the qubit positions they act on. All fields are exposed
    read-only through properties.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # internal storage; external access goes through the properties below
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coeff

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._pos
class MPOContainer:
    """
    Container handling one MPO: set values at given positions, grow the bond
    dimensions on demand (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor indexed as
    [left bond, right bond, physical, physical]; the physical dimensions are
    always 2 (one qubit per site).
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one trivial (1,1,2,2) tensor per qubit; ``np.complex`` was removed
        # in NumPy 1.24, the builtin ``complex`` is the equivalent dtype
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns the maximal (left) bond dimension over all sites """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of ``qubit``.

        set_at: where to put data; two indices select a full 2x2 physical
        block, four indices select a single scalar entry.
        Raises Exception for any other length.
        """
        # Set a full 2x2 matrix at bond position (set_at[0], set_at[1])
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set one specific scalar value
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of the tensor of ``qubit`` and place
        ``add_operator`` into the newly created corner block.
        This should mimick a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay zero.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # builtin complex instead of the removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the last (freshly created) bond slot
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO via a forward and a backward SVD sweep, dropping
        singular values below EPS and distributing sqrt(s) to both sides.
        """
        n_qubits = len(self.container)
        # fuse the two physical legs so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the remainder into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the remainder into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product-operator (MPO) representations of qubit Hamiltonians
    (built on top of the tensornetwork package).

    The Hamiltonian is first translated into an intermediate list of
    SubOperator terms, which are then assembled into one or more
    MPOContainer objects; a new MPO is started whenever the bond dimension
    of the current one would exceed ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to represent (or a file with
                an openfermion operator).
            backend: an optional backend. Defaults to the default backend of
                TensorNetwork. Currently unused; kept for compatibility.
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: an optional name for the MPO (currently unused).
            maxdim: maximal bond dimension of a single MPO.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in this
            # file -- constructing without n_qubits raises AttributeError
            # unless a subclass provides it. TODO confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Translate self.hamiltonian into MPOs and store them in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of self.hamiltonian (a QubitHamiltonian)
        into a list of SubOperator records (coefficient, matrices, positions).

        Raises Exception if an identity term appears anywhere but first.
        """

        def get_pauli_matrix(string):
            # np.complex was removed in NumPy >= 1.24; the builtin complex
            # is the equivalent scalar type
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the very first term may be a pure identity (empty items)
            if not paulistring.items() and not first:
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            # BUGFIX: `first` was previously only cleared for a leading
            # identity, so identity terms later in the list could slip
            # through undetected when no leading identity was present
            first = False
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble consecutive terms ``intermediate[j:]`` into one MPOContainer
        until all terms are consumed or the bond dimension reaches maxdim.

        Returns the MPO and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term goes directly into the existing (1,1,2,2) tensors.
        # The coefficient is distributed evenly over all sites via its
        # n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other terms: each one extends the bond dimensions by one slot
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # boundary tensors only grow one of the two bond indices
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # compress periodically to keep the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the full term list into a list of MPOs respecting maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from the stored MPOs, e.g. to compare with
        the Hamiltonian matrix obtained from tequila.

        Returns a rank-4 tensor of shape (d,d,d,d) with d = 2**(n_qubits/2),
        i.e. the full 2**n x 2**n matrix reshaped into four legs.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout (contract over all bond indices):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network along the bond indices (double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: non-torch backends (e.g. numpy) previously left
                # H_m unbound and raised NameError here
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        if H is None:
            # no MPOs stored -- make the failure explicit instead of an
            # AttributeError on None below
            raise Exception("construct_matrix called without any MPOs")
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.4/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable container handed to scipy.optimize.minimize as the objective
    function; it keeps the optimization history.

    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a (tequila) Hamiltonian.
    unitary:
        the parametrized circuit the Hamiltonian is measured with.
    param_keys:
        ordered variable keys matching positions in the parameter array.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by _GradContainer; stored here for reuse).
    Eval:
        optional evaluation container (not used in this class).
    passive_angles:
        variables held fixed during the optimization.
    samples:
        number of shots per evaluation (default 1024 here).
    save_history:
        whether or not to record every evaluation in history/history_angles.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # filled on every call with a summary line for the optimizer log
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy for a parameter vector.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            self.param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the simulated expectation value, cast to complex (see note at
            the return statement).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit variables stay real; variables that only enter the
            # Hamiltonian are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # build the variable-dependent Hamiltonian, then measure it
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        # NOTE(review): backend 'qulacs' is hard-coded -- confirm this is
        # intended rather than using the optimizer's configured backend
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable gradient container handed to scipy.optimize.minimize as `jac`;
    it keeps the optimization history.

    For each active parameter the gradient has up to two contributions:
    the circuit derivative (tq.grad of the expectation value) and the
    Hamiltonian derivative (extra expectation values built from
    self.Ham_derivatives).

    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for attribute details.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient for a parameter vector.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like
            self.param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector as numpy array of dtype numpy.complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real-valued; assigning a complex
        # derivative below silently drops the imaginary part (ComplexWarning)
        # even though the result is later cast to complex64 -- confirm intended
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian-only variables complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit depending on this parameter
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian depending on this parameter
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # plain numbers are stored directly; otherwise `derivative` is a
            # compiled objective that still needs to be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that builds its own expectation-value and
    gradient containers (_EvalContainer/_GradContainer) from a parametrized
    Hamiltonian plus a parametrized unitary, instead of a precompiled
    tequila Objective.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the Hamiltonian and the unitary.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch would raise NameError; verify.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): `warnings` and TequilaWarning are not imported in
            # this module -- verify before relying on this branch.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized and fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            parametrized circuit the Hamiltonian is measured with.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # collect all variables: those of the Hamiltonian plus the circuit's
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        # objective container; evaluated once silently to fill the infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): `objective`, get_qng_combos, _QngContainer and
                # TequilaException are not defined/imported in this module
                # (the containers import at the top is commented out) -- the
                # 'qng' path would raise NameError; verify before using it.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # pass the string ('2-point' etc.) straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same unresolved names as the string 'qng'
                # branch above -- verify.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytical gradient container; evaluated once silently to fill
            # the infostring
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # per-call callback class: records the latest history entries after
        # every scipy iteration (the class is re-created on every __call__,
        # so the class-level lists are not shared between optimizations)
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was configured
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assignment is dead code -- the local name
        # `real_iterations` is never used afterwards; verify original intent.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize the expectation value of ``Hamiltonian`` over the circuit ``unitary``
    by delegating to the local ``optimize_scipy`` optimizer.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit preparing the state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If given None they will all be set to zero.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to (emulatedly) execute circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # dictionary-form gradients get their keys normalized to Variables
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # hessian keys are (variable, variable) tuples; normalize each element.
            # bugfix: the second element was previously wrapped in a list
            # (assign_variable([k[1]])), producing a malformed key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.4/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate tequila Objectives, ExpectationValues, Unitaries (including
    single gates) and Transforms.

    :param objective: the structure to be differentiated
    :param variable: the parameter with respect to which to differentiate;
        None (default) builds the total gradient, one entry per variable.
    :param no_compile: skip the gradient-mode circuit compilation step
    :return: dictionary of Objectives when variable is None, otherwise the
        derivative with respect to the single given variable.
    '''
    # total gradient: recurse once per variable of the objective
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert v is not None
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # tensors of objectives are differentiated element-wise
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # derivative w.r.t. a variable the objective does not depend on is empty
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the structure of the (compiled) objective
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective: for each argument, multiply the outer
    derivative of the transformation (via jax/autograd) with the inner
    derivative of the argument w.r.t. ``variable``, and sum the products.
    """
    arguments = objective.args
    trafo = objective.transformation
    accumulated = None
    # cache inner derivatives of expectation values to avoid recomputation
    inner_cache = {}
    for position, argument in enumerate(arguments):
        # outer derivative of the transformation w.r.t. this argument
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_df = jax.grad(trafo, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_df = jax.grad(trafo, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # simple case: identity transformation has constant outer derivative 1
        if trafo is None or trafo == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_df)

        # inner derivative; expectation values (have a "U") are cached
        if hasattr(argument, "U"):
            if argument in inner_cache:
                inner = inner_cache[argument]
            else:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)

        # don't pile up zero expectation values
        if inner == 0.0:
            continue

        accumulated = outer * inner if accumulated is None else accumulated + outer * inner

    if accumulated is None:
        raise TequilaException("caught None in __grad_objective")
    return accumulated
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Bottom of the recursion in __grad_objective: differentiate an argument
    all the way down to variables, returning 1.0 (0.0) when a variable is
    (is not) identical to ``variable``.

    :param arg: a transform, expectation value or variable to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(arg)/d(variable) is 1 exactly when they are the same variable
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # already-compiled expectation value: differentiate the abstract one,
        # then re-compile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # otherwise a nested objective: recurse via the chain rule
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value w.r.t. one variable:
    apply the shift rule to every gate of the unitary that is parametrized
    by ``variable`` and sum the contributions.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: the derivative as an Objective (0.0 if E does not depend on variable)
    '''
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # fast return if possible
    if variable not in U.extract_variables():
        return 0.0
    total = Objective()
    # one shift-rule contribution per gate carrying this variable
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g within unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields d<H>/d(variable) through g
    '''
    # custom gate constructions may override shifted_gates; without it we
    # cannot differentiate (the compiler normally guarantees its presence)
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain rule: derivative of the gate's parameter w.r.t. the variable
    dparam = __grad_inner(g.parameter, variable)
    total = Objective()
    for weight, shifted_gate in g.shifted_gates():
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        shifted_expval = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        total += (weight * dparam) * shifted_expval
    return total
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.3/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Immutable-by-convention record used as an intermediate format: bundles a
    scalar coefficient with a list of single-qubit operator matrices and the
    qubit positions on which they act.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this operator string."""
        return self._coefficient

    @property
    def operators(self):
        """List of operator matrices, one per entry in ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Container holding one MPO tensor per qubit.

    Each entry of ``container`` has shape (Dl, Dr, 2, 2): left bond, right
    bond and the two physical indices. Supports setting values at given
    positions, growing the bond dimensions (wannabe-equivalent to dynamic
    arrays) and SVD-based compression of the MPO.
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # start with trivial bond dimension 1 on every site.
        # bugfix: np.complex (removed in NumPy 1.24) replaced by builtin complex
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal (left) bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: [i, j] writes a full 2x2 matrix at bond position (i, j);
                [i, j, k, l] writes a single scalar entry.
        """
        if len(set_at) == 2:
            # set a full 2x2 matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # set one scalar entry
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of the tensor at ``qubit`` (mimicking a
        dynamic array) and place ``add_operator`` in the new corner.

        update_dir: e.g. [1, 1, 0, 0] -> extend the dimension wherever a 1
        appears; the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # bugfix: was ``update_dir += [0, 0]`` which mutated the
                # caller's list in place; rebind to a new list instead
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # bugfix: np.complex (removed in NumPy 1.24) replaced by builtin complex
        new_tensor = np.zeros(new_shape, dtype=complex)
        # copy old values, then put the new operator in the grown corner
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO via two SVD sweeps (left-to-right, then
        right-to-left): singular values below EPS are discarded and
        sqrt(s) is distributed to both sides of each cut.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # forward sweep
        for q in range(n_qubits - 1):
            # permute [0 1 2] -> [0 2 1] so the right bond is the matrix column
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # keep only singular values above the cutoff
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # distribute sqrt(s) to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # undo the permutation and push vh into the next site
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # backward sweep
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds an MPO (matrix product operator) representation of a qubit
    Hamiltonian on top of tensornetwork.

    The Hamiltonian's Pauli strings are first converted to an intermediate
    SubOperator list, then absorbed into one or more MPOContainer objects
    (a new MPO is started whenever the bond dimension would exceed
    ``maxdim``). ``construct_matrix`` contracts the MPOs back to a dense
    tensor for verification against the original Hamiltonian.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the qubit Hamiltonian to represent as MPO(s).
            backend: an optional backend; defaults to the TensorNetwork default.
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: an optional name for the MPO.
            maxdim: bond-dimension threshold at which a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown
            # here; pass n_qubits explicitly to avoid this path -- TODO confirm
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        # number of qubits the MPO acts on
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (a list of MPOContainer) from ``self.hamiltonian``."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of ``self.hamiltonian`` (a tequila
        QubitHamiltonian) into a list of SubOperator records
        (coefficient, 2x2 operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # bugfix: np.complex (removed in NumPy 1.24) replaced by builtin complex
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # store all Pauli strings in the intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # only the first Pauli string may be the identity (empty items)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # collect the operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at intermediate[j]; keep absorbing Pauli
        strings until the bond dimension reaches ``self.maxdim`` or the
        intermediates are exhausted.

        Returns:
            (MPOContainer, index of the first unconsumed intermediate)
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First Pauli string: fill the initial (1,1,2,2) tensors directly.
        # The coefficient is distributed evenly over all sites via its
        # n_qubits-th root (np.complex replaced by builtin complex throughout).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All further Pauli strings: grow the bond dimension by one per string
        # (boundary sites grow only one bond index) and compress periodically.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # it is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # if there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         my_operators[my_pos_index])
                # else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         np.eye(2))
            # compress every 100 absorbed strings to keep the bond dimension low
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the intermediates into as many MPOs as ``maxdim`` requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # start with 0, then the final count is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator by contracting all MPOs and summing them,
        e.g. to compare with the Hamiltonian obtained from tequila.
        Returns a (d, d, d, d) tensor with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Contract each MPO over all bond indices; the network looks like
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # connect neighbouring bond indices
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # collect dangling edges (free indices):
            # left bond, right bond, then all upper and all lower physical legs
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # contract all nodes along the non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # bugfix: with a non-torch backend the result was silently
                # dropped (H stayed None); use the raw tensor instead
                H_m = np.asarray(res.tensor)
            if first:
                H = H_m
            else:
                H = H + H_m
            first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.3/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Energy-evaluation wrapper handed to scipy as the objective function.

    Evaluates <U(angles)| H(angles) |U(angles)> for a parametrized
    Hamiltonian and circuit, and keeps the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a concrete Hamiltonian.
    unitary:
        the parametrized circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys; position i corresponds to p[i] in __call__.
    samples:
        the number of samples/shots per evaluation.
    passive_angles:
        variables held fixed (merged into every evaluation), or None.
    save_history:
        whether to record energies and angles on each __call__.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # infostring is filled on each __call__ with an expectation-value count
        self.infostring = None
        # precomputed derivatives of the Hamiltonian w.r.t. its own variables
        # (used by the _GradContainer subclass)
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the (complex-cast) expectation value at p.
        """
        # circuit variables keep scipy's real value; variables that only
        # appear in the Hamiltonian are cast to complex
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        # NOTE: 'vars' shadows the builtin of the same name
        vars = format_variable_dictionary(angles)
        # instantiate the Hamiltonian at the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient wrapper handed to scipy as ``jac``.

    Computes the derivative of <U(angles)| H(angles) |U(angles)> with respect
    to every active parameter: circuit parameters via tequila's automatic
    differentiation, Hamiltonian parameters via the precomputed Hamiltonian
    derivatives (``Ham_derivatives``). See _EvalContainer for shared
    attributes. This class is used by the SciPy optimizer and should not be
    used elsewhere.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like ``self.param_keys``.
        args
        kwargs

        Returns
        -------
        numpy.array:
            complex64 gradient vector, one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # bugfix: the vector must be complex from the start -- writing complex
        # derivatives into a real (default float64) array raises a TypeError
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # circuit variables keep scipy's real value; Hamiltonian-only
        # variables are cast to complex (mirrors _EvalContainer.__call__)
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit: d<H>/dtheta via tq.grad
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian: <dH/dtheta>
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # robustness: also accept plain python complex (e.g. from
            # tq.simulate) as a finished number -- previously only float and
            # numpy.complex64 were recognized, anything else was "called"
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                # derivative is still an (un-evaluated) tequila objective
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian; converted via convert_PQH_to_tq_QH and
            combined with `unitary` into the expectation value to minimize.
        unitary:
            parametrized circuit whose variables (together with the
            Hamiltonian's own variables) form the optimization parameters.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Convert the parametrized Hamiltonian into a tequila QubitHamiltonian
        # factory and collect its variables and analytic derivatives.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        # Full variable set = Hamiltonian variables + circuit variables.
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)

        infostring = "{:15} : {}\n".format("Method", self.method)

        if self.save_history and reset_history:
            self.reset_history()

        # Split into actively optimized angles and passive (fixed) angles.
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)

        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled

        # Wrap energy evaluation for scipy; call once silently so that the
        # container's infostring is initialized.
        E = _EvalContainer(Hamiltonian=H,
                           unitary=unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        # Decide whether analytic gradient/hessian objectives must be built.
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method
                # (the signature takes Hamiltonian/unitary) -- taking the
                # 'qng' branch would raise a NameError. TODO confirm/repair.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # e.g. '2-point'/'3-point'/'cs': hand the string to scipy for
                # numerical differentiation.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)

        if isinstance(gradient, dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is also undefined here -- see above.
                combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            # Analytic gradient container: combines circuit shift-rule
            # gradients with Hamiltonian-derivative expectation values.
            dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                                unitary=unitary,
                                Hamiltonian=H,
                                Eval=E,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))

        Es = []

        optimizer_instance = self

        class SciPyCallback:
            # Per-iteration recorder; scipy invokes __call__ after every
            # optimizer iteration (where the method supports callbacks).
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward the latest angles to a user-supplied callback
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never read afterwards;
            # presumably intended to patch the recorded iteration count.
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}

        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Build an optimize_scipy optimizer and run it on the given Hamiltonian
    and circuit.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize over (forwarded to the
        optimizer's __call__).
    unitary:
        the parametrized circuit to optimize over.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
        If given None they will all be set to zero.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the
        compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run/sample (backend dependent).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries onto Variable keys
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUG FIX: the second key component was previously wrapped in a
            # list (assign_variable([k[1]])), which assign_variable cannot
            # convert; pass the hashable k[1] directly.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.3/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.
    :param objective (QCircuit,ParametrizedGateImpl,Objective,ExpectationValue,Transform,Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (one gradient objective per variable).
    :param no_compile (bool): skip the compilation pass (use when objective is already compiled).
    return: dictionary of Objectives, if called on gate, circuit, exp.value, or objective; if Variable or Transform, returns number.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            # recurse once per variable to build the total gradient
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor: differentiate elementwise via numpy's vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # objective does not depend on variable -> zero gradient
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile down to gates whose shift-rule derivatives are known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (compiled) objective type
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate a (compiled) Objective w.r.t. one variable by the chain
    rule: outer derivative of the transformation (via autograd/jax) times
    the inner derivative of each argument.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives so repeated expectation values are only
    # differentiated once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner

    if dO is None:
        raise TequilaException("caught None in __grad_objective")

    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: differentiate a leaf argument
    all the way down to variables.

    Returns 1.0 when arg is the variable itself, 0.0 when it is a different
    or fixed variable, and otherwise recurses into expectation values or
    nested objectives.

    :param arg: a transform, variable, or expectation-value object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)

    # plain variables: derivative is the Kronecker delta
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0

    # expectation values: delegate to the shift-rule machinery
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)

    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear
    in an expectation value. See the paper.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation should be performed.
    :return: dE/dvariable as an Objective (sum of shift-rule contributions)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    # all gates (with their circuit positions) that carry this variable
    param_gates = unitary._parameter_map[variable]

    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        # shift-rule contribution of gate g at position idx
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''

    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        for x in shifted:
            w, g = x
            # replace the original gate by its shifted version and weight the
            # resulting expectation value by the shift coefficient
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.3/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record for one Pauli term in intermediate format:
    a scalar coefficient, the list of single-qubit operator matrices, and
    the qubit positions those operators act on.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stash everything privately; exposed via read-only properties below
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices of this term."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is one rank-4 tensor per qubit with index
    order [left-bond, right-bond, physical, physical].
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUG FIX: ``np.complex`` (a deprecated alias for the builtin
        # ``complex``, removed in NumPy >= 1.24) replaced by ``complex``;
        # the resulting dtype is identical (complex128).
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write into the tensor of `qubit`.

        set_at: where to put data; length 2 sets a full 2x2 physical matrix
        at [bond_l, bond_r], length 4 sets a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        `add_operator` in the newly created corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # BUG FIX: dtype=complex instead of the removed np.complex alias.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the freshly grown corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a forward sweep followed by a backward
        sweep, truncating singular values below EPS and distributing their
        square roots onto both factors.
        """
        n_qubits = len(self.container)
        # flatten the two physical indices into one for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # restore the two physical indices
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building on tensornetwork to represent a qubit Hamiltonian as a
    list of matrix product operators (MPOs), each capped at bond dimension
    ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: a tequila QubitHamiltonian (or a text identifier).
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension per single MPO; when exceeded, a
                new MPO is started (see intermediate_to_mpo).
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class --
            # presumably provided elsewhere; confirm before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """Convert the Hamiltonian's Pauli strings into SubOperator records."""

        def get_pauli_matrix(string):
            # BUG FIX: dtype=complex replaces the np.complex alias that was
            # removed in NumPy >= 1.24 (identical dtype: complex128).
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at term index j, absorbing terms until all are
        consumed or the bond dimension would exceed self.maxdim.

        Returns:
            (mpo, j): the MPOContainer and the index of the first unconsumed term.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)

        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The term coefficient is distributed evenly over all qubits via its
        # n_qubits-th root; BUG FIX: complex(...) replaces np.complex(...),
        # the alias removed in NumPy >= 1.24.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])

        # ***********************************************************************
        # All other entries: grow the virtual bond by one per absorbed term
        # and compress periodically to keep the dimension in check.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            if not j % 100:
                mpo.compress_mpo()
            j += 1

        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as self.maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator, e.g. to compare with the Hamiltonian
        matrix obtained from tequila. Contracts each stored MPO over its bond
        indices and sums the results.

        Returns:
            rank-4 tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open
            # indices and combine top & bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUG FIX: previously H_m was left undefined for non-torch
                # backends, raising NameError; use the raw tensor instead.
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.3/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        parametrized Hamiltonian; evaluated at the current angles on each call.
    unitary:
        the parametrized circuit; combined with the evaluated Hamiltonian
        into an expectation value on every call.
    param_keys:
        the ordered parameter keys mapping to positions in the numpy parameter array.
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives=None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value at p (cast to complex; see note
            on the return line).
        """
        angles = {}
        for i in range(self.N):
            # circuit variables stay as given; remaining (Hamiltonian)
            # variables are cast to complex coefficients
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            # merge in the fixed (non-optimized) angles
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at these angles, then measure
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Container class wrapping the gradient evaluation for scipy.

    Overwrites __call__ of _EvalContainer: instead of the energy it returns
    the gradient vector with respect to the active parameters.  This class
    is used by the SciPy optimizer and should not be used elsewhere.
    See _EvalContainer for the meaning of the stored attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.
        args
        kwargs
        Returns
        -------
        numpy.ndarray:
            complex gradient vector with one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # complex dtype is required: derivatives of complex Hamiltonian
        # coefficients are complex, and assigning a complex value into a
        # real-valued numpy array raises a TypeError.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # circuit variables stay real; purely Hamiltonian-coefficient
        # variables are promoted to complex (mirrors _EvalContainer.__call__)
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit: d<H>/dtheta via tq.grad
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian coefficients: <dH/dtheta>
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # plain number: fully evaluated already; otherwise the sum is still
            # an abstract (compiled) objective and must be simulated here.
            # complex is included so that complex-valued simulate results do
            # not fall through to the callable branch and crash.
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        # history only exists when save_history was requested in __init__
        if self.save_history:
            self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer for (Hamiltonian, unitary) pairs.

    Overwrites the expectation and gradient container objects of
    OptimizerSciPy with _EvalContainer/_GradContainer so that variables
    appearing in the Hamiltonian coefficients can be optimized together
    with the circuit parameters.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            every variable of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None means all).
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            # single number: broadcast to every variable
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into optimized (active) and fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (PQH form) whose expectation value
            together with `unitary` is minimized.
        unitary:
            parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect the variables in
        # its coefficients together with their symbolic derivatives
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        # full variable set = Hamiltonian variables + circuit variables
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call: populates E.infostring with the
        # expectation-value count without spamming the console
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope (the
                # signature takes Hamiltonian/unitary) — requesting
                # gradient='qng' would raise a NameError here. Confirm intent.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # scipy numerical gradient instruction ('2-point', 'cs', '3-point')
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` as in the string
                # qng branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call, as for E above
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # records one snapshot per *scipy iteration* (as opposed to the
            # containers' histories, which record every function evaluation)
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                # forward to a user-supplied callback if one was given
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used —
            # presumably callback.real_iterations (or the history) was meant.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy class and passes the objective
    construction (from Hamiltonian and unitary) down to it.
    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian (PQH form) to minimize together with unitary.
    unitary:
        parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run the circuits (backend dependent).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # normalize custom-gradient dictionaries to canonical Variable keys;
    # instruction dicts (e.g. {'method': 'qng', ...}) are passed through
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # keys are (variable, variable) tuples; assign each element
            # individually (the second key must not be wrapped in a list)
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.3/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective, ExpectationValue, QTensor): structure to be differentiated
    :param variable (Variable or assignable): parameter with respect to which objective should be differentiated.
        default None: total gradient (a dictionary over all variables).
    :param no_compile (bool): skip the pre-differentiation circuit compilation step.
    return: dictionary of Objectives (one per variable) if variable is None,
        otherwise a single (grad-)Objective for the given variable.
    '''
    if variable is None:
        # None means that all components are created
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # recurse once per variable to build the full gradient dict
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # tensor-valued objectives: differentiate elementwise
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # fast return: derivative w.r.t. an unused variable is the empty Objective
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # compile gates into shift-rule-differentiable form first
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single-expectationvalue objective: differentiate its last argument
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    # Chain rule over the arguments of the objective's transformation:
    # dO/dvar = sum_i (d transformation / d arg_i) * (d arg_i / d var).
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of expectation values so each is built only once
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative of the transformation w.r.t. its i-th argument
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        # accumulate the chain-rule product over all arguments
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative d(arg)/d(variable) used by __grad_objective.
    Dispatches on the type of arg: plain Variables differentiate to 1 or 0,
    fixed variables to 0, and expectation-value-like objects are delegated
    to __grad_expectationvalue (compiled objects are re-compiled with their
    original input arguments).
    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the expectation value <U|H|U> whose gradient should be obtained
    :param variable (Variable): the variable with respect to which differentiation should be performed.
    :return: dE/d(variable) as an Objective, summed over all gates that carry the variable
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # all (position, gate) pairs in the circuit that depend on this variable
    param_gates = unitary._parameter_map[variable]
    dO = Objective()
    # product-rule sum: one shift-rule contribution per parametrized gate
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Build the shift-rule gradient contribution of a single gate.
    Expects precompiled circuits: the gate must provide shifted_gates(),
    which yields (weight, replacement-gate) pairs. Each replacement is
    spliced into the circuit at position i and weighted by both the shift
    weight and the inner derivative of the gate parameter.
    :param unitary: QCircuit: the circuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient contribution of g
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner_grad = __grad_inner(g.parameter, variable)
    dOinc = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_unitary = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_unitary, H=hamiltonian)
        dOinc += (weight * inner_grad) * expectation
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record for one Pauli term in intermediate form: a scalar
    coefficient, the single-qubit operator matrices, and the qubit
    positions they act on. All fields are exposed as read-only properties.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep the raw data private; access goes through the properties below
        self._coefficient, self._operators, self._positions = \
            coefficient, operators, positions
    @property
    def coefficient(self):
        """Scalar prefactor of the term."""
        return self._coefficient
    @property
    def operators(self):
        """List of 2x2 operator matrices, aligned with `positions`."""
        return self._operators
    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of one MPO term.

    Handles setting values at given positions, growing the bond dimensions
    (wannabe-equivalent to dynamic arrays) and compressing the MPO via SVD.
    Each site tensor has shape (left bond, right bond, 2, 2).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        # one rank-4 tensor per qubit, initialized with trivial (1,1) bonds.
        # NOTE: `complex` is used as the dtype (= complex128); the removed
        # numpy alias np.complex would raise on NumPy >= 1.24.
        self.n_qubits = n_qubits
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                            for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max bond dimension of container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put data.
            length 2 -> (left bond, right bond): add_operator is a 2x2 matrix
            length 4 -> a single scalar entry
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array: grow the bond dimensions of the
        tensor at `qubit` and place add_operator in the new corner block.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
            the last two (physical) dimensions are always 2x2 and must be 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # build a new list instead of += so the caller's list
                # argument is not mutated as a side effect
                update_dir = list(update_dir) + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly created corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, dropping (near-)zero singular values (< EPS)
        and splitting the singular weights between neighbouring sites.
        """
        n_qubits = len(self.container)
        # flatten the two physical legs into one for the SVDs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # restore the two physical legs
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle
    MPO-Hamiltonians: converts a qubit Hamiltonian into a list of
    bond-dimension-capped MPO terms and can recover the dense operator.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object
        Args:
            hamiltonian: tequila QubitHamiltonian to encode.
            backend: An optional backend. Defaults to the defaulf backend
                of TensorNetwork. (Currently unused here.)
            n_qubits: qubit count; inferred from the Hamiltonian if omitted.
            name: An optional name for the MPO. (Currently unused.)
            maxdim: bond-dimension cap for a single MPO term; terms beyond
                the cap are split into additional MPOs.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()
    def get_n_qubits(self):
        """Infer the qubit count from the Hamiltonian.
        NOTE(review): this method was referenced in __init__ but missing from
        the class; tq.QubitHamiltonian exposes n_qubits — confirm against the
        tequila version in use."""
        return self.hamiltonian.n_qubits
    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits
    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into self.mpo (a list of MPOContainer)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)
    def openfermion_to_intermediate(self):
        """Translate the Hamiltonian's Pauli strings into SubOperator records
        (coefficient, operator matrices, qubit positions)."""
        def get_pauli_matrix(string):
            # builtin `complex` as dtype (= complex128); the removed numpy
            # alias np.complex would raise on NumPy >= 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty operator list);
            # an identity term is represented by empty positions/operators.
            # NOTE(review): `first` is only cleared when the first term IS the
            # identity, so a later identity after a non-identity first term
            # would not be caught — confirm intent.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate
    def build_single_mpo(self, intermediate, j):
        """Greedily pack Pauli terms, starting at index j, into one
        MPOContainer until self.maxdim is reached.
        Returns (mpo, index of the first unpacked term)."""
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term: fills the trivial 1x1 bond blocks directly. The
        # coefficient is spread evenly over all sites via its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: each extends the bond dimension by one block, with a
        # periodic compression, until terms run out or maxdim is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions.
                # Boundary sites only grow one bond; interior sites grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         np.eye(2))
            # compress periodically to keep intermediate bond dimensions small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j
    def intermediate_to_mpo(self, intermediate):
        """Split the list of terms into as many MPOContainers as the
        bond-dimension cap requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list
    def construct_matrix(self):
        ''' Recover the dense operator (summed over all MPO terms), e.g. to
        compare with the Hamiltonian matrix obtained from tq. '''
        mpo = self.mpo
        # Contract over all bond indices;
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Convert to numpy regardless of backend: torch tensors need an
            # explicit conversion, numpy arrays pass through unchanged
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if first:
                H = H_m
                first = False
            else:
                H = H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.2/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy evaluator used by the SciPy optimizer; do not use elsewhere.

    Overwrites the call function of tequila's evaluation container so that the
    expectation value is rebuilt from a parametrized Hamiltonian and a fixed
    unitary at every call, and keeps the optimization history.
    Attributes
    ---------
    Hamiltonian:
        callable returning the tequila Hamiltonian for a variable dictionary.
    unitary:
        the parametrized circuit whose expectation value is evaluated.
    param_keys:
        the sequence mapping parameter keys to positions in a numpy array.
    Ham_derivatives:
        precomputed Hamiltonian derivatives (consumed by _GradContainer).
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            ``self.param_keys``.
        args
        kwargs
        Returns
        -------
        complex:
            value of the rebuilt expectation value at ``p``.
        """
        # Split the flat parameter vector: circuit variables keep their real
        # value, Hamiltonian coefficients are promoted to complex.
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian and expectation value for the current
        # variables, then simulate on the qulacs backend.
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): a complex scalar is returned here, but scipy
        # minimizers generally expect a real objective value -- confirm the
        # imaginary part is always ~0 for the Hamiltonians used.
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer; used by the SciPy optimizer and
    should not be used elsewhere.

    Overwrites __call__ so that the gradient is assembled from two
    contributions per parameter: the derivative of the circuit (via tq.grad)
    and the derivative of the parametrized Hamiltonian (via the precomputed
    Ham_derivatives). See _EvalContainer for the shared attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.
        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like
            ``self.param_keys``.
        args
        kwargs
        Returns
        -------
        numpy.ndarray:
            complex gradient vector of length ``self.N``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the accumulator was previously allocated real
        # (numpy.zeros(self.N)), which silently discarded the imaginary part
        # of complex derivatives before the final complex64 cast. Allocate
        # it complex from the start.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # Split parameters exactly as _EvalContainer does: circuit variables
        # stay real, Hamiltonian coefficients are promoted to complex.
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Contribution from the circuit: d<H>/dtheta via the shift rule.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the Hamiltonian: <dH/dc> evaluated directly.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # BUG FIX: plain Python ``complex`` results (e.g. from
            # tq.simulate) previously fell through to the callable branch
            # and crashed; widen the scalar check. Compiled objectives still
            # need to be evaluated with the current variables.
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that overwrites the expectation- and gradient
    container objects (_EvalContainer / _GradContainer) so the objective is
    rebuilt from a parametrized Hamiltonian and a unitary at every evaluation.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian coefficients plus
            circuit angles).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment below has no effect; presumably a
        # leftover from an earlier refactor.
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            # NOTE(review): warnings / TequilaWarning / TequilaOptimizerException
            # are not imported in this module -- confirm these names resolve
            # at runtime before this branch is hit.
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # Split into the variables that scipy optimizes (active) and those
        # that are held fixed (passive).
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (PQH) defining the objective.
        unitary:
            the parametrized circuit defining the objective.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # Collect all variables: Hamiltonian coefficients + circuit angles.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        # Build (and warm up) the energy evaluator; the initial call is done
        # silently just to populate its infostring.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): 'objective' is not defined in this method and
                # get_qng_combos/_QngContainer are not imported (the import is
                # commented out at module top) -- this branch would raise
                # NameError if reached; confirm QNG is actually supported here.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient instruction ('2-point', 'cs', '3-point'):
                # pass the string straight through to scipy.
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined-name concern as the QNG branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytic gradient through the custom container; warmed up
            # silently like E above.
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # Per-call callback class: a fresh class object is created on every
        # __call__, so the class-level lists are effectively per-optimization.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local and has no effect;
            # presumably it was meant to backfill the callback history from
            # E.history -- confirm intent. The len()==0 fallback below covers
            # the energies/angles case regardless.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        # -> pick the best evaluation seen during the whole run instead.
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    calls the local optimize_scipy scipy funtion instead and passes the
    objective construction (parametrized Hamiltonian + unitary) down
    Parameters
    ----------
    Hamiltonian:
        The parametrized Hamiltonian defining the objective to optimize
    unitary:
        The parametrized circuit defining the objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Normalize user-supplied analytic gradients/hessians into the canonical
    # variable-keyed form.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second tuple element was previously wrapped in a
            # list -- assign_variable([k[1]]) -- which produced invalid
            # hessian keys instead of a (Variable, Variable) tuple.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.2/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective, QTensor, ExpectationValue, Transform, Variable): structure to be differentiated
    :param variable (Variable): parameter with respect to which objective should be differentiated.
        default None: total gradient (a dictionary over all variables).
    :param no_compile: if True, skip the gradient-mode circuit compilation step
        (caller guarantees the objective is already shift-rule compatible).
    return: dictionary of Objectives if variable is None; otherwise the derivative
        Objective (an empty Objective if the variable does not occur).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable.
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    # QTensor: differentiate elementwise via numpy vectorize.
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    # Objective does not depend on the variable: the derivative is an empty
    # (constant-zero) Objective rather than an error.
    if variable not in objective.extract_variables():
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # gradient_mode compilation rewrites all gates into forms for which
        # the parameter-shift rule is defined.
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    # Dispatch on the (compiled) structure; order matters: a bare expectation
    # value is handled before the generic Objective case.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Differentiate an Objective w.r.t. one variable via the chain rule:
    sum_i d(transformation)/d(arg_i) * d(arg_i)/d(variable), where the outer
    derivative is taken symbolically with jax/autograd and the inner one
    recurses through __grad_inner.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # Cache inner derivatives of expectation values so each distinct
    # expectation value is differentiated only once.
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # Outer derivative d(transformation)/d(arg_i) via the autodiff backend.
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost derivative dispatch used by __grad_objective: differentiates
    ``arg`` with respect to ``variable``, recursing all the way down to
    Variables and returning 1.0 or 0.0 once a variable is (isn't) identical
    to ``variable``.
    :param arg: a transform, expectation value or variable object to differentiate
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        # d(var)/d(var) = 1, d(other)/d(var) = 0
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # Pre-compiled expectation value: differentiate the abstract one and
        # recompile with the same backend arguments.
        dE = __grad_expectationvalue(arg.abstract_expectationvalue, variable=variable)
        return compile(dE, **arg._input_args)
    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value w.r.t. one variable,
    assembled from the shift-rule contributions of every gate in the unitary
    that is parametrized by ``variable`` (see the paper).
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed.
    :return: d<H>_U / d(variable) as an Objective (0.0 if independent)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not unitary.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))
    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0
    # Sum the shift-rule term of every gate carrying this variable.
    dO = Objective()
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
        is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # Chain rule: derivative of the gate parameter w.r.t. the variable.
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # Sum over all (weight, shifted-gate) pairs: each one replaces g at
        # position i and contributes weight * inner_grad * <H>_U'.
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.1/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight read-only record holding one Pauli string in intermediate
    form: a scalar coefficient, the list of single-qubit operators, and the
    qubit positions those operators act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of the Pauli string."""
        return self._coefficient

    @property
    def operators(self):
        """List of single-qubit operator matrices."""
        return self._operators

    @property
    def positions(self):
        """List of qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress
    the MPO via SVD.

    Each entry of ``container`` is a rank-4 tensor with index order
    [bond-left, bond-right, physical, physical]; the physical dimensions are
    always 2 (qubits).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # BUG FIX: ``np.complex`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``complex`` is the exact drop-in replacement
        # (np.complex was merely an alias for it).
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]
    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d
    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: where to put data; either two bond indices (then
        ``add_operator`` must be a 2x2 matrix) or four indices (then
        ``add_operator`` is a scalar entry).
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")
    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 only. ``add_operator`` (a 2x2
        matrix) is written into the newly created corner block.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUG FIX: dtype=np.complex -> builtin complex (alias removed in
        # NumPy 1.24, see __init__).
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the bottom-right corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor
    def compress_mpo(self):
        """
        Compression of MPO via SVD: a forward left-to-right sweep followed by
        a backward sweep, truncating singular values below EPS and splitting
        the singular weights evenly between left and right factors.
        """
        n_qubits = len(self.container)
        # Fuse the two physical legs so each tensor is rank 3 during sweeps.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site.
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site.
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Unfuse the physical legs back to 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build MPO representations of a (tequila) qubit Hamiltonian.

    Each Pauli string of the Hamiltonian is first converted into an
    intermediate ``SubOperator`` form and then accumulated into one or more
    :class:`MPOContainer` objects, each capped at bond dimension ``maxdim``.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: a tequila QubitHamiltonian (or a text identifier).
            backend: an optional backend name (currently accepted but unused).
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: an optional name for the MPO (currently accepted but unused).
            maxdim: bond-dimension cap; a new MPO is started once exceeded.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown
            # here — confirm it is provided elsewhere before relying on it.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert ``self.hamiltonian`` and store the result in ``self.mpo``."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Hamiltonian's Pauli strings into a list of SubOperator
        (coefficient, operators, positions) records.

        Raises:
            Exception: if an identity Pauli string appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # BUGFIX: dtype=np.complex was removed in NumPy 1.24 -> use complex
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first string may be the identity (no items); its
            # coefficient is later distributed over all sites.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Collect the operators and the qubits they act on
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators,
                                 positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Accumulate Pauli terms starting at index ``j`` into one MPOContainer
        until the term list is exhausted or the bond dimension would exceed
        ``self.maxdim``.

        Returns:
            (mpo, j): the filled container and the index of the first term NOT
            included, so the caller can continue from there.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ----------------------------------------------------------------
        # First term: plain 2x2 entries at bond position (0, 0).  The scalar
        # coefficient is spread as its n-th root over all sites.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # BUGFIX: np.complex(...) removed in NumPy 1.24 -> builtin complex
            scale = complex(my_coefficient) ** (1 / n_qubits)
            if q in my_positions:
                op = my_operators[my_positions.index(q)]
            else:
                op = np.eye(2)
            mpo.set_tensor(qubit=q, set_at=[0, 0], add_operator=scale * op)
        # ----------------------------------------------------------------
        # All further terms: grow the bond dimensions and place each term in
        # the new corner slot.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Boundary tensors only grow one bond; bulk tensors grow both.
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                scale = complex(my_coefficient) ** (1 / n_qubits)
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=scale * op)
            # Periodic compression keeps the bond dimension in check
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into a list of MPOs, each below ``maxdim``."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator, e.g. to compare with the Hamiltonian
        matrix obtained directly from tequila.  Sums the contraction of every
        MPO in ``self.mpo`` and reshapes to ``(d, d, d, d)`` with
        ``d = 2**(n_qubits/2)``.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        H = None
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]:
        #    |  |       |  |
        #   -O--O--...--O--O-
        #    |  |       |  |
        for m in mpo:
            assert n_qubits == len(m.container)
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect the bond legs (the horizontal lines above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices): left bond, right bond,
            # then all upper and all lower physical legs
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract all non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: the original only assigned H_m inside the torch branch,
            # leaving it undefined (NameError) for non-torch backends.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if H is None:
                H = H_m
            else:
                H = H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.1/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Objective wrapper handed to scipy.optimize as the function to minimize.

    Unlike tequila's stock container, this one rebuilds the Hamiltonian from
    the current variable values on every call (the Hamiltonian itself is
    parametrized) before simulating the expectation value.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the (parametrized) circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys matching the positions in the scipy parameter
        vector ``p``.
    Ham_derivatives:
        mapping of variables to Hamiltonian derivatives (used by subclasses).
    passive_angles:
        fixed variables merged into every evaluation, not optimized.
    samples:
        number of shots per simulation.
    save_history:
        whether to record every energy/angle pair in ``history`` /
        ``history_angles``.
    print_level:
        verbosity of per-call printing.
    N:
        number of active parameters (``len(param_keys)``).
    infostring:
        human-readable summary of the last evaluation's expectation-value count.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the expectation value at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            values for the active variables, ordered like ``param_keys``.

        Returns
        -------
        complex:
            the simulated energy (cast to complex; jax types confuse
            optimizers otherwise).
        """
        angles = {}
        for i in range(self.N):
            # Circuit variables stay real; everything else (Hamiltonian
            # parameters) is passed through as complex.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Rebuild the Hamiltonian for the current variable values
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient wrapper handed to scipy.optimize as ``jac``.

    For each active variable the derivative has up to two parts: the circuit
    derivative (via tequila's parameter-shift ``tq.grad``) and the Hamiltonian
    derivative (a fresh expectation value of the derivative Hamiltonian).
    See _EvalContainer for the shared attributes.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            values for the active variables, ordered like ``param_keys``.

        Returns
        -------
        numpy.ndarray:
            complex64 vector of partial derivatives, ordered like
            ``param_keys``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: allocate as complex64 up front.  The original used a float64
        # array, so storing a complex derivative silently dropped the
        # imaginary part (NumPy ComplexWarning) before the final cast.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # Circuit variables stay real; Hamiltonian parameters are complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Part 1: derivative of the circuit w.r.t. this variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Part 2: derivative of the Hamiltonian w.r.t. this variable
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # A plain number can be stored directly; a compiled tequila
            # objective still needs to be evaluated.
            if isinstance(derivative, (float, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    OptimizerSciPy variant that builds its evaluation and gradient containers
    from a parametrized Hamiltonian plus a unitary (instead of a precompiled
    tequila Objective), so the Hamiltonian's own variables can be optimized
    alongside the circuit angles.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables:
            complete list of variables (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None means all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # a string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this file — this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): neither ``warnings`` nor ``TequilaWarning`` is
                # imported in this file — confirm before relying on this path.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into variables that scipy optimizes and fixed ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Hamiltonian variables and circuit variables together form the
        # optimization space
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # Build the scipy objective; do one silent warm-up call so the
        # infostring is populated before printing
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): ``objective`` is undefined in this scope and
                # the _QngContainer import is commented out at the top of the
                # file — this branch would raise NameError; confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # '2-point', 'cs', '3-point': hand the string through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined ``objective`` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # Analytical gradient via the custom container; silent warm-up
            # call populates its infostring
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # per-iteration bookkeeping, filled by scipy's callback hook
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns a local that is never used — likely
            # intended to repopulate the callback's records; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local ``optimize_scipy`` class (instead of tequila's) and passes
    the Hamiltonian/unitary pair down so the objective is constructed there.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to optimize over (together with the circuit).
    unitary:
        the parametrized circuit.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation,
        a dictionary of variables and tequila objectives for a custom gradient,
        None for automatic construction (default); 'qng' for quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        same options as gradient, for the hessian.
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        initial variable values; all zero if None.
    variables: typing.List[typing.Hashable], optional:
        list of variables to optimize (None: all).
    samples: int, optional:
        samples/shots per circuit evaluation (None: full wavefunction).
    maxiter: int : (Default value = 100):
        maximum number of iterations.
    backend: str, optional:
        simulator backend, chosen automatically if None.
    backend_options: dict, optional:
        additional backend options, passed through on every call.
    noise: NoiseModel, optional:
        noise model applied to all expectation values.
    method: str : (Default = "BFGS"):
        scipy optimization method.
    tol: float : (Default = 1.e-3):
        scipy convergence tolerance.
    method_options: dict, optional:
        scipy method options.
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        variable bounds (see scipy documentation).
    method_constraints: optional:
        scipy constraints.
    silent: bool :
        no printout if True.
    save_history: bool:
        save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # normalize custom gradient/hessian dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # BUGFIX: the second key element was wrapped in a list
            # (assign_variable([k[1]])); it must mirror k[0].
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    # NOTE(review): the keyword ``noise_model`` is handed to the optimizer —
    # confirm OptimizerSciPy accepts it (tequila's own minimize uses ``noise``).
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.1/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    """
    Entry point for differentiating Objectives, QTensors, ExpectationValues,
    unitaries (including single gates) and transforms.

    :param objective: the structure to be differentiated
    :param variable: the Variable with respect to which to differentiate;
        if None, the full gradient is returned as a dict keyed by variable
    :param no_compile: skip the gradient-mode circuit compilation pass
    :return: dict of Objectives when variable is None, otherwise a single
        Objective (or number for trivially variable-dependent inputs)
    """
    # Total gradient: recurse once per variable and collect into a dict.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for var in all_vars:
            assert var is not None
            gradients[var] = grad(objective, var, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    # Tensor-valued objectives: differentiate elementwise.
    if isinstance(objective, QTensor):
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    # Constant with respect to this variable.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile into gradient-friendly primitive gates first.
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # Dispatch on the (original / compiled) structure.
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective's arguments: for each argument, multiply the
    autodiff derivative of the outer transformation (outer) with the
    derivative of the argument itself (inner) and sum the products.

    :param objective: a compiled Objective (args + transformation)
    :param variable: the Variable with respect to which to differentiate
    :raises TequilaException: if no autodiff backend is available or nothing
        could be differentiated
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        # outer derivative of the transformation w.r.t. its i-th argument
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True because arguments may be complex here
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # argument is an expectation value: save redundancies via the cache
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    """
    Differentiate a single objective argument with respect to one variable.

    Dispatches on the type of ``arg``: plain variables reduce to 1.0 / 0.0,
    fixed variables have zero derivative, expectation values are handled
    analytically, and anything else is treated as a nested objective and
    recursed into.

    :param arg: a transform, variable or expectation-value object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    """
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # fall through: arg is itself an objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    """
    Analytic partial derivative of an expectation value w.r.t. one variable.

    Locates every parametrized gate in E.U that depends on ``variable`` and
    accumulates the corresponding shift-rule contributions.

    :param E: the expectation value whose gradient is requested
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective encoding dE/d(variable) (without the Hamiltonian applied)
    """
    H = E.H
    U = E.U
    if not U.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))
    # cheap exit: no dependence on the variable means a vanishing derivative
    if variable not in U.extract_variables():
        return 0.0
    gradient = Objective()
    for position, gate in U._parameter_map[variable]:
        gradient += __grad_shift_rule(U, gate, position, variable, H)
    assert gradient is not None
    return gradient
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    """
    Gradient of a single, directly differentiable gate via its shift rule.

    Expects a precompiled circuit: the gate must expose ``shifted_gates``
    (custom gate constructions may override this hook).

    :param g: the parametrized gate being differentiated
    :param i: the position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: hamiltonian measured against unitary, in the case that
        unitary is contained within an ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    """
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor from the gate parameter down to the variable
    chain_factor = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        accumulated += (weight * chain_factor) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.0/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record for one Hamiltonian term in intermediate form:
    a scalar coefficient together with the operator matrices and the
    qubit positions those operators act on.
    """
    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # kept private; exposed through the read-only properties below
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar weight of the term."""
        return self._coeff

    @property
    def operators(self):
        """Operator matrices of the term."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices acted on, parallel to ``operators``."""
        return self._pos
class MPOContainer:
    """
    Holds the site tensors of a matrix product operator (MPO).

    One rank-4 tensor per qubit with index order
    (left bond, right bond, physical row, physical column).  Entries can be
    set directly, the bond dimensions can be grown like a dynamic array,
    and the whole MPO can be compressed with a two-sweep SVD.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        """
        :param n_qubits: number of sites (qubits) of the MPO.
        """
        self.n_qubits = n_qubits
        # trivial bond dimension 1 everywhere to start with; builtin
        # ``complex`` (== complex128) replaces np.complex, which was
        # deprecated in NumPy 1.20 and removed in 1.24
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the largest left-bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write into the tensor of ``qubit``.

        :param set_at: [b1, b2] to place a full 2x2 matrix at bond indices
            (b1, b2), or [b1, b2, p1, p2] to place a scalar at one entry.
        :param add_operator: the 2x2 matrix or scalar to write.
        :raises Exception: if set_at has neither 2 nor 4 entries.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set a specific value
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array.

        :param update_dir: e.g. [1, 1, 0, 0] -> extend by one along each bond
            dimension carrying a 1; the two physical dimensions stay 2x2.
            A 2-entry list is padded with [0, 0].
        :param add_operator: 2x2 matrix written into the new (last, last) slot.
        :raises Exception: on malformed update_dir or an attempt to grow the
            physical dimensions.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # builtin complex: np.complex was removed in NumPy 1.24
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values into the freshly created corner slot
        new_tensor[new_shape[0]-1, new_shape[1]-1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO in place via SVD: one forward and one backward sweep.
        Singular values below the module constant EPS are discarded, and the
        surviving weights are split symmetrically (sqrt on each side) between
        the tensors left and right of every cut.  Requires tensornetwork
        (``tn``) at call time.
        """
        n_qubits = len(self.container)
        # merge the two physical indices so each site is a rank-3 tensor
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3), (1, -2)])
        # split the merged physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian and recover the dense operator from it.

    The Hamiltonian's Pauli strings are first converted into an intermediate
    list of SubOperator records, then assembled into one or more
    MPOContainer objects; a new container is started whenever the bond
    dimension of the current one would exceed ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object

        Args:
            hamiltonian: the Hamiltonian the MPO should represent.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred from the Hamiltonian if None.
            name: An optional name for the MPO.
            maxdim: bond-dimension limit per MPOContainer.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            self._n_qubits = self.get_n_qubits()

    def get_n_qubits(self):
        """Infer the qubit count from the Hamiltonian.

        Fix: the original called this method without ever defining it, so
        constructing MyMPO without an explicit n_qubits always raised
        AttributeError.  Assumes hamiltonian is a tq.QubitHamiltonian
        exposing ``n_qubits`` -- TODO confirm for the file-name variant
        of the constructor argument.
        """
        return self.hamiltonian.n_qubits

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of compressed MPOs (self.mpo)."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Hamiltonian's Pauli strings into SubOperator records
        (coefficient, list of 2x2 matrices, list of qubit positions).
        """
        def get_pauli_matrix(string):
            # builtin ``complex``: np.complex was removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]
        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty item list)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Assemble one MPOContainer starting at intermediate term ``j``.

        Terms are added until the list is exhausted or the bond dimension
        reaches self.maxdim; the MPO is compressed every 100 terms and once
        at the end.

        Returns:
            (mpo, j): the container and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices);
        # the coefficient is spread evenly over all sites via its n-th root
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if q not in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               np.eye(2))
            else:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1/n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other terms: grow the bond dimensions term by term
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions;
                # edge sites only grow one bond, bulk sites grow both
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits-1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on this position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits) *
                                         np.eye(2))
            if not j % 100:
                # periodic compression keeps intermediate bond dimensions small
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOContainers as maxdim requires."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Contract the MPO list back into a dense tensor of shape (d, d, d, d)
        with d = 2**(n_qubits/2), e.g. to compare with the Hamiltonian
        matrix obtained from tequila.

        NOTE(review): d is an integer only for even n_qubits -- confirm the
        intended behaviour for odd qubit counts.
        """
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Network layout per MPO (bond indices contracted along --):
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in self.mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices):
            # left, right, then all upper and all lower physical edges
            edges = []
            edges += [nodes[0].get_edge(0)]
            edges += [nodes[-1].get_edge(1)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            tensor = res.tensor
            if isinstance(tensor, torch.Tensor):
                tensor = tensor.numpy()
            # Fix: the original only accumulated inside the torch branch, so
            # with a non-torch backend H stayed None and the final reshape crashed.
            if first:
                H = tensor
                first = False
            else:
                H = H + tensor
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.0/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable handed to scipy.optimize.minimize as the cost function; also
    records the optimization history.
    Unlike the stock tequila container, the objective is rebuilt on every
    call from a parametrized Hamiltonian and a fixed unitary, so that
    Hamiltonian parameters can be optimized alongside circuit angles.
    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the circuit measured against the Hamiltonian.
    param_keys:
        ordered variable keys matching positions in the scipy parameter array.
    Ham_derivatives:
        mapping of Hamiltonian parameters to derivative Hamiltonians
        (consumed by _GradContainer, which subclasses this container).
    samples:
        the number of samples to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at the scipy parameter vector p.
        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.
        args
        kwargs
        Returns
        -------
        complex:
            the simulated expectation value of H(p) under self.unitary.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # circuit variables are kept real; all remaining keys parametrize
        # the Hamiltonian and are cast to complex
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current parameter values
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable handed to scipy.optimize.minimize as the jacobian; records the
    gradient history.
    For each active key the total derivative has up to two contributions:
    the circuit derivative of <U|H|U> (via tq.grad, when the key is a
    circuit variable) and the Hamiltonian derivative <U|dH/dk|U> (when the
    key appears in Ham_derivatives).  A key may receive both.
    See _EvalContainer for the stored attributes.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector p.
        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.
        args
        kwargs
        Returns
        -------
        numpy.array:
            gradient vector of length self.N, cast to complex64.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): real-valued buffer -- assigning a genuinely complex
        # derivative below would fail/truncate, even though the result is
        # cast to complex64 on return; presumably all derivatives are real.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        # same real/complex split as _EvalContainer.__call__
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution 1: circuit derivative of the expectation value
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution 2: derivative of the Hamiltonian itself
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # scalar results are stored directly; compiled Objectives still
            # need to be evaluated at the current variables
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy optimizer variant that rebuilds the objective on every evaluation
    from a parametrized Hamiltonian plus a fixed unitary; it overwrites the
    stock expectation and gradient containers with the local
    _EvalContainer and _GradContainer defined above.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables recieved in calls to
        the optimizer.
        Parameters
        ----------
        all_variables: list:
            all variables of the combined Hamiltonian + unitary problem.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch would raise NameError if taken.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): neither ``warnings`` nor TequilaWarning is
                # imported here -- would raise NameError if reached.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit measured against the Hamiltonian.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs
        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # full variable set = Hamiltonian parameters + circuit parameters
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # silent warm-up call to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                # NOTE(review): this branch references ``objective``,
                # ``get_qng_combos`` and ``_QngContainer``, none of which are
                # defined or imported here (the _containers import at the top
                # of the file is commented out) -- NameError if taken.
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                # NOTE(review): same undefined names as the qng branch above.
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up call to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # NOTE(review): class-level mutable lists are shared across all
            # instances; safe here because exactly one instance is created
            # per __call__.
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): assigns a local that is never read afterwards --
            # this failsafe currently has no effect.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Entry point: builds the local optimize_scipy optimizer and runs it on the
    (parametrized Hamiltonian, unitary) pair instead of a precompiled objective.
    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize over.
    unitary:
        the circuit measured against the Hamiltonian.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization
    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the original wrapped k[1] in a list, handing a list to
            # assign_variable and producing an invalid hessian key
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    # NOTE(review): ``noise_model`` is forwarded as a constructor kwarg --
    # confirm the installed OptimizerSciPy accepts it (upstream uses ``noise``).
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.0/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates) and Transforms.

    Parameters
    ----------
    objective: Objective or QTensor:
        structure to be differentiated.
    variable: Variable, optional:
        parameter with respect to which objective should be differentiated.
        Default None: differentiate w.r.t. every variable (total gradient).
    no_compile: bool:
        if True, skip circuit compilation (objective is assumed pre-compiled).

    Returns
    -------
    dictionary mapping each variable to its derivative Objective if variable is None,
    otherwise the derivative w.r.t. the single given variable.
    '''
    if variable is None:
        # total gradient: recurse once per variable of the objective
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            # BUGFIX: forward *args/**kwargs to the recursive call as well
            result[k] = grad(objective, k, no_compile=no_compile, *args, **kwargs)
        return result
    else:
        variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # elementwise differentiation over the tensor of objectives
        # BUGFIX: propagate no_compile to the elementwise calls
        f = lambda x: grad(objective=x, variable=variable, no_compile=no_compile, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    if variable not in objective.extract_variables():
        # objective does not depend on the variable: derivative is empty
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile to a gate set whose gradients are known (shift rules)
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # single expectation value wrapped in an Objective
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain-rule differentiation of a transformed Objective w.r.t. one variable:
    sum over arguments of (outer derivative of the transformation) times
    (inner derivative of the argument).
    """
    args = objective.args
    transformation = objective.transformation
    # cache inner derivatives of expectation values to avoid recomputation
    inner_cache = {}
    total = None
    for position, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_df = jax.grad(transformation, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_df = jax.grad(transformation, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # the identity transformation has derivative one
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=outer_df)

        if hasattr(arg, "U"):
            # expectation value: reuse a cached inner derivative if available
            if arg not in inner_cache:
                inner_cache[arg] = __grad_inner(arg=arg, variable=variable)
            inner = inner_cache[arg]
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=arg, variable=variable)

        # skip vanishing contributions so no zero expectation values pile up
        if inner == 0.0:
            continue
        contribution = outer * inner
        total = contribution if total is None else total + contribution

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the chain rule: derivative of a single argument
    (Variable, FixedVariable, expectation value or nested Objective)
    with respect to one Variable. Plain variables yield 1.0/0.0
    (Kronecker delta); everything else is delegated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # d(arg)/d(variable) = delta(arg, variable)
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one,
        # then recompile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # fall back to general objective differentiation
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect
    to one circuit variable: sum of shift-rule contributions over every
    gate in U that is parametrized by that variable.
    Returns an Objective (or 0.0 when U does not depend on the variable).
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    dO = Objective()
    # _parameter_map yields (position, gate) pairs for this variable
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient for one directly differentiable gate within a
    precompiled circuit.

    :param unitary: QCircuit containing the gate being differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: variable with respect to which g is differentiated
    :param hamiltonian: Hamiltonian the circuit is measured against
    :return: Objective whose evaluation yields d<H>/d(variable) for this gate
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # inner derivative of the gate parameter w.r.t. the variable
    inner_grad = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace gate i by its shifted version and measure
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        accumulated += (weight * inner_grad) * expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Lightweight record bundling a coefficient with the operator matrices it
    scales and the qubit positions those operators act on. Intermediate
    format between qubit Hamiltonians and the MPO construction.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices (one per acted-on qubit)."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Container for the site tensors of a matrix product operator (MPO).

    Holds one rank-4 tensor per qubit with index layout
    (left bond, right bond, physical out, physical in); the physical
    dimensions are always 2. Supports setting entries, growing the bond
    dimensions (mimicking a dynamic array) and SVD-based compression.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # start from trivial bond dimension 1 on every site.
        # BUGFIX: np.complex (a deprecated alias of the builtin `complex`)
        # was removed in NumPy >= 1.24, so the builtin is used instead.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                          for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put the data; either [l, r] to set the whole 2x2
        physical matrix at bond indices (l, r), or [l, r, p, p'] to set a
        single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of the tensor of `qubit` (mimicking a
        dynamic array) and place `add_operator` in the new corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        A length-2 list is padded with [0, 0].
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: builtin complex instead of removed np.complex alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly created corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO bond dimensions via truncated SVD.

        Performs a left-to-right sweep followed by a right-to-left sweep;
        singular values below EPS are discarded and the square roots of the
        kept singular values are distributed to both neighbouring tensors.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices: (l, r, 2, 2) -> (l, r, 4)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site tensor
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices=False corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site tensor
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical index again: (l, r, 4) -> (l, r, 2, 2)
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Builds a (list of) matrix product operators from a tequila
    QubitHamiltonian, with bond dimension capped at `maxdim` per MPO.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: the QubitHamiltonian to represent as an MPO.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; if omitted, inferred from the
                Hamiltonian.
            name: An optional name for the MPO.
            maxdim: maximum bond dimension per single MPO; when exceeded a
                new MPO is started (see intermediate_to_mpo).
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class —
            # constructing without n_qubits raises AttributeError; confirm
            # whether the method was meant to be inherited/added.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into MPOs and store them in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, 2x2 matrices, qubit positions).
        Only the first Pauli string may be the identity.
        """
        def get_pauli_matrix(string):
            # BUGFIX: builtin complex instead of np.complex, which was
            # removed in NumPy >= 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distributed over all sites
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k,v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting from term index j, absorbing further terms
        until the bond dimension would exceed self.maxdim.

        Each term's coefficient is distributed evenly over the sites as
        coefficient**(1/n_qubits); sites the term does not act on get the
        identity.

        Returns:
            (mpo, j_next): the built MPOContainer and the index of the first
            term NOT included.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)

        # ***********************************************************************
        # First term fills the (1,1,2,2) seed tensors directly.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])

        # ***********************************************************************
        # Remaining terms grow the bond dimension by one block each; compress
        # periodically to keep the dimension in check.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators

            for q in range(n_qubits):
                # boundary tensors only grow along their single bond index
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # compress every 100 terms to bound intermediate bond growth
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as maxdim requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final count is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        ''' Recover the dense operator, e.g. to compare with the Hamiltonian
        matrix obtained from tequila. Sums the contraction of every MPO in
        self.mpo and returns a (d,d,d,d) tensor with d = 2**(n_qubits/2). '''
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along the bond indices -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # BUGFIX: previously H_m was only assigned for torch tensors,
            # leaving it unbound (NameError) for e.g. the numpy backend.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable wrapper that scipy.optimize.minimize uses as the objective
    function, adapted here to a parametrized Hamiltonian: on each call the
    Hamiltonian is rebuilt from the current variables before the expectation
    value is simulated.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq Hamiltonian.
    unitary:
        the (fixed) circuit whose expectation value is evaluated.
    param_keys:
        ordered list of active variable keys; position i corresponds to p[i].
    Ham_derivatives:
        precomputed Hamiltonian derivatives (used by _GradContainer).
    passive_angles:
        variables held constant during the optimization, merged into every call.
    samples:
        number of measurement samples passed to tq.simulate.
    save_history:
        whether to record energies/angles of every call (creates self.history
        and self.history_angles only in that case).
    print_level:
        verbosity of the per-call printout.
    N:
        the length of param_keys.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        # history attributes exist only when save_history is requested
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            values of the active parameters, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            expectation value of the rebuilt Hamiltonian for the circuit.
        """
        # map p back to a variable dictionary; variables that only appear in
        # the Hamiltonian (not in the circuit) are cast to complex
        angles = {}
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild the Hamiltonian at the current variable values, then measure
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable wrapper that scipy.optimize.minimize uses as the jacobian.

    The gradient w.r.t. each active parameter has two contributions:
      * circuit part: tq.grad of the expectation value, present only if the
        parameter appears in the unitary;
      * Hamiltonian part: expectation value of the precomputed dH/dp,
        present only if the parameter appears in the Hamiltonian.

    See _EvalContainer for attribute documentation.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            values of the active parameters, ordered like self.param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            complex gradient vector of length self.N.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUGFIX: the vector must be complex-typed from the start; numpy
        # raises a TypeError when a complex value is assigned into the
        # float array that numpy.zeros(N) produces.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # rebuild the full variable dictionary (active + passive angles);
        # Hamiltonian-only variables are cast to complex, as in _EvalContainer
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of <H(p)> w.r.t. the
            # circuit parameter (compiled, evaluated below)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dp> at the current variables
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # BUGFIX: also accept builtin complex scalars here; previously a
            # complex result fell through to the callable branch and raised
            # "'complex' object is not callable".
            if isinstance(derivative, (float, complex, numpy.complex64)):
                dE_vec[i] = derivative
            else:
                # compiled tequila objective: evaluate it now
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        # BUGFIX: self.history exists only when save_history=True
        # (see _EvalContainer.__init__); guard the append accordingly.
        if self.save_history:
            self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    Variant of tequila's OptimizerSciPy that optimizes an expectation value
    of a *parametrized* Hamiltonian: the stock expectation and gradient
    containers are replaced by the local _EvalContainer/_GradContainer,
    which rebuild the Hamiltonian from the current variables on every call.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables of Hamiltonian and unitary combined.
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over (None -> all).

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported at the
                # top of this file — presumably supplied by `from vqe_utils import *`;
                # verify, otherwise this raise becomes a NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): `warnings` and TequilaWarning are likewise not
                # visibly imported here — confirm vqe_utils provides them.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)

        # split into the variables we optimize and those held fixed
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]

        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:

        """
        Perform optimization of <H(p)>_U using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            the parametrized Hamiltonian (PQH) to optimize over.
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert the parametrized Hamiltonian and collect all variables
        # (Hamiltonian variables + circuit variables)
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)

        infostring = "{:15} : {}\n".format("Method", self.method)

        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled

        # energy evaluator; called once silently up front to populate infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)

        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope — the
                # qng branch would raise a NameError; looks like leftover code
                # from the original OptimizerSciPy. Confirm before using qng.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # any other string (e.g. '2-point') is passed through to scipy
                # as a numerical differentiation instruction
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            # analytic gradient through the local _GradContainer
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))

        Es = []

        optimizer_instance = self

        # per-iteration bookkeeping hook handed to scipy.optimize.minimize;
        # pulls the latest entries out of the evaluation containers
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        # NOTE(review): this local is never used afterwards — presumably it was
        # meant to feed the history reconstruction below; confirm intent.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] # best angles seen during any evaluation
        angles_final = {**angles_final, **passive_angles}

        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Construct an optimize_scipy optimizer and run it on the given
    (parametrized) Hamiltonian and unitary.

    Parameters
    ----------
    Hamiltonian:
        the (possibly parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit preparing the state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run the circuits (backend dependent).
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize gradient/hessian dictionaries of tequila Objectives
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: the second key component was previously wrapped in a list,
            # assign_variable([k[1]]), which assign_variable cannot convert.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate tequila structures: Objectives, ExpectationValues, QTensors.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be differentiated;
                     default None computes the total gradient (one entry per variable).
    :param no_compile: skip the gradient-mode compilation pass when True.
    return: dictionary mapping each variable to its derivative when variable is None,
            otherwise the single derivative Objective.
    '''
    if variable is None:
        # no variable given -> build the derivative for every variable of the objective
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients

    variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # differentiate element-wise over the tensor of objectives
        single = lambda entry: grad(objective=entry, variable=variable, *args, **kwargs)
        return vectorize(single)(objective)

    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over the arguments of an Objective:
    d objective / d variable = sum_i (d transformation / d arg_i) * (d arg_i / d variable)."""
    arguments = objective.args
    transform = objective.transformation
    total = None
    inner_cache = {}  # reuse inner derivatives of identical expectation values
    for position, argument in enumerate(arguments):
        # outer derivative of the transformation w.r.t. this argument
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # simple case: the outer derivative of the identity is constantly 1
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=df)

        if hasattr(argument, "U"):
            # expectation value: look up the cache to save redundancies
            if argument not in inner_cache:
                inner_cache[argument] = __grad_inner(arg=argument, variable=variable)
            inner = inner_cache[argument]
        else:
            # purely variable-dependent inner derivative
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        contribution = outer * inner
        total = contribution if total is None else total + contribution

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative of a single argument with respect to a variable.
    Recurses all the way down: returns 1.0/0.0 when arg is (not) the variable itself,
    and dispatches to the expectation-value / objective differentiation otherwise.
    :param arg: a transform, expectation value or variable object, to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        # constants have zero derivative
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one,
        # then re-compile with the original input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to a variable,
    accumulated over every gate in the circuit that is parametrized by that variable.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed.
    :return: derivative as an Objective (shift-rule expectation values, weighted)
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0

    total = Objective()
    # every (position, gate) pair parametrized by this variable contributes a shift-rule term
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient contribution of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which g is differentiated
    :param hamiltonian: the hamiltonian measured on unitary within the ExpectationValue
    :return: an Objective whose evaluation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # chain rule: derivative of the gate parameter with respect to the variable
    inner_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # replace the gate at position i by its shifted counterpart
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * inner_grad) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Small helper record that stores one Pauli string in an intermediate format:
    its scalar coefficient, the list of 2x2 operator matrices, and the qubit
    positions on which those operators act.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli string."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices making up the string."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each entry of ``container`` is one rank-4 tensor per qubit with index order
    (left bond, right bond, physical out, physical in); the physical dimensions
    are always 2.
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one tensor per qubit, starting with trivial (dimension-1) bonds.
        # BUGFIX: use builtin `complex` — the `np.complex` alias was removed
        # in NumPy 1.24 and raised AttributeError at runtime.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                          for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension of container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Overwrite values of the tensor at `qubit`.

        set_at: where to put data; length 2 -> bond indices, `add_operator` is a
        2x2 matrix; length 4 -> full index, `add_operator` is a scalar.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                                                                  add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor at `qubit` like a dynamic array and place `add_operator`
        (a 2x2 matrix) in the newly created bond corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay 0.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUGFIX: builtin `complex` instead of the removed `np.complex` alias
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the freshly grown corner block
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, truncating singular values below EPS and splitting
        the singular-value weights evenly between neighbours.

        NOTE: requires the module-level `tensornetwork` import (tn) and EPS.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices so each tensor is rank 3 for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build matrix-product operators (MPOs) from a tequila QubitHamiltonian.

    The Hamiltonian's Pauli strings are first converted into an intermediate
    list of SubOperator records and then accumulated into MPOContainer objects,
    compressing via SVD as the MPO grows. Whenever the bond dimension would
    exceed ``maxdim`` a new MPO is started, so the Hamiltonian is represented
    as a sum (list) of MPOs.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object

        Args:
            hamiltonian: tequila QubitHamiltonian (or text identifier) to convert.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; inferred via get_n_qubits() when None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension of a single MPO before a new one is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown
            # here — presumably provided elsewhere; confirm before relying on
            # the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs and store it in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian (a tequila
        QubitHamiltonian) into a list of SubOperator records.

        Only the first Pauli string may be a pure identity (empty item list);
        any later identity raises.
        """
        def get_pauli_matrix(string):
            # 2x2 matrix representation of a single-qubit Pauli operator.
            # BUGFIX: builtin `complex` — the `np.complex` alias was removed
            # in NumPy 1.24 and raised AttributeError at runtime.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Accumulate Pauli strings intermediate[j:] into one MPO until either all
        strings are consumed or the bond dimension reaches self.maxdim.

        Returns (mpo, j_next) where j_next is the index of the first unconsumed
        string. The scalar coefficient of each string is distributed evenly
        over all qubits via its n_qubits-th complex root.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)

        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # BUGFIX below: builtin complex(...) replaces removed np.complex(...)
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])

        # ***********************************************************************
        # All other entries: each string grows the virtual bonds by one block
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # compress periodically so the bond dimension stays manageable
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the full list of Pauli strings into as many MPOs as needed
        (each bounded by self.maxdim) and return them as a list."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo (summing over all MPOs in the
        list), e.g. to compare with the Hamiltonian matrix obtained from tequila.

        Returns a tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        # Contract over all bond indices
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #   |  |       |  |
        #  -O--O--...--O--O-
        #   |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # Reshape to get tensor of order 4 (get rid of left- and right open
            # indices and combine top & bottom into one)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # BUGFIX: with a non-torch backend res.tensor is already an
                # array; previously H_m was left unset here (NameError or a
                # stale value from the previous iteration).
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Unlike the stock tequila container, this one holds a *parametrized*
    Hamiltonian (a callable mapping a variable dictionary to a
    QubitHamiltonian) plus the circuit, and builds the expectation value
    fresh on every call.

    Attributes
    ---------
    Hamiltonian:
        callable mapping formatted variables to the qubit Hamiltonian.
    unitary:
        the parametrized circuit whose expectation value of Hamiltonian is evaluated.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by _GradContainer; unused here).
    Eval:
        optional evaluation container; stored but not used in __call__.
    param_keys:
        the ordered variable keys; position i of the scipy parameter vector
        corresponds to param_keys[i].
    passive_angles:
        variables held fixed during optimization, merged into every evaluation.
    samples:
        the number of samples to evaluate the expectation value with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    infostring:
        human-readable summary of the last evaluation (set in __call__).
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy <unitary|Hamiltonian(p)|unitary> for a scipy
        parameter vector.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to evaluate; p[i] corresponds to param_keys[i].
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value (cast to complex; see note on the
            return line).
        """
        # rebuild the variable dictionary from the flat scipy vector;
        # circuit variables stay real, Hamiltonian-only variables are cast to complex
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current variables,
        # then build and simulate the expectation value from scratch
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): returning complex to scipy is unusual — kept as-is;
        # original comment: jax types confuses optimizers
        return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.

    The gradient has two contributions per parameter: the circuit part
    (analytic tequila gradient of the expectation value) and the Hamiltonian
    part (expectation value of the Hamiltonian derivative from
    self.Ham_derivatives).
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient of the energy at the scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient; p[i] maps to param_keys[i].
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector of length N as a complex64 array.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated real-valued (numpy.zeros) but is
        # assigned possibly complex derivatives below and returned as complex64
        # — confirm the imaginary parts are indeed expected to vanish here.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        # rebuild the variable dictionary: circuit variables stay real,
        # Hamiltonian-only variables are cast to complex (as in _EvalContainer)
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # circuit contribution: analytic gradient of <U|H(vars)|U> w.r.t. the angle
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: expectation value of dH/d(param)
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # `derivative` is a plain number when only the Hamiltonian part
            # contributed, otherwise a compiled Objective that must be evaluated
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        # NOTE(review): history is appended unconditionally here, i.e. even
        # when save_history is False — confirm this is intended.
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
             variables: typing.List[Variable] = None,
             initial_values: typing.Dict[Variable, numbers.Real] = None,
             gradient: typing.Dict[Variable, Objective] = None,
             hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
             reset_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Perform optimization using scipy optimizers.

    Hacked variant of the stock tequila optimizer: instead of a precompiled
    Objective it receives a (variable-dependent) Hamiltonian plus a unitary
    and lets the evaluation containers assemble the expectation value on
    every energy/gradient call.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian; converted with convert_PQH_to_tq_QH.
    unitary:
        ansatz circuit; its variables are optimized jointly with the
        Hamiltonian variables.
    variables: list, optional:
        the variables to optimize. If None: optimize all.
    initial_values: dict, optional:
        a starting point from which to begin optimization. Will be generated if None.
    gradient: optional:
        Information or object used to calculate the gradient. Defaults to None: get analytically.
    hessian: optional:
        Information or object used to calculate the hessian. Defaults to None: get analytically.
    reset_history: bool: Default = True:
        whether or not to reset all history before optimizing.
    args
    kwargs

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    # Build the tequila Hamiltonian and the derivative data of its coefficients.
    H = convert_PQH_to_tq_QH(Hamiltonian)
    Ham_variables, Ham_derivatives = H._construct_derivatives()
    #print("hamvars",Ham_variables)
    # Hamiltonian variables and circuit variables are optimized jointly.
    all_variables = copy.deepcopy(Ham_variables)
    #print(all_variables)
    for var in unitary.extract_variables():
        all_variables.append(var)
    #print(all_variables)
    infostring = "{:15} : {}\n".format("Method", self.method)
    #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
    if self.save_history and reset_history:
        self.reset_history()
    active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
    #print(active_angles, passive_angles, variables)
    # Transform the initial value directory into (ordered) arrays
    param_keys, param_values = zip(*active_angles.items())
    param_values = numpy.array(param_values)
    # process and initialize scipy bounds
    bounds = None
    if self.method_bounds is not None:
        bounds = {k: None for k in active_angles}
        for k, v in self.method_bounds.items():
            if k in bounds:
                bounds[k] = v
        infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
        names, bounds = zip(*bounds.items())
        assert (names == param_keys)  # make sure the bounds are not shuffled
    #print(param_keys, param_values)
    # do the compilation here to avoid costly recompilation during the optimization
    #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
    # Energy evaluator; called once silently to populate its infostring.
    E = _EvalContainer(Hamiltonian=H,
                       unitary=unitary,
                       Eval=None,
                       param_keys=param_keys,
                       samples=self.samples,
                       passive_angles=passive_angles,
                       save_history=self.save_history,
                       print_level=self.print_level)
    E.print_level = 0
    (E(param_values))
    E.print_level = self.print_level
    infostring += E.infostring
    if gradient is not None:
        infostring += "{:15} : {}\n".format("grad instr", gradient)
    if hessian is not None:
        infostring += "{:15} : {}\n".format("hess_instr", hessian)
    # Analytic derivative containers are only compiled for methods that use them.
    compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
    compile_hessian = self.method in self.hessian_based_methods
    dE = None
    ddE = None
    # detect if numerical gradients shall be used
    # switch off compiling if so
    if isinstance(gradient, str):
        if gradient.lower() == 'qng':
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): `objective` is not defined in this hacked method
            # (the stock optimizer had it); this branch would raise a
            # NameError -- TODO confirm before using QNG here.
            combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
        else:
            # e.g. '2-point': hand the string through and let scipy
            # differentiate numerically
            dE = gradient
            compile_gradient = False
            if compile_hessian:
                compile_hessian = False
                if hessian is None:
                    hessian = gradient
            infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
            infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
    if isinstance(gradient, dict):
        if gradient['method'] == 'qng':
            func = gradient['function']
            compile_gradient = False
            if compile_hessian:
                raise TequilaException('Sorry, QNG and hessian not yet tested together.')
            # NOTE(review): same undefined `objective` as above -- TODO confirm.
            combos = get_qng_combos(objective, func=func, initial_values=initial_values, backend=self.backend,
                                    samples=self.samples, noise=self.noise)
            dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
            infostring += "{:15} : QNG {}\n".format("gradient", dE)
    if isinstance(hessian, str):
        ddE = hessian
        compile_hessian = False
    if compile_gradient:
        # Analytic gradient container; also called once silently for its infostring.
        dE = _GradContainer(Ham_derivatives=Ham_derivatives,
                            unitary=unitary,
                            Hamiltonian=H,
                            Eval=E,
                            param_keys=param_keys,
                            samples=self.samples,
                            passive_angles=passive_angles,
                            save_history=self.save_history,
                            print_level=self.print_level)
        dE.print_level = 0
        (dE(param_values))
        dE.print_level = self.print_level
        infostring += dE.infostring
    if self.print_level > 0:
        print(self)
        print(infostring)
        print("{:15} : {}\n".format("active variables", len(active_angles)))
    Es = []
    optimizer_instance = self
    # Per-iteration callback: mirrors the evaluation histories into
    # per-iteration lists. The class statement is executed freshly on every
    # __call__, so the class-level lists are not shared between runs.
    class SciPyCallback:
        energies = []
        gradients = []
        hessians = []
        angles = []
        real_iterations = 0

        def __call__(self, *args, **kwargs):
            self.energies.append(E.history[-1])
            self.angles.append(E.history_angles[-1])
            if dE is not None and not isinstance(dE, str):
                self.gradients.append(dE.history[-1])
            if ddE is not None and not isinstance(ddE, str):
                self.hessians.append(ddE.history[-1])
            self.real_iterations += 1
            # forward to a user-supplied callback if one was given
            if 'callback' in optimizer_instance.kwargs:
                optimizer_instance.kwargs['callback'](E.history_angles[-1])

    callback = SciPyCallback()
    res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                  args=(Es,),
                                  method=self.method, tol=self.tol,
                                  bounds=bounds,
                                  constraints=self.method_constraints,
                                  options=self.method_options,
                                  callback=callback)
    # failsafe since callback is not implemented everywhere
    if callback.real_iterations == 0:
        # NOTE(review): this assigns a local that is never read afterwards
        # (probably meant `callback.real_iterations = ...`) -- TODO confirm.
        real_iterations = range(len(E.history))
    if self.save_history:
        self.history.energies = callback.energies
        self.history.energy_evaluations = E.history
        self.history.angles = callback.angles
        self.history.angles_evaluations = E.history_angles
        self.history.gradients = callback.gradients
        self.history.hessians = callback.hessians
        if dE is not None and not isinstance(dE, str):
            self.history.gradients_evaluations = dE.history
        if ddE is not None and not isinstance(ddE, str):
            self.history.hessians_evaluations = ddE.history
        # some methods like "cobyla" do not support callback functions
        if len(self.history.energies) == 0:
            self.history.energies = E.history
            self.history.angles = E.history_angles
    # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
    ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
    E_final = ea[0][0]
    angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
    angles_final = {**angles_final, **passive_angles}
    return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Convenience wrapper: builds an optimize_scipy instance and runs it on the
    given Hamiltonian/unitary pair.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian defining the energy to minimize.
    unitary:
        ansatz circuit whose variables are optimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied analytic gradient/hessian dictionaries so their
    # keys are proper Variables.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # fix: the second key component was previously wrapped in a list
            # (assign_variable([k[1]])), which produces an invalid variable key
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate tequila Objectives, ExpectationValues, unitaries (including
    single gates) and transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which to differentiate;
        default None: total gradient (a dict over all variables).
    :param no_compile: skip the compilation pass before differentiating.
    return: dictionary of Objectives when variable is None, otherwise the
        single derivative (an Objective or a number).
    '''
    # Total gradient: one recursive call per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        partials = {}
        for key in all_vars:
            assert (key is not None)
            partials[key] = grad(objective, key, no_compile=no_compile)
        return partials
    variable = assign_variable(variable)

    # Tensor-valued objectives: differentiate entry-wise.
    if isinstance(objective, QTensor):
        elementwise = lambda entry: grad(objective=entry, variable=variable, *args, **kwargs)
        return vectorize(elementwise)(objective)

    # Objective does not depend on the variable at all.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        lowered = objective
    else:
        # Lower the circuit so that every gate carrying the variable is
        # directly shift-rule differentiable.
        lowering = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        lowered = lowering(objective, variables=[variable])

    if variable not in lowered.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=lowered.args[-1], variable=variable)
    if isinstance(lowered, Objective) or (hasattr(lowered, "args") and hasattr(lowered, "transformation")):
        return __grad_objective(objective=lowered, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over a transformed Objective: for every argument, multiply the
    outer derivative of the transformation by the inner derivative of the
    argument with respect to ``variable`` and sum the contributions.
    """
    arg_list = objective.args
    transform = objective.transformation
    total = None
    # expectation values can appear multiple times; reuse their derivative
    inner_cache = {}
    for position, argument in enumerate(arg_list):
        # outer derivative of the transformation w.r.t. argument #position
        if __AUTOGRAD__BACKEND__ == "jax":
            partial_transform = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            partial_transform = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arg_list, transformation=partial_transform)

        if hasattr(argument, "U"):
            # expectation value: look up or populate the cache
            if argument in inner_cache:
                inner = inner_cache[argument]
            else:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # this inner derivative is purely variable dependent
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        term = outer * inner
        total = term if total is None else total + term

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost chain-rule step: derivative of a single argument with respect
    to a variable. Yields 1.0/0.0 for a (non-)matching Variable and recurses
    into expectation values and nested objectives otherwise.

    :param arg: a transform or variable object, to be differentiated
    :param variable: the Variable with respect to which arg is differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, then
        # recompile with the same backend arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect to
    one circuit variable: apply the shift rule at every gate that carries it
    and sum the contributions.

    :param E: the expectation value to differentiate
    :param variable: the variable with respect to which to differentiate
    :return: derivative as an Objective
    '''
    ham = E.H
    circuit = E.U
    if not (circuit.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))

    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0

    accumulated = Objective()
    for position, gate in circuit._parameter_map[variable]:
        accumulated += __grad_shift_rule(circuit, gate, position, variable, ham)
    assert accumulated is not None
    return accumulated
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient contribution of one directly differentiable gate via its shift
    rule. Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: Int: position of g within unitary
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # possibility for overwride in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor from the gate parameter's own variable dependence
    chain_factor = __grad_inner(g.parameter, variable)
    contribution = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        contribution += (weight * chain_factor) * expectation
    return contribution
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.5/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record for one Pauli term in the intermediate Hamiltonian format:
    a scalar coefficient, the list of single-qubit operator matrices, and the
    qubit positions they act on (parallel lists).
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli term."""
        return self._coefficient

    @property
    def operators(self):
        """List of 2x2 operator matrices, parallel to ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions

    def __repr__(self):
        # operators are matrices, so show only coefficient and positions
        return "{}(coefficient={!r}, positions={!r})".format(
            type(self).__name__, self._coefficient, self._positions)
class MPOContainer:
    """
    Container handling the tensors of one MPO. Supports writing values at
    given (bond, bond, physical, physical) positions, growing the bond
    dimensions dynamically (wannabe-equivalent to dynamic arrays), and
    compressing the MPO via SVD.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # One rank-4 tensor per qubit: (bond_left, bond_right, phys, phys).
        # dtype=complex replaces np.complex, which was removed in NumPy 1.24.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension of container """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor of ``qubit``.

        set_at: where to put data; either [i, j] to set a full 2x2 matrix,
        or [i, j, k, l] to set a single entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        This should mimick a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two dimensions are always 2x2 only. The new 2x2 block
        ``add_operator`` is placed at the newly grown corner.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        # dtype=complex: np.complex was removed in NumPy 1.24
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values at the grown corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of the MPO bond dimensions via truncated SVD:
        a left-to-right sweep followed by a right-to-left sweep,
        dropping singular values below EPS.
        """
        n_qubits = len(self.container)
        # Fuse the two physical indices for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # Absorb the right factor into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # Absorb the left factor into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the fused physical index again
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Build a matrix-product-operator (MPO) representation of a qubit
    Hamiltonian as a list of MPOContainer objects; whenever the bond
    dimension of the MPO under construction reaches ``maxdim``, a new MPO
    is started for the remaining terms.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize the MPO builder.

        Args:
            hamiltonian: the Hamiltonian to encode.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; derived from the Hamiltonian if None.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension of a single MPO before a new one
                is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined on this class in this
            # file -- presumably it should return self.hamiltonian.n_qubits.
            # Passing n_qubits explicitly avoids this path; TODO confirm.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits/sites of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Convert self.hamiltonian into a list of MPOs stored in self.mpo."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, operator matrices, positions).
        Only the first Pauli string may be a pure identity.
        """
        def get_pauli_matrix(string):
            # dtype=complex: np.complex was removed in NumPy 1.24
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at Pauli term ``j``; further terms are absorbed
        until the bond dimension reaches self.maxdim. The n-th root of each
        coefficient is distributed evenly over the n site tensors.

        Returns (mpo, j_next) where j_next is the first unconsumed term index.
        """
        # Set MPO Container
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices)
        # Typically, this is an identity
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient)**(1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimensions term by term
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1 / n_qubits) *
                                         np.eye(2))
            # Compress periodically to keep intermediate dimensions small
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Greedily split the term list into a list of bounded-dimension MPOs."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator (as a rank-4 tensor of shape (d,d,d,d)
        with d = 2**(n_qubits/2)) by contracting each MPO and summing them,
        e.g. to compare with the Hamiltonian matrix obtained from tequila.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2**(n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # fix: with a non-torch backend (e.g. numpy) the result is
                # already an ndarray; previously H_m was left undefined here
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.5/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Callable energy evaluator handed to scipy, recording the optimization history.

    Unlike tequila's stock ``_EvalContainer`` (which wraps a pre-compiled
    Objective), this variant rebuilds the expectation value on every call
    because the Hamiltonian itself depends on the (possibly complex)
    optimization parameters.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tq.QubitHamiltonian.
    unitary:
        the parametrized circuit U in <U|H|U>.
    param_keys:
        ordered variable keys; position i corresponds to p[i] of the scipy vector.
    samples:
        the number of samples (shots) per expectation-value evaluation.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level:
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__.
    history_angles:
        if save_history, a list of angle dictionaries sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        # populated on first call with an expectation-value count summary
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like param_keys.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value <U(p)|H(p)|U(p)>.
        """
        angles = {}  # active variable values, keyed like param_keys
        for i in range(self.N):
            if self.param_keys[i] in self.unitary.extract_variables():
                # circuit angles stay real
                angles[self.param_keys[i]] = p[i]
            else:
                # Hamiltonian coefficients are treated as complex parameters
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # rebuild H and the expectation value at the current parameters
        Hamiltonian = self.Hamiltonian(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer, handed to scipy as ``jac``.

    Combines two contributions per parameter: the shift-rule gradient of the
    circuit (for circuit variables) and the expectation value of the
    Hamiltonian's coefficient derivative (for Hamiltonian variables).
    See _EvalContainer for the shared attributes.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at scipy parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the gradient, ordered like param_keys.
        args
        kwargs

        Returns
        -------
        numpy.array:
            complex64 gradient vector, one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: allocate a complex vector. The previous float default
        # silently discarded the imaginary part of Hamiltonian-parameter
        # derivatives before the final complex64 cast at return.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}  # active variable values, keyed like param_keys
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            if self.param_keys[i] in list(unitary.extract_variables()):
                # analytic circuit gradient via tequila's shift rules
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=self.param_keys[i]), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                # expectation value of dH/dvariable at the current point
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64):
                dE_vec[i] = derivative
            else:
                # compiled objective: evaluate it now at the current variables
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    Variant of tequila's OptimizerSciPy that optimizes a variable-dependent
    Hamiltonian together with a parametrized circuit, routing evaluation
    through the local _EvalContainer/_GradContainer wrappers instead of a
    pre-compiled Objective.
    """

    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables received in optimizer calls.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian variables + circuit variables).
        initial_values: dict or string:
            initial values for the variables, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in this
                # module (import commented out); this path would raise NameError.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): warnings/TequilaWarning are likewise not imported here.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization of <U|H(v)|U> using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (pre-conversion format; converted via
            convert_PQH_to_tq_QH).
        unitary:
            the parametrized circuit U.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # build the tequila Hamiltonian and the symbolic derivatives of its
        # variable-dependent coefficients
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        all_variables = copy.deepcopy(Ham_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        infostring = "{:15} : {}\n".format("Method", self.method)
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # energy container; re-simulates the expectation value on every call
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up call to populate E.infostring
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException is not imported in this module.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective`, get_qng_combos and _QngContainer are
                # undefined here (their import is commented out at the top of the
                # file); the QNG path would raise NameError if taken.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical scheme string ('2-point', '3-point', 'cs') is passed
                # straight through to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient through the local container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self

        class SciPyCallback:
            # per-iteration recorder; scipy invokes __call__ once per iteration
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is assigned but never used afterwards
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Minimize <U|H(v)|U> by delegating to the local optimize_scipy class,
    passing the Hamiltonian and unitary down for objective construction.

    Parameters
    ----------
    Hamiltonian:
        the parametrized Hamiltonian to optimize (converted internally).
    unitary:
        the parametrized circuit U.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation
        (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation,
        dictionary (keys: tuple of variables, values: tequila objective),
        None for automatic construction (default).
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial variable values; if None they are generated by the optimizer.
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize (None: all).
    samples: int, optional:
        samples/shots per circuit run (None activates full wavefunction simulation).
    maxiter: int : (Default value = 100):
        max iterations to use.
    backend: str, optional:
        Simulator backend, chosen automatically if None.
    backend_options: dict, optional:
        Additional options passed to the compiled objective in every call.
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation).
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation).
    method_options: dict, optional:
        Dictionary of options (see scipy documentation).
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation).
    method_constraints: optional:
        (see scipy documentation).
    silent: bool :
        No printout if True.
    save_history: bool:
        Save the history throughout the optimization.

    Returns
    -------
    SciPyResults:
        the results of optimization.
    """
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key element was wrapped in a list
            # (assign_variable([k[1]])), which produced a broken hessian key;
            # pass the hashable itself, mirroring assign_variable(k[0]).
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)
    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.5/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives, ExpectationValues,
    Unitaries (including single gates), and Transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which objective should be
        differentiated. default None: total gradient (one entry per variable).
    :param no_compile: if True, skip the gradient-mode compilation pass
        (assumes objective is already compiled appropriately).
    :return: dictionary of Objectives keyed by variable if variable is None;
        otherwise a single Objective (or a number for purely
        variable-dependent input).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}
        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)
    if isinstance(objective, QTensor):
        # differentiate elementwise over the tensor of objectives
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)
    if variable not in objective.extract_variables():
        # objective does not depend on the variable: zero Objective
        return Objective()
    if no_compile:
        compiled = objective
    else:
        # decompose gates into primitives for which shift rules exist
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])
    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        # compilation wraps the expectation value as the last argument
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Chain rule over an Objective's transformation: sum of
    (outer derivative of the transformation w.r.t. argument i) times
    (inner derivative of argument i w.r.t. ``variable``) over all arguments.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None
    # cache inner derivatives of repeated expectation values
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            # holomorphic=True because parameters may be complex in this fork
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)
        if hasattr(arg, "U"):
            # save redundancies
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)
        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Differentiate a leaf argument with respect to ``variable``, recursing as needed.

    :param arg: a Variable, FixedVariable, expectation value or nested Objective
        to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated
    :return: 1.0 / 0.0 for a (non-)matching Variable, 0.0 for FixedVariable,
        otherwise the recursively constructed derivative
    '''
    assert isinstance(variable, Variable)
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate its abstract form, then
        # re-compile with the same input arguments
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U|H|U> w.r.t. ``variable``.

    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation is performed
    :return: an Objective summing the shift-rule contributions of every gate in U
        that carries ``variable`` (0.0 if U does not depend on it)
    '''
    hamiltonian = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible: circuit independent of the variable
    if variable not in circuit.extract_variables():
        return 0.0
    total = Objective()
    for position, gate in circuit._parameter_map[variable]:
        total += __grad_shift_rule(circuit, gate, position, variable, hamiltonian)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Shift-rule gradient of a single directly differentiable gate.
    Expects precompiled circuits.

    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position in unitary at which g appears
    :param variable: the variable with respect to which g is differentiated
    :param hamiltonian: the measured Hamiltonian of the enclosing expectation value
    :return: an Objective whose evaluation yields the gradient of g w.r.t. variable
    '''
    # custom gate constructions may override shifted_gates
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # inner (chain-rule) derivative of the gate parameter w.r.t. the variable
    chain_factor = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, replacement in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[replacement])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * chain_factor) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.7/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Read-only record for one weighted Pauli term: a scalar coefficient,
    the list of single-qubit operator matrices, and the qubit positions
    they act on (operators[i] acts on qubit positions[i]).
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # keep everything in one immutable record; exposed via properties only
        self._data = (coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of the Pauli term."""
        return self._data[0]

    @property
    def operators(self):
        """Operator matrices, aligned with ``positions``."""
        return self._data[1]

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._data[2]
class MPOContainer:
    """
    Mutable container for the MPO tensors of an n-qubit operator.

    Each entry of ``container`` is a rank-4 tensor of shape
    (left_bond, right_bond, 2, 2). Tensors grow like a dynamic array via
    ``update_container`` and are truncated with ``compress_mpo`` (SVD).
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # one (1, 1, 2, 2) tensor per qubit; bond dimensions grow on demand.
        # FIX: use the builtin ``complex`` — the np.complex alias was
        # deprecated in NumPy 1.20 and removed in 1.24.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the existing tensor at ``qubit``.

        set_at: [i, j] writes a full 2x2 matrix at bond indices (i, j);
        [i, j, k, l] writes a single scalar entry.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor at ``qubit`` like a dynamic array.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions always stay 2x2. The 2x2 block
        ``add_operator`` is placed at the new (last, last) bond position.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # FIX: build a new list instead of += so the caller's
                # update_dir argument is not mutated in place.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the MPO bond dimensions with a forward and a backward SVD
        sweep, discarding singular values below EPS and distributing the
        square roots of the kept ones to both sides.
        """
        n_qubits = len(self.container)
        # fuse the two physical indices for the sweeps
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # split the fused physical index back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Class building up on tensornetwork FiniteMPO to handle
MPO-Hamiltonians
"""
def __init__(self,
hamiltonian: Union[tq.QubitHamiltonian, Text],
# tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
n_qubits: Optional[int] = None,
name: Optional[Text] = None,
maxdim: Optional[int] = 10000) -> None:
# TODO: modifiy docstring
"""
Initialize a finite MPO object
Args:
tensors: The mpo tensors.
backend: An optional backend. Defaults to the defaulf backend
of TensorNetwork.
name: An optional name for the MPO.
"""
self.hamiltonian = hamiltonian
self.maxdim = maxdim
if n_qubits:
self._n_qubits = n_qubits
else:
self._n_qubits = self.get_n_qubits()
@property
def n_qubits(self):
return self._n_qubits
def make_mpo_from_hamiltonian(self):
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Here, have either a QubitHamiltonian or a file with a of-operator
# Start with Qubithamiltonian
def get_pauli_matrix(string):
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
# Set MPO Container
n_qubits = self._n_qubits
mpo = MPOContainer(n_qubits=n_qubits)
# ***********************************************************************
# Set first entries (of which we know that they are 2x2-matrices)
# Typically, this is an identity
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
if not q in my_positions:
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
elif q in my_positions:
my_pos_index = my_positions.index(q)
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# ***********************************************************************
# All other entries
# while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
# Re-write this based on positions keyword!
j += 1
while j < len(intermediate) and mpo.get_dim() < self.maxdim:
# """
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
# It is guaranteed that every index appears only once in positions
if q == 0:
update_dir = [0,1]
elif q == n_qubits-1:
update_dir = [1,0]
else:
update_dir = [1,1]
# If there's an operator on my position, add that
if q in my_positions:
my_pos_index = my_positions.index(q)
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# Else add an identity
else:
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
if not j % 100:
mpo.compress_mpo()
#print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
j += 1
mpo.compress_mpo()
#print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
return mpo, j
def intermediate_to_mpo(self, intermediate):
n_qubits = self._n_qubits
# TODO Change to multiple MPOs
mpo_list = []
j_global = 0
num_mpos = 0 # Start with 0, then final one is correct
while j_global < len(intermediate):
current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
mpo_list += [current_mpo]
num_mpos += 1
return mpo_list
def construct_matrix(self):
# TODO extend to lists of MPOs
''' Recover matrix, e.g. to compare with Hamiltonian that we get from tq '''
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.7/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Objective-evaluation wrapper handed to scipy.optimize.minimize.

    Builds the (parameter-dependent) Hamiltonian, forms its expectation value
    with ``unitary`` and simulates it, keeping an optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Attributes
    ---------
    Hamiltonian:
        callable mapping a variable dictionary to a tequila Hamiltonian.
    unitary:
        the circuit whose expectation value is evaluated.
    param_keys:
        ordered variable keys; position i corresponds to entry i of the
        parameter vector handed over by scipy.
    Ham_derivatives:
        derivatives of the Hamiltonian w.r.t. its own variables
        (consumed by the _GradContainer subclass).
    passive_angles:
        variables that are held fixed during the optimization.
    samples:
        the number of samples (shots) to evaluate the objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """

    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        self.passive_angles = passive_angles
        self.Eval = Eval
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        # history lists only exist when save_history is requested
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective, ordered like
            ``self.param_keys``.
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value, cast to complex (see note at
            the return statement).
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # Variables of the circuit are passed through as-is; variables
            # that only occur in the Hamiltonian are cast to complex.
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # Build the Hamiltonian for the current variable values, then the
        # expectation value with the (fixed) circuit.
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient wrapper handed to scipy.optimize.minimize as ``jac``.

    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector ``p``.

        Entry i combines (a) the circuit gradient for variables that appear
        in the unitary and (b) the expectation value of the Hamiltonian
        derivative for variables that appear in the Hamiltonian.

        Parameters
        ----------
        p: numpy array:
            parameter vector, ordered like ``self.param_keys``.

        Returns
        -------
        numpy.ndarray:
            complex64 gradient vector of length ``self.N``.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the vector must be complex from the start; a float64 array
        # silently dropped imaginary parts (ComplexWarning) before the final
        # cast to complex64 at the return.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        for i in range(len(self.param_keys)):
            # Circuit variables stay real; Hamiltonian-only variables are
            # passed on as complex numbers (mirrors _EvalContainer.__call__).
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # (a) contribution from the circuit (parameter-shift via tq.grad)
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # (b) contribution from the Hamiltonian derivative
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # Plain numbers can be stored directly; anything else is a
            # compiled objective that still has to be evaluated.
            # numbers.Complex covers float, complex and numpy scalar types
            # (the old float/np.complex64 check missed builtin complex).
            if isinstance(derivative, numbers.Complex):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        # BUG FIX: self.history only exists when save_history was requested
        # (see _EvalContainer.__init__); the unguarded append crashed otherwise.
        if self.save_history:
            self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    Variant of tequila's OptimizerSciPy that drives a parameter-dependent
    Hamiltonian: it overwrites the expectation and gradient container objects
    (_EvalContainer / _GradContainer) used during the scipy minimization.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables of Hamiltonian and unitary combined.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        # NOTE(review): self-assignment below has no effect.
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                # NOTE(review): TequilaOptimizerException is not imported in
                # this module -- this branch would raise NameError; confirm.
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                # NOTE(review): neither `warnings` nor TequilaWarning is
                # imported in this module -- NameError if reached; confirm.
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into optimized (active) and fixed (passive) variables
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            circuit whose expectation value of the Hamiltonian is minimized.
        variables: list, optional:
            the variables to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        SciPyResults:
            the results of optimization.
        """
        # Build the tequila Hamiltonian and its derivatives w.r.t. its own
        # variables; the total variable set is Hamiltonian + circuit variables.
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())

        if self.save_history and reset_history:
            self.reset_history()

        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)

        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled

        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)

        # Energy evaluator; called once up-front (silently) to populate the
        # infostring with the expectation-value count.
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring

        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)

        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods

        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    # NOTE(review): TequilaException, get_qng_combos,
                    # _QngContainer and `objective` are not defined in this
                    # module -- the whole qng branch raises NameError if
                    # taken; confirm before relying on it.
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # string like '2-point' is passed through to scipy untouched
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)

        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)

        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False

        if compile_gradient:
            # analytical gradient via the overwritten container; evaluated
            # once silently to populate the infostring
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring

        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))

        Es = []

        optimizer_instance = self

        class SciPyCallback:
            # per-iteration snapshots copied from the containers' histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0

            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])

        callback = SciPyCallback()

        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)

        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this assigns an unused local -- it has no effect;
            # presumably meant to patch callback.real_iterations. Confirm.
            real_iterations = range(len(E.history))

        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history

            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles

        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1]  #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}

        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy instance (instead of tequila's stock one)
    and passes the objective construction down to it.

    Parameters
    ----------
    Hamiltonian:
        parametrized Hamiltonian to minimize the expectation value of.
    unitary:
        the circuit preparing the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # normalize user-supplied gradient/hessian dictionaries
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: was assign_variable([k[1]]) -- a list is unhashable and
            # assign_variable expects a single variable key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.7/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective: structure to be differentiated (Objective, QTensor or ExpectationValue).
    :param variable: parameter with respect to which objective should be differentiated.
        default None: total gradient (a dict over all variables, built recursively).
    :param no_compile: if True, skip the gradient-mode circuit compilation step.
    return: dictionary of Objectives if variable is None, otherwise a single
        gradient Objective (numbers may appear for purely variable-dependent parts).
    '''
    if variable is None:
        # None means that all components are created: recurse once per variable
        variables = objective.extract_variables()
        result = {}

        if len(variables) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")

        for k in variables:
            assert (k is not None)
            result[k] = grad(objective, k, no_compile=no_compile)
        return result
    else:
        variable = assign_variable(variable)

    # QTensor: differentiate elementwise via numpy.vectorize
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # fast return: objective does not depend on the variable at all
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # compile to gates for which the shift rule is known
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)

        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    # dispatch on the (compiled) structure
    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    elif objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    else:
        raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over the arguments of a (transformed) Objective.

    For each argument: outer derivative of the transformation (via jax or
    autograd) times the inner derivative of the argument w.r.t. ``variable``.

    NOTE(review): this local copy passes holomorphic=True to jax.grad (stock
    tequila does not); jax then requires the transformation's inputs and
    output to be complex-valued -- confirm callers feed complex parameters.
    """
    args = objective.args
    transformation = objective.transformation
    dO = None

    # cache of inner gradients, keyed by expectation value argument
    processed_expectationvalues = {}
    for i, arg in enumerate(args):
        if __AUTOGRAD__BACKEND__ == "jax":
            df = jax.grad(transformation, argnums=i, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            df = jax.grad(transformation, argnum=i)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # We can detect one simple case where the outer derivative is const=1
        if transformation is None or transformation == identity:
            outer = 1.0
        else:
            outer = Objective(args=args, transformation=df)

        if hasattr(arg, "U"):
            # save redundancies: identical expectation values share one inner gradient
            if arg in processed_expectationvalues:
                inner = processed_expectationvalues[arg]
            else:
                inner = __grad_inner(arg=arg, variable=variable)
                processed_expectationvalues[arg] = inner
        else:
            # this means this inner derivative is purely variable dependent
            inner = __grad_inner(arg=arg, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue

        if dO is None:
            dO = outer * inner
        else:
            dO = dO + outer * inner
    if dO is None:
        raise TequilaException("caught None in __grad_objective")
    return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner derivative of a single Objective argument w.r.t. ``variable``.

    Recurses all the way down to plain variables: returns 1.0 when ``arg``
    is exactly ``variable``, 0.0 when it is a different Variable or a
    FixedVariable, and otherwise delegates to the expectation-value or
    objective gradient routines.

    :param arg: a transform, expectation value or variable to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert isinstance(variable, Variable)

    # base cases: plain (possibly fixed) variables
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0

    # expectation values: analytic shift rule
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)

    # already-compiled expectation values carry their abstract counterpart
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        inner_grad = __grad_expectationvalue(abstract, variable=variable)
        return compile(inner_grad, **arg._input_args)

    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    implements the analytic partial derivative of a unitary as it would appear in an expectation value. See the paper.
    :param E: the expectation value whose gradient should be obtained
    :param variable: the variable with respect to which differentiation should be performed.
    :return: gradient as an Objective (sum of shift-rule contributions), or 0.0
        if the unitary does not depend on the variable.
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    # all (position, gate) pairs in the circuit parametrized by this variable
    param_gates = unitary._parameter_map[variable]

    # sum the shift-rule contribution of every parametrized gate
    dO = Objective()
    for idx_g in param_gates:
        idx, g = idx_g
        dOinc = __grad_shift_rule(unitary, g, idx, variable, hamiltonian)
        dO += dOinc

    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    function for getting the gradients of directly differentiable gates. Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured, in the case that unitary
    is contained within an ExpectationValue
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for overwride in custom gate construction
    if hasattr(g, "shifted_gates"):
        # chain rule: derivative of the gate parameter w.r.t. the variable
        inner_grad = __grad_inner(g.parameter, variable)
        shifted = g.shifted_gates()
        dOinc = Objective()
        # NOTE: the loop deliberately rebinds g to each shifted replacement gate
        for x in shifted:
            w, g = x
            Ux = unitary.replace_gates(positions=[i], circuits=[g])
            wx = w * inner_grad
            Ex = Objective.ExpectationValue(U=Ux, H=hamiltonian)
            dOinc += wx * Ex
        return dOinc
    else:
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.1/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
# Global tensornetwork backend: contractions run on torch tensors.
# (Swap the commented line to switch to the numpy backend.)
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
# Alias for annotating backend tensors of any concrete type.
Tensor = Any
import tequila as tq
import torch
# Small numerical threshold -- presumably a singular-value cutoff used by
# the MPO compression below; confirm against compress_mpo.
EPS = 1e-12
class SubOperator:
    """Read-only record of one Pauli-string term in intermediate format.

    Bundles a scalar prefactor together with the dense 2x2 operator
    matrices and the qubit positions they act on; consumed by the MPO
    builder.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned index-wise with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices the operators act on."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO tensors of one matrix product operator.

    Holds one rank-4 tensor per qubit with index order
    [left bond, right bond, physical out, physical in]. Is able to set
    values at certain positions, update containers (wannabe-equivalent
    to dynamic arrays) and compress the MPO via SVD.
    """
def __init__(self,
n_qubits: int,
):
self.n_qubits = n_qubits
self.container = [ np.zeros((1,1,2,2), dtype=np.complex)
for q in range(self.n_qubits) ]
def get_dim(self):
""" Returns max dimension of container """
d = 1
for q in range(len(self.container)):
d = max(d, self.container[q].shape[0])
return d
def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
"""
set_at: where to put data
"""
# Set a matrix
if len(set_at) == 2:
self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
# Set specific values
elif len(set_at) == 4:
self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
add_operator
else:
raise Exception("set_at needs to be either of length 2 or 4")
def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
"""
This should mimick a dynamic array
update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1
the last two dimensions are always 2x2 only
"""
old_shape = self.container[qubit].shape
# print(old_shape)
if not len(update_dir) == 4:
if len(update_dir) == 2:
update_dir += [0, 0]
else:
raise Exception("update_dir needs to be either of length 2 or 4")
if update_dir[2] or update_dir[3]:
raise Exception("Last two dims must be zero.")
new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
new_tensor = np.zeros(new_shape, dtype=np.complex)
# Copy old values
new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
# Add new values
new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
# Overwrite container
self.container[qubit] = new_tensor
def compress_mpo(self):
"""
Compression of MPO via SVD
"""
n_qubits = len(self.container)
for q in range(n_qubits):
my_shape = self.container[q].shape
self.container[q] =\
self.container[q].reshape((my_shape[0], my_shape[1], -1))
# Go forwards
for q in range(n_qubits-1):
# Apply permutation [0 1 2] -> [0 2 1]
my_tensor = np.swapaxes(self.container[q], 1, 2)
my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
# full_matrices flag corresponds to 'econ' -> no zero-singular values
u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
# Count the non-zero singular values
num_nonzeros = len(np.argwhere(s>EPS))
# Construct matrix from square root of singular values
s = np.diag(np.sqrt(s[:num_nonzeros]))
u = u[:,:num_nonzeros]
vh = vh[:num_nonzeros,:]
# Distribute weights to left- and right singular vectors (@ = np.matmul)
u = u @ s
vh = s @ vh
# Apply permutation [0 1 2] -> [0 2 1]
u = u.reshape((self.container[q].shape[0],\
self.container[q].shape[2], -1))
self.container[q] = np.swapaxes(u, 1, 2)
self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
# Go backwards
for q in range(n_qubits-1, 0, -1):
my_tensor = self.container[q]
my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
# full_matrices flag corresponds to 'econ' -> no zero-singular values
u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
# Count the non-zero singular values
num_nonzeros = len(np.argwhere(s>EPS))
# Construct matrix from square root of singular values
s = np.diag(np.sqrt(s[:num_nonzeros]))
u = u[:,:num_nonzeros]
vh = vh[:num_nonzeros,:]
# Distribute weights to left- and right singular vectors
u = u @ s
vh = s @ vh
self.container[q] = np.reshape(vh, (num_nonzeros,
self.container[q].shape[1],
self.container[q].shape[2]))
self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
for q in range(n_qubits):
my_shape = self.container[q].shape
self.container[q] = self.container[q].reshape((my_shape[0],\
my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Converts a tequila QubitHamiltonian into a list of MPOs
    (matrix-product operators), one MPOContainer per chunk of Pauli strings,
    where each chunk is bounded by the maximal bond dimension `maxdim`.
    """
    # NOTE: annotations are string literals so they are not evaluated at
    # import time (tq / AbstractBackend are only needed at runtime).
    def __init__(self,
                 hamiltonian: "Union[tq.QubitHamiltonian, Text]",
                 backend: "Optional[Union[AbstractBackend, Text]]" = None,
                 n_qubits: "Optional[int]" = None,
                 name: "Optional[Text]" = None,
                 maxdim: "Optional[int]" = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or file name) to convert.
            backend: An optional backend (currently unused here). Defaults to
                the default backend of TensorNetwork.
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: An optional name for the MPO (currently unused here).
            maxdim: maximal allowed bond dimension; remaining Pauli strings
                spill into additional MPOs.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown
            # here — pass n_qubits explicitly, or confirm the method exists
            # elsewhere in the project.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits (sites) of the MPO."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build self.mpo (a list of MPOContainer) from self.hamiltonian."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Pauli strings of self.hamiltonian into a list of
        SubOperator records (coefficient, 2x2 matrices, qubit positions).

        Raises:
            Exception: if an identity term appears anywhere but first.
        """
        def get_pauli_matrix(string):
            # FIX: np.complex was removed in NumPy 1.24; use builtin complex.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty Pauli string);
            # its coefficient is later distributed over all sites.
            if first and not paulistring.items():
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPO starting at intermediate[j]; stop when intermediate is
        exhausted or the bond dimension reaches self.maxdim.

        Returns:
            (mpo, j_next): the MPOContainer and the index of the first
            SubOperator not consumed by this MPO.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ----------------------------------------------------------------------
        # First term: written directly into the initial (1,1,2,2) tensors.
        # The coefficient is spread evenly over all sites via its n-th root.
        # FIX: complex(...) replaces the removed np.complex(...) alias.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        scale = complex(my_coefficient) ** (1 / n_qubits)
        for q in range(n_qubits):
            if q in my_positions:
                op = my_operators[my_positions.index(q)]
            else:
                op = np.eye(2)
            mpo.set_tensor(qubit=q, set_at=[0, 0], add_operator=scale * op)
        # ----------------------------------------------------------------------
        # All other terms: appended as new bond rows/columns, with periodic
        # SVD compression to keep intermediate bond dimensions bounded.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            scale = complex(my_coefficient) ** (1 / n_qubits)
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # Operator on this site if present, else an identity
                if q in my_positions:
                    op = my_operators[my_positions.index(q)]
                else:
                    op = np.eye(2)
                mpo.update_container(qubit=q, update_dir=update_dir,
                                     add_operator=scale * op)
            if not j % 100:
                # Compress every 100 terms to keep the bond dimension small
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Greedily split the SubOperator list into MPOs bounded by self.maxdim."""
        mpo_list = []
        j_global = 0
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense operator from self.mpo, e.g. to compare with the
        Hamiltonian obtained from tequila. Contracts every MPO over its bond
        indices and sums the results.

        Returns:
            Complex array of shape (d, d, d, d) with d = 2**(n_qubits/2).
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along the double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices): left, right, then
            # upper and lower physical legs
            edges = [nodes[0].get_edge(0), nodes[-1].get_edge(1)]
            edges += [nodes[q].get_edge(2) for q in range(n_qubits)]
            edges += [nodes[q].get_edge(3) for q in range(n_qubits)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # FIX: previously only torch tensors were accumulated; with a
            # numpy backend H stayed None and the final reshape crashed.
            tensor = res.tensor
            if isinstance(tensor, torch.Tensor):
                H_m = tensor.numpy()
            else:
                H_m = tensor
            if first:
                H = H_m
                first = False
            else:
                H = H + H_m
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.1/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Callable wrapper that lets scipy evaluate the gradient while keeping the
    optimization history. The gradient of each parameter combines the circuit
    gradient (via tq.grad) and the Hamiltonian derivative (via Ham_derivatives).
    Used by the SciPy optimizer in this file; see _EvalContainer for the
    attribute documentation.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            parameter values, ordered like self.param_keys.

        Returns
        -------
        numpy.array (complex64):
            gradient of the energy with respect to each parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # FIX: allocate a complex vector; the original real zeros() silently
        # discarded the imaginary part (ComplexWarning) before the final cast
        # to complex64.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        # Circuit variables keep their raw value; Hamiltonian-only parameters
        # are promoted to complex (mirrors _EvalContainer.__call__).
        variables = {}
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        formatted = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            key = self.param_keys[i]
            derivative = 0.0
            # Contribution from the circuit: d<H>/d(theta) via tq.grad
            if key in list(unitary.extract_variables()):
                Ham = Hamiltonian(formatted)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective=tq.grad(objective=Expval, variable=key), backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Contribution from the Hamiltonian: <dH/d(theta)> evaluated directly
            if key in list(Ham_derivatives.keys()):
                Ham = convert_PQH_to_tq_QH(Ham_derivatives[key])
                H = Ham(formatted)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, formatted, backend='qulacs', samples=self.samples)
            # Plain numbers are stored directly; compiled objectives still
            # need to be evaluated at the current variables.
            # FIX: accept any numeric type (the original only accepted float
            # and numpy.complex64, so a builtin complex would crash below).
            if isinstance(derivative, numbers.Number):
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[key] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    SciPy-based optimizer that overwrites the expectation and gradient
    container objects so that the Hamiltonian itself may depend on variables
    (it is rebuilt from the current parameters on every evaluation).
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective received in calls to optimizers.
        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian + circuit).
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.
        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        # split into actively optimized variables and fixed (passive) ones
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables
    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.
        Parameters
        ----------
        Hamiltonian:
            the (parametrized) Hamiltonian defining the objective.
        unitary:
            the circuit whose expectation value is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        # convert_PQH_to_tq_QH comes from vqe_utils; the result H is a callable
        # that is later evaluated at concrete variable values (see _EvalContainer)
        H = convert_PQH_to_tq_QH(Hamiltonian)
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        # full variable set: Hamiltonian parameters plus circuit parameters
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys) # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        # energy evaluator; one warm-up call (silenced) fills E.infostring
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this scope (copied
                # from tequila's optimizer); the QNG branch looks broken as-is —
                # confirm before using gradient='qng'.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # numerical gradient string ('2-point', 'cs', '3-point') is
                # passed through to scipy unchanged
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` issue as above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient evaluator; warm-up call silenced as for E
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        class SciPyCallback:
            # collects per-iteration data from the evaluator histories
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        # NOTE(review): E.__call__ ignores the extra args supplied via
        # args=(Es,) — presumably a leftover; confirm.
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns a local that is never used — presumably
        # callback.real_iterations was intended; confirm.
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Calls the local optimize_scipy function instead of tequila's and passes the
    objective construction (Hamiltonian + unitary) down to it.

    Parameters
    ----------
    Hamiltonian:
        the (parametrized) Hamiltonian defining the objective.
    unitary:
        the circuit whose expectation value is minimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default).
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical hessian evaluation (does not work in combination with all optimizers),
        dictionary (keys: tuple of variables, values: tequila objective) to define own hessian,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iterations to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend.
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device the simulation runs on.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType:
        the results of optimization
    """
    # Custom gradient/hessian dictionaries get their keys normalized to
    # Variable objects so the optimizer can match them.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # FIX: was assign_variable([k[1]]) — wrapping the key in a list
            # built the second tuple entry from a list instead of the
            # variable itself (same bug existed in early tequila releases).
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    # NOTE(review): confirm OptimizerSciPy.__init__ accepts `noise_model`;
    # upstream tequila uses `noise=` for this argument.
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.1/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Differentiate Objectives, ExpectationValues, Unitaries (including single
    gates) and Transforms.

    :param objective: structure to be differentiated
        (QCircuit, ParametrizedGateImpl, Objective, ExpectationValue,
        Transform, Variable, or a QTensor of these).
    :param variable: parameter with respect to which the objective is
        differentiated; default None computes the total gradient.
    :return: dictionary of Objectives (one per variable) when variable is
        None; otherwise a single Objective/number for that variable.
    '''
    # Total gradient: recurse once per variable of the objective.
    if variable is None:
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        result = {}
        for key in all_vars:
            assert (key is not None)
            result[key] = grad(objective, key, no_compile=no_compile)
        return result
    variable = assign_variable(variable)

    # Tensors of objectives are differentiated elementwise.
    if isinstance(objective, QTensor):
        f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
        ff = vectorize(f)
        return ff(objective)

    # Independent of the variable -> zero objective.
    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile into gates whose gradients are known (shift rules etc.).
        compiler = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)
        compiled = compiler(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule over the arguments of an Objective:
    d f(E_1, ..., E_n) / dv = sum_i (df/dE_i) * (dE_i/dv).

    :param objective: the (compiled) Objective to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: an Objective representing the derivative
    """
    arguments = objective.args
    transform = objective.transformation
    total = None
    # cache inner derivatives of expectation values so each one is built once
    inner_cache = {}
    for position, argument in enumerate(arguments):
        # outer derivative df/dE_i via the configured autodiff backend
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(transform, argnums=position, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(transform, argnum=position)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")
        # trivial outer derivative when the transformation is the identity
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)
        if hasattr(argument, "U"):
            # expectation value: reuse a previously computed inner derivative
            try:
                inner = inner_cache[argument]
            except KeyError:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)
        # skip vanishing contributions instead of accumulating zeros
        if inner == 0.0:
            continue
        total = outer * inner if total is None else total + outer * inner
    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Dispatch the derivative of a single objective argument all the way down
    to plain variables: returns 1.0 (0.0) when arg is (is not) the variable,
    and recurses into expectation values and nested objectives otherwise.
    :param arg: a transform, expectation value or variable object to be differentiated
    :param variable: the Variable with respect to which arg should be differentiated.
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        # base case: d(arg)/d(variable) is 1 iff they are the same variable
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        # constants never contribute
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one, recompile
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    # anything else is treated as a nested objective
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <U' H U> with respect
    to one variable, accumulated gate by gate via the shift rule.
    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which differentiation is performed
    :return: Objective representing d<E>/d(variable), or 0.0 if E does not depend on it
    '''
    observable = E.H
    circuit = E.U
    if not circuit.verify():
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(circuit))
    # fast return if possible
    if variable not in circuit.extract_variables():
        return 0.0
    derivative = Objective()
    # sum the shift-rule contribution of every gate parametrized by variable
    for position, gate in circuit._parameter_map[variable]:
        derivative += __grad_shift_rule(circuit, gate, position, variable, observable)
    assert derivative is not None
    return derivative
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits.
    :param unitary: QCircuit: the QCircuit object containing the gate to be differentiated
    :param g: a parametrized gate: the gate being differentiated
    :param i: Int: the position in unitary at which g appears
    :param variable: Variable or String: the variable with respect to which gate g is being differentiated
    :param hamiltonian: the hamiltonian with respect to which unitary is to be measured
    :return: an Objective, whose calculation yields the gradient of g w.r.t variable
    '''
    # possibility for override in custom gate construction
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    # chain-rule factor d(gate parameter)/d(variable)
    param_grad = __grad_inner(g.parameter, variable)
    result = Objective()
    for weight, shifted_gate in g.shifted_gates():
        # same circuit with the gate at position i replaced by its shifted version
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        result += (weight * param_grad) * expectation
    return result
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.6/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Helper record bundling one Pauli term's scalar coefficient with the
    2x2 operator matrices and the qubit positions they act on; used as an
    intermediate format between the Hamiltonian and the MPO builder.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # stored privately; exposed through read-only properties below
        self._coeff = coefficient
        self._ops = operators
        self._pos = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coeff

    @property
    def operators(self):
        """List of 2x2 operator matrices."""
        return self._ops

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._pos
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each entry of ``container`` is one tensor per qubit with index order
    [left bond, right bond, physical row, physical column]; the physical
    dimensions are always 2x2.

    Fix: ``np.complex`` (deprecated in NumPy 1.20, removed in 1.24) replaced
    by the builtin ``complex`` everywhere a dtype is created.
    """

    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # start from trivial bond dimension 1 on every site
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """ Returns max (left) bond dimension over all container tensors """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the tensor of ``qubit``.

        set_at: where to put data; either a length-2 bond index (then
        ``add_operator`` must be a 2x2 matrix) or a full length-4 index
        (then ``add_operator`` is a scalar).
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] = \
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of ``qubit`` like a dynamic array and place
        ``add_operator`` (a 2x2 matrix) in the newly created bond corner.

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions must stay 2x2.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                update_dir += [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # Add new values in the fresh bottom-right bond corner
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a left-to-right sweep followed by a
        right-to-left sweep, truncating singular values below EPS and
        distributing sqrt(s) onto both factors of each split.
        """
        n_qubits = len(self.container)
        # fuse the two physical legs so each tensor has 3 indices
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = \
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            # absorb the right factor into the next site
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s > EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            # absorb the left factor into the previous site
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # split the fused physical leg back into 2x2
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle MPO-Hamiltonians:
    converts a tequila QubitHamiltonian into a list of MPOContainer objects
    (each capped at ``maxdim`` bond dimension) and can reconstruct the dense
    matrix for comparison.

    Fixes relative to the previous version:
      - ``np.complex`` (removed in NumPy 1.24) replaced by builtin ``complex``.
      - ``construct_matrix`` no longer raises NameError when the tensornetwork
        backend returns a non-torch tensor.
    """

    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object.

        Args:
            hamiltonian: tequila QubitHamiltonian (or text form) to convert.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: optional qubit count; derived via ``get_n_qubits`` if omitted.
            name: An optional name for the MPO.
            maxdim: bond-dimension cap per single MPO; terms that would exceed
                it spill into additional MPOs.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class as shown
            # here -- presumably provided elsewhere; confirm before relying on
            # the n_qubits=None path.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (a list of MPOContainer) from ``self.hamiltonian``."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Translate ``self.hamiltonian.paulistrings`` into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # maps a Pauli label to its 2x2 matrix; builtin complex dtype
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only the first term may be the identity (empty item list)
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Build one MPOContainer starting at term ``j`` of ``intermediate``,
        absorbing terms until either all are consumed or the bond dimension
        would reach ``self.maxdim``.

        Returns:
            (mpo, j): the container and the index of the first unconsumed term.
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # First term fits into the trivial (1,1,2,2) tensors; the coefficient
        # is spread evenly over the sites as coeff**(1/n_qubits).
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0, 0],
                               add_operator=complex(my_coefficient) ** (1 / n_qubits) *
                               my_operators[my_pos_index])
        # ***********************************************************************
        # Remaining terms: each grows the bond dimension by one; compress
        # periodically to keep the dimension in check.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # It is guaranteed that every index appears only once in positions
                if q == 0:
                    update_dir = [0, 1]
                elif q == n_qubits - 1:
                    update_dir = [1, 0]
                else:
                    update_dir = [1, 1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient) ** (1 / n_qubits) *
                                         np.eye(2))
            # periodic SVD compression every 100 terms
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the term list into as many MPOs as ``maxdim`` requires."""
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        """
        Recover the dense matrix, e.g. to compare with the Hamiltonian from tq.

        Returns a (d,d,d,d) tensor with d = 2**(n_qubits/2): the sum over all
        stored MPOs of the full network contraction.
        """
        mpo = self.mpo
        n_qubits = self._n_qubits
        d = int(2 ** (n_qubits / 2))
        first = True
        H = None
        # Network layout:
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert (n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (bond edges, along double -- above)
            for q in range(n_qubits - 1):
                nodes[q][1] ^ nodes[q + 1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            else:
                # Fix: non-torch backends (e.g. numpy) already return an
                # ndarray; previously H_m was left undefined on this path.
                H_m = res.tensor
            if not first:
                H += H_m
            else:
                H = H_m
            first = False
        return H.reshape((d, d, d, d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.6/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.

    Unlike the stock tequila container, the objective here is rebuilt on every
    call: the parametrized Hamiltonian is evaluated at the current angles and
    a fresh ExpectationValue is simulated.

    Attributes
    ---------
    Hamiltonian:
        callable parametrized Hamiltonian; called with the variable dict to
        obtain a concrete Hamiltonian.
    unitary:
        the (parametrized) circuit measured against the Hamiltonian.
    param_keys:
        the dictionary mapping parameter keys to positions in a numpy array.
    samples:
        the number of samples to evaluate objective with.
    save_history:
        whether or not to save, in a history, information about each time __call__ occurs.
    print_level
        dictates the verbosity of printing during call.
    N:
        the length of param_keys.
    history:
        if save_history, a list of energies received from every __call__
    history_angles:
        if save_history, a list of angles sent to __call__.
    """
    def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
                 print_level: int = 3):
        self.Hamiltonian = Hamiltonian
        self.unitary = unitary
        self.samples = samples
        self.param_keys = param_keys
        self.N = len(param_keys)
        self.save_history = save_history
        self.print_level = print_level
        # angles held fixed during the optimization; merged into every call
        self.passive_angles = passive_angles
        self.Eval = Eval
        # human-readable summary of the last evaluation (set in __call__)
        self.infostring = None
        self.Ham_derivatives = Ham_derivatives
        if save_history:
            self.history = []
            self.history_angles = []

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the energy at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call the objective; position i maps to
            self.param_keys[i].
        args
        kwargs

        Returns
        -------
        complex:
            the simulated expectation value at p.
        """
        angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(self.N):
            # circuit variables are passed through unchanged; variables that
            # only enter the Hamiltonian are cast to complex
            if self.param_keys[i] in self.unitary.extract_variables():
                angles[self.param_keys[i]] = p[i]
            else:
                angles[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            angles = {**angles, **self.passive_angles}
        vars = format_variable_dictionary(angles)
        # evaluate the parametrized Hamiltonian at the current angles
        Hamiltonian = self.Hamiltonian(vars)
        #print(Hamiltonian)
        #print(self.unitary)
        #print(vars)
        Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
        #print(Expval)
        E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
        self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
        if self.print_level > 2:
            print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
        elif self.print_level > 1:
            print("E={:+2.8f}".format(E))
        if self.save_history:
            self.history.append(E)
            self.history_angles.append(angles)
        # NOTE(review): scipy.optimize.minimize expects a real objective;
        # returning complex presumably relies on a negligible imaginary
        # part -- confirm.
        return complex(E)  # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Overwrite the call function
    Container Class to access scipy and keep the optimization history.
    This class is used by the SciPy optimizer and should not be used elsewhere.
    see _EvalContainer for details.

    The gradient has two contributions per variable: the usual circuit
    gradient (via tq.grad) when the variable appears in the unitary, and a
    Hamiltonian-derivative term when the variable parametrizes the
    Hamiltonian itself.
    """
    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at parameter vector p.

        Parameters
        ----------
        p: numpy array:
            Parameters with which to call gradient; position i maps to
            self.param_keys[i].
        args
        kwargs

        Returns
        -------
        numpy.array:
            gradient vector (dtype complex64), ordered like param_keys.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # NOTE(review): dE_vec is allocated with the default real dtype but is
        # assigned possibly-complex derivatives below; numpy will reject (or
        # truncate) a complex value here -- confirm intended behavior.
        dE_vec = numpy.zeros(self.N)
        memory = dict()
        #variables = dict((self.param_keys[i], p[i]) for i in range(len(self.param_keys)))
        variables = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
        for i in range(len(self.param_keys)):
            # circuit variables stay real; Hamiltonian-only variables are complex
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # contribution from the circuit's dependence on the variable
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # contribution from the Hamiltonian's dependence on the variable
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                #print(self.param_keys[i])
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                #print(H)
                #raise Exception("testing")
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
                #print(derivative)
                #print(type(H))
            # a plain number can be stored directly; otherwise it is still a
            # compiled objective that must be evaluated at the current angles
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
    """
    overwrite the expectation and gradient container objects

    Subclass of tequila's OptimizerSciPy whose __call__ takes a parametrized
    Hamiltonian plus a unitary (instead of a ready-made Objective) and wires
    them into the local _EvalContainer / _GradContainer.
    """
    def initialize_variables(self, all_variables, initial_values, variables):
        """
        Convenience function to format the variables of some objective recieved in calls to optimzers.

        Parameters
        ----------
        all_variables: list:
            all variables of the problem (Hamiltonian variables plus circuit
            variables); used instead of querying an Objective.
        initial_values: dict or string:
            initial values for the variables of objective, as a dictionary.
            if string: can be `zero` or `random`
            if callable: custom function that initializes when keys are passed
            if None: random initialization between 0 and 2pi (not recommended)
        variables: list:
            the variables being optimized over.

        Returns
        -------
        tuple:
            active_angles, a dict of those variables being optimized.
            passive_angles, a dict of those variables NOT being optimized.
            variables: formatted list of the variables being optimized.
        """
        # bring into right format
        variables = format_variable_list(variables)
        initial_values = format_variable_dictionary(initial_values)
        all_variables = all_variables
        if variables is None:
            variables = all_variables
        if initial_values is None:
            initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
        elif hasattr(initial_values, "lower"):
            # a string instruction: "zero" or "random"
            if initial_values.lower() == "zero":
                initial_values = {k:0.0 for k in all_variables}
            elif initial_values.lower() == "random":
                initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
            else:
                raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
        elif callable(initial_values):
            initial_values = {k: initial_values(k) for k in all_variables}
        elif isinstance(initial_values, numbers.Number):
            initial_values = {k: initial_values for k in all_variables}
        else:
            # autocomplete initial values, warn if you did
            detected = False
            for k in all_variables:
                if k not in initial_values:
                    initial_values[k] = 0.0
                    detected = True
            if detected and not self.silent:
                warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
        active_angles = {}
        for v in variables:
            active_angles[v] = initial_values[v]
        # everything not actively optimized is held fixed ("passive")
        passive_angles = {}
        for k, v in initial_values.items():
            if k not in active_angles.keys():
                passive_angles[k] = v
        return active_angles, passive_angles, variables

    def __call__(self, Hamiltonian, unitary,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyResults:
        """
        Perform optimization using scipy optimizers.

        Parameters
        ----------
        Hamiltonian:
            parametrized Hamiltonian (converted via convert_PQH_to_tq_QH).
        unitary:
            the circuit whose expectation value of Hamiltonian is minimized.
        variables: list, optional:
            the variables of objective to optimize. If None: optimize all.
        initial_values: dict, optional:
            a starting point from which to begin optimization. Will be generated if None.
        gradient: optional:
            Information or object used to calculate the gradient of objective. Defaults to None: get analytically.
        hessian: optional:
            Information or object used to calculate the hessian of objective. Defaults to None: get analytically.
        reset_history: bool: Default = True:
            whether or not to reset all history before optimizing.
        args
        kwargs

        Returns
        -------
        ScipyReturnType:
            the results of optimization.
        """
        H = convert_PQH_to_tq_QH(Hamiltonian)
        # variables entering the Hamiltonian, and its analytic derivatives
        Ham_variables, Ham_derivatives = H._construct_derivatives()
        #print("hamvars",Ham_variables)
        all_variables = copy.deepcopy(Ham_variables)
        #print(all_variables)
        for var in unitary.extract_variables():
            all_variables.append(var)
        #print(all_variables)
        infostring = "{:15} : {}\n".format("Method", self.method)
        #infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if self.save_history and reset_history:
            self.reset_history()
        active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
        #print(active_angles, passive_angles, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        #print(param_keys, param_values)
        # do the compilation here to avoid costly recompilation during the optimization
        #compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
        E = _EvalContainer(Hamiltonian = H,
                           unitary = unitary,
                           Eval=None,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           print_level=self.print_level)
        # one silent warm-up evaluation so E.infostring is populated
        E.print_level = 0
        (E(param_values))
        E.print_level = self.print_level
        infostring += E.infostring
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): `objective` is not defined in this method; this
                # QNG branch would raise NameError if taken -- confirm.
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # a plain string like '2-point' is handed to scipy directly
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(gradient,dict):
            if gradient['method'] == 'qng':
                func = gradient['function']
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                # NOTE(review): same undefined `objective` as in the QNG branch above.
                combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            # analytic gradient via the local container
            dE =_GradContainer(Ham_derivatives = Ham_derivatives,
                               unitary = unitary,
                               Hamiltonian = H,
                               Eval= E,
                               param_keys=param_keys,
                               samples=self.samples,
                               passive_angles=passive_angles,
                               save_history=self.save_history,
                               print_level=self.print_level)
            # silent warm-up evaluation to populate dE.infostring
            dE.print_level = 0
            (dE(param_values))
            dE.print_level = self.print_level
            infostring += dE.infostring
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        Es = []
        optimizer_instance = self
        # NOTE(review): the list attributes below are class-level and thus
        # shared across SciPyCallback instances; fine for a single use per
        # __call__ closure, but confirm no accumulation across calls.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                # record the most recent evaluation of each container
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
                if 'callback' in optimizer_instance.kwargs:
                    optimizer_instance.kwargs['callback'](E.history_angles[-1])
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        if callback.real_iterations == 0:
            # NOTE(review): this local is never used afterwards -- looks like
            # dead code or an unfinished fallback; confirm.
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        # some scipy methods always give back the last value and not the minimum (e.g. cobyla)
        ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
        E_final = ea[0][0]
        angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        angles_final = {**angles_final, **passive_angles}
        return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Build the local optimize_scipy optimizer and pass the (parametrized)
    Hamiltonian and unitary down to it.

    Parameters
    ----------
    Hamiltonian:
        the (possibly parametrized) Hamiltonian whose expectation value is minimized.
    unitary:
        the parametrized circuit that prepares the trial state.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend; unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    device: str, optional:
        device on which to run/emulate the circuits.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied gradients/hessians given as {variable: Objective}
    # dictionaries so the keys are proper Variable instances.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUG FIX: the second key component was wrapped in a list
            # (assign_variable([k[1]])), which is not a valid variable name.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults and construct the optimizer instance
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_0.6/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    Compute gradients of Objectives, ExpectationValues, unitaries (including
    single gates) and transforms.

    :param objective: structure to be differentiated
    :param variable: parameter with respect to which to differentiate;
        None (default) requests the total gradient, returned as a dictionary
        with one entry per variable of the objective.
    :return: dictionary of gradient objects when called without a variable,
        otherwise a single gradient Objective (or a number for trivial cases).
    '''
    if variable is None:
        # Total gradient: recurse once per variable of the objective.
        all_vars = objective.extract_variables()
        if len(all_vars) == 0:
            raise TequilaException("Error in gradient: Objective has no variables")
        gradients = {}
        for v in all_vars:
            assert (v is not None)
            gradients[v] = grad(objective, v, no_compile=no_compile)
        return gradients
    variable = assign_variable(variable)

    if isinstance(objective, QTensor):
        # Differentiate elementwise over the tensor of objectives.
        elementwise = vectorize(lambda entry: grad(objective=entry, variable=variable, *args, **kwargs))
        return elementwise(objective)

    if variable not in objective.extract_variables():
        return Objective()

    if no_compile:
        compiled = objective
    else:
        # Compile into shift-rule-differentiable primitives first.
        compiled = CircuitCompiler(multitarget=True,
                                   trotterized=True,
                                   hadamard_power=True,
                                   power=True,
                                   controlled_phase=True,
                                   controlled_rotation=True,
                                   gradient_mode=True)(objective, variables=[variable])

    if variable not in compiled.extract_variables():
        raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))

    if isinstance(objective, ExpectationValueImpl):
        return __grad_expectationvalue(E=objective, variable=variable)
    if objective.is_expectationvalue():
        return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
    if isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
        return __grad_objective(objective=compiled, variable=variable)
    raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """Chain rule over the arguments of a transformed Objective.

    For each argument the outer derivative of the transformation is combined
    with the inner derivative of the argument with respect to ``variable``.
    """
    arguments = objective.args
    transform = objective.transformation
    total = None
    inner_cache = {}  # avoid differentiating identical expectation values twice
    for pos, argument in enumerate(arguments):
        if __AUTOGRAD__BACKEND__ == "jax":
            outer_fn = jax.grad(transform, argnums=pos, holomorphic=True)
        elif __AUTOGRAD__BACKEND__ == "autograd":
            outer_fn = jax.grad(transform, argnum=pos)
        else:
            raise TequilaException("Can't differentiate without autograd or jax")

        # The outer derivative is trivially 1 when the transformation is the identity.
        if transform is None or transform == identity:
            outer = 1.0
        else:
            outer = Objective(args=arguments, transformation=outer_fn)

        if hasattr(argument, "U"):
            # expectation value: cache its inner derivative
            if argument in inner_cache:
                inner = inner_cache[argument]
            else:
                inner = __grad_inner(arg=argument, variable=variable)
                inner_cache[argument] = inner
        else:
            # purely variable-dependent argument
            inner = __grad_inner(arg=argument, variable=variable)

        if inner == 0.0:
            # don't pile up zero expectationvalues
            continue
        total = outer * inner if total is None else total + outer * inner

    if total is None:
        raise TequilaException("caught None in __grad_objective")
    return total
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Inner-derivative dispatch: drills all the way down to variables and
    returns 1.0 or 0.0 when a plain variable is (is not) identical to
    ``variable``; otherwise delegates to the expectation-value or objective
    gradient routines.

    :param arg: a transform, variable or expectation-value object to differentiate
    :param variable: the Variable with respect to which to differentiate
    '''
    assert (isinstance(variable, Variable))
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        # compiled expectation value: differentiate the abstract one and recompile
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value with respect to one
    variable: sums the shift-rule contributions of every parametrized gate
    that depends on ``variable``.

    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which to differentiate
    :return: gradient as an Objective (or 0.0 if the circuit is independent of it)
    '''
    H = E.H
    U = E.U
    if not (U.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(U))

    # fast return if the circuit does not depend on the variable
    if variable not in U.extract_variables():
        return 0.0

    total = Objective()
    for position, gate in U._parameter_map[variable]:
        total += __grad_shift_rule(U, gate, position, variable, H)
    assert total is not None
    return total
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of a single directly differentiable gate via its shift rule.
    Expects precompiled circuits.

    :param unitary: the QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of ``g`` inside ``unitary``
    :param variable: the variable with respect to which ``g`` is differentiated
    :param hamiltonian: the Hamiltonian measured against ``unitary``
    :return: an Objective whose evaluation yields the gradient contribution
    '''
    # custom gates may override shifted_gates; without it we cannot differentiate
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))
    inner = __grad_inner(g.parameter, variable)
    accumulated = Objective()
    for weight, shifted_gate in g.shifted_gates():
        replaced = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        shifted_expectation = Objective.ExpectationValue(U=replaced, H=hamiltonian)
        accumulated += (weight * inner) * shifted_expectation
    return accumulated
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.7/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
# Use the PyTorch backend for all tensornetwork contractions in this module.
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
# Type alias used in annotations below; actual tensor type is backend dependent.
Tensor = Any
import tequila as tq
import torch
# Singular values below this threshold are treated as zero during MPO compression.
EPS = 1e-12
class SubOperator:
    """
    Lightweight record holding one Pauli term in an intermediate format:
    a scalar coefficient, the single-qubit operators of the term, and the
    qubit positions on which they act. All fields are read-only properties.
    """

    def __init__(self,
                 coefficient: float,
                 operators: List,
                 positions: List
                 ):
        # Stored privately; callers access the values via the properties below.
        self._coefficient = coefficient
        self._operators = operators
        self._positions = positions

    @property
    def coefficient(self):
        """Scalar prefactor of this term."""
        return self._coefficient

    @property
    def operators(self):
        """Operator matrices, aligned index-by-index with ``positions``."""
        return self._operators

    @property
    def positions(self):
        """Qubit indices on which the operators act."""
        return self._positions
class MPOContainer:
    """
    Class that handles the MPO. Is able to set values at certain positions,
    update containers (wannabe-equivalent to dynamic arrays) and compress the MPO.

    Each entry of ``container`` is a rank-4 tensor with index order
    (left bond, right bond, physical row, physical column); the physical
    dimensions are always 2 (qubits).
    """
    def __init__(self,
                 n_qubits: int,
                 ):
        self.n_qubits = n_qubits
        # Start with trivial 1x1 bond dimensions on every site.
        # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin complex is the documented replacement.
        self.container = [ np.zeros((1,1,2,2), dtype=complex)
                           for q in range(self.n_qubits) ]

    def get_dim(self):
        """ Returns max (left) bond dimension over all site tensors. """
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write ``add_operator`` into the site tensor of ``qubit``.

        set_at: where to put data; either [left, right] to set a whole 2x2
        physical block, or [left, right, row, col] to set a single entry.
        Raises for any other length.
        """
        # Set a matrix
        if len(set_at) == 2:
            self.container[qubit][set_at[0],set_at[1],:,:] = add_operator[:,:]
        # Set specific values
        elif len(set_at) == 4:
            self.container[qubit][set_at[0],set_at[1],set_at[2],set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the bond dimensions of one site tensor (mimics a dynamic array).

        update_dir: e.g. [1,1,0,0] -> extend dimension along where there's a 1;
        the last two (physical) dimensions are always 2x2 and must stay zero.
        ``add_operator`` is placed in the newly created corner block.
        """
        old_shape = self.container[qubit].shape
        if not len(update_dir) == 4:
            if len(update_dir) == 2:
                # BUG FIX: was `update_dir += [0, 0]`, which mutated the
                # caller's list in place; build a new list instead.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")
        new_shape = tuple(update_dir[i]+old_shape[i] for i in range(len(update_dir)))
        # BUG FIX: np.complex removed in NumPy >= 1.24; use builtin complex.
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values
        new_tensor[:old_shape[0],:old_shape[1],:,:] = self.container[qubit][:,:,:,:]
        # Add new values into the fresh corner
        new_tensor[new_shape[0]-1,new_shape[1]-1,:,:] = add_operator[:,:]
        # Overwrite container
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compression of MPO via SVD: a forward sweep followed by a backward
        sweep, truncating singular values below EPS and distributing the
        square-rooted singular values to both neighbors.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each site is a rank-3 tensor.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits-1):
            # Apply permutation [0 1 2] -> [0 2 1]
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors (@ = np.matmul)
            u = u @ s
            vh = s @ vh
            # Apply permutation [0 1 2] -> [0 2 1]
            u = u.reshape((self.container[q].shape[0],\
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q+1] = tn.ncon([vh, self.container[q+1]], [(-1, 1),(1, -2, -3)])
        # Go backwards
        for q in range(n_qubits-1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            # full_matrices flag corresponds to 'econ' -> no zero-singular values
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Count the non-zero singular values
            num_nonzeros = len(np.argwhere(s>EPS))
            # Construct matrix from square root of singular values
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:,:num_nonzeros]
            vh = vh[:num_nonzeros,:]
            # Distribute weights to left- and right singular vectors
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q-1] = tn.ncon([self.container[q-1], u], [(-1, 1, -3),(1, -2)])
        # Split the merged physical index back into the 2x2 pair.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],\
                                                           my_shape[1],2,2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
    """
    Class building up on tensornetwork FiniteMPO to handle MPO-Hamiltonians.

    Converts a tequila QubitHamiltonian (one Pauli string at a time) into a
    list of compressed MPOs (MPOContainer instances), starting a new MPO
    whenever the bond dimension would exceed ``maxdim``.
    """
    def __init__(self,
                 hamiltonian: Union[tq.QubitHamiltonian, Text],
                 # tensors: List[Tensor],
                 backend: Optional[Union[AbstractBackend, Text]] = None,
                 n_qubits: Optional[int] = None,
                 name: Optional[Text] = None,
                 maxdim: Optional[int] = 10000) -> None:
        """
        Initialize a finite MPO object

        Args:
            hamiltonian: the Hamiltonian to be converted into MPO form.
            backend: An optional backend. Defaults to the default backend
                of TensorNetwork.
            n_qubits: number of qubits; derived from the Hamiltonian if omitted.
            name: An optional name for the MPO.
            maxdim: maximal bond dimension before a new MPO is started.
        """
        self.hamiltonian = hamiltonian
        self.maxdim = maxdim
        if n_qubits:
            self._n_qubits = n_qubits
        else:
            # NOTE(review): get_n_qubits is not defined in this class or file --
            # confirm it is provided elsewhere (subclass/monkey-patch), otherwise
            # constructing MyMPO without n_qubits raises AttributeError.
            self._n_qubits = self.get_n_qubits()

    @property
    def n_qubits(self):
        """Number of qubits the MPO acts on."""
        return self._n_qubits

    def make_mpo_from_hamiltonian(self):
        """Build ``self.mpo`` (a list of MPOContainer) from ``self.hamiltonian``."""
        intermediate = self.openfermion_to_intermediate()
        self.mpo = self.intermediate_to_mpo(intermediate)

    def openfermion_to_intermediate(self):
        """
        Convert the Hamiltonian's Pauli strings into a list of SubOperator
        records (coefficient, operator matrices, qubit positions).
        """
        def get_pauli_matrix(string):
            # One 2x2 matrix per Pauli label.
            # BUG FIX: np.complex was removed in NumPy 1.24; builtin complex
            # is the documented drop-in replacement for the dtype.
            pauli_matrices = {
                'I': np.array([[1, 0], [0, 1]], dtype=complex),
                'Z': np.array([[1, 0], [0, -1]], dtype=complex),
                'X': np.array([[0, 1], [1, 0]], dtype=complex),
                'Y': np.array([[0, -1j], [1j, 0]], dtype=complex)
            }
            return pauli_matrices[string.upper()]

        intermediate = []
        first = True
        # Store all paulistrings in intermediate format
        for paulistring in self.hamiltonian.paulistrings:
            coefficient = paulistring.coeff
            operators = []
            positions = []
            # Only first one should be identity -> distribute over all
            # NOTE(review): `first` is only cleared when the first string IS the
            # identity; a leading non-identity string leaves it True -- confirm
            # Hamiltonians here always start with the identity term.
            if first and not paulistring.items():
                positions += []
                operators += []
                first = False
            elif not first and not paulistring.items():
                raise Exception("Only first Pauli should be identity.")
            # Get operators and where they act
            for k, v in paulistring.items():
                positions += [k]
                operators += [get_pauli_matrix(v)]
            tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
            intermediate += [tmp_op]
        return intermediate

    def build_single_mpo(self, intermediate, j):
        """
        Consume Pauli terms starting at index ``j`` into one MPOContainer
        until either the terms are exhausted or the bond dimension reaches
        ``maxdim``. Returns (mpo, next_index).
        """
        n_qubits = self._n_qubits
        mpo = MPOContainer(n_qubits=n_qubits)
        # ***********************************************************************
        # Set first entries (of which we know that they are 2x2-matrices).
        # The coefficient is distributed evenly as its n_qubits-th root.
        my_coefficient = intermediate[j].coefficient
        my_positions = intermediate[j].positions
        my_operators = intermediate[j].operators
        for q in range(n_qubits):
            # BUG FIX (here and below): np.complex(...) removed in NumPy 1.24;
            # the builtin complex(...) performs the same scalar conversion.
            if not q in my_positions:
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               np.eye(2))
            elif q in my_positions:
                my_pos_index = my_positions.index(q)
                mpo.set_tensor(qubit=q, set_at=[0,0],
                               add_operator=complex(my_coefficient)**(1/n_qubits)*
                               my_operators[my_pos_index])
        # ***********************************************************************
        # All other entries: grow the bond dimension one term at a time until
        # the input is exhausted or the dimension cap is reached.
        j += 1
        while j < len(intermediate) and mpo.get_dim() < self.maxdim:
            my_coefficient = intermediate[j].coefficient
            my_positions = intermediate[j].positions
            my_operators = intermediate[j].operators
            for q in range(n_qubits):
                # Edge sites only grow one bond; bulk sites grow both.
                if q == 0:
                    update_dir = [0,1]
                elif q == n_qubits-1:
                    update_dir = [1,0]
                else:
                    update_dir = [1,1]
                # If there's an operator on my position, add that
                if q in my_positions:
                    my_pos_index = my_positions.index(q)
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         my_operators[my_pos_index])
                # Else add an identity
                else:
                    mpo.update_container(qubit=q, update_dir=update_dir,
                                         add_operator=
                                         complex(my_coefficient)**(1/n_qubits)*
                                         np.eye(2))
            # Periodic compression keeps the intermediate bond dimension small.
            if not j % 100:
                mpo.compress_mpo()
            j += 1
        mpo.compress_mpo()
        return mpo, j

    def intermediate_to_mpo(self, intermediate):
        """Split the list of Pauli terms into as many MPOs as ``maxdim`` requires."""
        n_qubits = self._n_qubits
        mpo_list = []
        j_global = 0
        num_mpos = 0  # Start with 0, then final one is correct
        while j_global < len(intermediate):
            current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
            mpo_list += [current_mpo]
            num_mpos += 1
        return mpo_list

    def construct_matrix(self):
        '''
        Recover the dense operator from the MPO list, e.g. to compare with the
        Hamiltonian matrix obtained directly from tequila. Returns a rank-4
        tensor of shape (d, d, d, d) with d = 2**(n_qubits/2).
        '''
        mpo = self.mpo
        # Contract over all bond indices.
        # mpo.container has indices [bond, bond, physical, physical]
        n_qubits = self._n_qubits
        d = int(2**(n_qubits/2))
        first = True
        H = None
        # Define network nodes
        #  |  |       |  |
        # -O--O--...--O--O-
        #  |  |       |  |
        for m in mpo:
            assert(n_qubits == len(m.container))
            nodes = [tn.Node(m.container[q], name=str(q))
                     for q in range(n_qubits)]
            # Connect network (along double -- above)
            for q in range(n_qubits-1):
                nodes[q][1] ^ nodes[q+1][0]
            # Collect dangling edges (free indices)
            edges = []
            # Left dangling edge
            edges += [nodes[0].get_edge(0)]
            # Right dangling edge
            edges += [nodes[-1].get_edge(1)]
            # Upper dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(2)]
            # Lower dangling edges
            for q in range(n_qubits):
                edges += [nodes[q].get_edge(3)]
            # Contract between all nodes along non-dangling edges
            res = tn.contractors.auto(nodes, output_edge_order=edges)
            # NOTE(review): H_m is only assigned when the backend returns a
            # torch tensor; with a numpy backend this raises NameError --
            # confirm the pytorch backend is always active here.
            if isinstance(res.tensor, torch.Tensor):
                H_m = res.tensor.numpy()
            if not first:
                H += H_m
            else:
                H = H_m
                first = False
        return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.7/scipy_optimizer.py | import numpy, copy, scipy, typing, numbers
from tequila import BitString, BitNumbering, BitStringLSB
from tequila.utils.keymap import KeyMapRegisterToSubregister
from tequila.circuit.compiler import change_basis
from tequila.utils import to_float
import tequila as tq
from tequila.objective import Objective
from tequila.optimizers.optimizer_scipy import OptimizerSciPy, SciPyResults
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from tequila.circuit.noise import NoiseModel
#from tequila.optimizers._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from vqe_utils import *
class _EvalContainer:
"""
Overwrite the call function
Container Class to access scipy and keep the optimization history.
This class is used by the SciPy optimizer and should not be used elsewhere.
Attributes
---------
objective:
the objective to evaluate.
param_keys:
the dictionary mapping parameter keys to positions in a numpy array.
samples:
the number of samples to evaluate objective with.
save_history:
whether or not to save, in a history, information about each time __call__ occurs.
print_level
dictates the verbosity of printing during call.
N:
the length of param_keys.
history:
if save_history, a list of energies received from every __call__
history_angles:
if save_history, a list of angles sent to __call__.
"""
def __init__(self, Hamiltonian, unitary, param_keys, Ham_derivatives= None, Eval=None, passive_angles=None, samples=1024, save_history=True,
print_level: int = 3):
self.Hamiltonian = Hamiltonian
self.unitary = unitary
self.samples = samples
self.param_keys = param_keys
self.N = len(param_keys)
self.save_history = save_history
self.print_level = print_level
self.passive_angles = passive_angles
self.Eval = Eval
self.infostring = None
self.Ham_derivatives = Ham_derivatives
if save_history:
self.history = []
self.history_angles = []
def __call__(self, p, *args, **kwargs):
"""
call a wrapped objective.
Parameters
----------
p: numpy array:
Parameters with which to call the objective.
args
kwargs
Returns
-------
numpy.array:
value of self.objective with p translated into variables, as a numpy array.
"""
angles = {}#dict((self.param_keys[i], p[i]) for i in range(self.N))
for i in range(self.N):
if self.param_keys[i] in self.unitary.extract_variables():
angles[self.param_keys[i]] = p[i]
else:
angles[self.param_keys[i]] = complex(p[i])
if self.passive_angles is not None:
angles = {**angles, **self.passive_angles}
vars = format_variable_dictionary(angles)
Hamiltonian = self.Hamiltonian(vars)
#print(Hamiltonian)
#print(self.unitary)
#print(vars)
Expval = tq.ExpectationValue(H=Hamiltonian, U=self.unitary)
#print(Expval)
E = tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
self.infostring = "{:15} : {} expectationvalues\n".format("Objective", Expval.count_expectationvalues())
if self.print_level > 2:
print("E={:+2.8f}".format(E), " angles=", angles, " samples=", self.samples)
elif self.print_level > 1:
print("E={:+2.8f}".format(E))
if self.save_history:
self.history.append(E)
self.history_angles.append(angles)
return complex(E) # jax types confuses optimizers
class _GradContainer(_EvalContainer):
    """
    Gradient counterpart of _EvalContainer: evaluates the full gradient
    vector, combining the circuit derivative (via the shift rule) with the
    explicit Hamiltonian derivatives. See _EvalContainer for attribute details.
    """

    def __call__(self, p, *args, **kwargs):
        """
        Evaluate the gradient at the scipy parameter vector ``p``.

        Parameters
        ----------
        p: numpy array:
            parameter values in the order given by ``param_keys``.

        Returns
        -------
        numpy.array:
            complex64 gradient vector, one entry per active parameter.
        """
        Ham_derivatives = self.Ham_derivatives
        Hamiltonian = self.Hamiltonian
        unitary = self.unitary
        # BUG FIX: the vector was allocated real (numpy.zeros(self.N)); numpy
        # raises TypeError when a complex derivative is assigned into a float
        # array. Allocate complex64 up front -- the return value was cast to
        # complex64 anyway.
        dE_vec = numpy.zeros(self.N, dtype=numpy.complex64)
        memory = dict()
        variables = {}
        # Circuit parameters stay real; Hamiltonian-only parameters are complex.
        for i in range(len(self.param_keys)):
            if self.param_keys[i] in self.unitary.extract_variables():
                variables[self.param_keys[i]] = p[i]
            else:
                variables[self.param_keys[i]] = complex(p[i])
        if self.passive_angles is not None:
            variables = {**variables, **self.passive_angles}
        vars = format_variable_dictionary(variables)
        expvals = 0
        for i in range(self.N):
            derivative = 0.0
            # Circuit contribution: d<H>/dtheta through the unitary.
            if self.param_keys[i] in list(unitary.extract_variables()):
                Ham = Hamiltonian(vars)
                Expval = tq.ExpectationValue(H=Ham, U=unitary)
                temp_derivative = tq.compile(objective = tq.grad(objective = Expval, variable = self.param_keys[i]),backend='qulacs')
                expvals += temp_derivative.count_expectationvalues()
                derivative += temp_derivative
            # Hamiltonian contribution: <dH/dtheta>.
            if self.param_keys[i] in list(Ham_derivatives.keys()):
                Ham = Ham_derivatives[self.param_keys[i]]
                Ham = convert_PQH_to_tq_QH(Ham)
                H = Ham(vars)
                Expval = tq.ExpectationValue(H=H, U=unitary)
                expvals += Expval.count_expectationvalues()
                derivative += tq.simulate(Expval, vars, backend='qulacs', samples=self.samples)
            # A compiled objective still needs to be evaluated; plain numbers
            # can be stored directly.
            if isinstance(derivative, float) or isinstance(derivative, numpy.complex64) :
                dE_vec[i] = derivative
            else:
                dE_vec[i] = derivative(variables=variables, samples=self.samples)
            memory[self.param_keys[i]] = dE_vec[i]
        self.infostring = "{:15} : {} expectationvalues\n".format("gradient", expvals)
        self.history.append(memory)
        return numpy.asarray(dE_vec, dtype=numpy.complex64)
class optimize_scipy(OptimizerSciPy):
"""
overwrite the expectation and gradient container objects
"""
def initialize_variables(self, all_variables, initial_values, variables):
"""
Convenience function to format the variables of some objective recieved in calls to optimzers.
Parameters
----------
objective: Objective:
the objective being optimized.
initial_values: dict or string:
initial values for the variables of objective, as a dictionary.
if string: can be `zero` or `random`
if callable: custom function that initializes when keys are passed
if None: random initialization between 0 and 2pi (not recommended)
variables: list:
the variables being optimized over.
Returns
-------
tuple:
active_angles, a dict of those variables being optimized.
passive_angles, a dict of those variables NOT being optimized.
variables: formatted list of the variables being optimized.
"""
# bring into right format
variables = format_variable_list(variables)
initial_values = format_variable_dictionary(initial_values)
all_variables = all_variables
if variables is None:
variables = all_variables
if initial_values is None:
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
elif hasattr(initial_values, "lower"):
if initial_values.lower() == "zero":
initial_values = {k:0.0 for k in all_variables}
elif initial_values.lower() == "random":
initial_values = {k: numpy.random.uniform(0, 2 * numpy.pi) for k in all_variables}
else:
raise TequilaOptimizerException("unknown initialization instruction: {}".format(initial_values))
elif callable(initial_values):
initial_values = {k: initial_values(k) for k in all_variables}
elif isinstance(initial_values, numbers.Number):
initial_values = {k: initial_values for k in all_variables}
else:
# autocomplete initial values, warn if you did
detected = False
for k in all_variables:
if k not in initial_values:
initial_values[k] = 0.0
detected = True
if detected and not self.silent:
warnings.warn("initial_variables given but not complete: Autocompleted with zeroes", TequilaWarning)
active_angles = {}
for v in variables:
active_angles[v] = initial_values[v]
passive_angles = {}
for k, v in initial_values.items():
if k not in active_angles.keys():
passive_angles[k] = v
return active_angles, passive_angles, variables
def __call__(self, Hamiltonian, unitary,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyResults:
"""
Perform optimization using scipy optimizers.
Parameters
----------
Hamiltonian:
the Hamiltonian to minimize; converted below via convert_PQH_to_tq_QH.
unitary:
the parametrized circuit whose variables (together with the
Hamiltonian's own variables) form the optimization parameters.
variables: list, optional:
the variables to optimize. If None: optimize all.
initial_values: dict, optional:
a starting point from which to begin optimization. Will be generated if None.
gradient: optional:
Information or object used to calculate the gradient. Defaults to None: get analytically.
hessian: optional:
Information or object used to calculate the hessian. Defaults to None: get analytically.
reset_history: bool: Default = True:
whether or not to reset all history before optimizing.
args
kwargs
Returns
-------
SciPyResults:
the results of optimization.
"""
# Convert the Hamiltonian to a tequila QubitHamiltonian and gather all
# variables: those of the Hamiltonian plus those of the circuit.
H = convert_PQH_to_tq_QH(Hamiltonian)
Ham_variables, Ham_derivatives = H._construct_derivatives()
#print("hamvars",Ham_variables)
all_variables = copy.deepcopy(Ham_variables)
#print(all_variables)
for var in unitary.extract_variables():
all_variables.append(var)
#print(all_variables)
infostring = "{:15} : {}\n".format("Method", self.method)
#infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if self.save_history and reset_history:
self.reset_history()
# Split variables into actively optimized angles and fixed (passive) ones.
active_angles, passive_angles, variables = self.initialize_variables(all_variables, initial_values, variables)
#print(active_angles, passive_angles, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
param_values = numpy.array(param_values)
# process and initialize scipy bounds
bounds = None
if self.method_bounds is not None:
bounds = {k: None for k in active_angles}
for k, v in self.method_bounds.items():
if k in bounds:
bounds[k] = v
infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
names, bounds = zip(*bounds.items())
assert (names == param_keys)  # make sure the bounds are not shuffled
#print(param_keys, param_values)
# do the compilation here to avoid costly recompilation during the optimization
#compiled_objective = self.compile_objective(objective=objective, *args, **kwargs)
# E maps a flat scipy parameter vector back to named angles and evaluates
# the energy; one silent warm-up call populates its history/infostring.
E = _EvalContainer(Hamiltonian = H,
unitary = unitary,
Eval=None,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
E.print_level = 0
(E(param_values))
E.print_level = self.print_level
infostring += E.infostring
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
# Analytic gradient/hessian containers are only compiled when the chosen
# scipy method actually consumes them.
compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
compile_hessian = self.method in self.hessian_based_methods
dE = None
ddE = None
# detect if numerical gradients shall be used
# switch off compiling if so
if isinstance(gradient, str):
if gradient.lower() == 'qng':
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
# NOTE(review): `objective` is not a parameter of this hacked signature
# (Hamiltonian/unitary); this QNG branch would raise NameError if taken.
combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
else:
# A plain string like '2-point' is forwarded to scipy as-is.
dE = gradient
compile_gradient = False
if compile_hessian:
compile_hessian = False
if hessian is None:
hessian = gradient
infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
if isinstance(gradient,dict):
if gradient['method'] == 'qng':
func = gradient['function']
compile_gradient = False
if compile_hessian:
raise TequilaException('Sorry, QNG and hessian not yet tested together.')
# NOTE(review): same undefined `objective` issue as above.
combos = get_qng_combos(objective,func=func, initial_values=initial_values, backend=self.backend,
samples=self.samples, noise=self.noise)
dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
infostring += "{:15} : QNG {}\n".format("gradient", dE)
if isinstance(hessian, str):
ddE = hessian
compile_hessian = False
if compile_gradient:
# Analytic gradient container built from the precomputed Hamiltonian
# derivatives; warmed up once silently like E above.
dE =_GradContainer(Ham_derivatives = Ham_derivatives,
unitary = unitary,
Hamiltonian = H,
Eval= E,
param_keys=param_keys,
samples=self.samples,
passive_angles=passive_angles,
save_history=self.save_history,
print_level=self.print_level)
dE.print_level = 0
(dE(param_values))
dE.print_level = self.print_level
infostring += dE.infostring
if self.print_level > 0:
print(self)
print(infostring)
print("{:15} : {}\n".format("active variables", len(active_angles)))
Es = []
optimizer_instance = self
# Per-iteration callback: records the most recent evaluation of each
# container so history reflects accepted iterations, not every call.
class SciPyCallback:
energies = []
gradients = []
hessians = []
angles = []
real_iterations = 0
def __call__(self, *args, **kwargs):
self.energies.append(E.history[-1])
self.angles.append(E.history_angles[-1])
if dE is not None and not isinstance(dE, str):
self.gradients.append(dE.history[-1])
if ddE is not None and not isinstance(ddE, str):
self.hessians.append(ddE.history[-1])
self.real_iterations += 1
if 'callback' in optimizer_instance.kwargs:
optimizer_instance.kwargs['callback'](E.history_angles[-1])
callback = SciPyCallback()
res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
args=(Es,),
method=self.method, tol=self.tol,
bounds=bounds,
constraints=self.method_constraints,
options=self.method_options,
callback=callback)
# failsafe since callback is not implemented everywhere
if callback.real_iterations == 0:
# NOTE(review): this assigns an unused local; it looks like it was
# meant to be callback.real_iterations — confirm against upstream.
real_iterations = range(len(E.history))
if self.save_history:
self.history.energies = callback.energies
self.history.energy_evaluations = E.history
self.history.angles = callback.angles
self.history.angles_evaluations = E.history_angles
self.history.gradients = callback.gradients
self.history.hessians = callback.hessians
if dE is not None and not isinstance(dE, str):
self.history.gradients_evaluations = dE.history
if ddE is not None and not isinstance(ddE, str):
self.history.hessians_evaluations = ddE.history
# some methods like "cobyla" do not support callback functions
if len(self.history.energies) == 0:
self.history.energies = E.history
self.history.angles = E.history_angles
# some scipy methods always give back the last value and not the minimum (e.g. cobyla)
ea = sorted(zip(E.history, E.history_angles), key=lambda x: x[0])
E_final = ea[0][0]
angles_final = ea[0][1] #dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
angles_final = {**angles_final, **passive_angles}
return SciPyResults(energy=E_final, history=self.history, variables=format_variable_dictionary(angles_final), scipy_result=res)
def minimize(Hamiltonian, unitary,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             device: str = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyResults:
    """
    Convenience entry point: builds an optimize_scipy instance and runs it on
    the given Hamiltonian and unitary.

    Parameters
    ----------
    Hamiltonian:
        the Hamiltonian defining the energy to minimize.
    unitary:
        the parametrized circuit whose variables are optimized.
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : Default value = None):
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None], optional:
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real], optional:
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable], optional:
        List of Variables to optimize
    samples: int, optional:
        samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int : (Default value = 100):
        max iters to use.
    backend: str, optional:
        Simulator backend, will be automatically chosen if set to None
    backend_options: dict, optional:
        Additional options for the backend
        Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel, optional:
        a NoiseModel to apply to all expectation values in the objective.
    method: str : (Default = "BFGS"):
        Optimization method (see scipy documentation, or 'available methods')
    tol: float : (Default = 1.e-3):
        Convergence tolerance for optimization (see scipy documentation)
    method_options: dict, optional:
        Dictionary of options
        (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]], optional:
        bounds for the variables (see scipy documentation)
    method_constraints: optional:
        (see scipy documentation)
    silent: bool :
        No printout if True
    save_history: bool:
        Save the history throughout the optimization

    Returns
    -------
    SciPyResults:
        the results of optimization
    """
    # Normalize user-supplied gradient/hessian dictionaries to canonical
    # Variable keys before handing them to the optimizer.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all([isinstance(x, Objective) for x in gradient.values()]):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all([isinstance(x, Objective) for x in hessian.values()]):
            # BUGFIX: previously assign_variable([k[1]]) wrapped the second
            # key in a list, producing an invalid (unhashable) variable name.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = optimize_scipy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               device=device,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(Hamiltonian, unitary,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 24,489 | 42.732143 | 144 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_2.7/grad_hacked.py | from tequila.circuit.compiler import CircuitCompiler
from tequila.objective.objective import Objective, ExpectationValueImpl, Variable, \
assign_variable, identity, FixedVariable
from tequila import TequilaException
from tequila.objective import QTensor
from tequila.simulators.simulator_api import compile
import typing
from numpy import vectorize
from tequila.autograd_imports import jax, __AUTOGRAD__BACKEND__
def grad(objective: typing.Union[Objective, QTensor], variable: Variable = None, no_compile=False, *args, **kwargs):
    '''
    wrapper function for getting the gradients of Objectives,ExpectationValues, Unitaries (including single gates), and Transforms.
    :param objective (Objective or QTensor): structure to be differentiated
    :param variable (Variable): parameter with respect to which obj should be differentiated.
        default None: total gradient (a dict over all variables).
    :param no_compile (bool): skip the gradient-mode circuit compilation step.
    return: dictionary of Objectives if variable is None, else a single Objective.
    '''
if variable is None:
# None means that all components are created
variables = objective.extract_variables()
result = {}
if len(variables) == 0:
raise TequilaException("Error in gradient: Objective has no variables")
for k in variables:
assert (k is not None)
# Recurse once per variable to build the full gradient dict.
result[k] = grad(objective, k, no_compile=no_compile)
return result
else:
variable = assign_variable(variable)
# QTensor: differentiate elementwise via numpy.vectorize.
if isinstance(objective, QTensor):
f = lambda x: grad(objective=x, variable=variable, *args, **kwargs)
ff = vectorize(f)
return ff(objective)
# Objective does not depend on the variable -> zero gradient.
if variable not in objective.extract_variables():
return Objective()
if no_compile:
compiled = objective
else:
# Compile to a gate set whose parameter-shift rules are known.
compiler = CircuitCompiler(multitarget=True,
trotterized=True,
hadamard_power=True,
power=True,
controlled_phase=True,
controlled_rotation=True,
gradient_mode=True)
compiled = compiler(objective, variables=[variable])
if variable not in compiled.extract_variables():
raise TequilaException("Error in taking gradient. Objective does not depend on variable {} ".format(variable))
# Dispatch on the structure of the (compiled) objective.
if isinstance(objective, ExpectationValueImpl):
return __grad_expectationvalue(E=objective, variable=variable)
elif objective.is_expectationvalue():
return __grad_expectationvalue(E=compiled.args[-1], variable=variable)
elif isinstance(compiled, Objective) or (hasattr(compiled, "args") and hasattr(compiled, "transformation")):
return __grad_objective(objective=compiled, variable=variable)
else:
raise TequilaException("Gradient not implemented for other types than ExpectationValue and Objective.")
def __grad_objective(objective: Objective, variable: Variable):
    """
    Apply the chain rule to an Objective: for each argument, multiply the
    outer derivative of the transformation (via autograd/jax) with the inner
    derivative of the argument w.r.t. `variable`, and sum the products.
    """
args = objective.args
transformation = objective.transformation
dO = None
# Cache inner derivatives of expectation values so each is built only once.
processed_expectationvalues = {}
for i, arg in enumerate(args):
# Outer derivative d(transformation)/d(arg_i), computed symbolically.
if __AUTOGRAD__BACKEND__ == "jax":
df = jax.grad(transformation, argnums=i, holomorphic=True)
elif __AUTOGRAD__BACKEND__ == "autograd":
df = jax.grad(transformation, argnum=i)
else:
raise TequilaException("Can't differentiate without autograd or jax")
# We can detect one simple case where the outer derivative is const=1
if transformation is None or transformation == identity:
outer = 1.0
else:
outer = Objective(args=args, transformation=df)
if hasattr(arg, "U"):
# save redundancies
if arg in processed_expectationvalues:
inner = processed_expectationvalues[arg]
else:
inner = __grad_inner(arg=arg, variable=variable)
processed_expectationvalues[arg] = inner
else:
# this means this inner derivative is purely variable dependent
inner = __grad_inner(arg=arg, variable=variable)
if inner == 0.0:
# don't pile up zero expectationvalues
continue
if dO is None:
dO = outer * inner
else:
dO = dO + outer * inner
if dO is None:
raise TequilaException("caught None in __grad_objective")
return dO
# def __grad_vector_objective(objective: Objective, variable: Variable):
# argsets = objective.argsets
# transformations = objective._transformations
# outputs = []
# for pos in range(len(objective)):
# args = argsets[pos]
# transformation = transformations[pos]
# dO = None
#
# processed_expectationvalues = {}
# for i, arg in enumerate(args):
# if __AUTOGRAD__BACKEND__ == "jax":
# df = jax.grad(transformation, argnums=i)
# elif __AUTOGRAD__BACKEND__ == "autograd":
# df = jax.grad(transformation, argnum=i)
# else:
# raise TequilaException("Can't differentiate without autograd or jax")
#
# # We can detect one simple case where the outer derivative is const=1
# if transformation is None or transformation == identity:
# outer = 1.0
# else:
# outer = Objective(args=args, transformation=df)
#
# if hasattr(arg, "U"):
# # save redundancies
# if arg in processed_expectationvalues:
# inner = processed_expectationvalues[arg]
# else:
# inner = __grad_inner(arg=arg, variable=variable)
# processed_expectationvalues[arg] = inner
# else:
# # this means this inner derivative is purely variable dependent
# inner = __grad_inner(arg=arg, variable=variable)
#
# if inner == 0.0:
# # don't pile up zero expectationvalues
# continue
#
# if dO is None:
# dO = outer * inner
# else:
# dO = dO + outer * inner
#
# if dO is None:
# dO = Objective()
# outputs.append(dO)
# if len(outputs) == 1:
# return outputs[0]
# return outputs
def __grad_inner(arg, variable):
    '''
    Innermost step of the recursive differentiation: resolves derivatives all
    the way down to single variables, returning 1.0 or 0.0 once `arg` is a
    plain Variable, and recursing otherwise.
    :param arg: a transform, expectation value or variable object to differentiate
    :param variable: the Variable with respect to which arg is differentiated
    '''
    assert isinstance(variable, Variable)

    # Base cases: plain variables differentiate to a constant.
    if isinstance(arg, Variable):
        return 1.0 if arg == variable else 0.0
    if isinstance(arg, FixedVariable):
        return 0.0

    # Expectation values use the parameter-shift machinery.
    if isinstance(arg, ExpectationValueImpl):
        return __grad_expectationvalue(arg, variable=variable)
    if hasattr(arg, "abstract_expectationvalue"):
        abstract = arg.abstract_expectationvalue
        derivative = __grad_expectationvalue(abstract, variable=variable)
        return compile(derivative, **arg._input_args)

    # Anything else is treated as a nested Objective.
    return __grad_objective(objective=arg, variable=variable)
def __grad_expectationvalue(E: ExpectationValueImpl, variable: Variable):
    '''
    Analytic partial derivative of an expectation value <H>_U with respect to
    one variable: sums the shift-rule contributions of every parametrized
    gate in U that carries `variable`.
    :param E: the expectation value to differentiate
    :param variable: the Variable with respect to which E is differentiated
    :return: an Objective representing d<H>_U / d(variable)
    '''
    hamiltonian = E.H
    unitary = E.U
    if not (unitary.verify()):
        raise TequilaException("error in grad_expectationvalue unitary is {}".format(unitary))

    # fast return if possible
    if variable not in unitary.extract_variables():
        return 0.0

    # Accumulate one shift-rule term per gate occurrence of the variable.
    dO = Objective()
    for position, gate in unitary._parameter_map[variable]:
        dO += __grad_shift_rule(unitary, gate, position, variable, hamiltonian)
    assert dO is not None
    return dO
def __grad_shift_rule(unitary, g, i, variable, hamiltonian):
    '''
    Gradient of one directly differentiable gate via its shift rule.
    Expects precompiled circuits (each gate must expose `shifted_gates`).
    :param unitary: QCircuit containing the gate to be differentiated
    :param g: the parametrized gate being differentiated
    :param i: position of g inside unitary
    :param variable: the Variable w.r.t. which g is differentiated
    :param hamiltonian: the Hamiltonian measured against unitary
    :return: an Objective whose evaluation yields the gradient contribution
    '''
    # Guard: custom gates may override this hook; without it we cannot shift.
    if not hasattr(g, "shifted_gates"):
        raise TequilaException('No shift found for gate {}\nWas the compiler called?'.format(g))

    # Chain rule: derivative of the gate's parameter w.r.t. the variable.
    inner_grad = __grad_inner(g.parameter, variable)

    dOinc = Objective()
    for weight, shifted_gate in g.shifted_gates():
        shifted_circuit = unitary.replace_gates(positions=[i], circuits=[shifted_gate])
        expectation = Objective.ExpectationValue(U=shifted_circuit, H=hamiltonian)
        dOinc += (weight * inner_grad) * expectation
    return dOinc
| 9,886 | 38.548 | 132 | py |
partitioning-with-cliffords | partitioning-with-cliffords-main/data/h2/h2_bl_1.2/my_mpo.py | import numpy as np
import tensornetwork as tn
from tensornetwork.backends.abstract_backend import AbstractBackend
tn.set_default_backend("pytorch")
#tn.set_default_backend("numpy")
from typing import List, Union, Text, Optional, Any, Type
Tensor = Any
import tequila as tq
import torch
EPS = 1e-12
class SubOperator:
    """
    Immutable-by-convention record used as an intermediate format for one
    Pauli string: a scalar coefficient, the Pauli matrices it applies, and
    the qubit positions those matrices act on.
    """

    def __init__(self, coefficient: float, operators: List, positions: List):
        # Store the three fields together; exposed read-only via properties.
        self._data = (coefficient, operators, positions)

    @property
    def coefficient(self):
        """Scalar prefactor of this Pauli string."""
        return self._data[0]

    @property
    def operators(self):
        """List of 2x2 Pauli matrices (or placeholders) in this string."""
        return self._data[1]

    @property
    def positions(self):
        """Qubit indices, aligned one-to-one with `operators`."""
        return self._data[2]
class MPOContainer:
    """
    Container for the site tensors of a matrix product operator (MPO).

    Holds one rank-4 tensor per qubit with index order
    (left bond, right bond, physical out, physical in); the two physical
    dimensions are always 2. Supports setting values, growing the bond
    dimensions in place (mimicking a dynamic array) and SVD-based
    compression of the bond dimensions.

    BUGFIX: the original used `dtype=np.complex`, a deprecated alias that
    was removed in NumPy 1.24 — replaced with the builtin `complex`.
    """

    def __init__(self, n_qubits: int):
        self.n_qubits = n_qubits
        # Start every site with trivial bond dimension 1.
        self.container = [np.zeros((1, 1, 2, 2), dtype=complex)
                          for q in range(self.n_qubits)]

    def get_dim(self):
        """Return the maximal left-bond dimension over all sites."""
        d = 1
        for q in range(len(self.container)):
            d = max(d, self.container[q].shape[0])
        return d

    def set_tensor(self, qubit: int, set_at: list, add_operator: Union[np.ndarray, float]):
        """
        Write data into the tensor of `qubit`.

        set_at: where to put the data —
            length 2 [i, j]:       store a full 2x2 matrix at bond indices (i, j)
            length 4 [i, j, a, b]: store a single scalar entry
        """
        if len(set_at) == 2:
            # Set a matrix
            self.container[qubit][set_at[0], set_at[1], :, :] = add_operator[:, :]
        elif len(set_at) == 4:
            # Set a specific scalar value
            self.container[qubit][set_at[0], set_at[1], set_at[2], set_at[3]] =\
                add_operator
        else:
            raise Exception("set_at needs to be either of length 2 or 4")

    def update_container(self, qubit: int, update_dir: list, add_operator: np.ndarray):
        """
        Grow the tensor of `qubit` like a dynamic array and place
        `add_operator` (a 2x2 matrix) in the newly created corner.

        update_dir: e.g. [1, 1, 0, 0] -> extend the dimension along every
        axis holding a 1; the last two (physical) dimensions stay 2x2 and
        must be 0. A length-2 list is padded with [0, 0].
        """
        old_shape = self.container[qubit].shape
        if len(update_dir) != 4:
            if len(update_dir) == 2:
                # Pad without mutating the caller's list.
                update_dir = update_dir + [0, 0]
            else:
                raise Exception("update_dir needs to be either of length 2 or 4")
        if update_dir[2] or update_dir[3]:
            raise Exception("Last two dims must be zero.")

        new_shape = tuple(update_dir[i] + old_shape[i] for i in range(len(update_dir)))
        new_tensor = np.zeros(new_shape, dtype=complex)
        # Copy old values into the top-left block ...
        new_tensor[:old_shape[0], :old_shape[1], :, :] = self.container[qubit][:, :, :, :]
        # ... and put the new operator in the freshly grown corner.
        new_tensor[new_shape[0] - 1, new_shape[1] - 1, :, :] = add_operator[:, :]
        self.container[qubit] = new_tensor

    def compress_mpo(self):
        """
        Compress the bond dimensions via a forward and a backward SVD sweep,
        discarding singular values below EPS and splitting the remaining
        weights (as square roots) between the left and right factors.
        """
        n_qubits = len(self.container)
        # Merge the two physical indices so each site is rank 3.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] =\
                self.container[q].reshape((my_shape[0], my_shape[1], -1))
        # Go forwards
        for q in range(n_qubits - 1):
            # Apply permutation [0 1 2] -> [0 2 1] so the right bond is last.
            my_tensor = np.swapaxes(self.container[q], 1, 2)
            my_tensor = my_tensor.reshape((-1, my_tensor.shape[2]))
            # full_matrices=False corresponds to 'econ' -> no zero-padded factors
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            # Keep only the non-negligible singular values.
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            # Distribute weights to left- and right singular vectors.
            u = u @ s
            vh = s @ vh
            # Undo the permutation and absorb vh into the next site.
            u = u.reshape((self.container[q].shape[0],
                           self.container[q].shape[2], -1))
            self.container[q] = np.swapaxes(u, 1, 2)
            self.container[q + 1] = tn.ncon([vh, self.container[q + 1]], [(-1, 1), (1, -2, -3)])
        # Go backwards
        for q in range(n_qubits - 1, 0, -1):
            my_tensor = self.container[q]
            my_tensor = my_tensor.reshape((self.container[q].shape[0], -1))
            u, s, vh = np.linalg.svd(my_tensor, full_matrices=False)
            num_nonzeros = len(np.argwhere(s > EPS))
            s = np.diag(np.sqrt(s[:num_nonzeros]))
            u = u[:, :num_nonzeros]
            vh = vh[:num_nonzeros, :]
            u = u @ s
            vh = s @ vh
            self.container[q] = np.reshape(vh, (num_nonzeros,
                                                self.container[q].shape[1],
                                                self.container[q].shape[2]))
            self.container[q - 1] = tn.ncon([self.container[q - 1], u], [(-1, 1, -3), (1, -2)])
        # Split the merged physical index back into 2x2.
        for q in range(n_qubits):
            my_shape = self.container[q].shape
            self.container[q] = self.container[q].reshape((my_shape[0],
                                                           my_shape[1], 2, 2))
# TODO maybe make subclass of tn.FiniteMPO if it makes sense
#class my_MPO(tn.FiniteMPO):
class MyMPO:
"""
Builds a (list of) MPO approximations of a tequila QubitHamiltonian,
loosely modelled after tensornetwork's FiniteMPO. The Hamiltonian is first
converted to an intermediate list of SubOperator records, then greedily
packed into MPOContainer objects whose bond dimension is capped by maxdim.
"""
def __init__(self,
hamiltonian: Union[tq.QubitHamiltonian, Text],
# tensors: List[Tensor],
backend: Optional[Union[AbstractBackend, Text]] = None,
n_qubits: Optional[int] = None,
name: Optional[Text] = None,
maxdim: Optional[int] = 10000) -> None:
"""
Initialize the MPO builder.
Args:
hamiltonian: the tequila QubitHamiltonian to encode.
backend: An optional backend. Defaults to the default backend
of TensorNetwork.
n_qubits: number of qubits; derived from the Hamiltonian if None.
name: An optional name for the MPO.
maxdim: maximal bond dimension before a new MPO is started.
"""
self.hamiltonian = hamiltonian
self.maxdim = maxdim
if n_qubits:
self._n_qubits = n_qubits
else:
# NOTE(review): get_n_qubits is not defined anywhere in this file —
# passing n_qubits=None would raise AttributeError; confirm upstream.
self._n_qubits = self.get_n_qubits()
@property
def n_qubits(self):
# Number of qubits (sites) in the MPO.
return self._n_qubits
def make_mpo_from_hamiltonian(self):
# Convert Hamiltonian -> intermediate SubOperator list -> list of MPOs.
intermediate = self.openfermion_to_intermediate()
# for i in range(len(intermediate)):
# print(intermediate[i].coefficient)
# print(intermediate[i].operators)
# print(intermediate[i].positions)
self.mpo = self.intermediate_to_mpo(intermediate)
def openfermion_to_intermediate(self):
# Convert the QubitHamiltonian's Pauli strings into SubOperator records
# (coefficient, list of 2x2 Pauli matrices, list of qubit positions).
def get_pauli_matrix(string):
# NOTE(review): np.complex was removed in NumPy 1.24 — these dtype
# arguments (and the np.complex(...) calls below) need the builtin
# `complex` on modern NumPy.
pauli_matrices = {
'I': np.array([[1, 0], [0, 1]], dtype=np.complex),
'Z': np.array([[1, 0], [0, -1]], dtype=np.complex),
'X': np.array([[0, 1], [1, 0]], dtype=np.complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=np.complex)
}
return pauli_matrices[string.upper()]
intermediate = []
first = True
# Store all paulistrings in intermediate format
for paulistring in self.hamiltonian.paulistrings:
coefficient = paulistring.coeff
# print(coefficient)
operators = []
positions = []
# Only first one should be identity -> distribute over all
if first and not paulistring.items():
positions += []
operators += []
first = False
elif not first and not paulistring.items():
raise Exception("Only first Pauli should be identity.")
# Get operators and where they act
for k,v in paulistring.items():
positions += [k]
operators += [get_pauli_matrix(v)]
tmp_op = SubOperator(coefficient=coefficient, operators=operators, positions=positions)
intermediate += [tmp_op]
# print("len intermediate = num Pauli strings", len(intermediate))
return intermediate
def build_single_mpo(self, intermediate, j):
# Pack Pauli strings starting at index j into one MPOContainer until
# the bond dimension would exceed maxdim; returns (mpo, next index).
n_qubits = self._n_qubits
mpo = MPOContainer(n_qubits=n_qubits)
# ***********************************************************************
# Set first entries (of which we know that they are 2x2-matrices).
# The coefficient is distributed over all sites as its n-th root.
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
if not q in my_positions:
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
elif q in my_positions:
my_pos_index = my_positions.index(q)
mpo.set_tensor(qubit=q, set_at=[0,0],
add_operator=np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# ***********************************************************************
# All other entries: each further Pauli string grows the bond dimension
# by one (except at the open boundaries).
# while (j smaller than number of intermediates left) and mpo.dim() <= self.maxdim
# Re-write this based on positions keyword!
j += 1
while j < len(intermediate) and mpo.get_dim() < self.maxdim:
# """
my_coefficient = intermediate[j].coefficient
my_positions = intermediate[j].positions
my_operators = intermediate[j].operators
for q in range(n_qubits):
# It is guaranteed that every index appears only once in positions
if q == 0:
update_dir = [0,1]
elif q == n_qubits-1:
update_dir = [1,0]
else:
update_dir = [1,1]
# If there's an operator on my position, add that
if q in my_positions:
my_pos_index = my_positions.index(q)
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
my_operators[my_pos_index])
# Else add an identity
else:
mpo.update_container(qubit=q, update_dir=update_dir,
add_operator=
np.complex(my_coefficient)**(1/n_qubits)*
np.eye(2))
# Periodic compression keeps the bond dimension in check.
if not j % 100:
mpo.compress_mpo()
#print("\t\tAt iteration ", j, " MPO has dimension ", mpo.get_dim())
j += 1
mpo.compress_mpo()
#print("\tAt final iteration ", j-1, " MPO has dimension ", mpo.get_dim())
return mpo, j
def intermediate_to_mpo(self, intermediate):
# Consume the whole intermediate list, starting a new MPO whenever the
# bond-dimension cap is reached; returns the list of MPOContainers.
n_qubits = self._n_qubits
mpo_list = []
j_global = 0
num_mpos = 0 # Start with 0, then final one is correct
while j_global < len(intermediate):
current_mpo, j_global = self.build_single_mpo(intermediate, j_global)
mpo_list += [current_mpo]
num_mpos += 1
return mpo_list
def construct_matrix(self):
''' Recover the dense operator from the stored MPO list (summing the
contractions of all MPOs), e.g. to compare with the Hamiltonian
matrix obtained from tequila. Returns a rank-4 tensor of shape
(d, d, d, d) with d = 2**(n_qubits/2). '''
mpo = self.mpo
# Contract over all bond indices
# mpo.container has indices [bond, bond, physical, physical]
n_qubits = self._n_qubits
d = int(2**(n_qubits/2))
first = True
H = None
#H = np.zeros((d,d,d,d), dtype='complex')
# Define network nodes
# | | | |
# -O--O--...--O--O-
# | | | |
for m in mpo:
assert(n_qubits == len(m.container))
nodes = [tn.Node(m.container[q], name=str(q))
for q in range(n_qubits)]
# Connect network (along double -- above)
for q in range(n_qubits-1):
nodes[q][1] ^ nodes[q+1][0]
# Collect dangling edges (free indices)
edges = []
# Left dangling edge
edges += [nodes[0].get_edge(0)]
# Right dangling edge
edges += [nodes[-1].get_edge(1)]
# Upper dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(2)]
# Lower dangling edges
for q in range(n_qubits):
edges += [nodes[q].get_edge(3)]
# Contract between all nodes along non-dangling edges
res = tn.contractors.auto(nodes, output_edge_order=edges)
# Reshape to get tensor of order 4 (get rid of left- and right open indices
# and combine top&bottom into one)
if isinstance(res.tensor, torch.Tensor):
H_m = res.tensor.numpy()
# Accumulate the contributions of all MPOs in the list.
if not first:
H += H_m
else:
H = H_m
first = False
return H.reshape((d,d,d,d))
| 14,354 | 36.480418 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.