# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/train_utils.py
import argparse
import os
import logging
import numpy as np
import random
import sys
import torch
from datetime import datetime
from torch.serialization import default_restore_location
def add_logging_arguments(parser):
parser.add_argument("--seed", default=0, type=int, help="random number generator seed")
parser.add_argument("--output-dir", default="experiments", help="path to experiment directories")
parser.add_argument("--experiment", default=None, help="experiment name to be used with Tensorboard")
parser.add_argument("--resume-training", action="store_true", help="whether to resume training")
parser.add_argument("--restore-mode", default=None, help="Either 'best' 'last' or '\path\to\checkpoint\dir'")
parser.add_argument("--restore-file", default=None, help="filename to load checkpoint")
parser.add_argument("--test-mode", default=None, help="Evaluate on which test set.")
parser.add_argument("--no-save", action="store_true", help="don't save models or checkpoints")
parser.add_argument("--step-checkpoints", action="store_true", help="store all step checkpoints")
parser.add_argument("--no-log", action="store_true", help="don't save logs to file or Tensorboard directory")
parser.add_argument("--log-interval", type=int, default=100, help="log every N steps")
parser.add_argument("--no-visual", action="store_true", help="don't use Tensorboard")
parser.add_argument("--visual-interval", type=int, default=100, help="log every N steps")
parser.add_argument("--no-progress", action="store_true", help="don't use progress bar")
parser.add_argument("--draft", action="store_true", help="save experiment results to draft directory")
parser.add_argument("--dry-run", action="store_true", help="no log, no save, no visualization")
return parser
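# Usage sketch (hedged; mirrors get_args() in utils/main_function_helpers.py):
# the experiment scripts build their own parser and extend it with these shared
# logging flags, roughly:
#   parser = argparse.ArgumentParser(allow_abbrev=False)
#   parser = add_logging_arguments(parser)
#   args, _ = parser.parse_known_args()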
def init_logging(args):
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
handlers = [logging.StreamHandler()]
if not args.no_log and args.log_file is not None:
mode = "a" if args.resume_training else "w"
handlers.append(logging.FileHandler(args.log_file, mode=mode))
logging.basicConfig(handlers=handlers, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
logging.info("Arguments: {}".format(vars(args)))
def setup_experiment(args):
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking picks non-deterministic algorithms, so it is disabled for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.dry_run:
args.no_save = args.no_log = args.no_visual = True
return
    args.experiment = args.experiment or f"{args.model.replace('_', '-')}"  # defaults to the model name, e.g. "unet"
args.experiment = "-".join([args.experiment, 'std'+str(args.noise_std)])
if not args.resume_training:
args.experiment = "-".join([args.experiment, datetime.now().strftime("%b-%d-%H:%M:%S")])
args.experiment = "-".join([args.experiment, 'tr'+str(args.train_size)])
args.experiment_dir = os.path.join(args.output_dir, args.experiment)
    os.makedirs(args.experiment_dir, exist_ok=True)  # the directory is created only if it does not already exist; no error is raised if it does
if not args.no_save:
args.checkpoint_dir = os.path.join(args.experiment_dir, "checkpoints")
os.makedirs(args.checkpoint_dir, exist_ok=True)
if not args.no_log:
args.log_dir = os.path.join(args.experiment_dir, "logs")
os.makedirs(args.log_dir, exist_ok=True)
args.log_file = os.path.join(args.log_dir, "train.log")
def save_checkpoint(args, step, epoch, model, optimizer=None, scheduler=None, score=None, mode="min"):
assert mode == "min" or mode == "max"
    last_step = getattr(save_checkpoint, "last_step", -1)  # -1 is the default returned if the attribute does not exist yet
save_checkpoint.last_step = max(last_step, step)
default_score = float("inf") if mode == "min" else float("-inf")
best_score = getattr(save_checkpoint, "best_score", default_score)
if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
save_checkpoint.best_step = step
save_checkpoint.best_epoch = epoch
save_checkpoint.best_score = score
if not args.no_save and step % args.save_interval == 0:
os.makedirs(args.checkpoint_dir, exist_ok=True)
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
state_dict = {
"step": step,
"epoch": epoch,
"score": score,
"last_step": save_checkpoint.last_step,
"best_step": save_checkpoint.best_step,
"best_epoch": save_checkpoint.best_epoch,
"best_score": getattr(save_checkpoint, "best_score", None),
"model": [m.state_dict() for m in model] if model is not None else None,
"optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
"scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
"args": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),
}
if args.step_checkpoints:
torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint{}.pt".format(step)))
if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_best.pt"))
if step > last_step:
torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_last.pt"))
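# Note on state handling (sketch): running state such as the best score is
# memoized as attributes on the function object itself rather than on a class.
# cli_main() in utils/main_function_helpers.py resets them this way before training:
#   utils.save_checkpoint.best_score = float("-inf")
#   utils.save_checkpoint.last_step = -1
# and getattr(save_checkpoint, "best_score", default) reads them back here.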
def load_checkpoint(args, model=None, optimizer=None, scheduler=None):
if args.restore_file is not None and os.path.isfile(args.restore_file):
print('restoring model..')
state_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
if "best_score" in state_dict:
save_checkpoint.best_score = state_dict["best_score"]
save_checkpoint.best_step = state_dict["best_step"]
if "last_step" in state_dict:
save_checkpoint.last_step = state_dict["last_step"]
if model is not None and state_dict.get("model", None) is not None:
for m, state in zip(model, state_dict["model"]):
m.load_state_dict(state)
if optimizer is not None and state_dict.get("optimizer", None) is not None:
for o, state in zip(optimizer, state_dict["optimizer"]):
o.load_state_dict(state)
if scheduler is not None and state_dict.get("scheduler", None) is not None:
for s, state in zip(scheduler, state_dict["scheduler"]):
milestones = s.milestones
state['milestones'] = milestones
s.load_state_dict(state)
s.milestones = milestones
logging.info("Loaded checkpoint {}".format(args.restore_file))
return state_dict
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/main_function_helpers.py
import torch
import argparse
import os
import yaml
import pathlib
import pickle
import logging
import sys
import time
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import PIL.Image as Image
import glob
from torch.serialization import default_restore_location
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
import utils
import models
from utils.data_helpers.load_datasets_helpers import *
from utils.meters import *
from utils.progress_bar import *
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from utils.test_metrics import *
def load_model(args):
USE_CUDA = True
device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
checkpoint_path = glob.glob(args.output_dir +'/unet*')
if len(checkpoint_path) != 1:
raise ValueError("There is either no or more than one model to load")
checkpoint_path = pathlib.Path(checkpoint_path[0] + f"/checkpoints/checkpoint_{args.restore_mode}.pt")
state_dict = torch.load(checkpoint_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
args = argparse.Namespace(**{ **vars(state_dict["args"]), "no_log": True})
#model = models.build_model(args).to(device)
model = models.unet_fastMRI(
in_chans=args.in_chans,
chans = args.chans,
num_pool_layers = args.num_pool_layers,
drop_prob = 0.0,
residual_connection = args.residual,
).to(device)
model.load_state_dict(state_dict["model"][0])
model.eval()
return model
def cli_main_test(args):
USE_CUDA = True
device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
model = load_model(args)
# evaluate test performance over following noise range
    noise_std_range = np.linspace(args.test_noise_std_min, args.test_noise_std_max,
        int((args.test_noise_std_max - args.test_noise_std_min) // args.test_noise_stepsize) + 1, dtype=int) / 255.  # num must be an int for np.linspace
metrics_path = os.path.join(args.output_dir, args.test_mode + '_' + str(args.test_noise_std_min)+'-'+str(args.test_noise_std_max)+f'_metrics_{args.restore_mode}.p')
metrics_dict = metrics_avg_on_noise_range(model, args, noise_std_range, device = device)
pickle.dump( metrics_dict, open(metrics_path, "wb" ) )
def cli_main(args):
available_models = glob.glob(f'{args.output_dir}/*')
if not args.resume_training and available_models:
        raise ValueError('A trained model already exists and resume_training is set to False')
if args.resume_training:
f_restore_file(args)
# reset the attributes of the function save_checkpoint
mode = "max"
default_score = float("inf") if mode == "min" else float("-inf")
utils.save_checkpoint.best_score = default_score
utils.save_checkpoint.best_step = -1
utils.save_checkpoint.best_epoch = -1
utils.save_checkpoint.last_step = -1
utils.save_checkpoint.current_lr = args.lr
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Set the name of the directory for saving results
utils.setup_experiment(args)
utils.init_logging(args)
# Build data loaders, a model and an optimizer
model = models.unet_fastMRI(
in_chans=args.in_chans,
chans = args.chans,
num_pool_layers = args.num_pool_layers,
drop_prob = 0.0,
residual_connection = args.residual,
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
#scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 60, 70, 80, 90, 100], gamma=0.5)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
min_lr=args.lr_min, eps=1e-08, verbose=True
)
logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
trainset = ImagenetSubdataset(args.train_size,args.path_to_ImageNet_train,mode='train',patch_size=args.patch_size,val_crop=args.val_crop)
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
valset = ImagenetSubdataset(args.val_size,args.path_to_ImageNet_train,mode='val',patch_size=args.patch_size,val_crop=args.val_crop)
val_loader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
print(optimizer.param_groups[0]["lr"])
if args.resume_training:
state_dict = utils.load_checkpoint(args, model, optimizer, scheduler)
global_step = state_dict['last_step']
start_epoch = int(state_dict['last_step']/(len(train_loader)))+1
start_decay = True
elif args.no_annealing:
global_step = -1
start_epoch = 0
start_decay = True
else:
global_step = -1
start_epoch = 0
start_decay = False
print(optimizer.param_groups[0]["lr"])
    args.log_interval = min(len(trainset), 100)  # setting this to len(train_loader) would log once per epoch
args.no_visual = False # True for not logging to tensorboard
# Track moving average of loss values
train_meters = { "train_loss":RunningAverageMeter(0.98)}
valid_meters = {name: AverageMeter() for name in (["valid_psnr", "valid_ssim", "valid_psnr_self_supervised", "valid_ssim_self_supervised"])}
# Create tensorflow event file
writer = SummaryWriter(log_dir=args.experiment_dir) if not args.no_visual else None
break_counter = 0
# store the best val performance from lr-interval before the last lr decay
best_val_last = 0
    # track the best val performance for the current lr-interval
best_val_current = 0
# count for how many lr intervals there was no improvement and break only if there was no improvement for 2
lr_interval_counter = 0
# if best_val_current at the end of the current lr interval is smaller than best_val_last we perform early stopping
for epoch in range(start_epoch, args.num_epochs):
start = time.process_time()
train_bar = ProgressBar(train_loader, epoch)
# At beginning of each epoch reset the train meters
for meter in train_meters.values():
meter.reset()
for inputs, noise_seed in train_bar:
model.train() #Sets the module in training mode.
global_step += 1
inputs = inputs.to(device)
noise = get_noise(inputs,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
noise_target = get_noise(inputs,torch.mul(noise_seed,10),fix_noise = args.fix_noise, noise_std = args.noise_std_target/255.)
noisy_targets = noise_target + inputs
noisy_inputs = noise + inputs
outputs = model(noisy_inputs)
            # For self-supervision the loss compares the output to the noisy target instead of the clean image
loss = F.mse_loss(outputs, noisy_targets, reduction="sum") / torch.prod(torch.tensor(inputs.size())) #(inputs.size(0) * 2)
model.zero_grad()
loss.backward()
optimizer.step()
train_meters["train_loss"].update(loss.item())
train_bar.log(dict(**train_meters, lr=optimizer.param_groups[0]["lr"]), verbose=True)
# Add to tensorflow event file:
if writer is not None:
writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
writer.add_scalar("loss/train", train_meters["train_loss"].avg, global_step)
sys.stdout.flush()
if epoch % args.valid_interval == 0:
model.eval()
gen_val = torch.Generator()
gen_val = gen_val.manual_seed(10)
for meter in valid_meters.values():
meter.reset()
valid_bar = ProgressBar(val_loader)
for sample, noise_seed in valid_bar:
with torch.no_grad():
sample = sample.to(device)
# Self-supervised validation with fixed noise
noise_self_supervised = get_noise(sample,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
noise_target_self_supervised = get_noise(sample,torch.mul(noise_seed,10),fix_noise = args.fix_noise, noise_std = args.noise_std_target/255.)
noisy_input_fixed = sample + noise_self_supervised
noisy_target = sample + noise_target_self_supervised
output_self_supervised = model(noisy_input_fixed)
valid_psnr_self_supervised = psnr(output_self_supervised, noisy_target)
valid_ssim_self_supervised = ssim(output_self_supervised, noisy_target)
valid_meters["valid_psnr_self_supervised"].update(valid_psnr_self_supervised.item())
valid_meters["valid_ssim_self_supervised"].update(valid_ssim_self_supervised.item())
                    # Ground truth validation with fixed noise
# It uses the same input and output as in the self-supervised case since the noise seed is fixed
valid_psnr = psnr(output_self_supervised, sample)
valid_ssim = ssim(output_self_supervised, sample)
valid_meters["valid_psnr"].update(valid_psnr.item())
valid_meters["valid_ssim"].update(valid_ssim.item())
if writer is not None:
                # Use .avg here: .val would only hold the PSNR of the last sample in the val set.
writer.add_scalar("psnr/valid", valid_meters['valid_psnr'].avg, global_step)
writer.add_scalar("ssim/valid", valid_meters['valid_ssim'].avg, global_step)
writer.add_scalar("psnr_selfsupervised/valid", valid_meters['valid_psnr_self_supervised'].avg, global_step)
writer.add_scalar("ssim_selfsupervised/valid", valid_meters["valid_ssim_self_supervised"].avg, global_step)
writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
sys.stdout.flush()
if args.val_flag == 0: # if we do self-supervised validation
val_loss = valid_meters["valid_psnr_self_supervised"].avg
else: # if we do supervised validation
val_loss = valid_meters["valid_psnr"].avg
if utils.save_checkpoint.best_score < val_loss and not start_decay:
utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
current_lr = utils.save_checkpoint.current_lr
optimizer.param_groups[0]["lr"] = current_lr*args.lr_beta
utils.save_checkpoint.current_lr = current_lr*args.lr_beta
annealing_counter = 0
elif not start_decay:
annealing_counter += 1
current_lr = utils.save_checkpoint.current_lr
if annealing_counter == args.lr_patience_annealing:
available_models = glob.glob(f'{args.output_dir}/*')
if not available_models:
raise ValueError('No file to restore')
elif len(available_models)>1:
raise ValueError('Too many files to restore from')
model_path = os.path.join(available_models[0], "checkpoints/checkpoint_best.pt")
state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
model = [model] if model is not None and not isinstance(model, list) else model
for m, state in zip(model, state_dict["model"]):
m.load_state_dict(state)
model = model[0]
optimizer.param_groups[0]["lr"] = current_lr/(args.lr_beta*args.inital_decay_factor)
start_decay = True
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
min_lr=args.lr_min, eps=1e-08, verbose=True
)
else:
utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
current_lr = optimizer.param_groups[0]["lr"]
if val_loss > best_val_current:
best_val_current = val_loss
if writer is not None:
writer.add_scalar("epoch", epoch, global_step)
sys.stdout.flush()
if start_decay:
current_lr = optimizer.param_groups[0]["lr"]
scheduler.step(val_loss)
new_lr = optimizer.param_groups[0]["lr"]
#At every lr decay check if the model did not improve during the current or the previous lr interval and break if it didn't.
if new_lr < current_lr:
if best_val_current < best_val_last and lr_interval_counter==1:
logging.info('Break training due to convergence of val loss!')
break
elif best_val_current < best_val_last and lr_interval_counter==0:
lr_interval_counter += 1
logging.info('Do not yet break due to convergence of val loss!')
else:
best_val_last = best_val_current
best_val_current = 0
lr_interval_counter = 0
end = time.process_time() - start
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
if optimizer.param_groups[0]["lr"] == args.lr_min and start_decay:
break_counter += 1
if break_counter == args.break_counter:
print('Break training due to minimal learning rate constraint!')
break
logging.info(f"Done training! Best PSNR {utils.save_checkpoint.best_score:.3f} obtained after step {utils.save_checkpoint.best_step} (epoch {utils.save_checkpoint.best_epoch}).")
def get_args(hp,ee,rr):
parser = argparse.ArgumentParser(allow_abbrev=False)
# Add data arguments
parser.add_argument("--train-size", default=None, help="number of examples in training set")
parser.add_argument("--val-size", default=40, help="number of examples in validation set")
parser.add_argument("--test-size", default=100, help="number of examples in test set")
parser.add_argument("--val-crop", default=True, type=bool, help="Crop validation images to train size.")
parser.add_argument("--patch-size", default=128, help="size of the center cropped HR image")
parser.add_argument("--batch-size", default=128, type=int, help="train batch size")
# Add model arguments
parser.add_argument("--model", default="unet", help="model architecture")
# Add noise arguments
parser.add_argument('--noise_std', default = 15, type = float,
help = 'noise level')
parser.add_argument('--test_noise_std_min', default = 15, type = float,
help = 'minimal noise level for testing')
parser.add_argument('--test_noise_std_max', default = 15, type = float,
help = 'maximal noise level for testing')
parser.add_argument('--test_noise_stepsize', default = 5, type = float,
help = 'Stepsize between test_noise_std_min and test_noise_std_max')
# Add optimization arguments
parser.add_argument("--lr", default=1e-3, type=float, help="learning rate")
parser.add_argument("--lr-gamma", default=0.5, type=float, help="factor by which to reduce learning rate")
parser.add_argument("--lr-beta", default=2, type=float, help="factor by which to increase learning rate")
parser.add_argument("--lr-patience", default=5, type=int, help="epochs without improvement before lr decay")
parser.add_argument("--no_annealing", default=True, type=bool, help="Use lr annealing or not.")
parser.add_argument("--lr-patience-annealing", default=3, type=int, help="epochs without improvement before lr annealing stops")
parser.add_argument("--lr-min", default=1e-5, type=float, help="Once we reach this learning rate continue for break_counter many epochs then stop.")
parser.add_argument("--lr-threshold", default=0.003, type=float, help="Improvements by less than this threshold are not counted for decay patience.")
parser.add_argument("--break-counter", default=9, type=int, help="Once smallest learning rate is reached, continue for so many epochs before stopping.")
parser.add_argument("--inital-decay-factor", default=2, type=int, help="After annealing found a lr for which val loss does not improve, go back initial_decay_factor many lrs")
parser.add_argument("--num-epochs", default=100, type=int, help="force stop training at specified epoch")
parser.add_argument("--valid-interval", default=1, type=int, help="evaluate every N epochs")
parser.add_argument("--save-interval", default=1, type=int, help="save a checkpoint every N steps")
# Add model arguments
parser = models.unet_fastMRI.add_args(parser)
parser = utils.add_logging_arguments(parser)
#args = parser.parse_args()
args, _ = parser.parse_known_args()
# Set arguments specific for this experiment
dargs = vars(args)
for key in hp.keys():
dargs[key] = hp[key][ee]
args.seed = int(42 + 10*rr)
return args
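# Hyperparameter-sweep usage (illustrative values, not from the repo): hp maps
# argument names to per-experiment value lists, ee selects the experiment index,
# and rr the repetition, which only shifts the seed (seed = 42 + 10*rr):
#   hp = {"trainset_size": [100, 300], "noise_std": [15.0, 25.0]}
#   args = get_args(hp, ee=1, rr=2)   # trainset_size=300, noise_std=25.0, seed=62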
def f_restore_file(args):
#available_models = glob.glob(f'{args.output_dir}/{args.experiment}-*')
available_models = glob.glob(f'{args.output_dir}/*')
if not available_models:
raise ValueError('No file to restore')
if not args.restore_mode:
raise ValueError("Pick restore mode either 'best' 'last' or '\path\to\checkpoint\dir'")
if args.restore_mode=='best':
mode = "max"
best_score = float("inf") if mode == "min" else float("-inf")
best_model = None
for modelp in available_models:
model_path = os.path.join(modelp, "checkpoints/checkpoint_best.pt")
if os.path.isfile(model_path):
state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
score = state_dict["best_score"]
if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
best_score = score
best_model = model_path
best_modelp = modelp
best_step = state_dict["best_step"]
best_epoch = state_dict["best_epoch"]
args.restore_file = best_model
args.experiment_dir = best_modelp
#logging.info(f"Prepare to restore best model {best_model} with PSNR {best_score} at step {best_step}, epoch {best_epoch}")
elif args.restore_mode=='last':
last_step = -1
last_model = None
for modelp in available_models:
model_path = os.path.join(modelp, "checkpoints/checkpoint_last.pt")
if os.path.isfile(model_path):
state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
step = state_dict["last_step"]
if step > last_step:
last_step = step
last_model = model_path
last_modelp = modelp
score = state_dict["score"]
last_epoch = state_dict["epoch"]
args.restore_file = last_model
args.experiment_dir = last_modelp
#logging.info(f"Prepare to restore last model {last_model} with PSNR {score} at step {last_step}, epoch {last_epoch}")
else:
args.restore_file = args.restore_mode
args.experiment_dir = args.restore_mode[:args.restore_mode.find('/checkpoints')]
def infer_images(args):
USE_CUDA = True
device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
net = load_model(args) # the denoiser
seed_dict = {
"val":10,
"test":20,
"cbsd68":30,
"urban100":40,
"mcmaster18":50,
"kodak24":60,
"CBSD68":70,
}
gen = torch.Generator()
gen = gen.manual_seed(seed_dict[args.test_mode])
# Load the test images
load_path = '../training_set_lists/'
if args.test_mode == 'test':
files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
#files_source.sort()
elif args.test_mode == 'val':
files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
#files_source.sort()
else:
files_source = torch.load(load_path+f'{args.test_mode}_filepaths.pt')
if not os.path.isdir(args.output_dir+'/test_images'):
os.mkdir(args.output_dir+'/test_images')
counter = 0
transformT = transforms.ToTensor()
transformIm = transforms.ToPILImage()
for f in files_source:
counter = counter + 1
if counter > 3:
break
# Create noise
ISource = torch.unsqueeze(transformT(Image.open(f).convert("RGB")),0).to(device)
noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
INoisy = noise.to(device) + ISource
out = torch.clamp(net(INoisy), 0., 1.).cpu()
out = torch.squeeze(out,0) # Get rid of the 1 in dim 0.
im = transformIm(out)
INoisy = torch.clamp(torch.squeeze(INoisy,0), 0., 1.).cpu()
#INoisy = torch.squeeze(INoisy,0).cpu()
INoisy = transformIm(INoisy)
clean_image = Image.open(f).convert("RGB")
im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.png')
clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.png')
INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.png')
im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.pdf')
clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.pdf')
INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.pdf')
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/util_calculate_psnr_ssim.py
import cv2
import numpy as np
import torch
# from https://github.com/JingyunLiang/SwinIR/blob/328dda0f4768772e6d8c5aa3d5aa8e24f1ad903b/utils/util_calculate_psnr_ssim.py#L80
def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
"""Calculate PSNR (Peak Signal-to-Noise Ratio).
Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result.
"""
    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return float('inf')
return 20. * np.log10(255. / np.sqrt(mse))
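# Worked example (hypothetical arrays, assuming the 8-bit [0, 255] convention
# documented above): a uniform offset of 10 intensity levels gives MSE = 100, so
# PSNR = 20*log10(255/10) ~= 28.13 dB:
#   img = np.full((16, 16, 3), 100.0)
#   calculate_psnr(img, img + 10.0, crop_border=0)   # -> 28.13...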
def _ssim(img1, img2):
"""Calculate SSIM (structural similarity) for one channel images.
It is called by func:`calculate_ssim`.
Args:
img1 (ndarray): Images with range [0, 255] with order 'HWC'.
img2 (ndarray): Images with range [0, 255] with order 'HWC'.
Returns:
float: ssim result.
"""
C1 = (0.01 * 255) ** 2
C2 = (0.03 * 255) ** 2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1 ** 2
mu2_sq = mu2 ** 2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
"""Calculate SSIM (structural similarity).
Ref:
Image quality assessment: From error visibility to structural similarity
The results are the same as that of the official released MATLAB code in
https://ece.uwaterloo.ca/~z70wang/research/ssim/.
For three-channel images, SSIM is calculated for each channel and then
averaged.
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the SSIM calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: ssim result.
"""
    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
ssims = []
for i in range(img1.shape[2]):
ssims.append(_ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
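# Quick sanity check (hypothetical input): an image compared with itself yields
# SSIM = 1.0 exactly; the image must be larger than the 11x11 Gaussian window
# plus the 5-pixel border the filtering discards:
#   img = np.random.rand(32, 32, 3) * 255
#   calculate_ssim(img, img, crop_border=0)   # -> 1.0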
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'")
if len(img.shape) == 2:
img = img[..., None]
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in colorspace
    conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
    It is mainly used for post-processing images in colorspace conversion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
    return out_img
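# Sanity-check example (hypothetical pixel): a pure white uint8 BGR pixel maps to
# the BT.601 studio-swing white level, Y = 24.966 + 128.553 + 65.481 + 16 = 235,
# and the output keeps the uint8 type and range of the input:
#   px = np.full((1, 1, 3), 255, dtype=np.uint8)
#   bgr2ycbcr(px, y_only=True)   # -> array([[235]], dtype=uint8)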
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/test_metrics.py
import torch
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
#import cv2
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from skimage import color
import PIL.Image as Image
import torchvision.transforms as transforms
from utils.utils_image import *
metrics_key = ['psnr_m', 'psnr_s', 'psnr_delta_m', 'psnr_delta_s', 'ssim_m', 'ssim_s', 'ssim_delta_m', 'ssim_delta_s']
def tensor_to_image(torch_image, low=0.0, high = 1.0, clamp = True):
if clamp:
        torch_image = torch.clamp(torch_image, low, high)
return torch_image[0,0].cpu().data.numpy()
def normalize(data):
return data/255.
def convert_dict_to_string(metrics):
    return_string = ''
    for x in metrics.keys():
        return_string += x + ': ' + str(round(metrics[x], 3)) + ' '
return return_string
def get_all_comparison_metrics(denoised, source, noisy = None, scale=None, return_title_string = False, clamp = True):
    metrics = {}
metrics['psnr'] = np.zeros(len(denoised))
metrics['ssim'] = np.zeros(len(denoised))
if noisy is not None:
metrics['psnr_delta'] = np.zeros(len(denoised))
metrics['ssim_delta'] = np.zeros(len(denoised))
if clamp:
denoised = torch.clamp(denoised, 0.0, 1.0)
    metrics['psnr'] = psnr(source, denoised)
    metrics['ssim'] = ssim(source, denoised)
    if noisy is not None:
        metrics['psnr_delta'] = metrics['psnr'] - psnr(source, noisy)
        metrics['ssim_delta'] = metrics['ssim'] - ssim(source, noisy)
if return_title_string:
return convert_dict_to_string(metrics)
else:
return metrics
def average_on_folder(args, net, noise_std,
verbose=True, device = torch.device('cuda')):
#if verbose:
#print('Loading data info ...\n')
print(f'\n Dataset: {args.test_mode}, Restore mode: {args.restore_mode}')
load_path = '../training_set_lists/'
seed_dict = {
"val":10,
"test":20,
}
gen = torch.Generator()
gen = gen.manual_seed(seed_dict[args.test_mode])
if args.test_mode == 'test':
files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
#files_source.sort()
elif args.test_mode == 'val':
files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
#files_source.sort()
    average_metrics_key = ['psnr', 'psnr_delta', 'ssim', 'ssim_delta']
    avg_metrics = {}
    for x in average_metrics_key:
        avg_metrics[x] = []
psnr_list = []
ssim_list = []
#print(files_source)
for f in files_source:
transformT = transforms.ToTensor()
ISource = torch.unsqueeze(transformT(Image.open(args.path_to_ImageNet_train + f).convert("RGB")),0).to(device)
if args.test_mode == 'val':
noise_seed = int(f[f.find('train/')+17:-5].replace('_',''))
gen = gen.manual_seed(noise_seed)
noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
INoisy = noise.to(device) + ISource
out = torch.clamp(net(INoisy), 0., 1.)
ind_metrics = get_all_comparison_metrics(out, ISource, INoisy, return_title_string = False)
        for x in average_metrics_key:
            avg_metrics[x].append(ind_metrics[x])
if(verbose):
print("%s %s" % (f, convert_dict_to_string(ind_metrics)))
metrics = {}
    for x in average_metrics_key:
        metrics[x + '_m'] = np.mean(avg_metrics[x])
        metrics[x + '_s'] = np.std(avg_metrics[x])
if verbose:
print("\n Average %s" % (convert_dict_to_string(metrics)))
#if(not verbose):
return metrics
def metrics_avg_on_noise_range(net, args, noise_std_array, device = torch.device('cuda')):
array_metrics = {}
for x in metrics_key:
array_metrics[x] = np.zeros(len(noise_std_array))
for j, noise_std in enumerate(noise_std_array):
metric_list = average_on_folder(args, net,
noise_std = noise_std,
                                        verbose=False, device=device)
for x in metrics_key:
array_metrics[x][j] += metric_list[x]
print('noise: ', int(noise_std*255), ' ', x, ': ', str(array_metrics[x][j]))
return array_metrics
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/noise_model.py
import torch
def get_noise(data, noise_seed, fix_noise, noise_std = float(25)/255.0):
if fix_noise:
device = torch.device('cuda')
gen = torch.Generator(device=device)
batch_size = data.size(dim=0)
tensor_dim = list(data.size())[1:]
for i in range(0,batch_size):
gen = gen.manual_seed(noise_seed[i].item())
noise = torch.randn(tensor_dim,generator = gen, device=device) * noise_std
noise = torch.unsqueeze(noise,0)
if i == 0:
noise_tensor = noise
else:
noise_tensor = torch.cat((noise_tensor, noise),0)
noise = noise_tensor
#noise = torch.randn(data.shape,generator = gen, device=device) * noise_std
else:
noise = torch.randn_like(data)
noise.data = noise.data * noise_std
    return noise
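# Reproducibility sketch (hypothetical tensors; fix_noise=True requires a CUDA
# device, since the generator is created on 'cuda'): identical per-sample seeds
# always reproduce the same noise, so each training image can be paired with a
# fixed noise realization across epochs:
#   data = torch.zeros(2, 3, 8, 8)
#   seeds = torch.tensor([1, 2])
#   torch.equal(get_noise(data, seeds, True), get_noise(data, seeds, True))   # True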
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/meters.py
import time
import torch
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
self.val = val / n
self.sum += val
self.count += n
self.avg = self.sum / self.count
class RunningAverageMeter(object):
def __init__(self, momentum=0.98):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if isinstance(val, torch.Tensor):
val = val.item()
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
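# Behavior sketch: the first update seeds the average directly; later updates
# apply an exponential moving average with the given momentum:
#   m = RunningAverageMeter(momentum=0.98)
#   m.update(1.0); m.update(2.0)
#   m.avg   # 1.0 * 0.98 + 2.0 * 0.02 = 1.02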
class TimeMeter(object):
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
# File: sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/data_helpers/load_datasets_helpers.py
import os
import os.path
import numpy as np
import h5py
import torch
import torchvision.transforms as transforms
import PIL.Image as Image
from utils.utils_image import *
class ImagenetSubdataset(torch.utils.data.Dataset):
def __init__(self, size, path_to_ImageNet_train, mode='train', patch_size='128', val_crop=True):
super().__init__()
load_path = '../training_set_lists/'
self.path_to_ImageNet_train = path_to_ImageNet_train
if mode=='train':
self.files = torch.load(load_path+f'trsize{size}_filepaths.pt')
self.transform = transforms.Compose([
transforms.CenterCrop(patch_size),
transforms.ToTensor(),
])
elif mode=='val':
self.files = torch.load(load_path+f'ImageNetVal{size}_filepaths.pt')
#print(self.files)
if val_crop:
self.transform = transforms.Compose([
transforms.CenterCrop(patch_size),
transforms.ToTensor(),
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
])
self.noise_seeds = {}
for i, file in enumerate(self.files):
key = file[file.find('train/')+16:-5]
number = int(file[file.find('train/')+17:-5].replace('_',''))
self.noise_seeds[key] = number
def __len__(self):
return len(self.files)
def __getitem__(self, index):
file = self.files[index]
key = file[file.find('train/')+16:-5]
noise_seed = self.noise_seeds[key]
image = Image.open(self.path_to_ImageNet_train + self.files[index]).convert("RGB") #ImageNet contains some grayscale images
data = self.transform(image)
return data, noise_seed
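# Filename-to-seed convention (illustrative): for a path like
# 'train/n03729826/n03729826_6483.JPEG' the slice [16:-5] yields the key
# 'n03729826_6483', and dropping the leading 'n' and the underscore gives the
# integer noise seed 37298266483, tying every image to one fixed noise realization.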
# File: sample_complexity_ss_recon-main/CS_natural_images_figure4/train_network_for_histogram.py
# %%
import torch
import h5py
import numpy as np
import os
import yaml
import logging
import glob
import random
import pickle
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
from torch.nn import MSELoss
import copy
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import sys
from tqdm import tqdm
import torchvision.transforms as transforms
import PIL.Image as Image
from skimage.transform import resize
from CS_natural_images_functions.unet import Unet
from CS_natural_images_functions.fftc import fft2c, ifft2c
from CS_natural_images_functions.losses import SSIMLoss
from CS_natural_images_functions.progress_bar import ProgressBar, init_logging, AverageMeter, TrackMeter, TrackMeter_testing
from CS_natural_images_functions.log_progress_helpers import save_figure, add_img_to_tensorboard, save_test_image_with_dc
from CS_natural_images_functions.load_save_model_helpers import setup_experiment_or_load_checkpoint, save_checkpoint
from CS_natural_images_functions.data_transforms import UnetDataTransform
from CS_natural_images_functions.data_transforms import compute_number_of_lines_in_input_target_kspace
# %%
class CropDataset(torch.utils.data.Dataset):
"""
    A PyTorch Dataset that provides center-cropped grayscale natural images
    (ImageNet) as ground truth, together with undersampled k-space inputs and targets.
"""
def __init__(
self,
dataset: List,
path_to_ImageNet_train: str,
transforms_list: List,
experiment_path: str,
img_size: int,
):
"""
        Args:
            dataset: A list with one entry (an ImageNet file path) per training image.
            path_to_ImageNet_train: Path to the ImageNet train directory.
            transforms_list: Functions that transform the ground truth image x into
                training input and target, one per total acceleration (3.0 and 3.5).
            experiment_path: Directory where experiment outputs are stored.
            img_size: Side length of the center crop.
        """
self.transform_30 = transforms_list[0]
self.transform_35 = transforms_list[1]
self.experiment_path = experiment_path
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Store downsampled ground truth training images here
self.examples = []
load_transform = transforms.Compose([
transforms.CenterCrop(img_size),
transforms.ToTensor(),
])
# Load mri magnitude images, downsample and store
for datapath in dataset:
image = Image.open(path_to_ImageNet_train+datapath).convert("L")
filename = datapath[16:-5]
self.examples.append((load_transform(image)[0].to(device),filename))
def __len__(self):
return len(self.examples)
def __getitem__(self, i: int):
# Determine input, target and ground truth
x,filename = self.examples[i]
y_input, x_input, y_target, x_target, x_gt, input_mask, target_mask, mean, std, fname = self.transform_30(x,filename,i)
_, _, _, _, _, _, target_mask_35, _, _, _ = self.transform_35(x,filename,i)
return y_input, x_input, y_target, x_target, x_gt, input_mask, target_mask, mean, std, fname,target_mask_35
# %%
def read_args():
parser = ArgumentParser()
parser.add_argument(
'--path_to_ImageNet_train',
type=str,
help='Path to ImageNet train directory.',
required=True
)
parser.add_argument(
'--training',
default=True,
action='store_false',
help='Add this flag to disable training.'
)
parser.add_argument(
'--testing',
        default=True,  # store_false with a False default would make this flag a no-op
action='store_false',
help='Add this flag to disable testing.'
)
parser.add_argument(
'--experiment_number',
default='300',
type=str,
help='Set consecutive numbering for the experiments.'
)
parser.add_argument(
'--gpu',
choices=(0, 1, 2, 3),
default=1,
type=int,
help='Pick one out of four gpus.'
)
parser.add_argument(
'--seed',
default=0,
type=int,
help='Set seed for network initialization.'
)
parser.add_argument(
'--trainset_size',
choices=(50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000, 250000, 500000, 1000000),
default=50,
type=int,
help='Set training set size.'
)
parser.add_argument(
'--img_size',
default=100,
type=int,
help='Set img_size for downsampling.'
)
parser.add_argument(
'--num_epochs',
default=1000,
type=int,
help='Set number of training epochs.'
)
parser.add_argument(
'--acceleration',
default=4.0,
type=float,
help='Undersampling of training and test inputs.'
)
parser.add_argument(
'--center_fraction',
default=0.08,
type=float,
help='Fraction of lines that are always sample from the center (input and target). Set to 0.0 for sampling all lines randomly.'
)
parser.add_argument(
'--fix_split',
default=True,
action='store_true',
help='Add this flag to set fix_split=True for fixed input target split for self-supervised and fixed input for supervised training.'
)
args = parser.parse_args()
exp_nums = ['992']
# validation every second epoch
# lr decay on plateau
hyperparameters = {
#'acceleration_total' : [
#3.0,
#],
'trainset_size' : [
10000,
],
'center_fraction' : [
0.08,
],
'seed' : [
1,
],
'fix_split' : [
True,
],
'num_epochs' : [
2,
],
'patience' : [ # as we validate every second epoch a patience of 10 actually means 20 epochs
15,
]
}
# Sanity checks
for key in hyperparameters.keys():
if len(hyperparameters[key]) != len(exp_nums):
print(key)
raise ValueError("Specify hyperparameters for every experiment!")
for i in range(len(exp_nums)):
args.trainset_size = hyperparameters['trainset_size'][i]
args.center_fraction = hyperparameters['center_fraction'][i]
args.seed = hyperparameters['seed'][i]
args.fix_split = hyperparameters['fix_split'][i]
args.num_epochs = hyperparameters['num_epochs'][i]
args.patience = hyperparameters['patience'][i]
args.experiment_number = exp_nums[i]
experiment_name = f"N{args.experiment_number}_t{args.trainset_size}_"
experiment_name+="sup_VS_ss3035_"
if args.center_fraction==0.0:
experiment_name+="RandCenter_"
else:
experiment_name+="FixCenter_"
experiment_name+="grad_diff_"
experiment_name += f"run{args.seed}"
experiment_path = experiment_name+"/"
#dataset_path = f"../datasets/train_{args.trainset_size}_selfsup_slice.yaml"
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
if args.training:
run_training(experiment_path=experiment_path,
acceleration=args.acceleration,
center_fraction=args.center_fraction,
seed=args.seed,
img_size=args.img_size,
fix_split=args.fix_split,
num_epochs=args.num_epochs,
patience=args.patience,
trainset_size=args.trainset_size,
path_to_ImageNet_train=args.path_to_ImageNet_train)
################################################################################################
def run_training(experiment_path,
acceleration,
center_fraction,
seed,
img_size,
fix_split,
num_epochs,
patience,
trainset_size,
path_to_ImageNet_train):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set seed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Create directory that holds train files
if not os.path.isdir(experiment_path):
os.mkdir(experiment_path)
else:
print(experiment_path)
#raise ValueError("Experiment already exists!!")
print("Warning: Experiment already exists!!")
# Init train.log file
init_logging(experiment_path)
logging.info("Training...")
# train loss function
loss_fct = MSELoss(reduction='sum')
#val_ssim_fct = SSIMLoss()
# Init model
model = Unet(
in_chans=2,
out_chans=2,
chans=24,
num_pool_layers=3,
drop_prob=0.0,).to(device)
# Init optimizer and scheduler
optimizer = torch.optim.Adam( params=model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, amsgrad=False)
# Load train set
train_pool = torch.load('CS_natural_images_functions/training_set_lists/trsize1000000_filepaths.pt')
zero_norm_files = ['train/n03729826/n03729826_6483.JPEG',
'train/n04515003/n04515003_24673.JPEG',
'train/n02111277/n02111277_12490.JPEG',
'train/n03888605/n03888605_9775.JPEG',
'train/n02992529/n02992529_3197.JPEG',
'train/n01930112/n01930112_18908.JPEG',
'train/n06874185/n06874185_3219.JPEG',
'train/n06785654/n06785654_17232.JPEG',
'train/n04033901/n04033901_29617.JPEG',
'train/n07920052/n07920052_14729.JPEG',
'train/n03729826/n03729826_40479.JPEG',
'train/n03729826/n03729826_10716.JPEG',
'train/n04286575/n04286575_74296.JPEG',
'train/n03937543/n03937543_10198.JPEG',
'train/n03063599/n03063599_3942.JPEG',
'train/n04152593/n04152593_13802.JPEG',
'train/n04522168/n04522168_24105.JPEG',
'train/n03532672/n03532672_78983.JPEG',
'train/n04404412/n04404412_12316.JPEG',
'train/n04330267/n04330267_18003.JPEG',
'train/n04118776/n04118776_37671.JPEG',
'train/n04591713/n04591713_3568.JPEG',
'train/n02437616/n02437616_12697.JPEG',
'train/n02799071/n02799071_54867.JPEG',
'train/n02883205/n02883205_26196.JPEG',
'train/n02667093/n02667093_2919.JPEG',
'train/n03196217/n03196217_1135.JPEG',
'train/n03196217/n03196217_3568.JPEG',
'train/n15075141/n15075141_19601.JPEG',
'train/n01943899/n01943899_24166.JPEG']
for zero_norm_file in zero_norm_files:
train_pool.remove(zero_norm_file)
rng_dataset = np.random.default_rng(seed)
train_set = rng_dataset.choice(train_pool, size=trainset_size, replace=False, p=None)
torch.save(train_set,experiment_path+'train_set.pt')
# Train loader
data_transform_train_35 = UnetDataTransform(acceleration=acceleration,acceleration_total=3.5, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
data_transform_train_30 = UnetDataTransform(acceleration=acceleration,acceleration_total=3.0, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
trainset = CropDataset(dataset=train_set, path_to_ImageNet_train=path_to_ImageNet_train, transforms_list=[data_transform_train_30,data_transform_train_35], experiment_path=experiment_path, img_size=img_size)
train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=1, num_workers=0, shuffle=True, generator=torch.Generator().manual_seed(0))
# store training loss metrics
train_meters = {'train_L2': AverageMeter()}
sup_diff_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
ss_diff_30_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
ss_diff_35_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
# Init tensorboard
#writer = SummaryWriter(log_dir=experiment_path)
#log_image_interval_tb = 10
# when to compute gradient histograms
compute_gradients_interval = 1
# Start training
break_counter=0
for epoch in range(num_epochs):
# compute gradient histogrms
if epoch % compute_gradients_interval == 0:
model_copy = copy.deepcopy(model)
train_bar_hist = ProgressBar(train_loader, epoch)
model_copy.train()
for meter in sup_diff_tracks.values():
meter.reset()
for meter in ss_diff_30_tracks.values():
meter.reset()
for meter in ss_diff_35_tracks.values():
meter.reset()
# estimate ground truth gradient based on whole dataset
for id,sample in enumerate(train_bar_hist):
y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
# prediction
x_output = torch.moveaxis(model_copy(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# move to kspace
y_output_sup = fft2c(x_output)
# apply target mask (all ones for supervised training)
y_output_ss_30 = y_output_sup * target_mask_30 + 0.0
y_output_ss_35 = y_output_sup * target_mask_35 + 0.0
y_target_sup = fft2c(x)
y_target_ss_30 = y_target_sup * target_mask_30 + 0.0
y_target_ss_35 = y_target_sup * target_mask_35 + 0.0
                if id == 0:
                    save_figure(torch.log(torch.abs(y_input[0,:,:,0].detach().cpu()) + 1e-9), "y_input_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_output_sup[0,:,:,0].detach().cpu()) + 1e-9), "y_output_sup_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_output_ss_30[0,:,:,0].detach().cpu()) + 1e-9), "y_output_ss_30_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_output_ss_35[0,:,:,0].detach().cpu()) + 1e-9), "y_output_ss_35_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_target_sup[0,:,:,0].detach().cpu()) + 1e-9), "y_target_sup_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_target_ss_30[0,:,:,0].detach().cpu()) + 1e-9), "y_target_ss_30_real", experiment_path)
                    save_figure(torch.log(torch.abs(y_target_ss_35[0,:,:,0].detach().cpu()) + 1e-9), "y_target_ss_35_real", experiment_path)
# compute loss
train_loss_sup = loss_fct(y_output_sup,y_target_sup) / torch.sum(torch.abs(y_target_sup)**2)
#train_loss_ss_30 = loss_fct(y_output_ss_30,y_target_ss_30) / torch.sum(torch.abs(y_target_ss_30)**2)
#train_loss_ss_35 = loss_fct(y_output_ss_35,y_target_ss_35) / torch.sum(torch.abs(y_target_ss_35)**2)
param = list(model_copy.parameters())
model_copy.zero_grad()
train_loss_sup.backward(retain_graph=True)
if id == 0:
for p in param:
p.grad_true_risk = p.grad
p.grad = None
else:
for p in param:
p.grad_true_risk += p.grad
p.grad = None
for p in param:
p.grad_true_risk = p.grad_true_risk/len(train_loader)
# compute stochastic supervised and self-supervised gradients based on the same dataset
train_bar_hist = ProgressBar(train_loader, epoch)
for id,sample in enumerate(train_bar_hist):
y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
# prediction
x_output = torch.moveaxis(model_copy(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# move to kspace
y_output_sup = fft2c(x_output)
# apply target mask (all ones for supervised training)
y_output_ss_30 = y_output_sup * target_mask_30 + 0.0
y_output_ss_35 = y_output_sup * target_mask_35 + 0.0
y_target_sup = fft2c(x)
y_target_ss_30 = y_target_sup * target_mask_30 + 0.0
y_target_ss_35 = y_target_sup * target_mask_35 + 0.0
# compute loss
train_loss_sup = loss_fct(y_output_sup,y_target_sup) / torch.sum(torch.abs(y_target_sup)**2)
train_loss_ss_30 = loss_fct(y_output_ss_30,y_target_ss_30) / torch.sum(torch.abs(y_target_ss_30)**2)
train_loss_ss_35 = loss_fct(y_output_ss_35,y_target_ss_35) / torch.sum(torch.abs(y_target_ss_35)**2)
param = list(model_copy.parameters())
model_copy.zero_grad()
train_loss_sup.backward(retain_graph=True)
for p in param:
p.grad_sup = p.grad
p.grad = None
train_loss_ss_30.backward(retain_graph=True)
for p in param:
p.grad_ss_30 = p.grad
p.grad = None
train_loss_ss_35.backward(retain_graph=True)
for p in param:
p.grad_ss_35 = p.grad
p.grad = None
diff_sup = torch.zeros(1).to(device)
diff_ss_30 = torch.zeros(1).to(device)
diff_ss_35 = torch.zeros(1).to(device)
norm_grad_of_risk = torch.zeros(1).to(device)
for p in param:
diff_sup += torch.sum(torch.square(torch.sub(p.grad_sup,p.grad_true_risk)))
diff_ss_30 += torch.sum(torch.square(torch.sub(p.grad_ss_30,p.grad_true_risk)))
diff_ss_35 += torch.sum(torch.square(torch.sub(p.grad_ss_35,p.grad_true_risk)))
norm_grad_of_risk += torch.sum(torch.square(p.grad_true_risk))
sup_diff_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_sup,norm_grad_of_risk).item())
sup_diff_tracks['take_mse'].update(torch.mean(diff_sup).item())
ss_diff_30_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_ss_30,norm_grad_of_risk).item())
ss_diff_30_tracks['take_mse'].update(torch.mean(diff_ss_30).item())
ss_diff_35_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_ss_35,norm_grad_of_risk).item())
ss_diff_35_tracks['take_mse'].update(torch.mean(diff_ss_35).item())
pickle.dump( sup_diff_tracks, open(experiment_path + f"sup_diff_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
pickle.dump( ss_diff_30_tracks, open(experiment_path + f"ss_diff_30_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
pickle.dump( ss_diff_35_tracks, open(experiment_path + f"ss_diff_35_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
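# Sketch of a hypothetical analysis snippet (not part of this script) for
# reloading the tracked deviations after training, assuming the meters expose
# a .val list with one entry per batch:
# with open(experiment_path + f"sup_diff_tracks_ep{epoch}.pkl", "rb") as f:
#     tracks = pickle.load(f)
# plt.plot(tracks['divide_by_norm_of_risk_grad'].val)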
# perform one training epoch
train_bar = ProgressBar(train_loader, epoch)
for meter in train_meters.values():
meter.reset()
for id,sample in enumerate(train_bar):
model.train()
y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
# prediction
x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# move to kspace
y_output = fft2c(x_output)
# apply target mask (all ones for supervised training)
# DO SUPERVISED TRAINING HERE
#y_output = y_output #* target_mask + 0.0
y_target = fft2c(x)
# compute loss
train_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
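# Normalized k-space loss: ||y_output - y_target||^2 / ||y_target||^2,
# assuming loss_fct is a sum-reduced MSE as in the companion training scripts.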
model.zero_grad()
train_loss.backward()
optimizer.step()
# log train metrics
train_meters['train_L2'].update(train_loss.item())
train_bar.log(dict(**train_meters), verbose=True)
################################################################################################
if __name__ == '__main__':
read_args()
# %%
| 21,063 | 36.681574 | 211 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/run_CS_natural_images.py | # %%
import torch
import h5py
import numpy as np
import os
import yaml
import logging
import glob
import json
import random
import pickle
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
from torch.nn import MSELoss
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import sys
from CS_natural_images_functions.unet import Unet
from CS_natural_images_functions.fftc import fft2c, ifft2c
from CS_natural_images_functions.losses import SSIMLoss
from CS_natural_images_functions.progress_bar import ProgressBar, init_logging, AverageMeter, TrackMeter, TrackMeter_testing
from CS_natural_images_functions.log_progress_helpers import save_figure, add_img_to_tensorboard, save_test_image_with_dc
from CS_natural_images_functions.load_save_model_helpers import setup_experiment_or_load_checkpoint, save_checkpoint
from CS_natural_images_functions.data_transforms import UnetDataTransform, CropDataset
from CS_natural_images_functions.data_transforms import compute_number_of_lines_in_input_target_kspace
# %%
def read_args():
parser = ArgumentParser()
# Required arguments
parser.add_argument(
'--config_file',
type=str,
help='Name of a config file in the experiment_configs folder.',
required=True
)
parser.add_argument(
'--path_to_ImageNet_train',
type=str,
help='Path to ImageNet train directory.',
required=True
)
parser.add_argument(
'--experiment_number',
type=str,
help="Set a unique identifier for the folder containing the experimental results. Start number with '001'. ",
required=True
)
parser.add_argument(
'--run_which_seeds',
type=str,
choices=('run_best_seed','run_all_seeds'),
help='Choose to run either only the best seed or all seeds shown in our results.',
required=True
)
# Optional arguments
parser.add_argument(
'--training',
default=True,
action='store_false',
help='Add this flag to disable training.'
)
parser.add_argument(
'--testing',
default=True,
action='store_false',
help='Add this flag to disable testing.'
)
parser.add_argument(
'--gpu',
choices=(0, 1, 2, 3),
default=3,
type=int,
help='Pick one out of four gpus.'
)
parser.add_argument(
'--trainset_size',
choices=(50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000, 250000, 500000, 1000000),
default=50,
type=int,
help='Set training set size.'
)
parser.add_argument(
'--img_size',
default=100,
type=int,
help='Set img_size for downsampling.'
)
parser.add_argument(
'--num_epochs',
default=1000,
type=int,
help='Set number of training epochs.'
)
parser.add_argument(
'--val_epoch_interval',
default=2,
type=int,
help='Set how often the validation loss is computed.'
)
parser.add_argument(
'--patience',
default=10,
type=int,
help='Patience parameter for the learning rate scheduler.'
)
parser.add_argument(
'--acceleration',
default=4.0,
type=float,
help='Undersampling of training and test inputs.'
)
parser.add_argument(
'--acceleration_total',
default=1.0,
type=float,
help='Undersampling of data available for input target split. Set to 1 for supervised training.'
)
parser.add_argument(
'--center_fraction',
default=0.08,
type=float,
help='Fraction of lines that are always sampled from the center (input and target). Set to 0.0 for sampling all lines randomly.'
)
parser.add_argument(
'--fix_split',
default=True,
action='store_true',
help='Add this flag to set use_seed=True for fixed input target split for self-supervised and fixed input for supervised training.'
)
args = parser.parse_args()
with open("experiment_configs/"+args.config_file) as handle:
config_file = json.load(handle)
args.acceleration_total = config_file['acceleration_total']
args.acceleration = config_file['acceleration']
args.trainset_size = config_file['trainset_size']
args.val_epoch_interval = config_file['val_epoch_interval']
args.patience = config_file['patience']
if args.run_which_seeds == 'run_best_seed':
seeds = [config_file['best_seed']]
elif args.run_which_seeds == 'run_all_seeds':
seeds = config_file['all_seeds']
for seed in seeds:
experiment_name = f"N{args.experiment_number}_t{args.trainset_size}_"
if args.acceleration_total==1.0:
experiment_name+="sup_"
else:
experiment_name+="selfsup_"
if args.fix_split:
experiment_name+="fixInput_"
else:
experiment_name+="RandInput_"
experiment_name += f"run{seed}"
experiment_path = experiment_name+"/"
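# Example of the resulting folder name (illustrative values): experiment_number='001',
# trainset_size=100, acceleration_total=1.0, fix_split=True and seed=1 give
# "N001_t100_sup_fixInput_run1/".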
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
if args.training:
run_training(experiment_path=experiment_path,
acceleration=args.acceleration,
center_fraction=args.center_fraction,
acceleration_total=args.acceleration_total,
seed=seed,
img_size=args.img_size,
fix_split=args.fix_split,
val_epoch_interval=args.val_epoch_interval,
num_epochs=args.num_epochs,
patience=args.patience,
trainset_size=args.trainset_size,
path_to_ImageNet_train=args.path_to_ImageNet_train)
if args.testing:
run_testing(experiment_path=experiment_path,
acceleration=args.acceleration,
center_fraction=args.center_fraction,
acceleration_total=args.acceleration_total,
img_size=args.img_size,
path_to_ImageNet_train=args.path_to_ImageNet_train)
################################################################################################
def run_training(experiment_path,
acceleration,
center_fraction,
acceleration_total,
seed,
img_size,
fix_split,
val_epoch_interval,
num_epochs,
patience,
trainset_size,
path_to_ImageNet_train):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set seed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Create directory that holds train files
if not os.path.isdir(experiment_path):
os.mkdir(experiment_path)
else:
print(experiment_path)
#raise ValueError("Experiment already exists!!")
print("Warning: Experiment already exists!!")
# Init train.log file
init_logging(experiment_path)
logging.info("Training...")
# Log sanity checks on the number of lines in the input/target kspaces
input_size, target_size, overlap_size_high, size_low, p, q, mu, nu, weight_on_random_lines = compute_number_of_lines_in_input_target_kspace(p=1/acceleration,mu=1/acceleration_total,nu=center_fraction, n=img_size)
logging.info(f"mu: {mu}, p: {p}, q: {q}, nu: {nu}, weight_on_random_lines: {weight_on_random_lines}")
logging.info(f"\n Lines in kspace: {img_size} \n Lines in input: {input_size} \n Lines in target: {target_size} \n Number of high freq overlapping lines: {overlap_size_high} \n Number of low freq lines: {size_low}")
# train loss function
loss_fct = MSELoss(reduction='sum')
val_ssim_fct = SSIMLoss()
# Init model
model = Unet(
in_chans=2,
out_chans=2,
chans=24,
num_pool_layers=3,
drop_prob=0.0,).to(device)
logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")#
# Init optimizer and scheduler
optimizer = torch.optim.Adam( params=model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, amsgrad=False)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='max',
factor=0.1,
patience=patience,
threshold=0.0001,
threshold_mode='abs',
cooldown=0,
min_lr=1e-5,
eps=1e-08,
verbose=True
)
# If a checkpoint exists, it is automatically loaded
setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=model, optimizer=optimizer, scheduler=scheduler)
# Load train set
train_pool = torch.load('CS_natural_images_functions/training_set_lists/trsize1000000_filepaths.pt')
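# Files excluded from the training pool: after grayscale conversion and center
# cropping, these images have (near-)zero norm, which would make the per-image
# normalization further down the pipeline unstable.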
zero_norm_files = ['train/n03729826/n03729826_6483.JPEG',
'train/n04515003/n04515003_24673.JPEG',
'train/n02111277/n02111277_12490.JPEG',
'train/n03888605/n03888605_9775.JPEG',
'train/n02992529/n02992529_3197.JPEG',
'train/n01930112/n01930112_18908.JPEG',
'train/n06874185/n06874185_3219.JPEG',
'train/n06785654/n06785654_17232.JPEG',
'train/n04033901/n04033901_29617.JPEG',
'train/n07920052/n07920052_14729.JPEG',
'train/n03729826/n03729826_40479.JPEG',
'train/n03729826/n03729826_10716.JPEG',
'train/n04286575/n04286575_74296.JPEG',
'train/n03937543/n03937543_10198.JPEG',
'train/n03063599/n03063599_3942.JPEG',
'train/n04152593/n04152593_13802.JPEG',
'train/n04522168/n04522168_24105.JPEG',
'train/n03532672/n03532672_78983.JPEG',
'train/n04404412/n04404412_12316.JPEG',
'train/n04330267/n04330267_18003.JPEG',
'train/n04118776/n04118776_37671.JPEG',
'train/n04591713/n04591713_3568.JPEG',
'train/n02437616/n02437616_12697.JPEG',
'train/n02799071/n02799071_54867.JPEG',
'train/n02883205/n02883205_26196.JPEG',
'train/n02667093/n02667093_2919.JPEG',
'train/n03196217/n03196217_1135.JPEG',
'train/n03196217/n03196217_3568.JPEG',
'train/n15075141/n15075141_19601.JPEG',
'train/n01943899/n01943899_24166.JPEG']
for zero_norm_file in zero_norm_files:
train_pool.remove(zero_norm_file)
rng_dataset = np.random.default_rng(seed)
if trainset_size == 1000000:
train_set = train_pool
else:
train_set = rng_dataset.choice(train_pool, size=trainset_size, replace=False, p=None)
torch.save(train_set,experiment_path+'train_set.pt')
validation_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetVal80_filepaths.pt')
# Train loader
data_transform_train = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
trainset = CropDataset(dataset=train_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_train, experiment_path=experiment_path, img_size=img_size)
train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=1, num_workers=0, shuffle=True, generator=torch.Generator().manual_seed(0))
# Val loader
data_transform_val = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
valset = CropDataset(dataset=validation_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_val, experiment_path=experiment_path, img_size=img_size)
val_loader = torch.utils.data.DataLoader( dataset=valset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0))
# store training loss metrics
train_meters = {'train_L2': AverageMeter()}
train_tracks = {'train_L2': TrackMeter('decaying')}
# store validation metrics
valid_meters = {'val_SSIM' : AverageMeter(), 'val_PSNR' : AverageMeter(), 'val_L2' : AverageMeter(), 'val_L2_kspace': AverageMeter()}
valid_tracks = {'val_SSIM' : TrackMeter('increasing'), 'val_PSNR' : TrackMeter('increasing'), 'val_L2' : TrackMeter('decaying'), 'val_L2_kspace': TrackMeter('decaying')}
# Init tensorboard
writer = SummaryWriter(log_dir=experiment_path)
log_image_interval_tb = 30
break_counter=0
# Start training
for epoch in range(save_checkpoint.start_epoch, num_epochs):
train_bar = ProgressBar(train_loader, epoch)
for meter in train_meters.values():
meter.reset()
for id,sample in enumerate(train_bar):
model.train()
y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
# sanity check on number of lines in input and target mask
if epoch==0 and id==0:
tm = target_mask.detach()
target_mask_no_zeros = torch.where(tm != 0., tm , torch.tensor(1, dtype=tm.dtype).to(device))
target_mask_norm_to_one = tm / target_mask_no_zeros
logging.info(f"\n Mask sanity check! Lines in kspace: {input_mask.shape[-2]} \n Lines in input: {torch.sum(input_mask)} \n Lines in target: {torch.sum(target_mask_norm_to_one)} \n Number of all overlapping lines: {torch.sum(input_mask*target_mask_norm_to_one)}")
# prediction
x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# move to kspace
y_output = fft2c(x_output)
# apply target mask (all ones for supervised training)
y_output = y_output * target_mask + 0.0
# compute loss
train_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
model.zero_grad()
train_loss.backward()
optimizer.step()
# log train metrics
train_meters['train_L2'].update(train_loss.item())
train_bar.log(dict(**train_meters), verbose=True)
if id ==0: # log a random train image to tensorboard
name = f"train_0_img"
add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x_target.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
name = f"train_0_ksp"
add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
if id ==1: # log a specific train image to tensorboard
name = f"train_1_img"
add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x_target.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
name = f"train_1_ksp"
add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
train_tracks['train_L2'].update(train_meters['train_L2'].avg,epoch)
current_lr = optimizer.param_groups[0]["lr"]
#scheduler.step()
############################################################################################################################
if epoch % val_epoch_interval == 0: # set this value such that it works with save_at_epochs and log_image_interval_tb
model.eval()
for meter in valid_meters.values():
meter.reset()
valid_bar = ProgressBar(val_loader, epoch)
rand_id = random.randint(0, len(val_loader) - 1) # draw id to log a random slice to tensorboard (randint is inclusive on both ends)
for id, sample in enumerate(valid_bar):
with torch.no_grad():
y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
# prediction
x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# move to kspace
y_output = fft2c(x_output)
if id ==0: # log one fixed and one random validation image to tensorboard
name = f"val_0_img"
add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
name = f"val_0_ksp"
add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
elif id==rand_id: # log one fixed and one random validation image to tensorboard
name = f"val_1_img"
add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
name = f"val_1_ksp"
add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
# apply target mask (all ones for supervised training)
y_output = y_output * target_mask + 0.0
# val loss in kspace (L2)
val_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
valid_meters['val_L2_kspace'].update(val_loss)
# L2 in image domain between complex output and target image
val_loss = loss_fct(x_output,x) / torch.sum(torch.abs(x)**2)
valid_meters['val_L2'].update(val_loss)
output_magnitude = (x_output ** 2).sum(dim=-1).sqrt()
x_magnitude = (x ** 2).sum(dim=-1).sqrt() # since x is real, this operation is identity
x_magnitude = x_magnitude.unsqueeze(1)
output_magnitude = output_magnitude.unsqueeze(1)
# psnr
max_value = x.max().unsqueeze(0)
mse = torch.mean(torch.abs(output_magnitude-x_magnitude)**2)
psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
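# (equivalent to PSNR = 10*log10(max_value^2 / MSE))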
valid_meters["val_PSNR"].update(psnr.item())
# ssim
ssim_loss = 1-val_ssim_fct(output_magnitude, x_magnitude, data_range=max_value)
valid_meters["val_SSIM"].update(ssim_loss.item())
# log progress
valid_tracks['val_L2_kspace'].update(valid_meters['val_L2_kspace'].avg,epoch)
valid_tracks['val_L2'].update(valid_meters['val_L2'].avg,epoch)
valid_tracks['val_PSNR'].update(valid_meters['val_PSNR'].avg,epoch)
valid_tracks['val_SSIM'].update(valid_meters['val_SSIM'].avg,epoch)
valid_bar.log(dict(**valid_meters), verbose=True)
scheduler.step(valid_meters['val_PSNR'].avg)
if current_lr > optimizer.param_groups[0]["lr"]:
scheduler.patience = scheduler.patience // 2
if current_lr == scheduler.min_lrs[0]:
break_counter+=1
if break_counter == 3:
break
if save_checkpoint.best_score < valid_meters['val_PSNR'].avg:
if scheduler.num_bad_epochs == 0:
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, New='Highscore', Scheduler_patience='reset')))
else:
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, New='Highscore')))
else:
if scheduler.num_bad_epochs == 0:
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, Scheduler_patience='reset')))
else:
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr)))
writer.add_scalar("lr", current_lr, epoch)
writer.add_scalar("epoch", epoch, epoch)
writer.add_scalar("train_L2", train_meters["train_L2"].avg, epoch)
for val_loss_name in valid_meters.keys():
writer.add_scalar(val_loss_name, valid_meters[val_loss_name].avg, epoch)
sys.stdout.flush()
# Save checkpoint
save_checkpoint(experiment_path, epoch, model, optimizer=optimizer, scheduler=scheduler, score=valid_meters['val_PSNR'].avg, save_at_epochs=[])
else:
logging.info(train_bar.print(dict(**train_meters, lr=current_lr)))
writer.add_scalar("lr", current_lr, epoch)
writer.add_scalar("epoch", epoch, epoch)
writer.add_scalar("train_L2", train_meters["train_L2"].avg, epoch)
sys.stdout.flush()
logging.info(f"Done training! Best Val score {valid_tracks['val_PSNR'].best_val:.5f} obtained after epoch {valid_tracks['val_PSNR'].best_count}.")
pickle.dump( valid_tracks, open(experiment_path + 'valid_tracks_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
pickle.dump( train_tracks, open(experiment_path + 'train_tracks_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
################################################################################################
def run_testing(experiment_path,
acceleration,
center_fraction,
acceleration_total,
img_size,
path_to_ImageNet_train):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Init train.log file
init_logging(experiment_path)
# Log sanity checks on the number of lines in the input/target kspaces
logging.info("Testing...")
input_size, target_size, overlap_size_high, size_low, p, q, mu, nu, weight_on_random_lines = compute_number_of_lines_in_input_target_kspace(p=1/acceleration,mu=1/acceleration_total,nu=center_fraction, n=img_size)
logging.info(f"mu: {mu}, p: {p}, q: {q}, nu: {nu}, weight_on_random_lines: {weight_on_random_lines}")
logging.info(f"\n Lines in kspace: {img_size} \n Lines in input: {input_size} \n Lines in target: {target_size} \n Number of high freq overlapping lines: {overlap_size_high} \n Number of low freq lines: {size_low}")
# train loss function
loss_fct = MSELoss(reduction='sum')
val_ssim_fct = SSIMLoss()
# Init model
model = Unet(
in_chans=2,
out_chans=2,
chans=24,
num_pool_layers=3,
drop_prob=0.0,).to(device)
validation_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetVal80_filepaths.pt')
test_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetTest300_filepaths.pt')
# test loader
data_transform_test = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
testset = CropDataset(dataset=test_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_test, experiment_path=experiment_path, img_size=img_size)
test_loader = torch.utils.data.DataLoader( dataset=testset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0), )
# Val loader
data_transform_val = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
valset = CropDataset(dataset=validation_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_val, experiment_path=experiment_path, img_size=img_size)
val_loader = torch.utils.data.DataLoader( dataset=valset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0), )
setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=model, optimizer=None, scheduler=None)
test_validationSet_tracks = {'SSIM' : TrackMeter_testing(), 'PSNR' : TrackMeter_testing(), 'L2' : TrackMeter_testing(),'SSIM_dc' : TrackMeter_testing(), 'PSNR_dc' : TrackMeter_testing(), 'L2_dc' : TrackMeter_testing()}
test_testSet_tracks = {'SSIM' : TrackMeter_testing(), 'PSNR' : TrackMeter_testing(), 'L2' : TrackMeter_testing(),'SSIM_dc' : TrackMeter_testing(), 'PSNR_dc' : TrackMeter_testing(), 'L2_dc' : TrackMeter_testing()}
model.eval()
tmp = 0 # loader counter: 1 = validation set, 2 = test set
for data_loader, track_meter in zip([val_loader, test_loader],[test_validationSet_tracks, test_testSet_tracks]):
tmp+=1
test_bar = ProgressBar(data_loader, epoch=0)
for id, sample in enumerate(test_bar):
y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
# prediction
x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
# unnormalize
x_output = x_output * std + mean
# Apply data consistency
y_output = fft2c(x_output)
y_output_dc = y_output * (1-input_mask) + y_input
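# Data consistency: (1 - input_mask) keeps the network prediction only at the
# unmeasured frequencies, and adding y_input re-inserts the measured k-space
# values at the sampled lines.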
x_output_dc = ifft2c(y_output_dc)
# L2 in image domain between complex output and target image
val_loss = loss_fct(x_output,x) / torch.sum(torch.abs(x)**2)
track_meter['L2'].update(val_loss)
val_loss_dc = loss_fct(x_output_dc,x) / torch.sum(torch.abs(x)**2)
track_meter['L2_dc'].update(val_loss_dc)
output_magnitude = (x_output ** 2).sum(dim=-1).sqrt()
output_dc_magnitude = (x_output_dc ** 2).sum(dim=-1).sqrt()
x_magnitude = (x ** 2).sum(dim=-1).sqrt() # since x is real, this operation is identity
x_magnitude = x_magnitude.unsqueeze(1)
output_magnitude = output_magnitude.unsqueeze(1)
output_dc_magnitude = output_dc_magnitude.unsqueeze(1)
# psnr
max_value = x.max().unsqueeze(0)
mse = torch.mean(torch.abs(output_magnitude-x_magnitude)**2)
psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
track_meter["PSNR"].update(psnr.item())
mse_dc = torch.mean(torch.abs(output_dc_magnitude-x_magnitude)**2)
psnr_dc = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse_dc)
track_meter["PSNR_dc"].update(psnr_dc.item())
# ssim
ssim_loss = 1-val_ssim_fct(output_magnitude, x_magnitude, data_range=max_value)
track_meter["SSIM"].update(ssim_loss.item())
ssim_loss_dc = 1-val_ssim_fct(output_dc_magnitude, x_magnitude, data_range=max_value)
track_meter["SSIM_dc"].update(ssim_loss_dc.item())
# Save two example images (ids 1 and 22) from the validation set
if tmp == 1 and id in (1, 22):
x_input_abs = (x_input ** 2).sum(dim=-1).sqrt()
x_input_abs = x_input_abs.unsqueeze(1)
save_test_image_with_dc(experiment_path, ground_truth_image=x_magnitude, input_img=x_input_abs, output=output_magnitude, output_image_dc=output_dc_magnitude, fname=fname, track_meter=track_meter)
pickle.dump( test_validationSet_tracks, open(experiment_path + 'test_validationSet_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
pickle.dump( test_testSet_tracks, open(experiment_path + 'test_testSet_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
logging.info(f"\nEvaluate validationset of length {len(val_loader)}:")
for metric in test_validationSet_tracks.keys():
logging.info(f"{metric}: avg {test_validationSet_tracks[metric].avg:.6f}, std {test_validationSet_tracks[metric].std:.6f}")
logging.info(f"\nEvaluate testset of length {len(test_loader)}:")
for metric in test_testSet_tracks.keys():
logging.info(f"{metric}: avg {test_testSet_tracks[metric].avg:.6f}, std {test_testSet_tracks[metric].std:.6f}")
if __name__ == '__main__':
read_args()
# %%
| 29,149 | 44.404984 | 278 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
import torch
import logging
import os
import numpy as np
def init_logging(experiment_path):
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
handlers = [logging.StreamHandler()]
mode = "a" if os.path.exists(experiment_path+"train.log") else "w"
handlers.append(logging.FileHandler(experiment_path+"train.log", mode=mode))
logging.basicConfig(handlers=handlers, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
self.val = val / n
self.sum += val
self.count += n
self.avg = self.sum / self.count
class TrackMeter(object):
def __init__(self, inc_or_dec='decaying'):
self.inc_or_dec = inc_or_dec
self.reset()
def reset(self):
self.val = []
self.epochs = []
self.count = 0
self.best_val = float("inf") if self.inc_or_dec=='decaying' else float("-inf")
self.best_count = 0
self.best_epoch = 0
def update(self, val, epoch):
if isinstance(val, torch.Tensor):
val = val.item()
self.val.append(val)
self.epochs.append(epoch)
if (self.inc_or_dec=='decaying' and val < self.best_val) or (self.inc_or_dec=='increasing' and val > self.best_val):
self.best_val = val
self.best_count = self.count
self.best_epoch = epoch
self.count += 1
class TrackMeter_testing(object):
def __init__(self,):
self.reset()
def reset(self):
self.val = []
self.avg = 0
self.std = 0
def update(self, val,):
if isinstance(val, torch.Tensor):
val = val.item()
self.val.append(val)
self.avg = np.mean(self.val)
self.std = np.std(self.val)
class ProgressBar:
def __init__(self, iterable, epoch, quiet=False):
self.epoch = epoch
self.quiet = quiet
self.prefix = f"epoch {epoch:02d}"
self.iterable = iterable if self.quiet else tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.iterable)
def log(self, stats, verbose=False):
if not self.quiet:
self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)
def format_stats(self, stats, verbose=False):
postfix = OrderedDict(stats) # method set_postfix requires ordered_dict
for key, value in postfix.items():
if isinstance(value, Number):
fmt = "{:.6f}" if value > 0.001 else "{:.3e}"
postfix[key] = fmt.format(value)
elif isinstance(value, AverageMeter):
if verbose:
postfix[key] = f"{value.avg:.6f} ({value.val:.6f})"
else:
postfix[key] = f"{value.avg:.6f}"
elif not isinstance(postfix[key], str):
postfix[key] = str(value)
return postfix
def print(self, stats, verbose=False):
postfix = " | ".join(key + " " + value.strip() for key, value in self.format_stats(stats, verbose).items())
return f"{self.prefix + ' | ' if self.epoch is not None else ''}{postfix}"
| 3,573 | 30.910714 | 127 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
"""
SSIM loss module.
"""
def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
"""
Args:
win_size: Window size for SSIM calculation.
k1: k1 parameter for SSIM calculation.
k2: k2 parameter for SSIM calculation.
"""
super().__init__()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.win_size = win_size
self.k1, self.k2 = torch.tensor(k1).to(device), torch.tensor(k2).to(device)
self.register_buffer("w", torch.ones(1, 1, win_size, win_size).to(device) / win_size ** 2)
NP = win_size ** 2
self.cov_norm = torch.tensor(NP / (NP - 1)).to(device)
def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
assert isinstance(self.w, torch.Tensor)
data_range = data_range[:, None, None, None]
C1 = (self.k1 * data_range) ** 2
C2 = (self.k2 * data_range) ** 2
ux = F.conv2d(X, self.w)  # type: ignore
uy = F.conv2d(Y, self.w)  # type: ignore
uxx = F.conv2d(X * X, self.w)
uyy = F.conv2d(Y * Y, self.w)
uxy = F.conv2d(X * Y, self.w)
vx = self.cov_norm * (uxx - ux * ux)
vy = self.cov_norm * (uyy - uy * uy)
vxy = self.cov_norm * (uxy - ux * uy)
A1, A2, B1, B2 = (
2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2,
)
D = B1 * B2
S = (A1 * A2) / D
return 1 - S.mean()
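if __name__ == "__main__":
    # Usage sketch (hypothetical shapes): inputs are (N, 1, H, W) magnitude
    # images plus a per-batch data_range tensor of shape (N,). Identical inputs
    # give an SSIM of 1, i.e. a loss of approximately 0.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    loss_fn = SSIMLoss().to(device)
    x = torch.rand(1, 1, 64, 64, device=device)
    print(loss_fn(x, x.clone(), data_range=torch.ones(1, device=device)))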
| 1,849 | 31.45614 | 98 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/load_save_model_helpers.py | import glob
import torch
import os
from torch.serialization import default_restore_location
import logging
def setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=None, optimizer=None, scheduler=None):
'''
Args:
- resume_from: Either 'best' or 'some_number' where some_number could be any epoch at which a checkpoint was saved
'''
# Look for checkpoints to load from. If avalable, always load.
available_models = glob.glob(experiment_path + '*.pt')
if available_models:
restore_file = experiment_path + f"checkpoint_{resume_from}.pt"
print('restoring model..')
state_dict = torch.load(restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
save_checkpoint.last_epoch = state_dict["best_epoch"] if resume_from=='best' else state_dict["last_epoch"]
save_checkpoint.start_epoch = state_dict["best_epoch"]+1 if resume_from=='best' else state_dict["last_epoch"]+1
save_checkpoint.best_score = state_dict["best_score"]
save_checkpoint.best_epoch = state_dict["best_epoch"]
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
if model is not None and state_dict.get("model", None) is not None:
for m, state in zip(model, state_dict["model"]):
m.load_state_dict(state)
if optimizer is not None and state_dict.get("optimizer", None) is not None:
for o, state in zip(optimizer, state_dict["optimizer"]):
o.load_state_dict(state)
if scheduler is not None and state_dict.get("scheduler", None) is not None:
for s, state in zip(scheduler, state_dict["scheduler"]):
s.load_state_dict(state)
logging.info("Loaded checkpoint {} with best_epoch {} last_epoch {}".format(restore_file, save_checkpoint.best_epoch, save_checkpoint.last_epoch))
else:
print("No checkpoint to load. Start training from scratch.")
save_checkpoint.best_epoch = -1
save_checkpoint.last_epoch = 0
save_checkpoint.start_epoch = 0
save_checkpoint.best_score = float("-inf")
def save_checkpoint(experiment_path, epoch, model, optimizer=None, scheduler=None, score=None, save_at_epochs=None):
'''
Save a checkpoint whenever `score` improves on the best score so far, and
additionally at the epochs listed in `save_at_epochs`.
'''
save_checkpoint.last_epoch = epoch
best_score = save_checkpoint.best_score
if score > best_score:
save_checkpoint.best_epoch = epoch
save_checkpoint.best_score = score
if score > best_score:
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
state_dict = {
"last_epoch": save_checkpoint.last_epoch,
"best_epoch": save_checkpoint.best_epoch,
"best_score": save_checkpoint.best_score,
"model": [m.state_dict() for m in model] if model is not None else None,
"optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
"scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
}
torch.save(state_dict, os.path.join(experiment_path + "checkpoint_best.pt"))
if save_at_epochs:
if epoch in save_at_epochs:
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
state_dict = {
"last_epoch": save_checkpoint.last_epoch, #set
"best_epoch": save_checkpoint.best_epoch, #set
"best_score": getattr(save_checkpoint, "best_score", None), #set
"model": [m.state_dict() for m in model] if model is not None else None,
"optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
"scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
}
torch.save(state_dict, os.path.join(experiment_path + f"checkpoint{epoch}.pt"))
| 4,690 | 45.445545 | 154 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
"""
PyTorch implementation of a U-Net model.
O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical
image computing and computer-assisted intervention, pages 234–241.
Springer, 2015.
"""
def __init__(
self,
in_chans: int,
out_chans: int,
chans: int = 32,
num_pool_layers: int = 4,
drop_prob: float = 0.0,
):
"""
Args:
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
chans: Number of output channels of the first convolution layer.
num_pool_layers: Number of down-sampling and up-sampling layers.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for _ in range(num_pool_layers - 1):
self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
ch *= 2
self.conv = ConvBlock(ch, ch * 2, drop_prob)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
ch //= 2
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1), # here is the only conv layer with a bias
)
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
stack = []
output = image
# apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# reflect pad on the right/botton if needed to handle odd input dimensions
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # padding bottom
if torch.sum(torch.tensor(padding)) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
class ConvBlock(nn.Module):
"""
A Convolutional Block that consists of two convolution layers each followed by
instance normalization, LeakyReLU activation and dropout.
"""
def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
return self.layers(image)
class TransposeConvBlock(nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose
layers followed by instance normalization and LeakyReLU activation.
"""
def __init__(self, in_chans: int, out_chans: int):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = nn.Sequential(
nn.ConvTranspose2d(
in_chans, out_chans, kernel_size=2, stride=2, bias=False
),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H*2, W*2)`.
"""
return self.layers(image)
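if __name__ == "__main__":
    # Shape sanity check (sketch, illustrative sizes): the U-Net maps
    # (N, in_chans, H, W) to (N, out_chans, H, W); odd intermediate sizes are
    # handled by the reflect padding in the decoder, so H and W need not be
    # multiples of 2**num_pool_layers.
    net = Unet(in_chans=2, out_chans=2, chans=8, num_pool_layers=3)
    x = torch.randn(1, 2, 100, 100)
    print(net(x).shape)  # torch.Size([1, 2, 100, 100])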
| 6,021 | 31.907104 | 113 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
import torch.fft # type: ignore
def fft2c(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The FFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.fftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
def ifft2c(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
"""
Similar to roll but for only one dim.
Args:
x: A PyTorch tensor.
shift: Amount to roll.
dim: Which dimension to roll.
Returns:
Rolled version of x.
"""
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def roll(
x: torch.Tensor,
shift: List[int],
dim: List[int],
) -> torch.Tensor:
"""
Similar to np.roll but applies to PyTorch Tensors.
Args:
x: A PyTorch tensor.
shift: Amount to roll.
dim: Which dimension to roll.
Returns:
Rolled version of x.
"""
if len(shift) != len(dim):
raise ValueError("len(shift) must match len(dim)")
for (s, d) in zip(shift, dim):
x = roll_one_dim(x, s, d)
return x
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to fftshift.
Returns:
fftshifted version of x.
"""
if dim is None:
# this weird code is necessary for torch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = x.shape[dim_num] // 2
return roll(x, shift, dim)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to ifftshift.
Returns:
ifftshifted version of x.
"""
if dim is None:
# this weird code is necessary for torch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = (x.shape[dim_num] + 1) // 2
return roll(x, shift, dim)
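if __name__ == "__main__":
    # Round-trip sanity check (sketch): fft2c/ifft2c act on real-view complex
    # tensors of shape (..., H, W, 2) and are inverses up to numerical error.
    x = torch.randn(2, 16, 16, 2)
    assert torch.allclose(ifft2c(fft2c(x)), x, atol=1e-5)
    print("fft2c/ifft2c round-trip OK")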
| 4,108 | 23.753012 | 80 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/data_transforms.py | import numpy as np
import torch
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import torchvision.transforms as transforms
import PIL.Image as Image
from CS_natural_images_functions.log_progress_helpers import save_figure
from CS_natural_images_functions.fftc import fft2c, ifft2c
class CropDataset(torch.utils.data.Dataset):
"""
A PyTorch Dataset that provides access to cropped images from ImageNet.
"""
def __init__(
self,
dataset: List,
path_to_ImageNet_train: str,
transform: Callable,
experiment_path: str,
img_size: int,
):
self.transform = transform
self.experiment_path = experiment_path
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.examples = []
load_transform = transforms.Compose([
transforms.CenterCrop(img_size),
transforms.ToTensor(),
])
for datapath in dataset:
image = Image.open(path_to_ImageNet_train+datapath).convert("L")
filename = datapath[16:-5]
self.examples.append((load_transform(image)[0].to(device),filename))
def __len__(self):
return len(self.examples)
def __getitem__(self, i: int):
# Determine input, target and ground truth
x,filename = self.examples[i]
sample = self.transform(x,filename,i)
return sample
class UnetDataTransform:
def __init__(
self,
acceleration,
acceleration_total,
fix_split,
experiment_path,
center_fraction,
):
self.acceleration = acceleration
self.acceleration_total = acceleration_total
self.fix_split = fix_split
self.experiment_path = experiment_path
self.center_fraction = center_fraction
def __call__(
self,
x: np.ndarray,
fname: str,
id: int,
) -> Tuple[torch.Tensor,torch.Tensor]:
"""
Args:
    x: 2D ground-truth image tensor.
    fname: filename of the example; used to seed the mask RNG.
    id: index of the example within the dataset.
Returns:
    tuple containing:
        y_input: undersampled kspace (input mask applied)
        x_input: normalized zero-filled coarse reconstruction in image domain
        y_target: kspace with the (weighted) target mask applied
        x_target: training target in image domain
        x: the ground truth image
        input_mask: undersampled input mask
        target_mask: target mask; all ones in the case of supervised training
        mean, std: normalization statistics of x_input
        fname: filename of the example
"""
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
n = x.shape[-1]
# transform x to a tensor with a real channel and an imaginary channel. Right now the imaginary channel is all zeros.
#x = np.stack((x, np.zeros_like(x)), axis=-1)
x = torch.stack((x, torch.zeros_like(x)), axis=-1)
#x = torch.from_numpy(x)
# obtain kspace
y = fft2c(x)
#save_figure(y[:,:,0],"y_real",self.experiment_path) if id==0 else None
#save_figure(y[:,:,1],"y_imag",self.experiment_path) if id==0 else None
#######################################
# sample input mask
nu = self.center_fraction
p = 1/self.acceleration
mu = 1/self.acceleration_total
q = (mu-p+nu-mu*nu)/(1-p)
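# Numeric example (illustrative values): acceleration=4 (p=0.25),
# acceleration_total=2 (mu=0.5) and nu=0.08 give
# q = (0.5 - 0.25 + 0.08 - 0.5*0.08) / (1 - 0.25) = 0.29 / 0.75 ≈ 0.3867.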
# 1. Determine the set S_low consisting of the indices of the nu*n many center frequencies which are always sampled
size_low = int(round(n*nu))
pad = (n - size_low + 1) // 2
# set of indices of all lines in kspace
S_all = np.arange(n)
S_low = S_all[pad : pad + size_low]
# 1.1 Determine S_mu_high, i.e, S_mu without S_low, so only the random high frequencies
# set of indices of all high frequencies
S_high = np.hstack((S_all[: pad],S_all[pad + size_low :]))
S_mu_size_high = int(round((mu-nu)*n))
S_p_size_high = int(round((p-nu)*n))
#### Depending on whether the input/target split is fixed or re-sampled, the order of sampling needs to be adapted
# This is so that validation during training samples the same input mask as during testing
# Recall that during testing selfsup=False, hence S_mu_high is not sampled.
seed = tuple(map(ord, fname))
rng = np.random.default_rng(seed)
if self.fix_split:
# If split is fixed, first sample S_p_high and then additional lines for S_mu_high
# such that the set S_p_high is the same as the one sampled when selfsup=False
S_p_high = rng.choice(S_high, size=S_p_size_high, replace=False, p=None)
S_mu_size_high_remainding = S_mu_size_high - S_p_size_high
S_high_remainding = np.array(list(set(S_high)-set(S_p_high)))
S_q_high = rng.choice(S_high_remainding, size=S_mu_size_high_remainding, replace=False, p=None)
else:
# If split is random, first sample S_mu_high such that this set is always fixed.
S_mu_high = rng.choice(S_high, size=S_mu_size_high, replace=False, p=None)
# 2. From S_mu_high sample the set S_p_high of size (p-nu)n
S_p_high = np.random.choice(S_mu_high, size=S_p_size_high, replace=False, p=None)
# 3. All other indices in S_mu_high add to the set S_q_high
S_q_high = np.array(list(set(S_mu_high)-set(S_p_high)))
# 4. Determine the size of the overlap between S_p_high and S_q_high, sample this many indices from S_p_high and add them to S_q_high
overlap_size_high = int(round(( (p-nu) / (1-nu) ) * ( (q-nu) / (1-nu) ) *(n-n*nu)))
S_overlap = S_p_high[0:overlap_size_high]
S_q_high = np.concatenate([S_q_high,S_overlap])
# 5. Define the final input and target masks by setting entries to zero or to one for S_p=S_low+S_p_high and S_q=S_low+S_q_high
input_mask = np.zeros(n)
input_mask[S_low] = 1.0
input_mask[S_p_high] = 1.0
input_mask = torch.from_numpy(input_mask.astype(np.float32)).unsqueeze(0).unsqueeze(-1).to(device)
# 6. Create a target mask where the random entries are weighted
weight_on_random_lines = np.sqrt((1-nu)/(q-nu))
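# The squared weight (1-nu)/(q-nu) rescales the loss contribution of the
# randomly chosen target lines; a high-frequency line lands in the target with
# probability (q-nu)/(1-nu), so this weighting compensates for its sampling
# probability in expectation.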
target_mask = np.zeros(n)
target_mask[S_low] = 1.0
target_mask[S_q_high] = weight_on_random_lines
target_mask = torch.from_numpy(target_mask.astype(np.float32)).unsqueeze(0).unsqueeze(-1).to(device)
#######################################
# apply mask to kspace
y_input = y * input_mask + 0.0
#save_figure(y_input[:,:,0],"y_input_real",self.experiment_path) if id==0 else None
#save_figure(y_input[:,:,1],"y_input_imag",self.experiment_path) if id==0 else None
# compute zero-filed coarse reconstruction as input
x_input = ifft2c(y_input)
#save_figure(x_input[:,:,0],"x_input_real",self.experiment_path) if id==0 else None
#save_figure(x_input[:,:,1],"x_input_imag",self.experiment_path) if id==0 else None
mean = x_input.mean(dim=[0,1],keepdim=True)
std = x_input.std(dim=[0,1],keepdim=True)
x_input = (x_input - mean) / (std + 1e-11)
# training target. target_mask is all ones if supervised training
y_target = y * target_mask + 0.0
# training target in image domain
x_target = ifft2c(y_target)
return y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname
def compute_number_of_lines_in_input_target_kspace(p,mu,nu,n=160):
q = (mu-p+nu-mu*nu)/(1-p)
size_low = int(round(n*nu))
S_p_size_high = int(round((p-nu)*n))
S_mu_size_high = int(round((mu-nu)*n))
S_mu_size_high_remainding = S_mu_size_high - S_p_size_high
overlap_size_high = int(round(( (p-nu) / (1-nu) ) * ( (q-nu) / (1-nu) ) *(n-n*nu)))
input_size = size_low + S_p_size_high
target_size = size_low + S_mu_size_high_remainding + overlap_size_high
weight_on_random_lines = np.sqrt((1-nu)/(q-nu))
return input_size, target_size, overlap_size_high, size_low, p, q, mu, nu, weight_on_random_lines
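if __name__ == "__main__":
    # Quick check (sketch, illustrative values): line counts for a 100-line
    # k-space with 4x-accelerated inputs and fully sampled targets (mu=1).
    print(compute_number_of_lines_in_input_target_kspace(p=0.25, mu=1.0, nu=0.08, n=100))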
| 8,047 | 36.432558 | 141 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/log_progress_helpers.py | import numpy as np
import matplotlib.pyplot as plt
from typing import Dict, Optional, Sequence, Tuple, Union, List
import os
import torchvision
import io
import torch
from CS_natural_images_functions.losses import SSIMLoss
def complex_abs(data: torch.Tensor) -> torch.Tensor:
"""
Compute the absolute value of a complex valued input tensor.
Args:
data: A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
Absolute value of data.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
return (data ** 2).sum(dim=-1).sqrt()
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
frameTensor = torch.tensor(np.frombuffer(buf.getvalue(), dtype=np.uint8), device='cpu')
image = torchvision.io.decode_png(frameTensor)
return image
def get_figure(image,figsize,title):
"""Return a matplotlib figure of a given image."""
if len(image.shape) != 3:
raise ValueError("Image dimensions not suitable for logging to tensorboard.")
if image.shape[0] == 1 or image.shape[0] == 3:
image = np.rollaxis(image,0,3)
# Create a figure to contain the plot.
if figsize:
figure = plt.figure(figsize=figsize)
else:
figure = plt.figure()
# Start next subplot.
plt.subplot(1, 1, 1, title=title)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap='gray')
figure.tight_layout()
return figure
def plot_figure(
x: np.array,):
""""
x must have dimension height,width
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(x,'gray')
ax.axis('off')
#ax.set_title(title,fontsize=10)
fig.tight_layout()
plt.show()
def save_figure(
x: np.array,
figname: str,
experiment_path: str,
save: Optional[bool]=True,):
""""
x must have dimension height,width
"""
if save:
save_path = experiment_path + 'train_figures/'
if not os.path.isdir(save_path):
os.mkdir(save_path)
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111)
ax.imshow(x,'gray')
ax.axis('off')
#ax.set_title(title,fontsize=10)
fig.tight_layout()
plt.savefig(save_path + figname + ".png")
plt.close(fig)
def save_test_image_with_dc(experiment_path, ground_truth_image, input_img, output, output_image_dc, fname, track_meter):
save_path = experiment_path + 'test_figures/'
if not os.path.isdir(save_path):
os.mkdir(save_path)
error = torch.abs(ground_truth_image - output)
error_dc = torch.abs(ground_truth_image - output_image_dc)
output = output - output.min()
output = output / output.max()
output_image_dc = output_image_dc - output_image_dc.min()
output_image_dc = output_image_dc / output_image_dc.max()
ground_truth_image = ground_truth_image - ground_truth_image.min()
ground_truth_image = ground_truth_image / ground_truth_image.max()
input_img = input_img - input_img.min()
input_img = input_img / input_img.max()
error = error - error.min()
error_dc = error_dc - error_dc.min()
max_norm = torch.stack([error,error_dc]).max()
error = error / max_norm
error_dc = error_dc / max_norm
image = torch.cat([ground_truth_image, input_img, output, output_image_dc, error, error_dc], dim=0)
image = torchvision.utils.make_grid(image, nrow=2, normalize=False, value_range=(0,1), pad_value=1)
ssim_score = track_meter["SSIM"].val[-1]
ssim_score_dc = track_meter["SSIM_dc"].val[-1]
psnr_score = track_meter["PSNR"].val[-1]
psnr_score_dc = track_meter["PSNR_dc"].val[-1]
figure = get_figure(image.cpu().numpy(),figsize=(8,12),title=f"ssim={ssim_score:.4f}, dc={ssim_score_dc:.4f}, psnr={psnr_score:.3f}, dc={psnr_score_dc:.3f}")
plt.savefig(experiment_path + 'test_figures/' + f"{fname[0]}.png", dpi='figure')
plt.close()
def add_img_to_tensorboard(writer, epoch, name, input_img_comp,output_comp,targetcomp,ksp):
if ksp:
input_img = torch.log(complex_abs(input_img_comp)[0]+ 1e-9)
output = torch.log(complex_abs(output_comp)[0]+ 1e-9)
target = torch.log(complex_abs(targetcomp)[0]+ 1e-9)
else:
input_img = complex_abs(input_img_comp)[0]
output = complex_abs(output_comp)[0]
target = complex_abs(targetcomp)[0]
val_ssim_fct = SSIMLoss()
max_value = target.max().unsqueeze(0)
ssim_loss = 1-val_ssim_fct(output.unsqueeze(0).unsqueeze(0), target.unsqueeze(0).unsqueeze(0), data_range=max_value)
error = torch.abs(target - output)
input_img = input_img - input_img.min()
input_img = input_img / input_img.max()
output = output - output.min()
output = output / output.max()
target = target - target.min()
target = target / target.max()
error = error - error.min()
error = error / error.max()
image = torch.cat([input_img, target, output, error], dim=0)
image = torchvision.utils.make_grid(image, nrow=1, normalize=False)
figure = get_figure(image.cpu().numpy(),figsize=(3,12),title=f"ssim={ssim_loss.item():.6f}")
writer.add_image(name+"_abs", plot_to_image(figure), epoch)
if ksp:
input_img = torch.log(torch.abs(input_img_comp[0,:,:,0])+ 1e-9)
#input_img_max = torch.max(torch.stack((torch.log(torch.abs(input_img_comp[0,:,:,0])+ 1e-9),torch.log(torch.abs(input_img_comp[0,:,:,1])+ 1e-9))))
output = torch.log(torch.abs(output_comp[0,:,:,0])+ 1e-9)
#output_max = torch.max(torch.stack((torch.log(torch.abs(output_comp[0,:,:,0])+ 1e-9),torch.log(torch.abs(output_comp[0,:,:,1])+ 1e-9))))
target = torch.log(torch.abs(targetcomp[0,:,:,0])+ 1e-9)
#target_max = torch.max(torch.stack((torch.log(torch.abs(output_comp[0,:,:,0])+ 1e-9),torch.log(torch.abs(output_comp[0,:,:,1])+ 1e-9))))
else:
input_img = input_img_comp[0,:,:,0]
output = output_comp[0,:,:,0]
target = targetcomp[0,:,:,0]
val_ssim_fct = SSIMLoss()
max_value = target.max().unsqueeze(0)
ssim_loss = 1-val_ssim_fct(output.unsqueeze(0).unsqueeze(0), target.unsqueeze(0).unsqueeze(0), data_range=max_value)
error = torch.abs(target - output)
input_img = input_img - input_img.min()
input_img = input_img / input_img.max()
output = output - output.min()
output = output / output.max()
target = target - target.min()
target = target / target.max()
error = error - error.min()
error = error / error.max()
image = torch.cat([input_img, target, output, error], dim=0)
image = torchvision.utils.make_grid(image, nrow=1, normalize=False)
figure = get_figure(image.cpu().numpy(),figsize=(3,12),title=f"ssim={ssim_loss.item():.6f}")
writer.add_image(name+"_re", plot_to_image(figure), epoch)
if ksp:
input_img = torch.log(torch.abs(input_img_comp[0,:,:,1])+ 1e-9)
output = torch.log(torch.abs(output_comp[0,:,:,1])+ 1e-9)
target = torch.log(torch.abs(targetcomp[0,:,:,1])+ 1e-9)
else:
input_img = input_img_comp[0,:,:,1]
output = output_comp[0,:,:,1]
target = targetcomp[0,:,:,1]
val_ssim_fct = SSIMLoss()
max_value = target.max().unsqueeze(0)
ssim_loss = 1-val_ssim_fct(output.unsqueeze(0).unsqueeze(0), target.unsqueeze(0).unsqueeze(0), data_range=max_value)
error = torch.abs(target - output)
input_img = input_img - input_img.min()
input_img = input_img / input_img.max()
output = output - output.min()
output = output / output.max()
target = target - target.min()
target = target / target.max()
error = error - error.min()
error = error / error.max()
image = torch.cat([input_img, target, output, error], dim=0)
image = torchvision.utils.make_grid(image, nrow=1, normalize=False)
figure = get_figure(image.cpu().numpy(),figsize=(3,12),title=f"ssim={ssim_loss.item():.6f}")
writer.add_image(name+"_im", plot_to_image(figure), epoch)
plt.close()
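# ----------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the pipeline): the tensors are
# assumed to follow the (batch, H, W, 2) real/imaginary layout implied by
# the indexing above; /tmp/tb_demo is a hypothetical log directory.
if __name__ == "__main__":
    from torch.utils.tensorboard import SummaryWriter
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    writer = SummaryWriter(log_dir="/tmp/tb_demo")
    target = torch.rand(1, 320, 320, 2, device=device)
    output = target + 0.05 * torch.randn_like(target)
    add_img_to_tensorboard(writer, epoch=0, name="demo",
                           input_img_comp=output, output_comp=output,
                           targetcomp=target, ksp=False)
    writer.close()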
| 8,395 | 35.663755 | 162 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/main.py |
#################
# Import python packages
import torch
import logging
import time
from torch.utils.tensorboard import SummaryWriter
import sys
import os
from torch.serialization import default_restore_location
from collections import defaultdict
import numpy as np
import torchvision
import pickle
import matplotlib.pyplot as plt
from packaging import version
from torch.nn import L1Loss, MSELoss
from functions.math import complex_abs, complex_mul, complex_conj
from functions.data.transforms import center_crop_to_smallest, normalize_to_given_mean_std
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from functions.fftc import fft2c_new as fft2c
from functions.fftc import ifft2c_new as ifft2c
else:
from functions.fftc import fft2c_old as fft2c
from functions.fftc import ifft2c_old as ifft2c
# Implementation of SSIMLoss
from functions.training.losses import SSIMLoss
from functions.training.debug_helper import print_tensor_stats, save_figure
# Set seeds, create directories, set path to checkpoints if available
from functions.train_utils import setup_experiment
from functions.train_utils import load_checkpoint,save_checkpoint, init_logging
# Function that returns a MaskFunc object
from functions.data.subsample import create_mask_for_mask_type
from functions.data.transforms import UnetDataTransform
from functions.data.mri_dataset import SliceDataset
from functions.models.unet import Unet
# Create scheduler and optimizer objects
from functions.training.training_functions import configure_optimizers, Compute_batch_train_loss
# Class that tracks the average of some quantity over an epoch
from functions.training.meters import AverageMeter
# Gives a customized tqdm object that can be used as iterable instead of train_loader
from functions.training.progress_bar import ProgressBar
# Functions to log images with a header to tensorboard
from functions.log_save_image_utils import plot_to_image, get_figure
def add_img_to_tensorboard(writer, epoch, name, input_img, target, output, val_ssim_fct, max_value, crop):
output, _ = center_crop_to_smallest(output, target)
input_img, _ = center_crop_to_smallest(input_img, target)
# Normalize output to mean and std of target
#target, output = normalize_to_given_mean_std(target, output)
ssim_loss = 1-val_ssim_fct(output, target, data_range=max_value)
error = torch.abs(target - output)
input_img = input_img - input_img.min()
input_img = input_img / input_img.max()
output = output - output.min()
output = output / output.max()
target = target - target.min()
target = target / target.max()
error = error - error.min()
error = error / error.max()
image = torch.cat([input_img, target, output, error], dim=0)
image = torchvision.utils.make_grid(image, nrow=1, normalize=False)
if crop:
figure = get_figure(image.cpu().numpy(),figsize=(3,12),title=f"ssim={ssim_loss.item():.6f}")
else:
figure = get_figure(image.cpu().numpy(),figsize=(3,20),title=f"ssim={ssim_loss.item():.6f}")
writer.add_image(name, plot_to_image(figure), epoch)
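def _demo_add_img_to_tensorboard(writer):
    # Illustrative sketch (never called by the pipeline): log a random
    # single-channel "reconstruction" against a random "target". Shapes
    # follow the (batch, 1, H, W) convention assumed above; tensors are
    # created on the same device as the SSIMLoss buffers.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    target = torch.rand(1, 1, 320, 320, device=device)
    output = target + 0.05 * torch.randn_like(target)
    add_img_to_tensorboard(writer, epoch=0, name="demo", input_img=output,
                           target=target, output=output, val_ssim_fct=SSIMLoss(),
                           max_value=target.max().unsqueeze(0), crop=True)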
def main_train(hp_exp):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# ------------
# setup:
# Set seeds, create directories, set path to checkpoints if available
# ------------
hp_exp = setup_experiment(hp_exp)
init_logging(hp_exp)
writer = SummaryWriter(log_dir=hp_exp['log_path']) if hp_exp['tb_logging'] else None
# Get list of filenames logged to tensorboard during validation (from the validation set)
val_log_filenames_list = []
for k in hp_exp['log_val_images'].keys():
val_log_filenames_list.append(k)
train_log_filenames_list = []
for k in hp_exp['log_train_images'].keys():
train_log_filenames_list.append(k)
mode_lookup = {
'SSIM' : 'max',
'PSNR' : 'max',
'L1' : 'min',
'L2' : 'min',
'MSE' : 'min',
'L2_kspace' : 'min',
'L1_kspace' : 'min',
}
# ------------
# data
# ------------
mask_func = create_mask_for_mask_type(
hp_exp['mask_type'], hp_exp['selfsup'], hp_exp['center_fraction'], hp_exp['acceleration'], hp_exp['acceleration_total']
)
data_transform_train = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=hp_exp['use_mask_seed_for_training'], hp_exp=hp_exp,mode="train")
data_transform_val = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=True, hp_exp=hp_exp,mode="val")
    def _init_fn(worker_id):
        # Seeds numpy in each dataloader worker; intended to be passed to the
        # DataLoader as worker_init_fn=_init_fn.
        np.random.seed(12 + worker_id)
trainset = SliceDataset(
dataset=hp_exp['train_set'],
path_to_dataset=hp_exp['data_path'],
path_to_sensmaps=hp_exp['smaps_path'],
provide_senmaps=hp_exp['provide_senmaps'],
challenge=hp_exp['challenge'],
transform=data_transform_train,
use_dataset_cache=True,
)
train_loader = torch.utils.data.DataLoader(
dataset=trainset,
batch_size=hp_exp['batch_size'],
num_workers=hp_exp['num_workers'],
shuffle=True,
generator=torch.Generator().manual_seed(hp_exp['seed']),
pin_memory =True,
)
valset = SliceDataset(
dataset=hp_exp['val_set'],
path_to_dataset=hp_exp['data_path'],
path_to_sensmaps=hp_exp['smaps_path'],
provide_senmaps=hp_exp['provide_senmaps'],
challenge=hp_exp['challenge'],
transform=data_transform_val,
use_dataset_cache=True,
)
val_loader = torch.utils.data.DataLoader(
dataset=valset,
batch_size=1,
num_workers=hp_exp['num_workers'],
shuffle=False,
generator=torch.Generator().manual_seed(hp_exp['seed']),
)
# ------------
# model
# ------------
if hp_exp['two_channel_imag_real']:
in_chans = 2
else:
in_chans = 1
model = Unet(
in_chans=in_chans,
out_chans=in_chans,
chans=hp_exp['chans'],
num_pool_layers=hp_exp['num_pool_layers'],
drop_prob=0.0,
).to(device)
logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")#
# ------------
# trainer
# ------------
optimizer, scheduler = configure_optimizers(hp_exp, model.parameters())
compute_batch_train_loss = Compute_batch_train_loss()
train_meters = {'train_' + name: AverageMeter() for name in (hp_exp['loss_functions'])}
if len(hp_exp['loss_functions']) > 1:
train_meters['cumulated_loss'] = AverageMeter()
train_meters['train_L2_gt_abs'] = AverageMeter()
valid_meters = {'val_SSIM' : AverageMeter(), 'val_PSNR' : AverageMeter(), 'val_L1' : AverageMeter(), 'val_L2' : AverageMeter(),
'val_L2_kspace': AverageMeter(), 'val_L2_gt_abs': AverageMeter()}
if hp_exp['two_channel_imag_real']:
train_meters['train_L2_gt_comp'] = AverageMeter()
valid_meters['val_L2_gt_comp'] = AverageMeter()
val_ssim_fct = SSIMLoss()
val_l1_fct = L1Loss(reduction='sum')
val_mse_fct = MSELoss()
val_mse_reduceSum_fct = MSELoss(reduction='sum')
# ------------
# load a stored model if available
# ------------
if hp_exp['restore_file']:
load_checkpoint(hp_exp, model, optimizer, scheduler)
#########
# Training
save_train_figures = True
save_val_figures = False
mask_dict = {}
for epoch in range(save_checkpoint.start_epoch, hp_exp['num_epochs']):
start = time.process_time()
train_bar = ProgressBar(train_loader, epoch)
for meter in train_meters.values():
meter.reset()
for batch_id, batch in enumerate(train_bar):
hp_exp['mode'] = 'train'
model.train()
save_checkpoint.global_step +=1
binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = batch
input_image=input_image.to(device)
target_image=target_image.to(device)
target_kspace=target_kspace.to(device)
input_kspace=input_kspace.to(device)
input_mask=input_mask.to(device)
target_mask=target_mask.to(device)
target_mask_weighted=target_mask_weighted.to(device)
ground_truth_image=ground_truth_image.to(device)
sens_maps=sens_maps.to(device)
mean=mean.to(device)
std=std.to(device)
binary_background_mask=binary_background_mask.to(device)
output = model(input_image)
output = output * std + mean
output_tensorboard = output.detach().clone()
################################
# Compute the training loss
################################
if hp_exp['selfsup'] or hp_exp['compute_sup_loss_in_kspace']:
# move complex dim to end
output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
# Transform coil images to kspace
output_kspace = fft2c(output_per_coil_imgs)
output_kspace = output_kspace * target_mask_weighted + 0.0
target_kspace = target_kspace * target_mask_weighted + 0.0
output_train_loss = output_kspace
target_train_loss = target_kspace
else:
output_train_loss = output
target_train_loss = target_image
# Use max value per ground truth slice instead of per volume to compute ssim and psnr in image domain
max_value = ground_truth_image.max().unsqueeze(0)
train_loss = compute_batch_train_loss.get_batch_train_loss(hp_exp, output_train_loss, target_train_loss, max_value, train_meters)
model.zero_grad()
train_loss.backward()
optimizer.step()
################################
            # Compute train metrics that can be compared across all different setups
################################
# Apply center cropping to outputs if necessary
output_train_metrics, _ = center_crop_to_smallest(output_tensorboard, ground_truth_image)
target_image_train_metrics, _ = center_crop_to_smallest(target_image, ground_truth_image)
# Apply binary masking to outputs if binary masks are given
binary_background_mask, _ = center_crop_to_smallest(binary_background_mask, ground_truth_image)
output_train_metrics = output_train_metrics * binary_background_mask
if hp_exp['two_channel_imag_real']:
# target_image is already masked (if possible)
loss = val_mse_reduceSum_fct(output_train_metrics, target_image_train_metrics) / torch.sum(torch.abs(target_image_train_metrics)**2)
train_meters["train_L2_gt_comp"].update(loss.item())
# prepare for train_L2_gt_abs
output_train_metrics = complex_abs(torch.moveaxis(output_train_metrics , 1, -1 )).unsqueeze(1)
loss = val_mse_reduceSum_fct(output_train_metrics, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
train_meters["train_L2_gt_abs"].update(loss.item())
train_bar.log(dict(**train_meters), verbose=True)
################################
# Log some training images to tensorboard
################################
if hp_exp['tb_logging'] and fname[0] in train_log_filenames_list and epoch % hp_exp['log_image_interval'] == 0:
if slice_num.item() == hp_exp['log_train_images'][fname[0]]:
with torch.no_grad():
if hp_exp['two_channel_imag_real']:
crop = False
name = f"train_{fname[0]}_s{slice_num.item()}_ch1/"+hp_exp['exp_name']
inp = input_image[:,0,:,:].unsqueeze(1)
tar = target_image[:,0,:,:].unsqueeze(1)
out = output_tensorboard[:,0,:,:].unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, target_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
name = f"train_{fname[0]}_s{slice_num.item()}_ch2/"+hp_exp['exp_name']
inp = input_image[:,1,:,:].unsqueeze(1)
tar = target_image[:,1,:,:].unsqueeze(1)
out = output_tensorboard[:,1,:,:].unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, target_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
name = f"train_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
inp = complex_abs(torch.moveaxis(input_image , 1, -1 )).unsqueeze(1)
tar = complex_abs(torch.moveaxis(target_image , 1, -1 )).unsqueeze(1)
out = complex_abs(torch.moveaxis(output_tensorboard , 1, -1 )).unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, target_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
else:
name = f"train_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
add_img_to_tensorboard(writer, epoch, name, input_image, target_image, output_tensorboard, val_ssim_fct, max_value, crop=True)
if epoch % hp_exp['val_interval'] == 0:
hp_exp['mode'] = 'val'
model.eval()
for meter in valid_meters.values():
meter.reset()
valid_bar = ProgressBar(val_loader, epoch)
for sample_id, sample in enumerate(valid_bar):
with torch.no_grad():
binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = sample
input_image=input_image.to(device)
input_kspace=input_kspace.to(device)
input_mask=input_mask.to(device)
target_image=target_image.to(device)
target_kspace=target_kspace.to(device)
target_mask=target_mask.to(device)
target_mask_weighted=target_mask_weighted.to(device)
ground_truth_image=ground_truth_image.to(device)
sens_maps=sens_maps.to(device)
mean=mean.to(device)
std=std.to(device)
binary_background_mask=binary_background_mask.to(device)
output = model(input_image)
output = output * std + mean
###############################################################
# Validation L1 and L2 are computed as during training
if hp_exp['selfsup'] or hp_exp['compute_sup_loss_in_kspace']:
# move complex dim to end
output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
# Transform coil images to kspace
output_kspace = fft2c(output_per_coil_imgs)
output_kspace_fully_sampled = output_kspace.clone()
output_kspace = output_kspace * target_mask_weighted + 0.0
target_kspace = target_kspace * target_mask_weighted + 0.0
L2kspace = val_mse_reduceSum_fct(output_kspace, target_kspace) / torch.sum(torch.abs(target_kspace)**2)
valid_meters["val_L2_kspace"].update(L2kspace.item())
                        # L1 and L2 validation losses are computed on the full images, without cropping, complex absolute value, masking, or data consistency
# L1 validation loss
loss = val_l1_fct(output, target_image) / torch.sum(torch.abs(target_image))
valid_meters["val_L1"].update(loss.item())
# L2 validation loss
loss = val_mse_reduceSum_fct(output, target_image) / torch.sum(torch.abs(target_image)**2)
valid_meters["val_L2"].update(loss.item())
else:
if hp_exp['two_channel_imag_real']:
# To enable data consistency later on
# move complex dim to end
output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
# Transform coil images to kspace
output_kspace = fft2c(output_per_coil_imgs)
#save_figure(torch.log(complex_abs(output_kspace[0,0,:,:]) + 1e-9).detach().cpu(),'output_kspace_val',hp_exp,save=save_val_figures)
output_kspace_fully_sampled = output_kspace.clone()
L2kspace = val_mse_reduceSum_fct(output_kspace, target_kspace) / torch.sum(torch.abs(target_kspace)**2)
valid_meters["val_L2_kspace"].update(L2kspace.item())
                            # L1 and L2 validation losses are computed on the full images, without cropping, complex absolute value, masking, or data consistency
# L1 validation loss
loss = val_l1_fct(output, target_image) / torch.sum(torch.abs(target_image))
valid_meters["val_L1"].update(loss.item())
# L2 validation loss
loss = val_mse_reduceSum_fct(output, target_image) / torch.sum(torch.abs(target_image)**2)
valid_meters["val_L2"].update(loss.item())
###############################################################
###############################################################
# Validation PSNR and SSIM are computed on masked, cropped and real images
# Apply masking before computing scores in the image domain in order to eliminate artifacts in the background
output = output * binary_background_mask
output_tensorboard = output.clone()
# PSNR and SSIM are computed on the center cropped magnitude reconstruction
output, _ = center_crop_to_smallest(output, ground_truth_image)
if hp_exp['two_channel_imag_real']:
target_image_train_metrics, _ = center_crop_to_smallest(target_image, ground_truth_image)
loss = val_mse_reduceSum_fct(output, target_image_train_metrics) / torch.sum(torch.abs(target_image_train_metrics)**2)
valid_meters["val_L2_gt_comp"].update(loss.item())
# Move complex dim to end, apply complex abs, insert channel dimension
output = complex_abs(torch.moveaxis(output , 1, -1 ))
output = output.unsqueeze(1)
# Use max value per ground truth slice instead of per volume
max_value = ground_truth_image.max().unsqueeze(0)
# SSIM
ssim_loss = 1-val_ssim_fct(output, ground_truth_image, data_range=max_value)
valid_meters["val_SSIM"].update(ssim_loss.item())
loss = val_mse_reduceSum_fct(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
valid_meters["val_L2_gt_abs"].update(loss.item())
# MSE for PSNR
loss = val_mse_fct(output, ground_truth_image) # reduce with mean
# PSNR
psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(loss)
valid_meters["val_PSNR"].update(psnr.item())
valid_bar.log(dict(**valid_meters), verbose=True)
if hp_exp['tb_logging'] and fname[0] in val_log_filenames_list and epoch % hp_exp['log_image_interval'] == 0:
if slice_num.item() == hp_exp['log_val_images'][fname[0]]:
if hp_exp['two_channel_imag_real']:
crop = False
name = f"val_{fname[0]}_s{slice_num.item()}_ch1/"+hp_exp['exp_name']
inp = input_image[:,0,:,:].unsqueeze(1)
tar = target_image[:,0,:,:].unsqueeze(1)
out = output_tensorboard[:,0,:,:].unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, ground_truth_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
name = f"val_{fname[0]}_s{slice_num.item()}_ch2/"+hp_exp['exp_name']
inp = input_image[:,1,:,:].unsqueeze(1)
tar = target_image[:,1,:,:].unsqueeze(1)
out = output_tensorboard[:,1,:,:].unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, ground_truth_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
name = f"val_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
inp = complex_abs(torch.moveaxis(input_image , 1, -1 )).unsqueeze(1)
tar = complex_abs(torch.moveaxis(target_image , 1, -1 )).unsqueeze(1)
out = complex_abs(torch.moveaxis(output_tensorboard , 1, -1 )).unsqueeze(1)
# If we want to look at center crop then crop target
if crop:
tar, _ = center_crop_to_smallest(tar, ground_truth_image)
add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
else:
name = f"val_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
add_img_to_tensorboard(writer, epoch, name, input_image, target_image, output_tensorboard, val_ssim_fct, max_value, crop=True)
if hp_exp['two_channel_imag_real']:
val_metric_dict = { #keys should have the same name as the keys used to pick a training loss in hp_exp['loss_functions']
'SSIM' : valid_meters['val_SSIM'].avg,
'L1' : valid_meters['val_L1'].avg,
'L2' : valid_meters['val_L2'].avg,
'PSNR' : valid_meters['val_PSNR'].avg,
'L2_kspace' : valid_meters['val_L2_kspace'].avg,
'L2_gt_abs' : valid_meters["val_L2_gt_abs"].avg,
'L2_gt_comp' : valid_meters["val_L2_gt_comp"].avg,
}
else:
val_metric_dict = { #keys should have the same name as the keys used to pick a training loss in hp_exp['loss_functions']
'SSIM' : valid_meters['val_SSIM'].avg,
'L1' : valid_meters['val_L1'].avg,
'L2' : valid_meters['val_L2'].avg,
'PSNR' : valid_meters['val_PSNR'].avg,
'L2_kspace' : valid_meters['val_L2_kspace'].avg,
'L2_gt_abs' : valid_meters["val_L2_gt_abs"].avg,
}
current_lr = save_checkpoint.current_lr
current_best_score = save_checkpoint.best_score
# Logging to tensorboard
if hp_exp['tb_logging']:
writer.add_scalar("lr", current_lr, epoch)
writer.add_scalar("epoch", epoch, epoch)
for tr_loss_name in train_meters.keys():
writer.add_scalar(tr_loss_name, train_meters[tr_loss_name].avg, epoch)
for val_loss_name in val_metric_dict.keys():
writer.add_scalar('val_'+val_loss_name, val_metric_dict[val_loss_name], epoch)
sys.stdout.flush()
#### Learning rate decay
score = val_metric_dict[hp_exp['decay_metric']]
if hp_exp['lr_scheduler'] == 'MultiStepLR':
scheduler.step()
elif hp_exp['lr_scheduler'] == 'ReduceLROnPlateau':
scheduler.step(score)
else:
raise ValueError('Scheduler is not defined')
save_checkpoint(hp_exp, epoch, model, optimizer, scheduler, score=score) # This potentially updates save_checkpoint.best_score
end = time.process_time() - start
# Logging to train.log
if (val_metric_dict[hp_exp['decay_metric']] < current_best_score and save_checkpoint.mode == "min") or (val_metric_dict[hp_exp['decay_metric']] > current_best_score and save_checkpoint.mode == "max"):
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3), New='Highscore')))
else:
logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
new_lr = optimizer.param_groups[0]["lr"]
save_checkpoint.current_lr = new_lr # current lr during next epoch
if hp_exp['early_stop_lr_deacy']:
if (score < current_best_score and save_checkpoint.mode == "min") or (score > current_best_score and save_checkpoint.mode == "max"):
save_checkpoint.best_val_current_lr_interval = score
            # At every lr decay, check whether the model failed to improve during the last lr_convergence_break_counter lr intervals and break if it didn't.
if new_lr < current_lr:
if save_checkpoint.best_val_current_lr_interval != save_checkpoint.best_score:
save_checkpoint.lr_interval_counter += 1
if save_checkpoint.lr_interval_counter == hp_exp['lr_convergence_break_counter']:
logging.info(f'lr decayed to {new_lr}. Break training due to convergence of val loss!')
break
else:
logging.info(f'lr decayed to {new_lr}. lr_interval_counter increased but do not yet break due to convergence of val loss!')
else:
save_checkpoint.best_val_current_lr_interval = float("inf") if mode_lookup[hp_exp['decay_metric']] == "min" else float("-inf")
save_checkpoint.lr_interval_counter = 0
logging.info(f'lr decayed to {new_lr}. No convergence detected. Reset lr_interval_counter.')
if np.round(current_lr,10) <= hp_exp['lr_min']:
if hp_exp['lr_min_break_counter'] == save_checkpoint.break_counter:
logging.info('Break training due to minimal learning rate constraint!')
break
else:
save_checkpoint.break_counter += 1
else:
current_lr = save_checkpoint.current_lr
if hp_exp['lr_scheduler'] == 'MultiStepLR':
scheduler.step()
else:
raise ValueError('Scheduler is not defined')
if hp_exp['tb_logging']:
writer.add_scalar("lr", current_lr, epoch)
writer.add_scalar("epoch", epoch, epoch)
for tr_loss_name in train_meters.keys():
writer.add_scalar(tr_loss_name, train_meters[tr_loss_name].avg, epoch)
new_lr = optimizer.param_groups[0]["lr"]
save_checkpoint.current_lr = new_lr # current lr during next epoch
end = time.process_time() - start
logging.info(train_bar.print(dict(**train_meters, lr=current_lr, time=np.round(end/60,3))))
logging.info(f"Done training! Best PSNR {save_checkpoint.best_score:.5f} obtained after epoch {save_checkpoint.best_epoch}.")
    # return the meters and metrics logged to tensorboard
return train_meters, val_metric_dict
def main_test(hp_exp):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# ------------
# setup:
# Set seeds, create directories, set path to checkpoints if available
# ------------
hp_exp = setup_experiment(hp_exp)
init_logging(hp_exp)
# Get list of filenames saved during testing (from the test set)
test_log_filenames_list = []
for k in hp_exp['save_test_images'].keys():
test_log_filenames_list.append(k)
# ------------
# data
# ------------
# For testing we want the target mask to be all ones.
# This can be achieved either by setting self_sup=False or acceleration_total=1.0
mask_func = create_mask_for_mask_type(
hp_exp['mask_type'], self_sup=False, center_fraction=hp_exp['center_fraction'], acceleration=hp_exp['acceleration'], acceleration_total=1.0
)
data_transform = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=True, hp_exp=hp_exp, mode="test")
testset = SliceDataset(
dataset=hp_exp['test_set'],
path_to_dataset=hp_exp['data_path'],
path_to_sensmaps=hp_exp['smaps_path'],
provide_senmaps=hp_exp['provide_senmaps'],
challenge=hp_exp['challenge'],
transform=data_transform,
use_dataset_cache=True,
)
test_loader = torch.utils.data.DataLoader(
dataset=testset,
batch_size=1,
num_workers=hp_exp['num_workers'],
shuffle=False,
generator=torch.Generator().manual_seed(hp_exp['seed']),
)
# ------------
# model
# ------------
if hp_exp['two_channel_imag_real']:
in_chans = 2
else:
in_chans = 1
model = Unet(
in_chans=in_chans,
out_chans=in_chans,
chans=hp_exp['chans'],
num_pool_layers=hp_exp['num_pool_layers'],
drop_prob=0.0,
).to(device)
# ------------
# load a stored model if available
# ------------
if hp_exp['restore_file']:
load_checkpoint(hp_exp, model, None, None)
test_ssim_fct = SSIMLoss()
test_l1_fct_sum = L1Loss(reduction='sum')
test_mse_fct_sum = MSELoss(reduction='sum')
test_mse_fct_mean = MSELoss(reduction='mean')
model.eval()
test_bar = ProgressBar(test_loader, epoch=0)
# Collect scores
ssim_vals = []
L1_vals = []
psnr_vals = []
L2_vals = []
    # Always compute both: scores after binary masking and scores after data consistency.
ssim_vals_dc = []
L1_vals_dc = []
psnr_vals_dc = []
L2_vals_dc = []
for sample_id, sample in enumerate(test_bar):
with torch.no_grad():
binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = sample
input_image=input_image.to(device)
input_kspace=input_kspace.to(device)
input_mask=input_mask.to(device)
target_kspace=target_kspace.to(device)
target_mask=target_mask.to(device)
ground_truth_image=ground_truth_image.to(device)
sens_maps=sens_maps.to(device)
mean=mean.to(device)
std=std.to(device)
binary_background_mask=binary_background_mask.to(device)
output = model(input_image)
output = output * std + mean
#####################
sens_maps_conj = complex_conj(sens_maps)
output_per_coil_imgs = torch.moveaxis(output.clone() , 1, -1 )
output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
# Transform coil images to kspace
output_kspace = fft2c(output_per_coil_imgs)
################
################
# Get scores in image domain after data consistency
output_image_data_consistency = ifft2c(output_kspace* (1-input_mask) + input_kspace)
output_image_data_consistency = complex_mul(output_image_data_consistency, sens_maps_conj)
output_image_data_consistency = output_image_data_consistency.sum(dim=1, keepdim=False)
output_image_data_consistency = torch.moveaxis(output_image_data_consistency , -1, 1)
output_image_data_consistency, _ = center_crop_to_smallest(output_image_data_consistency, ground_truth_image)
output_image_data_consistency = complex_abs(torch.moveaxis(output_image_data_consistency , 1, -1 ))
output_image_dc = output_image_data_consistency.unsqueeze(1)
# L1
loss = test_l1_fct_sum(output_image_dc, ground_truth_image) / torch.sum(torch.abs(ground_truth_image))
L1_vals_dc.append(loss.item())
# L2
loss = test_mse_fct_sum(output_image_dc, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
L2_vals_dc.append(loss.item())
max_value = ground_truth_image.max().unsqueeze(0)
# MSE for PSNR
mse = test_mse_fct_mean(output_image_dc, ground_truth_image)
# PSNR
psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
psnr_vals_dc.append(psnr.item())
# SSIM
ssim_loss = 1-test_ssim_fct(output_image_dc, ground_truth_image, data_range=max_value)
ssim_vals_dc.append(ssim_loss.item())
output = output * binary_background_mask
######################
######################
# Get scores after binary masking without data consistency
# at test time L1, L2, PSNR and SSIM are all computed on center cropped magnitude values
output, _ = center_crop_to_smallest(output, ground_truth_image)
if hp_exp['two_channel_imag_real']:
# Move complex dim to end, apply complex abs, insert channel dimension
output = complex_abs(torch.moveaxis(output , 1, -1 ))
output = output.unsqueeze(1)
# L1
loss = test_l1_fct_sum(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image))
L1_vals.append(loss.item())
# L2
loss = test_mse_fct_sum(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
L2_vals.append(loss.item())
# Normalize output to mean and std of target
#target, output = normalize_to_given_mean_std(target, output)
# Use max value per ground truth slice instead of per volume
max_value = ground_truth_image.max().unsqueeze(0)
# MSE for PSNR
mse = test_mse_fct_mean(output, ground_truth_image)
# PSNR
psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
psnr_vals.append(psnr.item())
# SSIM
ssim_loss = 1-test_ssim_fct(output, ground_truth_image, data_range=max_value)
ssim_vals.append(ssim_loss.item())
######################
# Save some test images
if fname[0] in test_log_filenames_list:
if slice_num.item() == hp_exp['save_test_images'][fname[0]]:
error = torch.abs(ground_truth_image - output)
error_dc = torch.abs(ground_truth_image - output_image_dc)
output = output - output.min()
output = output / output.max()
output_image_dc = output_image_dc - output_image_dc.min()
output_image_dc = output_image_dc / output_image_dc.max()
ground_truth_image = ground_truth_image - ground_truth_image.min()
ground_truth_image = ground_truth_image / ground_truth_image.max()
error = error - error.min()
error_dc = error_dc - error_dc.min()
max_norm = torch.stack([error,error_dc]).max()
error = error / max_norm
error_dc = error_dc / max_norm
image = torch.cat([ground_truth_image, ground_truth_image, output, output_image_dc, error, error_dc], dim=0)
image = torchvision.utils.make_grid(image, nrow=2, normalize=False, value_range=(0,1), pad_value=1)
figure = get_figure(image.cpu().numpy(),figsize=(8,12),title=f"ssim={ssim_loss.item():.6f}, ssim_dc={ssim_vals_dc[-1]:.6f}")
if not os.path.isdir(hp_exp['log_path'] + 'test_imgs/'):
os.mkdir(hp_exp['log_path'] + 'test_imgs/')
plt.savefig(hp_exp['log_path'] + f"test_imgs/{fname[0]}_s{slice_num.item()}.png", dpi='figure')
plt.close()
test_metric_dict = {
'ssim_m' : np.mean(np.array(ssim_vals)),
'ssim_s' : np.std(np.array(ssim_vals)),
'L1_m' : np.mean(np.array(L1_vals)),
'L1_s' : np.std(np.array(L1_vals)),
'psnr_m' : np.mean(np.array(psnr_vals)),
'psnr_s' : np.std(np.array(psnr_vals)),
'L2_m' : np.mean(np.array(L2_vals)),
'L2_s' : np.std(np.array(L2_vals)),
}
print(test_metric_dict)
test_metric_dict_dc = {
'ssim_m' : np.mean(np.array(ssim_vals_dc)),
'ssim_s' : np.std(np.array(ssim_vals_dc)),
'L1_m' : np.mean(np.array(L1_vals_dc)),
'L1_s' : np.std(np.array(L1_vals_dc)),
'psnr_m' : np.mean(np.array(psnr_vals_dc)),
'psnr_s' : np.std(np.array(psnr_vals_dc)),
'L2_m' : np.mean(np.array(L2_vals_dc)),
'L2_s' : np.std(np.array(L2_vals_dc)),
}
print(test_metric_dict_dc)
testset_name = hp_exp['log_path'] + hp_exp['test_set'][hp_exp['test_set'].find('datasets/')+9 : hp_exp['test_set'].find('.yaml')]
pickle.dump( test_metric_dict, open(testset_name + '_metrics_' + hp_exp['resume_from_which_checkpoint'] + '.p', "wb" ) )
pickle.dump( test_metric_dict_dc, open(testset_name + '_metrics_DC_' + hp_exp['resume_from_which_checkpoint'] + '.p', "wb" ) )
logging.info("Evaluate testset : {}".format(testset_name))
for test_metric in test_metric_dict.keys():
logging.info("{}: {}".format(test_metric, test_metric_dict[test_metric]))
logging.info("Evaluate testset : {} with data consistency".format(testset_name))
for test_metric in test_metric_dict_dc.keys():
logging.info("{}: {}".format(test_metric, test_metric_dict_dc[test_metric]))
| 40,715 | 45.961938 | 214 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/coil_combine.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from functions.math import complex_abs_sq
def rss(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
"""
Compute the Root Sum of Squares (RSS).
RSS is computed assuming that dim is the coil dimension.
Args:
data: The input tensor
        dim: The dimension along which to apply the RSS transform
Returns:
The RSS value.
"""
return torch.sqrt((data ** 2).sum(dim))
def rss_complex(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
"""
Compute the Root Sum of Squares (RSS) for complex inputs.
RSS is computed assuming that dim is the coil dimension.
Args:
data: The input tensor
        dim: The dimension along which to apply the RSS transform
Returns:
The RSS value.
"""
return torch.sqrt(complex_abs_sq(data).sum(dim))
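# ----------------------------------------------------------------------
# Quick sanity check (illustrative sketch, not part of the library): for
# data stored in the stacked (..., 2) real/imaginary layout, rss_complex
# over the coil dimension equals rss applied to the complex magnitude.
# The 8-coil shape below is an arbitrary assumption.
if __name__ == "__main__":
    data = torch.randn(8, 64, 64, 2)  # 8 hypothetical coils
    magnitude = complex_abs_sq(data).sqrt()
    assert torch.allclose(rss_complex(data, dim=0), rss(magnitude, dim=0), atol=1e-6)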
| 1,015 | 22.627907 | 66 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/math.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def complex_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Complex multiplication.
This multiplies two complex tensors assuming that they are both stored as
real arrays with the last dimension being the complex dimension.
Args:
x: A PyTorch tensor with the last dimension of size 2.
y: A PyTorch tensor with the last dimension of size 2.
Returns:
A PyTorch tensor with the last dimension of size 2.
"""
if not x.shape[-1] == y.shape[-1] == 2:
raise ValueError("Tensors do not have separate complex dim.")
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack((re, im), dim=-1)
def complex_conj(x: torch.Tensor) -> torch.Tensor:
"""
Complex conjugate.
This applies the complex conjugate assuming that the input array has the
last dimension as the complex dimension.
Args:
x: A PyTorch tensor with the last dimension of size 2.
Returns:
A PyTorch tensor with the last dimension of size 2.
"""
if not x.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
def complex_abs(data: torch.Tensor) -> torch.Tensor:
"""
Compute the absolute value of a complex valued input tensor.
Args:
data: A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
Absolute value of data.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
return (data ** 2).sum(dim=-1).sqrt()
def complex_abs_sq(data: torch.Tensor) -> torch.Tensor:
"""
Compute the squared absolute value of a complex tensor.
Args:
data: A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
Squared absolute value of data.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
return (data ** 2).sum(dim=-1)
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
"""
Converts a complex torch tensor to numpy array.
Args:
data: Input data to be converted to numpy.
Returns:
Complex numpy version of data.
"""
data = data.numpy()
return data[..., 0] + 1j * data[..., 1]
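# ----------------------------------------------------------------------
# Sanity checks (illustrative sketch, not part of the library): the
# stacked (..., 2) helpers agree with PyTorch's native complex arithmetic
# (view_as_complex/view_as_real exist from torch 1.6 onwards).
if __name__ == "__main__":
    x = torch.randn(4, 4, 2)
    y = torch.randn(4, 4, 2)
    ref = torch.view_as_complex(x) * torch.view_as_complex(y)
    assert torch.allclose(complex_mul(x, y), torch.view_as_real(ref), atol=1e-6)
    assert torch.allclose(complex_abs(x), torch.view_as_complex(x).abs(), atol=1e-6)
    assert torch.allclose(complex_mul(x, complex_conj(x))[..., 0], complex_abs_sq(x), atol=1e-6)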
| 2,728 | 25.754902 | 77 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/log_save_image_utils.py | import matplotlib.pyplot as plt
import torchvision
import io
import torch
import numpy as np
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
#img = buf.getvalue()#.to(torch.uint8)
#img = torch.import_ir_module_from_buffer(img,dtype=torch.uint8)
frameTensor = torch.tensor(np.frombuffer(buf.getvalue(), dtype=np.uint8), device='cpu')
image = torchvision.io.decode_png(frameTensor)
# Add the batch dimension
#image = torch.unsqueeze(image, 0)
return image
def get_figure(image,figsize,title):
"""Return a matplotlib figure of a given image."""
if len(image.shape) != 3:
raise ValueError("Image dimensions not suitable for logging to tensorboard.")
if image.shape[0] == 1 or image.shape[0] == 3:
image = np.rollaxis(image,0,3)
# Create a figure to contain the plot.
if figsize:
figure = plt.figure(figsize=figsize)
else:
figure = plt.figure()
# Start next subplot.
plt.subplot(1, 1, 1, title=title)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap='gray')
figure.tight_layout()
    return figure
| 1,533 | 33.088889 | 91 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/train_utils.py |
import torch
import numpy as np
import random
import os
import glob
import logging
from torch.serialization import default_restore_location
from tensorboard.backend.event_processing import event_accumulator
import time
import matplotlib.pyplot as plt
def setup_experiment(hp_exp):
'''
- Handle seeding
- Create directories
- Look for checkpoints to load from
'''
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(hp_exp['seed'])
torch.cuda.manual_seed(hp_exp['seed'])
np.random.seed(hp_exp['seed'])
random.seed(hp_exp['seed'])
hp_exp['log_path'] = './'+ hp_exp['exp_name'] + '/log_files/'
os.makedirs(hp_exp['log_path'] + 'checkpoints/', exist_ok=True)
hp_exp['log_file'] = os.path.join(hp_exp['log_path'], "train.log")
# Look for checkpoints to load from
available_models = glob.glob(hp_exp['log_path'] + 'checkpoints/*.pt')
if available_models and hp_exp['resume_from_which_checkpoint']=='last':
hp_exp['restore_file'] = hp_exp['log_path'] + 'checkpoints/checkpoint_last.pt'
elif available_models and hp_exp['resume_from_which_checkpoint']=='best':
hp_exp['restore_file'] = hp_exp['log_path'] + 'checkpoints/checkpoint_best.pt'
else:
hp_exp['restore_file'] = None
# Set attributes of the function save_checkpoint. They will be used to track the validation score and trigger saving a checkpoint
mode_lookup = {
'SSIM' : 'max',
'PSNR' : 'max',
'L1' : 'min',
'L2' : 'min',
'MSE' : 'min',
'L2_kspace' : 'min',
'L1_kspace' : 'min',
}
save_checkpoint.best_epoch = -1
save_checkpoint.last_epoch = 0
save_checkpoint.start_epoch = 0
save_checkpoint.global_step = 0
save_checkpoint.current_lr = hp_exp['lr']
save_checkpoint.break_counter = 0
save_checkpoint.best_val_current_lr_interval = float("inf") if mode_lookup[hp_exp['decay_metric']] == "min" else float("-inf")
save_checkpoint.lr_interval_counter = 0
save_checkpoint.mode = mode_lookup[hp_exp['decay_metric']]
save_checkpoint.best_score = float("inf") if save_checkpoint.mode == "min" else float("-inf")
return hp_exp
def init_logging(hp_exp):
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
handlers = [logging.StreamHandler()]
mode = "a" if hp_exp['restore_file'] else "w"
handlers.append(logging.FileHandler(hp_exp['log_file'], mode=mode))
logging.basicConfig(handlers=handlers, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
if hp_exp['mode'] == 'train':
logging.info("Arguments: {}".format(hp_exp))
def save_checkpoint(hp_exp, epoch, model, optimizer=None, scheduler=None, score=None):
    '''
    This function saves a range of parameters related to the training progress.
    Saving those parameters allows training to be interrupted and resumed at any point.
At the beginning of every experiment the parameters are initialized in setup_experiment()
Parameters:
- best_score: Holds the best validation score so far
- best_epoch: Holds the epoch in which the best validation score was achieved
- last_epoch: Holds the current epoch.
- break_counter: Count the number of epochs with minimal lr
    - best_val_current_lr_interval: Holds the best val performance for the current lr interval
- lr_interval_counter: Counts for how many lr intervals there was no improvement
'''
save_checkpoint.last_epoch = epoch
best_score = save_checkpoint.best_score
if (score < best_score and save_checkpoint.mode == "min") or (score > best_score and save_checkpoint.mode == "max"):
save_checkpoint.best_epoch = epoch
save_checkpoint.best_score = score
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
state_dict = {
"last_step": save_checkpoint.global_step, #set
"last_score": score, #set
"break_counter": save_checkpoint.break_counter,
"best_val_current_lr_interval": save_checkpoint.best_val_current_lr_interval,
"lr_interval_counter": save_checkpoint.lr_interval_counter,
"last_epoch": save_checkpoint.last_epoch, #set
"best_epoch": save_checkpoint.best_epoch, #set
"current_lr":save_checkpoint.current_lr, #set
"mode": save_checkpoint.mode,
"best_score": getattr(save_checkpoint, "best_score", None), #set
"model": [m.state_dict() for m in model] if model is not None else None,
"optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
"scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
"args": hp_exp,
}
torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint_last.pt"))
if hp_exp['epoch_checkpoints']:
if epoch in hp_exp['epoch_checkpoints']:
torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint{}.pt".format(epoch)))
if (score < best_score and save_checkpoint.mode == "min") or (score > best_score and save_checkpoint.mode == "max"):
torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint_best.pt"))
def load_checkpoint(hp_exp, model=None, optimizer=None, scheduler=None):
print('restoring model..')
state_dict = torch.load(hp_exp['restore_file'], map_location=lambda s, l: default_restore_location(s, "cpu"))
save_checkpoint.last_epoch = state_dict["last_epoch"]
save_checkpoint.start_epoch = state_dict["last_epoch"]+1
save_checkpoint.global_step = state_dict["last_step"]
save_checkpoint.best_score = state_dict["best_score"]
save_checkpoint.best_epoch = state_dict["best_epoch"]
save_checkpoint.break_counter = state_dict["break_counter"]
save_checkpoint.best_val_current_lr_interval = state_dict["best_val_current_lr_interval"]
save_checkpoint.lr_interval_counter = state_dict["lr_interval_counter"]
save_checkpoint.current_lr = state_dict["current_lr"]
save_checkpoint.mode = state_dict["mode"]
model = [model] if model is not None and not isinstance(model, list) else model
optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
if model is not None and state_dict.get("model", None) is not None:
for m, state in zip(model, state_dict["model"]):
m.load_state_dict(state)
if optimizer is not None and state_dict.get("optimizer", None) is not None:
for o, state in zip(optimizer, state_dict["optimizer"]):
o.load_state_dict(state)
if scheduler is not None and state_dict.get("scheduler", None) is not None:
for s, state in zip(scheduler, state_dict["scheduler"]):
#milestones = s.milestones
#state['milestones'] = milestones
s.load_state_dict(state)
#s.milestones = milestones
logging.info("Loaded checkpoint {} from best_epoch {} last_epoch {}".format(hp_exp['restore_file'], save_checkpoint.best_epoch, save_checkpoint.last_epoch))
| 7,597 | 44.771084 | 160 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("1.7.0"):
import torch.fft # type: ignore
def fft2c_old(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The FFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=[-3, -2])
return data
def ifft2c_old(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=[-3, -2])
return data
def fft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The FFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.fftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
"""
Similar to roll but for only one dim.
Args:
x: A PyTorch tensor.
shift: Amount to roll.
dim: Which dimension to roll.
Returns:
Rolled version of x.
"""
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def roll(
x: torch.Tensor,
shift: List[int],
dim: List[int],
) -> torch.Tensor:
"""
Similar to np.roll but applies to PyTorch Tensors.
Args:
x: A PyTorch tensor.
shift: Amount to roll.
dim: Which dimension to roll.
Returns:
Rolled version of x.
"""
if len(shift) != len(dim):
raise ValueError("len(shift) must match len(dim)")
for (s, d) in zip(shift, dim):
x = roll_one_dim(x, s, d)
return x
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to fftshift.
Returns:
fftshifted version of x.
"""
if dim is None:
        # this weird code is necessary for torch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = x.shape[dim_num] // 2
return roll(x, shift, dim)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to ifftshift.
Returns:
ifftshifted version of x.
"""
if dim is None:
# this weird code is necessary for toch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = (x.shape[dim_num] + 1) // 2
return roll(x, shift, dim)
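# ----------------------------------------------------------------------
# Round-trip sanity check (illustrative sketch; assumes torch >= 1.7 so
# that the *_new transforms are available): the centered transforms are
# orthonormal, so ifft2c(fft2c(x)) recovers x up to float error, and
# fftshift followed by ifftshift is exact for odd and even sizes alike.
if __name__ == "__main__":
    x = torch.randn(2, 63, 64, 2)
    assert torch.allclose(ifft2c_new(fft2c_new(x)), x, atol=1e-5)
    assert torch.equal(ifftshift(fftshift(x, dim=[1, 2]), dim=[1, 2]), x)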
| 5,535 | 25.236967 | 80 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import L1Loss, MSELoss
class SSIMLoss(nn.Module):
"""
SSIM loss module.
"""
def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
"""
Args:
win_size: Window size for SSIM calculation.
k1: k1 parameter for SSIM calculation.
k2: k2 parameter for SSIM calculation.
"""
super().__init__()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.win_size = win_size
self.k1, self.k2 = torch.tensor(k1).to(device), torch.tensor(k2).to(device)
self.register_buffer("w", torch.ones(1, 1, win_size, win_size).to(device) / win_size ** 2)
NP = win_size ** 2
self.cov_norm = torch.tensor(NP / (NP - 1)).to(device)
def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
assert isinstance(self.w, torch.Tensor)
data_range = data_range[:, None, None, None]
C1 = (self.k1 * data_range) ** 2
C2 = (self.k2 * data_range) ** 2
        ux = F.conv2d(X, self.w)  # type: ignore
        uy = F.conv2d(Y, self.w)  # type: ignore
uxx = F.conv2d(X * X, self.w)
uyy = F.conv2d(Y * Y, self.w)
uxy = F.conv2d(X * Y, self.w)
vx = self.cov_norm * (uxx - ux * ux)
vy = self.cov_norm * (uyy - uy * uy)
vxy = self.cov_norm * (uxy - ux * uy)
A1, A2, B1, B2 = (
2 * ux * uy + C1,
2 * vxy + C2,
ux ** 2 + uy ** 2 + C1,
vx + vy + C2,
)
D = B1 * B2
S = (A1 * A2) / D
return 1 - S.mean()
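# ----------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the library): identical images
# give an SSIM of 1, i.e. a loss of ~0. Inputs follow the (batch, 1, H, W)
# convention; on a CUDA machine the buffers live on the GPU, so the inputs
# are created there as well.
if __name__ == "__main__":
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    x = torch.rand(1, 1, 64, 64, device=device)
    loss = SSIMLoss()(x, x, data_range=x.max().unsqueeze(0))
    print(f"SSIM loss for identical images: {loss.item():.2e}")  # ~0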
| 1,886 | 31.534483 | 98 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/training_functions.py |
import torch
from torch.nn import L1Loss, MSELoss
# Implementation of SSIMLoss
from functions.training.losses import SSIMLoss
# Apply a center crop on the larger image to the size of the smaller.
#from functions.data.transforms import center_crop_to_smallest
# In order to get access to attributes stored in save_checkpoint
from functions.train_utils import save_checkpoint
class Compute_batch_train_loss:
def __init__(self) -> None:
self.loss_fct_lookup = {
'SSIM' : SSIMLoss(),
'L1' : L1Loss(reduction='sum'),
'L2' : MSELoss(reduction='sum'),
}
def get_batch_train_loss(self, hp_exp, output, target, max_value, train_meters):
train_loss = 0
        # The loop variable is the loss *name*; the computed loss tensor is
        # assigned to a separate variable to avoid shadowing.
        for loss_name in hp_exp['loss_functions']:
            if loss_name == 'SSIM':
                loss = self.loss_fct_lookup['SSIM'](output, target, data_range=max_value)
                train_meters["train_SSIM"].update(loss.item())
                train_loss += loss
            elif loss_name == 'L1':
                loss = self.loss_fct_lookup['L1'](output, target) / torch.sum(torch.abs(target))
                train_meters["train_L1"].update(loss.item())
                train_loss += loss
            elif loss_name == 'L2':
                # L2 loss in the image domain, i.e. directly between network output and target image
                loss = self.loss_fct_lookup['L2'](output, target) / torch.sum(torch.abs(target)**2)
                train_meters["train_L2"].update(loss.item())
                train_loss += loss
            elif loss_name == 'L2_kspace':
                # L2 loss in the frequency domain; computed exactly like 'L2', only logged separately
                loss = self.loss_fct_lookup['L2'](output, target) / torch.sum(torch.abs(target)**2)
                train_meters["train_L2_kspace"].update(loss.item())
                train_loss += loss
            elif loss_name == 'L1_kspace':
                # L1 loss in the frequency domain; computed exactly like 'L1', only logged separately
                loss = self.loss_fct_lookup['L1'](output, target) / torch.sum(torch.abs(target))
                train_meters["train_L1_kspace"].update(loss.item())
                train_loss += loss
#else:
# raise ValueError("Chosen loss function is not implemented.")
if len(hp_exp['loss_functions']) > 1:
train_meters['cumulated_loss'].update(train_loss.item())
return train_loss
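def _demo_batch_train_loss():
    # Illustrative sketch (never called by the pipeline): combine an SSIM
    # and an L2 term the way main_train does. The meter names match the
    # keys updated in get_batch_train_loss; shapes follow the
    # (batch, 1, H, W) convention, on the same device as the SSIM buffers.
    from functions.training.meters import AverageMeter
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    hp_exp = {'loss_functions': ['SSIM', 'L2']}
    train_meters = {'train_SSIM': AverageMeter(), 'train_L2': AverageMeter(),
                    'cumulated_loss': AverageMeter()}
    target = torch.rand(1, 1, 64, 64, device=device)
    output = target + 0.01 * torch.randn_like(target)
    return Compute_batch_train_loss().get_batch_train_loss(
        hp_exp, output, target, max_value=target.max().unsqueeze(0), train_meters=train_meters)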
def configure_optimizers(hp_exp, parameters, optimizer=None):
if not optimizer:
if hp_exp['optimizer'] == 'Adam':
optimizer = torch.optim.Adam(
params=parameters,
lr=hp_exp['lr'],
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=0.0,
amsgrad=False
)
elif hp_exp['optimizer'] == 'RMSprop':
optimizer = torch.optim.RMSprop(
parameters,
lr=hp_exp['lr'],
weight_decay=0.0,
)
if hp_exp['lr_scheduler'] == 'ReduceLROnPlateau':
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode=save_checkpoint.mode,
factor=hp_exp['lr_decay_factor'],
patience=hp_exp['lr_patience'],
threshold=hp_exp['lr_threshold'],
threshold_mode='abs',
cooldown=0,
min_lr=hp_exp['lr_min'],
eps=1e-08,
verbose=True
)
elif hp_exp['lr_scheduler'] == 'MultiStepLR':
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer=optimizer,
milestones=hp_exp['lr_milestones'],
gamma=hp_exp['lr_decay_factor'],
last_epoch=- 1,
verbose=False
)
    return optimizer, scheduler
| 3,890 | 37.147059 | 100 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/meters.py | import time
import torch
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
self.val = val / n
self.sum += val
self.count += n
self.avg = self.sum / self.count
class RunningAverageMeter(object):
def __init__(self, momentum=0.98):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if isinstance(val, torch.Tensor):
val = val.item()
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
class TimeMeter(object):
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
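# ----------------------------------------------------------------------
# Illustrative usage (not part of the library): AverageMeter keeps a
# running epoch average, RunningAverageMeter an exponentially weighted one.
if __name__ == "__main__":
    meter = AverageMeter()
    for val in (1.0, 2.0, 3.0):
        meter.update(val)
    print(meter.avg)  # 2.0
    running = RunningAverageMeter(momentum=0.9)
    for val in (1.0, 2.0, 3.0):
        running.update(val)
    print(round(running.avg, 2))  # 1.29: heavily weighted toward early values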
| 1,318 | 19.936508 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/training/debug_helper.py | import torch
import numpy as np
from typing import Dict, Optional, Sequence, Tuple, Union, List
import os
import matplotlib.pyplot as plt
def save_figure(
    x: np.ndarray,
figname: str,
hp_exp: Dict,
save: Optional[bool]=True,):
""""
x must have dimension height,width
"""
if save:
save_path = hp_exp['log_path'] + 'train_figures/'
if not os.path.isdir(save_path):
os.mkdir(save_path)
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111)
ax.imshow(x,'gray')
ax.axis('off')
#ax.set_title(title,fontsize=10)
fig.tight_layout()
plt.savefig(save_path + figname + ".png")
plt.close(fig)
def print_tensor_stats(
x: torch.Tensor,
name: Optional[str]="Tensor",
dim: Optional[Union[int,List]]=None,
precision: Optional[float]=6,
):
"""
    Prints mean, std, min and max of a real-valued tensor.
    If dim is given, stats are computed separately over this dimension.
"""
shape = x.shape
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    if dim is not None:
for d in dim:
#print(f"dimension {d}")
x_reorder = torch.moveaxis(x,d,0)
for s in range(shape[d]):
l1norm = torch.sum(torch.abs(x_reorder[s]))
l2norm = torch.sum(torch.abs(x_reorder[s]**2))
rss = torch.sqrt(torch.sum(torch.abs(x_reorder[s]**2)))
print(f"""{name} shape {x.shape} dim {d} {s+1}/{shape[d]}:
mean {np.round(x_reorder[s].mean().item(),precision)},
std {np.round(x_reorder[s].std().item(),precision)},
min {np.round(x_reorder[s].min().item(),precision)},
max {np.round(x_reorder[s].max().item(),precision)},
l1norm {np.round(l1norm.item(),precision)},
l2norm {np.round(l2norm.item(),precision)},
rss {np.round(rss.item(),precision)}""")
else:
l1norm = torch.sum(torch.abs(x))
l2norm = torch.sum(torch.abs(x**2))
rss = torch.sqrt(torch.sum(torch.abs(x**2)))
print(f"""{name} shape {x.shape}:
mean {np.round(x.mean().item(),precision)},
std {np.round(x.std().item(),precision)},
min {np.round(x.min().item(),precision)},
max {np.round(x.max().item(),precision)},
l1norm {np.round(l1norm.item(),precision)},
l2norm {np.round(l2norm.item(),precision)},
rss {np.round(rss.item(),precision)}""")
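def _example_print_stats():
    # A minimal sketch: per-channel statistics of a random tensor (dim=0
    # iterates over the channel dimension), then stats over the whole tensor.
    x = torch.randn(2, 4, 4)
    print_tensor_stats(x, name="x", dim=0, precision=4)
    print_tensor_stats(x, name="x", precision=4)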
| 2,560 | 34.082192 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/models/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
"""
PyTorch implementation of a U-Net model.
O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical
image computing and computer-assisted intervention, pages 234–241.
Springer, 2015.
"""
def __init__(
self,
in_chans: int,
out_chans: int,
chans: int = 32,
num_pool_layers: int = 4,
drop_prob: float = 0.0,
):
"""
Args:
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
chans: Number of output channels of the first convolution layer.
num_pool_layers: Number of down-sampling and up-sampling layers.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for _ in range(num_pool_layers - 1):
self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
ch *= 2
self.conv = ConvBlock(ch, ch * 2, drop_prob)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
ch //= 2
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(
nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1), # here is the only conv layer with a bias
)
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
stack = []
output = image
# apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
            # reflect pad on the right/bottom if needed to handle odd input dimensions
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # padding bottom
if torch.sum(torch.tensor(padding)) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
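def _example_unet_forward():
    # A minimal sketch: a 2-channel (real/imag) input through a small U-Net.
    # With num_pool_layers=4, spatial dims divisible by 16 avoid the
    # reflect-padding branch above, but odd sizes are handled too.
    model = Unet(in_chans=2, out_chans=2, chans=32, num_pool_layers=4)
    x = torch.randn(1, 2, 320, 320)
    return model(x).shape  # torch.Size([1, 2, 320, 320])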
class ConvBlock(nn.Module):
"""
A Convolutional Block that consists of two convolution layers each followed by
instance normalization, LeakyReLU activation and dropout.
"""
def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(drop_prob),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
return self.layers(image)
class TransposeConvBlock(nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose
layers followed by instance normalization and LeakyReLU activation.
"""
def __init__(self, in_chans: int, out_chans: int):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = nn.Sequential(
nn.ConvTranspose2d(
in_chans, out_chans, kernel_size=2, stride=2, bias=False
),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H*2, W*2)`.
"""
return self.layers(image)
| 6,021 | 31.907104 | 113 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/mri_dataset.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pickle
import xml.etree.ElementTree as etree
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import h5py
import numpy as np
import torch
import yaml
def et_query(
root: etree.Element,
qlist: Sequence[str],
namespace: str = "http://www.ismrm.org/ISMRMRD",
) -> str:
"""
ElementTree query function.
This can be used to query an xml document via ElementTree. It uses qlist
for nested queries.
Args:
root: Root of the xml to search through.
qlist: A list of strings for nested searches, e.g. ["Encoding",
"matrixSize"]
namespace: Optional; xml namespace to prepend query.
Returns:
The retrieved data as a string.
"""
s = "."
prefix = "ismrmrd_namespace"
ns = {prefix: namespace}
for el in qlist:
s = s + f"//{prefix}:{el}"
value = root.find(s, ns)
if value is None:
raise RuntimeError("Element not found")
return str(value.text)
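def _example_et_query():
    # A minimal sketch with a hypothetical header string; the real headers
    # are read from hf["ismrmrd_header"] below.
    header = (
        '<ismrmrdHeader xmlns="http://www.ismrm.org/ISMRMRD">'
        "<encoding><encodedSpace><matrixSize>"
        "<x>640</x><y>372</y><z>1</z>"
        "</matrixSize></encodedSpace></encoding>"
        "</ismrmrdHeader>"
    )
    root = etree.fromstring(header)
    return et_query(root, ["encoding", "encodedSpace", "matrixSize", "x"])  # "640"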
class SliceDataset(torch.utils.data.Dataset):
"""
A PyTorch Dataset that provides access to MR image slices.
"""
def __init__(
self,
dataset: str,
path_to_dataset: str,
path_to_sensmaps: str,
provide_senmaps: bool,
#path_to_max_vals: str,
#use_SENSE_targets: bool,
challenge: str,
transform: Optional[Callable] = None,
use_dataset_cache: bool = True,
dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
num_cols: Optional[Tuple[int]] = None,
):
"""
Args:
dataset: Path to a file that contains a list of volumes/slices in the dataset.
            path_to_dataset: Path to all the volumes/slices in the dataset.
            path_to_sensmaps: Path to all the sensmaps, one sensmap for each slice.
provide_senmaps: Load sensmaps or not
challenge: "singlecoil" or "multicoil" depending on which challenge
to use.
transform: Optional; A callable object that pre-processes the raw
data into appropriate form. The transform function should take
'kspace', 'target', 'attributes', 'filename', and 'slice' as
inputs. 'target' may be null for test data.
use_dataset_cache: Whether to cache dataset metadata. This is very
useful for large datasets like the brain data.
dataset_cache_file: Optional; A file in which to cache dataset
information for faster load times.
num_cols: Optional; If provided, only slices with the desired
number of columns will be considered.
"""
if challenge not in ("singlecoil", "multicoil"):
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
self.dataset_cache_file = Path(dataset_cache_file)
self.path_to_sensmaps = path_to_sensmaps
self.provide_senmaps = provide_senmaps
#self.path_to_max_vals = path_to_max_vals
#self.use_SENSE_targets = use_SENSE_targets
self.transform = transform
self.recons_key = (
"reconstruction_esc" if challenge == "singlecoil" else "reconstruction_rss"
)
self.examples = []
# Load the dataset cache if it exists and we want to use it.
# The dataset cache is a dictionary with one entry for every train, val or test set
# for which a cache has already been created. One entry contains a list of tuples,
# where each tuple consists of (filename, slice_ind, meta_data).
if self.dataset_cache_file.exists() and use_dataset_cache:
with open(self.dataset_cache_file, "rb") as f:
dataset_cache = pickle.load(f)
else:
dataset_cache = {}
# Check if the dataset is in the cache.
# If yes, use that cache as list of data examples with corresponding meta data,
# if not, then generate the list of data examples and also the meta data.
if dataset in dataset_cache.keys() and use_dataset_cache:
logging.info(f"For dataset {dataset} using dataset cache from {self.dataset_cache_file}.")
self.examples = dataset_cache[dataset]
else:
with open(dataset, 'r') as stream:
# files contains a list of dictionaries. Every dictionary contains an entry fname,
# which can contain a path prefix like multicoil_val, and optionally a slice number.
files = yaml.safe_load(stream)
# Go through all files and add them to the data examples.
# If no slice number is given, all slices are added to the dataset.
#print(files)
for file in files:
metadata, num_slices = self._retrieve_metadata(path_to_dataset + file['path'])
if file['slice'] is not None:
self.examples += [
(path_to_dataset + file['path'], file['slice'], metadata, file['filename'])
]
else:
self.examples += [
(path_to_dataset + file['path'], slice_ind, metadata, file['filename']) for slice_ind in range(num_slices)
]
if use_dataset_cache:
dataset_cache[dataset] = self.examples
logging.info(f"For dataset {dataset} saving dataset cache to {self.dataset_cache_file}.")
with open(self.dataset_cache_file, "wb") as f:
pickle.dump(dataset_cache, f)
if num_cols:
self.examples = [
ex
for ex in self.examples
if ex[2]["encoding_size"][1] in num_cols # type: ignore
]
def _retrieve_metadata(self, fname):
with h5py.File(fname, "r") as hf:
et_root = etree.fromstring(hf["ismrmrd_header"][()])
enc = ["encoding", "encodedSpace", "matrixSize"]
enc_size = (
int(et_query(et_root, enc + ["x"])),
int(et_query(et_root, enc + ["y"])),
int(et_query(et_root, enc + ["z"])),
)
rec = ["encoding", "reconSpace", "matrixSize"]
recon_size = (
int(et_query(et_root, rec + ["x"])),
int(et_query(et_root, rec + ["y"])),
int(et_query(et_root, rec + ["z"])),
)
lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
enc_limits_center = int(et_query(et_root, lims + ["center"]))
enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
padding_left = enc_size[1] // 2 - enc_limits_center
padding_right = padding_left + enc_limits_max
num_slices = hf["kspace"].shape[0]
metadata = {
"padding_left": padding_left,
"padding_right": padding_right,
"encoding_size": enc_size,
"recon_size": recon_size,
}
return metadata, num_slices
def __len__(self):
return len(self.examples)
def __getitem__(self, i: int):
filepath, dataslice, metadata, filename = self.examples[i]
if self.provide_senmaps:
smap_fname = filename + '_smaps_slice' + str(dataslice) + '.h5'
with h5py.File(self.path_to_sensmaps + smap_fname, "r") as hf:
sens_maps = hf["sens_maps"][()] #np.array of shape coils,height,width with complex valued entries
else:
sens_maps = None
with h5py.File(filepath, "r") as hf:
kspace = hf["kspace"][dataslice]
#mask = np.asarray(hf["mask"]) if "mask" in hf else None
target = hf[self.recons_key][dataslice] if self.recons_key in hf else None
attrs = dict(hf.attrs)
attrs.update(metadata)
sample = self.transform(kspace, sens_maps, target, attrs, filename, dataslice)
return sample
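def _example_dataloader(transform):
    # A minimal sketch (all paths hypothetical) wiring the dataset into a
    # standard PyTorch DataLoader; `transform` would typically be a
    # UnetDataTransform from functions.data.transforms.
    dataset = SliceDataset(
        dataset="datasets/train_set.yaml",
        path_to_dataset="/path/to/multicoil_train/",
        path_to_sensmaps="/path/to/sensmaps/",
        provide_senmaps=True,
        challenge="multicoil",
        transform=transform,
    )
    return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)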
| 8,347 | 36.773756 | 130 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
@contextlib.contextmanager
def temp_seed(rng: np.random.RandomState, seed: Optional[Union[int, Tuple[int, ...]]]):
if seed is None:
try:
yield
finally:
pass
else:
state = rng.get_state()
rng.seed(seed)
try:
yield
finally:
rng.set_state(state)
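def _example_temp_seed():
    # A minimal sketch: draws inside the context are reproducible, and the
    # generator's previous state is restored on exit.
    rng = np.random.RandomState()
    with temp_seed(rng, 42):
        a = rng.choice(10, size=3, replace=False)
    with temp_seed(rng, 42):
        b = rng.choice(10, size=3, replace=False)
    return bool((a == b).all())  # True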
class MaskFunc:
"""
An object for GRAPPA-style sampling masks.
This creates a sampling mask that densely samples the center while
subsampling outer k-space regions based on the undersampling factor.
It creates one mask for the input and one mask for the target.
"""
def __init__(self, self_sup: bool, center_fraction: float, acceleration: float, acceleration_total: Optional[float]):
"""
Args:
            self_sup: If False, the target mask is all ones. If True, the target mask is also undersampled.
            center_fraction: Fraction of low-frequency columns to be retained in both input and target.
            acceleration: Amount of under-sampling for the input.
            acceleration_total: Required if self_sup=True. Determines how many measurements are available for the split into input and target masks.
"""
self.self_sup = self_sup
self.center_fraction = center_fraction #cent
self.acceleration = acceleration #p
self.acceleration_total = acceleration_total #mu
self.rng = np.random.RandomState() # pylint: disable=no-member
        if self_sup and acceleration_total is None:
raise ValueError("For self-supervised training or validation acceleration_total has to be defined.")
def __call__(
self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
) -> torch.Tensor:
raise NotImplementedError
class n2nMaskFunc(MaskFunc):
"""
n2nMaskFunc creates a sub-sampling mask of a given shape.
It returns a mask for the training input and a mask for the training target.
"""
def __call__(
self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None, fix_selfsup_inputtarget_split: Optional[bool] = True
) -> torch.Tensor:
"""
Create the mask.
Args:
shape: The shape of the mask to be created. The shape should have
at least 3 dimensions. Samples are drawn along the second last
dimension.
seed: Seed for the random number generator. Setting the seed
ensures the same mask is generated each time for the same
shape. The random state is reset afterwards.
fix_selfsup_inputtarget_split: Only important for self-sup training.
If it is False the input/target split is random.
Returns:
input_mask: Input mask of the specified shape.
target_mask: Target mask is all ones in the supervised case, but ones and zeros in the self-supervised case.
            weighted_target_mask: Only important for self-supervised training. Must be used to scale the random non-center
                lines of the output and target before computing the training loss (and validation losses in k-space).
                For supervised training, weighted_target_mask is all ones, same as target_mask.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
with temp_seed(self.rng, seed):
n = shape[-2]
nu = self.center_fraction
p = 1/self.acceleration
mask_shape = [1 for _ in shape]
mask_shape[-2] = n
if self.self_sup:
mu = 1/self.acceleration_total
q = (mu-p+nu-mu*nu)/(1-p)
# 1. Determine the set S_low consisting of the indices of the nu*n many center frequencies which are always sampled
size_low = int(round(n*nu))
pad = (n - size_low + 1) // 2
# set of indices of all lines in kspace
S_all = np.arange(n)
S_low = S_all[pad : pad + size_low]
                # 1.1 Determine S_mu_high, i.e., S_mu without S_low, so only the random high frequencies
# set of indices of all high frequencies
S_high = np.hstack((S_all[: pad],S_all[pad + size_low :]))
S_mu_size_high = int(round((mu-nu)*n))
S_p_size_high = int(round((p-nu)*n))
#### Depending on whether the input/target split is fixed or re-sampled, the order of sampling needs to be adapted
# This is so that validation during training samples the same input mask as during testing
# Recall that during testing selfsup=False, hence S_mu_high is not sampled.
if fix_selfsup_inputtarget_split:
# If split is fixed, first sample S_p_high and then additional lines for S_mu_high
# such that the set S_p_high is the same as if we would sample for selfsup=False
S_p_high = self.rng.choice(S_high, size=S_p_size_high, replace=False, p=None)
                    S_mu_size_high_remaining = S_mu_size_high - S_p_size_high
                    S_high_remaining = np.array(list(set(S_high) - set(S_p_high)))
                    S_q_high = self.rng.choice(S_high_remaining, size=S_mu_size_high_remaining, replace=False, p=None)
else:
# If split is random, first sample S_mu_high such that this set is always fixed.
S_mu_high = self.rng.choice(S_high, size=S_mu_size_high, replace=False, p=None)
                    # 2. From S_mu_high sample the set S_p_high of size (p-nu)n
                    # (drawn via np.random rather than the seeded self.rng, so the input/target split differs between calls)
                    S_p_high = np.random.choice(S_mu_high, size=S_p_size_high, replace=False, p=None)
# 3. All other indices in S_mu_high add to the set S_q_high
S_q_high = np.array(list(set(S_mu_high)-set(S_p_high)))
# 4. Determine the size of the overlap between S_p_high and S_q_high, sample this many indices from S_p_high and add them to S_q_high
overlap_size_high = int(round(( (p-nu) / (1-nu) ) * ( (q-nu) / (1-nu) ) *(n-n*nu)))
S_overlap = S_p_high[0:overlap_size_high]
S_q_high = np.concatenate([S_q_high,S_overlap])
# 5. Define the final input and target masks by setting entries to zero or to one for S_p=S_low+S_p_high and S_q=S_low+S_q_high
input_mask = np.zeros(n)
input_mask[S_low] = 1.0
input_mask[S_p_high] = 1.0
input_mask = torch.from_numpy(input_mask.reshape(*mask_shape).astype(np.float32))
target_mask = np.zeros(n)
target_mask[S_low] = 1.0
target_mask[S_q_high] = 1.0
target_mask = torch.from_numpy(target_mask.reshape(*mask_shape).astype(np.float32))
# Create a version of the target mask where the random entries are weighted
weight_on_random_lines = np.sqrt((1-nu)/(q-nu))
target_mask_weighted = np.zeros(n)
target_mask_weighted[S_low] = 1.0
target_mask_weighted[S_q_high] = weight_on_random_lines
target_mask_weighted = torch.from_numpy(target_mask_weighted.reshape(*mask_shape).astype(np.float32))
else:
                # In the supervised case this just creates a random input mask with fixed center lines, same as random_mask
# The target mask is all ones
target_mask = torch.ones(mask_shape,dtype=torch.float32)
target_mask_weighted = target_mask.clone()
size_low = int(round(n*nu))
p_size_high = int(round(n*p)) - size_low
pad = (n - size_low + 1) // 2
# set of indices of all lines in kspace
S_all = np.arange(n)
# set of indices of all high frequencies
S_high = np.hstack((S_all[: pad],S_all[pad + size_low :]))
# set of indices of high frequencies in the input
                # recall that even though rng is used here, it may be unseeded, depending on hp_exp['use_mask_seed_for_training']
S_p_high = self.rng.choice(S_high, size=p_size_high, replace=False, p=None)
input_mask = np.zeros(n)
input_mask[pad : pad + size_low] = 1.0
input_mask[S_p_high] = 1.0
input_mask = torch.from_numpy(input_mask.reshape(*mask_shape).astype(np.float32))
return input_mask, target_mask, target_mask_weighted
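def _example_n2n_masks():
    # A minimal sketch (hypothetical parameters): a self-supervised mask
    # split with total acceleration 2 and input acceleration 4, for a
    # k-space of shape (coils, height, width, 2).
    mask_func = n2nMaskFunc(
        self_sup=True, center_fraction=0.08, acceleration=4, acceleration_total=2
    )
    input_mask, target_mask, target_mask_weighted = mask_func(
        shape=(16, 640, 372, 2), seed=0
    )
    # All three masks broadcast along the column dimension: (1, 1, 372, 1).
    return input_mask.shape, target_mask.shape, target_mask_weighted.shape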
def create_mask_for_mask_type(
mask_type_str: str,
self_sup: bool,
center_fraction: float,
acceleration: float,
acceleration_total: Optional[float],
) -> MaskFunc:
"""
Creates a mask of the specified type.
Args:
        mask_type_str: Type of mask to create; currently only "n2n" is supported.
        self_sup: Whether the target mask is also undersampled (see MaskFunc).
        center_fraction: What fraction of the center of k-space to include.
        acceleration: What acceleration to apply.
        acceleration_total: Required if self_sup=True (see MaskFunc).
"""
if mask_type_str == "n2n":
return n2nMaskFunc(self_sup, center_fraction, acceleration, acceleration_total)
else:
raise Exception(f"{mask_type_str} not supported") | 9,552 | 43.849765 | 149 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from packaging import version
from functions.coil_combine import rss, rss_complex
from functions.math import complex_abs, complex_conj, complex_mul
from functions.training.debug_helper import print_tensor_stats, save_figure
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from functions.fftc import fft2c_new as fft2c
from functions.fftc import ifft2c_new as ifft2c
else:
from functions.fftc import fft2c_old as fft2c
from functions.fftc import ifft2c_old as ifft2c
from functions.data.subsample import MaskFunc
def to_tensor(data: np.ndarray) -> torch.Tensor:
"""
Convert numpy array to PyTorch tensor.
For complex arrays, the real and imaginary parts are stacked along the last
dimension.
Args:
data: Input numpy array.
Returns:
PyTorch version of data.
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
"""
Apply a center crop to the input real image or batch of real images.
Args:
data: The input tensor to be center cropped. It should
have at least 2 dimensions and the cropping is applied along the
last two dimensions.
shape: The output shape. The shape should be smaller
than the corresponding dimensions of data.
Returns:
The center cropped image.
"""
if not (0 < shape[0] <= data.shape[-2] and 0 < shape[1] <= data.shape[-1]):
raise ValueError("Invalid shapes.")
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
"""
Apply a center crop to the input image or batch of complex images.
Args:
data: The complex input tensor to be center cropped. It should have at
least 3 dimensions and the cropping is applied along dimensions -3
and -2 and the last dimensions should have a size of 2.
shape: The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
The center cropped image
"""
if not (0 < shape[0] <= data.shape[-3] and 0 < shape[1] <= data.shape[-2]):
raise ValueError("Invalid shapes.")
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
def center_crop_to_smallest(
x: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Apply a center crop on the larger image to the size of the smaller.
The minimum is taken over dim=-1 and dim=-2. If x is smaller than y at
dim=-1 and y is smaller than x at dim=-2, then the returned dimension will
be a mixture of the two.
Args:
x: The first image.
y: The second image.
Returns:
        tuple of tensors x and y, each cropped to the minimum size.
"""
smallest_width = min(x.shape[-1], y.shape[-1])
smallest_height = min(x.shape[-2], y.shape[-2])
x = center_crop(x, (smallest_height, smallest_width))
y = center_crop(y, (smallest_height, smallest_width))
return x, y
def normalize(
data: torch.Tensor,
mean: Union[float, torch.Tensor],
stddev: Union[float, torch.Tensor],
eps: Union[float, torch.Tensor] = 0.0,
) -> torch.Tensor:
"""
Normalize the given tensor.
Applies the formula (data - mean) / (stddev + eps).
Args:
data: Input data to be normalized.
mean: Mean value.
stddev: Standard deviation.
eps: Added to stddev to prevent dividing by zero.
Returns:
Normalized tensor.
"""
return (data - mean) / (stddev + eps)
def normalize_to_given_mean_std(
im1: torch.Tensor,
im2: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
This function computes the mean and std of im1 and normalizes im2 to have this mean and std.
"""
im2 = (im2-im2.mean()) / im2.std()
im2 *= im1.std()
im2 += im1.mean()
return im1,im2
def normalize_instance(
data: torch.Tensor, eps: Union[float, torch.Tensor] = 0.0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
    Normalize the given tensor with instance norm.
Applies the formula (data - mean) / (stddev + eps), where mean and stddev
are computed from the data itself.
Args:
data: Input data to be normalized
eps: Added to stddev to prevent dividing by zero.
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
def normalize_separate_over_ch(
x: torch.Tensor,
mean: Union[float, torch.Tensor] = None,
std: Union[float, torch.Tensor] = None,
eps: Union[float, torch.Tensor] = 0.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
    If mean and std are given, x is normalized to have this mean and std.
    If they are not given, x is normalized to have mean 0 and std 1.
    x is expected to have shape (c, h, w) and normalization is only over h, w.
    Hence mean and std have shape (c, 1, 1).
"""
if x.shape[-1]==2:
raise ValueError("Group normalize does not expect complex dim at last position.")
if len(x.shape) != 3:
raise ValueError("Gourp normalize expects three dimensions in the input tensor.")
# group norm
    if mean is None and std is None:
mean = x.mean(dim=[1,2],keepdim=True)
std = x.std(dim=[1,2],keepdim=True)
return (x - mean) / (std + eps), mean, std
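def _example_channel_norm_roundtrip():
    # A minimal sketch: per-channel normalization and its inverse, as used
    # for de-normalizing network outputs with the returned mean/std.
    eps = 1e-11
    x = torch.randn(2, 320, 320)
    x_norm, mean, std = normalize_separate_over_ch(x, eps=eps)
    x_back = x_norm * (std + eps) + mean
    return bool(torch.allclose(x, x_back, atol=1e-5))  # True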
def apply_mask(
data: torch.Tensor,
mask_func: MaskFunc,
seed: Optional[Union[int, Tuple[int, ...]]] = None,
fix_selfsup_inputtarget_split: Optional[bool] = True,
padding: Optional[Sequence[int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Subsample given k-space by multiplying with a mask.
Args:
data: The input k-space data. This should have at least 3 dimensions,
where dimensions -3 and -2 are the spatial dimensions, and the
final dimension has size 2 (for complex values).
mask_func: A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed: Seed for the random number generator.
fix_selfsup_inputtarget_split: Only important for self-sup training.
If it is False the input/target split is random. Always True for validation and testing.
Determined by hp_exp['use_mask_seed_for_training'] during self-sup training.
padding: Padding value to apply for mask.
Returns:
tuple containing:
masked data: Subsampled k-space data
mask: The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
input_mask, target_mask, target_mask_weighted = mask_func(shape, seed, fix_selfsup_inputtarget_split)
if padding is not None:
input_mask[:, :, : padding[0]] = 0
input_mask[:, :, padding[1] :] = 0 # padding value inclusive on right of zeros
target_mask[:, :, : padding[0]] = 0
target_mask[:, :, padding[1] :] = 0 # padding value inclusive on right of zeros
input_data = data * input_mask + 0.0 # the + 0.0 removes the sign of the zeros
target_data = data * target_mask + 0.0
return input_data, input_mask, target_data, target_mask, target_mask_weighted
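def _example_apply_mask():
    # A minimal sketch (hypothetical shapes): undersample a dummy multi-coil
    # k-space with a supervised mask; n2nMaskFunc is imported here only for
    # the example.
    from functions.data.subsample import n2nMaskFunc
    mask_func = n2nMaskFunc(
        self_sup=False, center_fraction=0.08, acceleration=4, acceleration_total=None
    )
    kspace = torch.randn(16, 640, 372, 2)
    input_data, input_mask, target_data, target_mask, _ = apply_mask(
        kspace, mask_func, seed=0
    )
    # Supervised case: target_mask is all ones, so target_data equals kspace.
    return input_data.shape, input_mask.shape  # (16, 640, 372, 2), (1, 1, 372, 1)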
class UnetDataTransform:
"""
Data Transformer for training U-Net models.
"""
def __init__(
self,
which_challenge: str,
mask_func: Optional[MaskFunc] = None,
use_seed: bool = True,
hp_exp: dict = None,
mode:str="train",
):
"""
Args:
which_challenge: Challenge from ("singlecoil", "multicoil").
mask_func: Optional; A function that can create a mask of
appropriate shape.
use_seed: If true, this class computes a pseudo random number
generator seed from the filename. This ensures that the same
mask is used for all the slices of a given volume every time.
mode: either train,val or test
"""
if which_challenge not in ("singlecoil", "multicoil"):
raise ValueError("Challenge should either be 'singlecoil' or 'multicoil'")
self.mask_func = mask_func
self.which_challenge = which_challenge
self.use_seed = use_seed
self.hp_exp = hp_exp
self.mode = mode
def __call__(
self,
kspace: np.ndarray,
sens_maps: np.ndarray,
target: np.ndarray,
attrs: Dict,
fname: str,
slice_num: int,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str, int, float]:
"""
Args:
kspace: Input k-space of shape (num_coils, rows, cols) for
multi-coil data or (rows, cols) for single coil data.
            sens_maps: Sensitivity maps of shape (coils, height, width) with complex-valued entries.
target: Target image.
attrs: Acquisition related information stored in the HDF5 object.
fname: File name.
slice_num: Serial number of the slice.
Returns:
tuple containing:
input_image: Zero-filled input image
input_kspace: Undersampled input kspace, can be used for data consistency steps
input_mask: input mask
                target_image: target_image for training in the image domain. Can be center cropped, have 1 or 2 channels, and be an RSS or sensitivity-combined reconstruction.
target_kspace: target_kspace for training
target_mask: target_mask
ground_truth_image: center cropped, real ground truth image for computing val and test scores in image domain.
sens_maps: sensitivity maps to compute expand operations
mean: for de-normalization
std: for de-normalization
fname: File name for logging
slice_num: Serial number of the slice for logging
"""
# Convert sens_maps and kspace to tensors. Stack imaginary parts along the last dimension
if self.hp_exp['provide_senmaps']:
sens_maps = to_tensor(sens_maps)
sens_maps_conj = complex_conj(sens_maps)
binary_background_mask = torch.round(torch.sum(complex_mul(sens_maps_conj,sens_maps),0)[:,:,0:1])
binary_background_mask = torch.moveaxis( binary_background_mask , -1, 0 )
else:
sens_maps = torch.Tensor([0])
binary_background_mask = torch.Tensor([0])
kspace = to_tensor(kspace)
crop_size = (target.shape[-2], target.shape[-1])
#################################
# Computing the target images that are used for supervised training in the image domain (can be complex or real, cropped or not)
        # and computing the ground truth images to compute scores in the image domain (always real and center cropped)
#################################
target_image = ifft2c(kspace)
target_image = complex_mul(target_image, sens_maps_conj)
target_image = target_image.sum(dim=0, keepdim=False)
ground_truth_image = complex_center_crop(target_image, crop_size)
ground_truth_image = complex_abs(ground_truth_image)
ground_truth_image = ground_truth_image.unsqueeze(0)
if self.hp_exp['two_channel_imag_real']:
# move complex channels to channel dimension
target_image = torch.moveaxis( target_image , -1, 0 )
else:
# absolute value
target_image = complex_abs(target_image)
# add channel dimension
target_image = target_image.unsqueeze(0)
#################################
# Computing input kspace and target kspace
#################################
        # Get masked input and target kspace from the original kspace. During training, self.use_seed is determined by hp_exp['use_mask_seed_for_training'], while for validation and testing it is always True.
if self.hp_exp['selfsup']==True or self.use_seed:
# during self-supervised training we compute the mask seed even if use_seed is False
            # The reason is that if use_seed is False we only want the input/target split to be random, while the overall mask stays fixed
seed = tuple(map(ord, fname))
else:
seed = None
if seed:
seed = seed + (slice_num,)
        if self.mode == 'train': # the last option only matters for self-supervised training. During training, the input/target split is random if self.hp_exp['use_mask_seed_for_training'] is False and fixed otherwise. During validation and testing it is always fixed.
input_kspace, input_mask, target_kspace, target_mask, target_mask_weighted = apply_mask(kspace, self.mask_func, seed, fix_selfsup_inputtarget_split = self.hp_exp['use_mask_seed_for_training'])
else:
# during validation and test we want to always use the same seed for the same slice
input_kspace, input_mask, target_kspace, target_mask, target_mask_weighted = apply_mask(kspace, self.mask_func, seed, True)
#################################
# Computing the coarse input image from the undersampled kspace
#################################
# inverse Fourier transform to get zero filled solution
input_image = ifft2c(input_kspace) #shape: coils,height,width,2
input_image = complex_mul(input_image, sens_maps_conj)
input_image = input_image.sum(dim=0, keepdim=False) #shape: height,width,2
if self.hp_exp['two_channel_imag_real']:
# move complex channels to channel dimension
input_image = torch.moveaxis( input_image , -1, 0 )
else:
# absolute value
input_image = complex_abs(input_image)
# add channel dimension
input_image = input_image.unsqueeze(0)
# normalize input to have zero mean and std one
input_image, mean, std = normalize_separate_over_ch(input_image, eps=1e-11)
return binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num
| 15,090 | 37.595908 | 260 | py |
tinysegmenter | tinysegmenter-master/runtests.py | #! /usr/bin/env python
sources = """
<base64-encoded, zlib-compressed blob of embedded test sources; excerpt truncated mid-blob>
pWMMPgPZcdZpPI5QVgUrudZE5iZFbSSEHDYR01wzniYC5ZiCwNFDtKnkvJxWlIO+AOf39bbLa/QT
V8biaBod/xAUhpTOE0vSQm0qcyJ1uOY8AL/N69IlTiXVWQSlhKyHeDJHIjG1kvNx3fmqOjsaSVsj
i8CYuOQDOrehG02AzN5TdilpFJVI0NsBZU6x33rFUHDEZLA0d9K2U+Gkyd+cehV0SVb937gV3uUa
fgWVmAPL6wrP9WLj4ak05ryqjxp0zUrRq6/Fy3r6XNOJd42J65XngWpIHyZTdvJo9N4Ii6GrF54k
Z+IeOLBIEKtZaC9cwLIqb9DJXTnohOBThW3A6NSOMQFwnOPYWoca9gron774jKrWsSIyCTYfNEzD
b+9YI8Sd+fZpwVRXYyDq4sooqYrzma3Jvgkxki5Gyb7FZzkOS/kJXXybaIvFOLjPGrvl8H49FfYn
UPPUKnsFnk+ppi3bJJSVtyixUCy1lZtgBj2PTCGkDfN4cnR8ilkn5HLTxo/SpKs9MGU6dya85mYH
Fn3s9GwPCN/JaDp5BvCUUzlLUFyK66J8/zGd7uoMeA0eV5JaLgeVbBX3jeLQmkrDiLf4F2hWkt7H
9rGIz7xDlRTWx/0pzbhldgg1FaanboARKWb3b5cevFXLOvyifbbS4LVOddQnGrtvZKBCRjRgTVy0
bwwGqrZclvP6LT3ckNGwvx9CpeGUhI8frKV9hlodwy9gZfDm0NWd2msb5SMl4buRjIEWQQbKJHeG
Bf+YnXpUdmznEpOWE8lUYMFJxJUph0VhB1h/4O/3JD1g3IWcb6zlWlvE6mH20pGSxY9QddpLbkCk
i23tS0Nk7ZopcRgPqGNCcOheLJYlEVvA5mzgUJ8uw5RZdd3sofVAu9jrYxsCZuT4xtdGVxkHoiXE
9BUTVFCTXTyU0gt9TugcJlbMBu9aHx+QEA3beORe48WbecQAELenIIGUPkwCfSBUjgl0vupiR+cz
ojEDkCohVGjJc+G7pGHszMxtJ9fW6mU5dWznMg65CHRqkHDuVLvtpN+WiG1LrnWuLcHcZ6SU9HGG
uqraI77Bpeww7Ny9yDE1gF7yoCi5y3S1bQy7Y501vDCVhXLmMhiiD3xz6t088Q4A6Z7DTLXiWmx0
y/IrUabVDvlzK7rgITdSNBFKnbEvyQh+/XRobgx9a95Wlu7KHCV8VOLZfbksy2BydKMdAalq1Mnv
AvNG3kAsptaSu8ZuKTxVgmvnZJ78kCYRXbX6rtgoFwIks7HArh67+SMpEWe/bQv7C032zSBU0DI3
pFnWe9Jkoa/Mr23BkPE2ogojvImrRX0tcB9kB6c36umBfZBlgbhYbw4BEVMziDdFfAS67aNJ9OB4
nOzf2pQRcr1RN2ZqFqR5OuRx/BZcj5pU0uQkssZGkRDkxN/Mu3erftX2jnlvrlyBOPrQHcFbsm0C
Htoy0BsYr0D/aPXd75XcfwAeJ7+KcxBW85l1yjqJzpYzsT7jbAX5KQVnbIh1crr0s3xVXTsnNE6S
ITHoaHMPassxavlap8TLJnINGyZCZbJSiqutnmqpiezh4jtKACHwcptajCPEFmKj/QMAASzm2Xjg
vV4cCCp7qKIiXFerBqPo4AS43mMS9KvBDXxBGncITBcCnhcCojT3UiRTFvWJcbaoUEahexAiijZQ
1HRD1N0S6PbNztZpmyg+2ykwJjSTVviZtDaOzh5uzpY4O2QcpAlYpOgSn9KOkrUXvLc2eVqj6A2q
L55/cL9e1ifqGd3ZpVISfcLvpuIV4MqIyDicjukNUBru1cTqMPU5HoEY8/IKZn4V5v3a3/SPuEJA
zYappoWh1gSuA14B4+Q8L0MWcQ5RemNxBCyuL+hSSBNpwr3vU9X9uXEaCng+mtMhQTf/KJNnwzlu
Gh2gfb+4qitYnKEBZq6lrr08WxmCefet8yW6RMhGgq1Q+Au156QNs7hejU/xvtnMYVhM2TIZvdE/
9kySg2HL6kJVoKfxtMONZpZNCZ7HPS1Qf90WRFNZOM3A89jV2+1IQtSSNbVyMLjobVgCzSLCpq5r
XiB0tqkG/7revDxa+Nd9LVxmJth0PzoLegaI9oLqbNqGkyLQHzIf8U0DWFl2hFwnpnAZdjnu7JZq
XbMHMt0pRs2is8dbwwjtjwObo2vlcmGlsTRhKmRA8cmGjafIHASMPs+ojaPPEA5i2TuUezo8xpvx
+LQjTndw3M2ozkdllNWp8EMaqDIb7XBGyKUaPa5XElF5EK6uhi9DHofvwcsmrkan3sWb/gjOnZuO
/ySDVvcF1Uqd7nGG904wlBxIm/9MG7Zt4ZfWc5i0TUW7HRspyt6+LED7InInSwQwyCNPOdTp0i0/
7T0cQeUkbslkfoEyLTcvLJgvdTZX5W9DZh5lzSya9KyJu2jpDhAPFh5EagT80xmsOQ/Hk285OdI+
3uwBpToaDN78tXtxAgDd1qs3/9Pr737DFyfkGgcfr+HxBhKPus5iK0KIKwxXyt4SN9h+uhqgfohL
dgQNj5LoFZ7Lyj2ZKC3WtMfhHSNxE7kqo+Pkfb4iXZyjlIe+pRw0lSLXqmvd0XUO4glVAjFoVcCm
KUA1CV328G57TChUJe5r1r0PyR2prvewGiJ3LE1iSbZGcBFWd8PfAPjrBe3NV2WoXEK5BLZtsVI1
PtoWq2xRNe1T8n79GL9PoqfngGf6PRh88uyjr34/E1FNLhFflR8zUr8gQ63qLIEP+rjDeGYqCG2/
TJJtqyXJGJK9GqYh3lTkgbsj55cW5G4z0zpDD4kOdCtyqAza81VT51cqUUJoTPE6vRGWNzt+/Oux
qoY7iq5ohu0Uf/QI1Fd40eR4g7OZ/fJR8sjRD0GNBrawWDUTmUBoUxSvdNte2Ff2FKPBIHWcQMWq
3ieZcKNYwN/8rhSbgJ+uNrLV93oS/N1Vq2YOkzDA6cmkPe8szfAcs4ld1dtuge5xbeLRw1En+0ht
Xy/yBJgrEmCu3NcCtgOxJVUefs2e2u9yVpXll3lMfL8eY2JfG9rbk1iZypNIGvDsFQoCm0hwaQKr
gLfKiWyd9Ry60PVY9OcgZiTsC0iJ7HZRta0jvBYOIIhcicp/VfqnkhoNwZsTjiMowoaBnGMLpj14
kDRwJ6MjvHSm+jmFjYB0Irf6YZ25eKLPPx2WHFsgAY417jc4c9C+sdTqgQ3vN0NNGHNQ59ONJKbj
q/4SSgrLm9oYZbRFy6qLAG1w9VulDB4ZUpJ1nHsPpPcb5cmIgSFXmPSPle9rukY7ai3PHMqvax+d
YtVZNIyGndMDIAli5V6QMAzJEA8Rm1hV5/NBL047hqIO8ob8YZ1heVyksV9UnT5tW3dOnTpqZg+a
0miEd5Vhshzr+5puvGpeBUhjZMV4tANfOyuBOEtvsnNs7wFINhFKNrp8grFm4FPVicLrULSMDQna
CaQnr0PD7ShCjGLZ+BOnxa4PnFcsCdgN805Yn5PjU1/8i3N0p2OBdPQpnlI8Y2ehMRq4j45DWdSc
TvzqmGM4zXZypKSaGfdd2jLufM9eP3/5yjvsN6GHbiOuDTppvgVpTcyB0abY5JJflz2JUREt//9L
czRkn+L4ZQgH5lA4WNHiRXsO5xQjtDc7uxWSpTAVPEpOu7CaJBu8QQ8ZFHoHr858vAhjttyW6A5j
rQGx3GWfXFiY6XUyZ1IoqSy6ViFF+xFE8Kqfv9PiS+XoxR7xw/qr4STo7/NHPBMRhx+qd78Wh4EN
m2HpKiJ+sZ18lPdM54gUjy+QntNW4CdDcr7etDuGX28Vby8amNU1Un2pnSw+MNdKP/lMovH4dpFr
HyxHmATu7SACFEz2wDb+KW+3bAK3WzY/3e2WlQkDW7NcTy782uHBnn70E1rhSdIvDvH85hEPyW9D
eBm5Zw49A6i0CkL3L/rnMHzP5rD5v8ulnXu4KIvlDjm5hDeKYlbz1pgbAsOQsOM43REwXvaXnkNU
7yRTyLQ8+s/h2fYdpZKBfcEZnTPXzflIJRvRk4s6cJO3+oAcs7WocDhBJwQqM+dQUAwmXcyk1jHv
AQ4Hg9VudiCbn1kJUKzZ45EoD0J3lkKlLBF+RB1g5Imj9WhigeOHhPS2Bqsx7dvQ2ehMIdvDYLNz
bkFwrKwRvMegNKGR+nwem/DuSPA8EWMtdJQobDjxOL3yMFR9j2BU2sewd6Pfe6MCM2CyAsfHccV6
zTkZhz4OPSmaGSAO5mhN7M/8M8IQMc35LZmwOoxv7DFDuSoxvmUy5UKJNZGWp64V2cKesCF/yXpo
0+enZOEMXG7pn7ioZ+YkS+SPmDfhhz9m1uiyjJq1oyMQOxe5O3v7Z47iSv1U08dZcbqrsLewM7/W
tST0NjiW9ajurh2yILmsuvVhOWftopiyIZnnts5zdrFboPCDsz84YNna8s4/8lTXa2d5qpUZmL27
zpPlv0cxUGWRVQF3eC7Bt0hIZbHi/Rifd1paEq2crt2gEocGgqrmQFfseYDLcGKkTT/kOJ/jUSfK
fhl2bK2UbdCuEwv8/SaZOED34y6n6ykvzU/Gp3ejD8/kpWLOoI3vlKMFNQcQg8wDW55iUPx4p6WN
dtLdABlY32X7IOlEO26neGRL8B2JTGLd8bJ9Q0DH8NxZcDmOE7ytsqu2+tZT2hVa8O4+RQAcuRSl
bq+w/Z8izS5CbtdsRu67XfFPwLm1K26vNIRilStxsRz0aNz1PJH9kb0JlfeqF5JWORYqV9vbPDkD
krI3ZArAYoQMDIBGl0tAq+DbVShKvzt0FIMblN65yjR0VHhjnYOHj2IZCsQO3Ub0or9gADg81jXz
8pLfhmT+G4tViLA9cy+vWvWsiAuqtKcBTaJjHeCAGgUlmWfRYUAJSLT4aV82eKmqLjWoDikUBeGl
Y+aZ0aGBOnMLyWs9iutQFrLYZV1Wxt2Ow2KeW1Yg5rsLgqtxT80br1ifqbqX1zlWy9797OaWO9AK
Zn0vN+wCL+qe3I3vaH2WcayuNrhSrVhbtpmrsS3c6qRmJJVGfnAufYu3OZninad7ZDPfYBzRgnZJ
ucw/cPvXPmMHdo3lrVM1DiUwYz+vwBV99f3kJuEzOSveIn07OZ6edm4FGYvmF9DlH6DLTwqd+oGq
2S78aEvecx+6jDATH2Jju8A0t/pQ/QjGc1Wgb7IVl9UV0zEivYlU0HV6xynAyvMmf0OHiuQcPxen
obn6bNU40/4YHTcKE/SN4o5j7H6r+WA+c+SOZ2Wv27eTLDrMjkV8NkTRgb0ftQDVlu55VwzNEcFt
NlHlSlXJBSEbs3S1Ck/F89rfW3qCNIRpckVBJ4+uRsEwdQccSxx2NCE4t48Xyqo8kqvuLQcgJ5Mn
7+TqoCHsGtQ/FRPuYhwIoeZ2P3z2+BH895vp8Ofuic9RcLNdVtsy+9lHhoHB9EXY1PhX7Ebjd2c/
Z79flRR/D4RBNOn+fL2tqmskeulVkheTJLTCT9lden768cfPXu7v2a9Clv5A2dtYeYDfnZx2nOhW
DUWMUew9GKCQy/SFg6QclsiyjU9PzDXGfd6QnKGeGOK7FC6VLgmy39QHyASyLeZ6wDiq6NrXb4Wy
x6fzo5vWmTOP+3GifJ0DvNgu9pOdPR1mSbBPLIxbEioJVU08OO7xFpFDq7vlr8dNjhn7wccozuFZ
P1hvB4wFjonSI169/vUUunbi+Ubhhkc39szGJkGJ0LlKeYfJfQ3QXs3RHLqsLrflopPySNdqvOxr
nqJ5qDO5CEs9PuXeohWU94socnJiV6NDlIBL6EStV7uwrdN4sooj+PDtZFxcFCITL8RTtOhAAE7b
c9l2fL79GF/UPPccn6+hyLVI1i1ImCYOFlsr3RfCHBsNADvtOLGxsGuEXH7HpUHB8JOM6WsbVE4u
FHS8VvHbwI4xdW5uns7pWr6+XnUFqlGGRk9Q6s+q2y6lqpuof6jOn6EKZCXGWVI0DRUwaKB7UjEr
6IHjDIm3V+xeLaIoIeTCj+uJW0CdSOrTDcS+un/z7OknUIXjruMwsBYaKCbGhhOAmZxhMUWJXMCn
MJsY9aQ8x1UWyOJpXyaO7iG/xSQFHNS/Fhzk5IphTYrCxCxysEKu6UjTQwR/yD748t2pjViYRRY+
emrCVycSitXzTBUVfztus7NG8QvsHuh/NdzLjHUxOruesnIf6w41ZRnalImRemqaaBBHVzQEq8ub
9YrcWWZR78E5EHV0dAQF8ezcHJ8fyO1jGcLEhmsSuYfnblJ06CrJ4K+YQNZFWcCjZWPJEV55nZBb
WyzjsFeoJGi2RCESg6rzXIhTtoZYEtRCswkti48vYFcC2oN/X1RZ6N6DaoQSHGNYFpxh/fLZH559
BmLn/MXnnzwL6ksAmJJhjEKtVk2s2hmHTAhQdTC49+j48fu/+OCXv/r1bw749ctfDTDT6OPHH/yS
W7vYXKqGj3/5AdD4VfT4F9Hxr6YffKBvPiSb3UDuAmyqtuXjmd9vAeOT6OUfX6Dbe/KIUiVkBXpm
o6qVropzdLqfsAGykaPpLH/33XcJhOP3jx9H31YXZbmzEHL8y8e/ij5Ld9GjD6LjX0zff0ypXuZZ
vqgodFBDsIg7uSt+qkRUFL/00W9HrJ1Izql1kWUrdV9SsolJvDW5kykhfppqi4dqhcpOtaowIt+E
0p7TCsDcRyAbs8l61ZjbDTUGi+CsH2auRn8fvRf/9osPgfCffJ09GEcP8AnXU1U/SR78Fl88+i2X
aYrvcio0/m3kWsRH9B1dDp58ff0gevB19v3jH6IHJ19n01PVJnLRJ8l74/9tNO67ukLS+dRyGaWd
gy6ytrtNgUcvpBbQwuPl3kQXbbuZPnyYJImB6d6c5uoY5or++3a7Vp8eRf/HdgWTGx1/MH38a5h8
4PkXJsEx3eRQ4o3GXsIXPFztAYab4tKmOyDndbXdYB6+TtJIsd5i6ROWTLpHNFRIxRgaPRyFsqGq
yzCmPBrougVZCuLMGbfdtlNlOb2Gxh59g+VCpj1KCU+5BR9xuKEvRu4I2VqbzdnzHb1ueax4oHHq
YQO36zlTl8YJP478hPRAbLoIPoxORdJT7fNL0mYeeRnE8D4KG9KAheMDp5BdFw1GR5vv8rSWRpBm
O1BKdaut9yK8LAH/2Y76+ZuDwmer/IyIPL4ur95gJsT5W/wHDOaeynGGmgHpdW/ZlEQh6ccTvjYa
A6VYL9PV7juOm0PYIUbGCepU+Dlap8C8hrJKYTMfyNkZadTVtt1sWx2JrbmQ2/fYZYQCFDPkkfTO
JJeuz4rzauvlOVLXh1JQN7KUHXvbOYpvMwIvOac5VHeHWzqvkm/QtFgpVMKhklRpqoc7PtXkKrIG
JtHo/tlIm/Yy2AtuLZ9B+cdcXkJmOEWA09G4gZIvgL1PQVrYtnk33RKwi+F0SFaR1L962PHCNDRN
bfswYgsI2t+NnNMj7J/llS2dSz2y8zmZ1qdOhYkpH+7kb6b3P4N+3p9+cNqBCmcKITAi01yLQzEW
mvCsTBDVE6c/DL9B/+donbr+E27cu/OO3R7NJErSW/elXLmE6IhlnK9j1d7YDutPklL/fU2kBP+2
pnXDiMK/WKIeSnE0EQmfXsWjr159evRr/45SupAkbNzAed7qaBzxiD+Oxr1NaEdvaQXY/tPQroRu
Wrjw5w60bmeqzBEFG+zv0+7XabcrlrplzMazt3vcj9C95M2/f/3v1LVMMeyrKXnzH17H79DdTLoi
eUTxF/Cq3pHKDglKYEbnYMh93vzPr/8Kis8llyJPPUjEb/6X17/7C24G9ZF1ikFA8iNis2wcplBs
mJSjUoKyfXoUcUzKVVqUA4x+Thqoe/USM2sR6+a0iWkm5r2YOlHcmqQStRHWiBh6Ew85ThvIUxwk
AwZEzt1qACr7opgM5TCd6iamq9HRka5BxyfyeMTPTDuzIcalyofG4LbO2/QqrWdDnNuhDiXCtgfD
V2B74yKkCSkc4i0GQqFgDbpKADwbF4uqXBbneIDNvwQXZnBo6cIPCY/DzJuI1JhBnCLAgNLDpglV
E1PzrTAFLC7CJopvMh27lxRrVY5zMLZWdm7sD1BEtYsStj7bWQyqkIvYjL35dTtQ4XqE5/so5+OG
erYl9cVJMYwDmduD+5J+g4amu5X2x341nmXQTdLzHM+jz/EYSkFrNeriFwSO2zGsnFv0yOc2qTjZ
qfWHqacP49wK6DpsuDn4W3UHv39821KP0B8Z6LB5XSyI28RWnnrOPj+Lgnnhz4XnndBL2MISk5lw
2chH/Y3f6I9eri06tKUoO6VUObYTqpf5tW4Ri9mtKduoKTKT7juWAO64y07P+Tat3OabjronJAGQ
90dscZtM9obbd8s+9LrXXTvhQ0QbImQ4x98qZdijU8TlKD4ZhTpDuct04wDg+kMYtCv82tv/SBzd
zuXGEO/5ZgE6uXwCe7+/QD0pgD9rfuWpUpppqF/EuYA/pHgm4PILDQC5t8+hxty+y4Trmg9HVpQo
GDdohKo8x8vIdjCIXSK5pBO6YDmPh2RnIzufXVe1OJ5wIHQbZNeVTB/0SW8sj0sChanvhen3Ln1j
8WBPtrV7LpnWtdV7Q6GKbhmyGonF1FTNEZJfkTkMTQmhqlbwJpLVJrflL2gahD9Xt8ySFVWSGXW9
LekvtMC9OAN375YLGBSK690ZZj9YrZRZmL7g9Y48uyVGTWNCsBIDvqiqy0RBQ+3M8TfGW9o2gsaZ
gGMx7yznA2TbfQITi9AHEEVvvDWtUKADRRPEBjHO6blu5XV/K6ORW2cPDqxajnHc1FpSFuQfAbF8
ldzbd2kIlM5xh2voZaDI+DY6WlSrFbCxW2mIg2z2Ymo/RqyZH3467IaYPRBh/TuSHNW4yOwF4eVe
EIZ4uJFNhfl4wHS1pLdBOgVjAQmdDiCVa8bNwmNO+Lio08bmTlKKXIrla4dDdRkaFwQlQKTyDkfr
52bS2HBxncmRj+v6Pt7P1piljd4V/3M1yPHgzX98/deWesUTiAI7xnl587++/k8j0rG+gqcCM4PQ
dqJLATLPtucY+MTKT7yhfcPWorRqTvYrIXVyopYyL8U9bdCX1FoVwJ24wLinH/Ee9VRBQmXZH1dt
X/K3SdxCg61bbN5WqP8NBveiVxd5RKFBOIt4bjKaq7hAFP1GZUmXLAriSICBVTViBvc40g8ICxKz
hoiecito5NU5TVPNgcNbDELEx7isGF6nzeAeZSnKMzHLRsogsqIukfU3HNoIilBU27SxMxNA/U9w
hp4rWEBEHzgjlMi2LKKzcjzPb0AtLgns2PptrJMUBYkL85VhU2ggednh2wokJAoZuT7LKa6suLWy
6zw7F2doALpGt3aJyptKhDn0GcytaJPT6Ovy+wn88wOh4uvyz0lE88Xn1u11xQ756O2RCc9ZVNgu
dIinVRaMDZ/kKKsuz4qeabug1hskGQuoowlFS51/jNmZQPDiJ2MKGY8FLrJBUsBh0wrt/PjO6gPg
yzOKXwTSSWnywhCm0FPsPKF6ktYQBLd0xzE8l0vPSGu3OotoS0k3TT5foj7hzOTA9iKbkzDYO+sD
5/B0LjTCsqM4+FrS+pZd0Vhk13a6Ace06AdIk9bHUgYHk5Z80wJo2Qr2+bLiVUJOyg3FvsB7CUPZ
e6gk0Mo1kRM9AdhJkgDpMJrO8DjPQx2XVqZYDrqNupClLIgfgSppQS9unqpn1ScweCrscHip3nXL
dK9osFHMjh9PoWNgS+OIg9s1ZpV3cHhCEE5Pu4fTC3JC+H4YCCpP3TyYdeLZ61o/9NY68mtZEgqV
CHsNHnYVxWXZ8XBbnqUr9BzJ8DRxkQM/kMTLFgbsW1DsuMGT9iAqrHjqMn2lE3HDxiNWPYrknBCI
2YuN4iwyZwKm1B1e6epMy4PjD6bQLp4+PtjnJ+HB8eDYz4zN8CPuf2UvO4cD82rbv6r1gvvSdzDC
g/GrItvKQXCjTs87TJ52AYqy25O2kCDAnROw+D0aL2EJjojbwq8/S0qcpxhymM7e9fagchjKFsH8
GSTJtFawkD6bNjpg8Aq3UQAYp8vK+OSu8Tq9niueZ+MCj4SAbY2AdfvOtPpOhK578kjuQJBajfCY
T445CZ1CCPKVmElORoSDH/CfP+M/T0an065brzKRrPZZc7g/oiakURz1g2hlEwSVUKQQ4Nl6/j+l
b4RVf+a70ww4W/CZpBoMT+efbawjAeAJF/rjKFRBF+QwBjg/3+JFWx0ahZPORSMmbPQwgHaxUfzJ
rcuSoRctJZRt00tsbYFbPpOsjg2YorhPLV+nO0lP1aFxgQhoi3Hu0ona6xjLU1GQYR0vLpEWHlnP
C7pyol45ppYwSWDmTSs7D6AxEERVWsYJDpzLQIdAroiZKBodopVJDcawV4P60jSnDePjQCmBqLtd
qK+qmUe+V7Xt3wSARA9GGPQIfwE478WcegjaGB8d44eGgsGV+YnD/ThOiI+/H3z88YBc26MDZPcT
A3hCJXCENEQFwL5QdixxUEm1xtXK7vYdxh2TLhKbRkKQTv4MKEMI7HJHXltdRCN6pQcfpQp409xM
weY5l4ES8QVfv6KbkhInTme5f9+od9bnmfWgtLoX6do+2XZKww/hVgwYBWgUJUUfcFQbNMst0ccR
I6p2NjG6ZpyuMEOBI8ZzzOAK03RvcnYhe0i/yqzREmCRkYb960eI1w/gH0RNtUE0P8YTK3hHwSxt
bjOJjlFsx/yZNd64gfZ53wAg52LHAFWzqJImRSPxpo4ZfgwIWnyXz/BAlXp++FiWHI2spy59M5Wp
4hGFzlH9SSPNdr1O6x2ve3bSvI8R1ciALEUZnaY7MY8UTWPp2jpCd2w5uN9MoJkCvQ7aLagqE62d
j8d7fUDViZ3blCEDqY7HR5RE3PTeW0MqoKvPngr4eaxHt69k3OCF52VdfZeXDca/YKQon1l9qok3
H+SAVr4JX3XFQ30DxHcxqTYkXc5m3axyPHyaJ44WLi/+P/beZM2NI0sX1LaxqP5608t7XYiKcjjp
AAcpJ5QgFZOiMvkVRekjqZLqBuOCHoBHhGdgojsQgzJVi36JfoRe9LLfpp+mz2SzuQeCUmZV3b75
VYkBwGY7duzYGf5jk3y7QDrFd+FUtyAbrH1ts1DONxvu9Km/3LNfOavosOW0t+cItl7n2w/q9w6T
Jj9Cu0/64u6d6nbae6UeZccZ6e0X33Votlrdtu3ChnWmkHEv3tqR0xtwkIHygNCqK1ZRjSOZi+fl
tqgWwHJJ/zxKPC9W+AyXyQmZzwqdVxvdhouEXBRuNDrvKEu9EKId5sNGRdpIz+E5BcRk4tokz09r
OoG7oEmjIl8cCbP03m1Kdu4+T07MhrmIvPeSjk7AxuBls70qMTICudy65nT0PKTvVgt0gBkOrVgK
0jFutRCOSnW4XxhnEglFRY0rY7qSwjmugpRieF+RZ+92nVyU5caMBWX0ZbEQOVlyy5LvBNWkoeF3
N/pRhpfRguKqYIxbyRYFM3FlaFL0Yg+E3sNK3hV+jjBJ8cd3g6h4sW1tLg7F9ueAAnS/6Vs1A+EG
ZKxsJO+UQY0POXp2plmkC9nJjj6ohOmEz+I+vRDOHW+hxfZRn4OSxJ8QRA6dEAqS506JCrgvPAg/
lvUahY8zy1ZJ2h9+ba7OysES0Xnl3s9yklR4cJF8Y1jmqKJ0qVQG/r4NIATXKfk8+fSx1xjpHR4G
97qRD0nGopC7622H1uQIeMprNA4xarBFoIqw0+5gktSic0oCcXqa40FJhpekST1fX5G0U7lyuNAM
r8jY/U3ttazR+DgAPSTWPJlYqx0mprD2SNfJ4pG1OIihvS/D2MaEm9OxQf4m7bEx/ubc79odDVCR
7hHr84F75O/TeBgp4G6WLuJPRF5RBDAfBEjH/COIRdGuOR4S+way0nCcmhJF2K6l21dsci6aKP/8
EBaxD3uIUMeTLT2CrpE6cMVQGYsNHNZp9wsA3h9VrrrMdYfZcYsq3Dl6n0dPXmR4L5B8dGL0JeVI
25bLJhdjEZyBuqCvxvQs6rUPN/J4OzKjOFZQYAbDlEb62V4j9aWsV0TUdxi3F3IXeywe6UHpsYa0
iIzivi3psAV3tGE1mZBQ6yoduB+9ykJ0UXrffiCp00gtJgFPdGrC/OgwCHrLn24VRena0a2x1CfP
9HprJTj1BgMfp5nrYaVQ/SONt3TgbRd5RGbOGGnwHzBInvSto/Sav/Mw92Jqrc+j23dZUrOit5PQ
4Ihs6g07DwzMI1GUHhIEhj0OLoTNXBxzqPgFCVzcouJ6F2Ti4XNycazXvlEB71EpLsJvvllWrN12
Lks5uuGN13qQSBeHnpEWU8HPnZ0/lTlhb7Dfx21lvWNJ/ThXlT48suIXXev2cWTd6CboGuqXFBhK
ysNgtLovt5VYSx5F/vlirIb1E2qPUwIW6TazJZE21Hx+UsuPB0txGaOgGCaG7nraeqiK3nZa264m
56TaVSUriLd7bpbMdoI3A8vczc6sKSpeaeYlk6R67hz3YkmtF9kvMktNd/40hRdE5tnOqSy9CYZr
5GJfivEolngm/Ngliz/W4A7OMYEp/3Q0poJMQ6gKUd/T1/fxjFO9sWJyNUKVTEXlSQ3dp3r2ebTu
ZruCN1wey6q8klpHrPytGrUX4iQ0JrnbOwI8faVa/vRxFpq03KMZsVJYD4fUf/bgEKrVrryl/jD5
0Jr3w5qyFJ5FRJlCHrvWpSAxnlvZxCWbRNqk0Hj/X374O8uDjhXE7//rD//n/8HRSSoTDUdrbsQB
qVpVQ3Iox5+wDlVGcHEOgoIlZHw7UZ4U9Rmn61HJ2s4X5bX6sEWXiJNidqG/uNmUjfpwVdQYYtPY
HnkHyZffvEy3OnMcjZ09yFTCoGrLaXu1k97sZoZhVfV6d7IwrVNuurWkm5NFUDXIP21TziQgn045
591bwzmXnHzsaGVXhzUkbFVq41vSNH7N8SVoi5IuaPN3G40D8FTWkF0Hv4IDhLEzWk+YdeVyPCe3
T4KRMTSgq446KwQZIOMpy6Q4OQ7RX8KBloVg+AosDCtWGxvZyIKBKa9xV8irtThFz8FNWSOzZF0g
EJWOoaM1qncr0eyNoQsC8h1rG7hDl1aCCFNexmKqqIg1Uto2Cot3t10PVdQPcJj5rlbaSB8vDPa6
WFQ/Ms939IWOfUSHZEw3wJpAkOQvBgxaLKNyVNrRzbeuqU7QwO2VtrS9kZC978kvc4B1mu3cwV+z
GaI+dyPtOSk0M7hXjkKS4t5GWJ34icp+l6Gv95xhfXt+wf6zV6++eTWGVVHY2ugSCrLm2xU7QI0k
BiRsQqjm017I4VRsbRhDMFti8GU5Jcrk3ycSgqaCb6QIQbeRrL+tTtA/+IbCgsggypHeHNmFoQjV
jNHzBvidbug7BF1nFy//pBIQNTn1sqUZ6XlHGO30lYb7x3hNpBOkLn479CTYcSqUgu4+PO/+sqgv
eFQmOHO3WuFBIlt2spmfJDtoibqbFRtCIW2UMkz81ftbRv8FmfiivNkQmgUQKfJZzNCxLU8qhggR
Ql4R3I62nZwBE5zBtm91e3/CLhFPx0T3zdczHEM/06HzPQGCLXH7ZWoqeEeM+Ugcai04R6V8UJfY
WalWZSoRewQGz+F54U9jJfypdgL60R1wkL0Edza7E1ZmEWLAgjgFrTong2c85iI5hemeu0DvTjQh
bNy3tPHOHSAvFTfuULOMp8wqnF8zSWKLPr7kblvOv9DCDd1PKNy4RGOFYzkd8a0kpQZY2REKnMJm
yzwm1s7rYa0JLT6IARAMeeRI8Oel9kwRa6Y2agiPtGMDsqCVI4wBIExyqxHPMs8NDcTCjzdAFkai
BOVD2LswcxXhZ6p0gxhNoKDgsRPt10ntZQGWP8k9ciYMqrpPNjFijtrhg91WZCFXXbWKFwkJQ8e9
8tchMpxTOsZwGbequ5uJ8ymnVZmYdWi38LrdMzD4tDnfbefrq5WNj0058xSwQuT0OZ/apSol+jWT
I/Wn7Znc7DbYVtg+A4FlRuYyDem/Mj8brKy8ixyINne48leXVQ3SBsHsf/uvb569fjOlTJa+3xjc
NXK++Mb3Ms2D0MWOSjoCqMboH/UDGgB321MfaiGKj8w9scAx323wFcCtqcYm6o8oUHJkc02QUNOE
kVEkpYzq9Xo7glc9h5xghxwkFAtFU5HndgBt5stnCFIAe3SJJ3vKkgper2Xd9F1dcR9WgfO4J3T/
FkQcfk5nHY3R7GbnjNCMbwY3RKwvx1KQCmoxSyN6myQZwWcLdfagaCiz9g2FxDCKZznqZz9nFoQI
8zeaBPaFc1jQXRmfAsWU8OtOJ7ufqude8IjG2eKPA3xATPrPH/VzlfdmIpVQTa8wVRDNwohi/Jl6
jD8cJEJJXrW0ns0oMVm2IzyCxDl1F/K7Bd7HsYTXo2mxWq9ulusd3gHfEPn/gVE2ZrtmC09HhaKR
Cw7HxGUG3AiBazQuBKBkxNTdm0Sq8MErxfLnhAfu/Ybgg+yF9uefwp9ILhbxVB833Q1rswnjyhaB
HfQyUzqw8AvExrxsti0wLlbtgXRhxqERS0xyAwQMYRmV0EX68tyM4e1irhPMtCT5JOBgcG4u7iah
XRoZ4KEx/jpmaAFJaCglGfnEFLRGMKYYTntMRIjDIeHwMcyVVZPGavrg6AJqPmdvGqy8ruesi/Za
0c28IShAvLuhBoO82N5Kq+TdOw3T8u6dpIwwfu2NDZjQVGergt4SUHW8uRkjoxi/E8gY3Ywu/5mr
TRp96xX8/F1ystsan5xzzFbN1h87UnyDlpLLUkYv1iFBBYOFefdOepE1ePcuDm+My6WbsE9SQIm8
hwpCeOXYLjx5iIoadAgB0bGPdkCILWe7isVJndUbN1LKHncWGbgZNpPPLS4ydnMjEA/h+TWo7j/K
eSaB9Mez1UfO4PtIPib4RPmYULQIM4KyfANHK6JLtM8WtjJW54lYTo5Jv4VEavIzopMk3NI+M9jx
WGxfKppROZEh2RC1T2XUGRC8vusk1NRaqndKW4nJ2+qivrF//EzAJOGFi0Ce+OwereuzB48fSOEH
qvbofLtcfP5uatcuZij9NNasn5AizFkZrF2ZaZIrW3FZVAtCNZJQXtEpeLAfcsQvqwJm7EIYvXzy
9TOYN4dRvHsnH8lDb0fBriBdmeQhNySNkWIOCiNvhsJ6ZXM7qlRXsjjKoD8c4lYRdlOznfSxO/gw
Go2ytnPq3ZQWipRHX9YdRG8OJkIvE5ijkZ3Cj6jpWZRbrZkFgcX+2h0GdMiHVcNpwkXB37jZkewm
Bm7VePZq/TMD/WL9ZkAv2msrYw1+e2znlzbde1lSbp0nQVOrL827wJ7h1zfMuL61enB5m0mJIh/v
C6S52a/jOMPtZLVC4SG3QraJrxUqZV+cqmkX0cd6ZStWbNYZD76S83j4A2wxgmCE9+pGoavdMkKy
sXA/eLqIXQ3ivjiFKUYE3FZMBu8MeHBvRYSfWZC3JwjmsKguWHktW6ukCkw7kQxYYUa+bjZYUnxR
Bl89f/Fs+s2r6ZfPX6EAhY/y9F6ajTTVTFqISKVWV816JxN9ZJyLgrUsgZRIheWwK0Ljo+1mnjN5
i0hAZ9R7U1WDpo7IXOtDAzXy9F3LbWy1FF54UtZuPk+sdfKZ0BTB4Fd8mqP8aD+OYmu4An7Quuyj
YAjUuYbMlDsbBHlHSEahKSdjm7xibJi91lt8ZZ5J4RUeisdY+LKoK7y7TDnsdExdq3KqjL7yQbwr
tueoVHv3Lse7BaYEdxCsyLt3eE3yL47ETaMf66yOQiSo6/PHrJJAohgKV+D7HQjTCNzviM9cnwI0
zRwaNT7YBKiDiV7xti0E1sJkwTJysgm6wR3g/A++iPzULvN5yw0penJaNiAKAXrvq3VCfEZcJfxX
rVCQqFUeekcERIe2BUMFmgCylhegi3+m3tpPhI20GkI4iLWguDbUEK90Fa09Jys/g0TzG0QylMN2
i/cCrFMjKbC8dPBdb3TKNxvwG0ZtpsTh8N/g5ofC02ouMELBe3M6ha+lg8hLV9ePZqHpC/kdNgoZ
aODWyvXg9s1pgzPwt0IvPX9NmpxltaxmjWC94GsaI+xOyvPissJw5/Wp5icjlgb01k2BOKbLAm/V
P+thpNVqm44RscQomlLWisPX8If5+iedp5AEWDb9kkv3Ger20Apcree23pq1fKwMfPOv3z6bfv/k
1UsNLdi22ffkzRB9hxDEKPJqdjTc1NUlqqYuMSQSeyafgVVi34sR4RTbhXHQv36CFXyi4AsjqqFB
YbjtR5SQVaOSULV0ch8q2tJLsUcaO9IKSKNHKX5Ko4nm0kM59GkiZr+4rKP0cqyDa/VOS+Vdgtja
5H2hX0NJX/XUV5mWT8p2T/xUZVNBrWL/UKVtyeDQpO3OvF/tUE3xPY+1vRiFDBPKx+STqJb6n8ub
lgx+jpI6TI96Yy068tTU9mjsaNppNjzwkudgSa5pbJOiZMRsYi7gpVsuFsC/14iUflWstqyL3Yoa
NwzX04YvGKQTRDS1wlOjkR80xQnGR6yrWSyH0h4U+wGERTRBN582uGFOb/1GzDgJhNjlDutRd5BH
mnyF7IbglusznUWCGysY+oLwjprdZrMA2SC5rT1N05yKorx1AAP1toCbIKM0uLQXxMRuidnYk8y7
Sd3QltZ3lNcbco3hGbBD1VZSFeFDs6U+ofxbspz4+7NpIf7WsU8IuhjCHwP5kgmrSY9Bfo2lN2zK
/4Fo7j8t1fg7qESPkZIWjuDf8MY5YBU2ubA0WzYao9li+Ln1xVSY/RlhmMZtArQtbay2A0zEqgv/
tHPyAwdTjSRPFlO3DMgGMxCZfRoxuFh3O79F9EjVbfsh94IIGiWJGVMmu2bAW+/7NmjJYhxFzzcC
ScsaiVDiFYYjiY6nOno0HaLpeLoXjktUTIn3Z2Qp7PDR+DgS9MdIi+iV3HJTW3BczgulXeRA5xqE
2VmzZ4conem8pWLZN8Ina3yChNZWUhdLIrzvL6T1MCeVkNcOHJPNnEynqxtboY3wDyc3MROfEk0V
gZHeleS51ElfYhNBSB4sg6pcXqqMq4EhPyQUlUnnfrskaonOR/jfY8sNQVQl8GdUDNPAhPsIY5q0
LUGa5u/JMYUms3bhGMsUhsq1jIyZSowo6pH9gV8LtumMq+BfQfnIILCBODHxk8M8fXwuoA2w3ruH
c4li3tCa7czsh21SLZBIiRzjxH4YEjoZy0QM1Es3n7aLFPiEY/dFT3uacE6IJhLIjXg0FF74uC2b
5h4Hta81A/xI1ZfvOOnHKywxXh0vWIQLKgRt04ruJTsFXKQ6j18EcEMPHiTfx+M23EAsglBKiDw4
ZGhs/OYRBRzBN9kvwKT0/G3udPsqOCuhxLR1vUyGCBtzTXwG84zPUbeMi5OpJWl/RAVLFXsMK2aC
q7fHJeGs5PixrKW1mI//KotpmRXvspYM0EgKq+EQjWKcVxtZtFrMn7WM+rpwVtHSfzDUSOQWgmnh
ZaDWYZCGbNLsUaBcwtoIzWeXwaA0AkwZ+NUpbC1PIl20CBl2D6aI34H+JWjfqk9XmFeTrqzImFSq
FWaVLDNmrSNjFbXbMn63R8vqtmhvXOuq/ZHzzRLpgisfDUH6ouc3/EwO7MXiqrhhJpaUq/Xu7Jwh
TYA6B329/f0kiw1lopodDx8fx5Y3S8Ocnfij1jhaHg+tzpRxlxxxhoi5S6nMHY4PiuystgRO7BZj
OtyYqk2bYHx7VcyloUudiBUIAYgHsxVU3zPPXwjV3cypS/TDuYT7RnwOCofn4EsQ0d9OUdNQsWuM
McaXpIWoy0Y9nRA8G+SFUeJkWoJ77WpdzxtOtASfhvzRTyLbKE0fed6QkdAuDvf64iYn04hpEyU2
I2qxGwMMBtZmNb93j0aExkhMTjUj0bOSkNeqUfarkWoratvQ1j9Ns5Et8NWxavZTrc5Spjpac1g0
oDCOgrS45p32+q83Lg52iQzL1LK96iJzChzaTYnQJdzIZmQqbeP+xilPJBl44lmSzKhq8IqrB53X
r+0vT8UJK4i6VGcTcbfqy3LeD58lG8t5M3J6R67noTLQxDiAdW2yEUe4lufwoHXUao+/tTOkRYPu
rN8tN0WPu1hW3Vj7o6hDqUySvTVdsQGNE5T0jLY/1ziw9ZSmNfmyXm9eE7OpXwBz+SMU/UoVCQzW
nqlaTMA6AipiBC5QumFYZMILwJC6dVMpjamyyjkWSzL4Y8iHY9f3TdVWvw45YMWQklF5xjzlMkq7
FCyIoMv1maHhOLmyEVDvz3SQYgqw2fpsVf1Yzs2cOpBWOD6SaB1bO4QrXVI+4QA9TEn17uVJ2/4E
KoicKvmXLhZXtNu1yYaS3b3X9kC5ipj7n3r+dGwn5PzXdAno4PTk/GZzXq7k/TlMFBQ+MEBsAC4A
pwl0WFNOpOopzw5jdlvclICeSipAS+m2LDZaBGRkZKA+uCZ3Nd2E5Jo2nwv1yYLZs1Ht44xnaOY/
NTkaeHb4DtQ+dsjhVupptBTFLj+xBAQTpl3MznVaQRySeBCy0531ZEZfACQf9eJ8zJlkHYO4DW3N
DU1NXKA6mDPPcr0GQtjWNsk6Gz3as0EnyoSbpAWgP/HMfMxnBuRLWiljFlEHImaO5uperidN8TO+
xNKpRq8ORxlLkwXNRI3fddl4lzR2x2NQSOh54lpR5RmPZfkpT9MeWF+jzYF/wcNhff9Ivs8CNbEh
MaTxdHiei0NymtDnYXEyI5GscHX7PPtR13roCe213PjtlIKdHcFXhAo24If74Z0zGOmff3IJxNSP
5vZx2rejB0jCoCPpfO+62UUd7JytMvvBn49YKwBsNpYeygNbsIQjkXxsWOrhMM3200WlFmuxzgG9
/TGHB+ooxsnRYXOc7oW2dqgMOVlM53R9rcVONWdPBw5zUoWUehfPLeOzWlsSUcfq18jENOHq8yM3
HcpmuqL0Ym0wbM7bXhsmkC52pJs4VihjagQt4mS0rjVwS2GOHgfWiDDK4QRxSk8r4sW4SfRKK+mW
wPcx6W1QR2NxkgP9pLkqDdgoY8yslQuqhF5wOnPLYeQWEk8Ofhkit066L+BGQ2uORGdgVoeU4y1b
n92hQ1OZFBITGyBnP/aG1VS2Hd1JKP/c3paW7RWYgptZE5/sJJJw7nnOBEHC5EzwSiToSwLBMCHH
bGsjQWzXVwU+hBXaieTRapgMxO1FsCVG4U3vivrrFXtnx6PF0KHvMfYDXTSxkCxVHfmD/OmVUKOk
pqCZWCtYBq4txAKYWMDCONyG/BAR78LS4bB87qpBCMxBipqlIXfh5Ky6LClLNUWQo+6ir5vpjxwq
e9L4lZfFDelYYNNgfBggFEOi8ZLLYLTHOSZfNnF9yOSXN0N8/aLLGScYx8PNhg+M/8Lv3UcGpUDi
t80lxQE+wUgl9pAhAvmknlOishsVX83BTNi3iwWJHIhzoOlUYcDw1wqTTAkuVBvZKq4CuqKhS95i
fVbNeq5TV13GXUNnu7rmTAydifXcPZdKfP701oxMGRAYTxpWVMT5e6C7jzTCiR80ljydJaHMRrR8
XAU3frqup1DH4qS71bxYzc4pPZ6bWpgcBinl4Mq0GM072JArN+bfdWW3ulyuJVX3EE0qN6ttce2B
BjMcjiTJGo/7WQhl+3GYD8vqHP85GnswsHpKzh4wNg+u+aOgG/ZYHnDFDEXzq5KXh30p2buZ+E+L
NQHDVZAWNXNQjUUiIexVd7IqizBllYgoaiJdyTxthVdYSB4pMkWXdOGkqnLCeYPhH2DW2xRY+IJO
2Kqp5piZD2rcQ1APDI1wtAtUfQRvutnFAH6ERQ+VDex+zyXxboJygz412Y/7x113NHjbhK7duFb/
ZwPgZCcf9m3ujtnYuUqO8NOejiez8CFhzlzNvtt8MpAPNoP4YgRMB5mn/yVIEYvtesDNtqxYVLBX
keFWrmRuhE9T3wIm60dFWruybBsliG7bN75Iafx86xrytlqK64lmtvgEDcV8ASK7hSyCcoZGtph/
0BRTI24ICrVTacKxv3hko6SXSSsxulPBjcfpI0Iz3mJwnQ+kZLaHE4iMGGrk+glKH9xc6nfw/Qgo
gp9tippVkIJaHG+/DDaE3rV9j5QvWh3ZX+zr1nVxJgTrkODmRn4YBOxf1Yg+wclLX1BR+E6bT5ub
hvdn4HYBZdDFN1z36L4xxXuD5LUc3BXfgzcojnRn95B7aGexk9K9CRjYsZ57AJx1bMXl63C9Vfkq
yknH0eBBHjwJ7fFq8cA0e5exS+SRNDjmizKSLFHizmSiRtfu4SqhQrh98lrH/luL0+DaeJPm4+qt
0ebjpX4PmzDHX0GXtlIr/EO0OQ4Rj+blgkhD+M6RFD3udZ43chJTj8PlnC0/Tfg8PF8v5hocziiG
G8v/btTxpLt37+LKi/njVVFBiezRN5BS+zhxUMTQZ2bMyWH9eT85HDjt5hqO7uV6i6jZ+7f88ps3
r5+9+bzf662oKj0D8Q84DpZtAp/plmyeWg/t6ixcR35RUwCCQPHwY5/f17mHdkXEzlE/hJB0C+iK
i4xmOUyOnX6Dl6K3j3a9wRyzAM0w9ixjwGsTumcSZXWF7GlUCzTclsU8ZotEcDdDfOZ0TNFp0Frs
NpOimCpd1H0yGPYPlX9hciRUe0yaSPnPaDQ6pkizaZFDd56TuIVc42OtmIKZs87eDjoIeMZW62Ga
udByIcKTtg86SF8W+BOqrPq89P2sRdshCHqs/lEqFpc3qaJeEwSHFB0C/hLGQ7IpKKaa2Wwfiy9z
qHBZlMWKotMDv5Q4GBvTfJ/pLj53tyZ6haraU7RCIbyn8Av9vQ/xZtaQcLPmkUA7r65zGEWGswC2
oF8pGF9Sq0Cs0kC121eJu+Zz4g39uBfdn1OneQw/DPr8KbLfCsFOoPYMCKwMQNDq9k2fwhepAN2F
IzADAL6NYkjHhI2ajKwxePNbxETSycyzmNhIyH7YOzAc/oFyCWb7rjkzExxCymxIraxwpZ9C1w2L
irznZEdXug51xZ+kq58sCX6+9rDdbGRIgyDqj6OLzB3NRkAXpjfBkfUiAGBAu9VtQ7rLcFz1VjAe
uzNnRB0MIYRK1MOn2Cf1DkJMZkFTCxCyVgRBBHxfgbVxPjiJhYFeG3RqG4VgLvbgF+szqc4Abk6X
E/l3v6N22ixE6a+cVqCtas66dEeBMlU4uVPBDewI13DXDcEZmFr7qg0Qpsjrop+NpturEHfQwcGM
SCsBJIRFISjTxyBh8yTW0kgBGofeUR5ihjXBYMAhOVljuzpHdaJIOuri8rRjxN69IpH8xFDOJjvM
WXB6Y2FN8wQFblp5u/mGEW3MUr5oSHGnu8WCJAQ/h/LNAu/mPnn2d0TV6oLoq3hZ9u23u6SwlWEh
PZCbLoqF5C6FrMMlV3SmJKV7MznzoEZS81ua8zH3xDAayoT+GxO7aqOpsQ8V2amANNn9SEY9kX/v
dG8laqaTABleNL5WjHCxusFE2xFtqYIWR4W7DENBQPffrmIaUwNZyoCeg/7zl2+evXr55AUBh3+u
kMJNAoOW2qeLXYPveKa0f6I3CqPaadJDSx0+msjvbLbQUDT0kgqOJ2c/AOG23s22jBBA+AUEu7w7
ETkZzUM249sby1cpLwn3OXa8fajPqbKjifcY39CN8m/1SvuQOPL6tCbs6vauOVOOXTcKHSxFggGT
rl1mCuNDVbavreRZGaHSF8k9pRy/8v3nNJ139CpFm5pXlsGXB+6XNgpzTBZVQw9UudK/NWALhlJ7
5tpQXAJPJDZ/LwCQvmkJiHCfDhqbBavgF71YrAGFK5lQA8UX4zGfeIDtIIWR4kyqRuxkWjF6QXn1
l+DFOEBlS8QdgIteobzzWp02npYVJEK11AZeTKvsAw3fiPUH1ukxAPlH14aGuy7zNugtZbpRujJ7
aMdBf/eDwUa05zLdOI52E1zGZAcTO7puVy0dAQFPgzvaG4Rls7crjIyv7dTzsu3seySIwUpMNltM
RTWWlMc12dI/VXjn1nOhBaXKwTM3lUfQAyqdQhROfH1rvnkz0h9Hz1eVws+PtxOoNQWthKBPTDuS
oa0tMhoLnmLnusaRamfvCHTdhvWsi8gnTlEkslOVRuRIHk/4M+I9bdfX6k+gA5DDoGj/OItqkwQ0
epDKDYI+OOyaG3OxwF+xz/SWxkBGlvAWrCGZmRc3QPjvdxW+b8RFRhVyGWvpQzma+43cD8ahCoZo
MCAhqeWF/EKpI3SC0sYurNuXsn2MtyIXa+9koUFQhjvY7xJxruj9qtB2bdfrRTMtV3DmKLlQs2d/
5erSLxk77uhmcdfjGJiDIk+6KM+ymQ89UPM2uEPG/ndwcdXoMt+OdkuOHCrZmSdH3YTNiAGTa/xT
dnccwam4Kj04nQ3aPVdbce59QFnOGwn+ulrXF5E2mjVny16UWxVJzQpvTBRnPagFaL1czePswt19
jZGuLDbtUape1h3fUatJBodNxpl4JBHPHhxMkizo0+ucEx8WLaqXYl6hTenE4Thg3+EigS84/+qO
iBviX9RTY+SZ+5c3XErBpE6lD5CLW2pgZ1Tps+QyBqfMtkxpz8oDFA9BP2zGh/OxYoWN1BseNuSR
v4Md0d+kKMG2O43IWikpnK2l1g/Iudenzjq2oMJwiTyyJtl+CLsH/KO4+Sm7mfG8AxKXqEi5rtlS
NIop7bwAWr5v8iRciD6iL1JMAndudYsGnmS5Ri/MFaPKiYlKOg6sBuu6OhM5JcL9W1g55+IxJiOy
Wo07lIhKLaSasxmjnwBF8cU4qzZoqpKtxgjmwWudkqUFd6D23iV4jNnVfOA/juz18LBDrSQTtl8J
PcwjsbEsRCBqRIgYSvCaos+hQHpy2kRnScvWCy94FBswDtWAVNyUhGCAxkrECSSPTwouJMdu7PGk
BCmmbBxQbwNKpRQUFRAoTch9vV97IoIrNQrBWuhp15JbyDTgJFwEaoH3AEb1MvAiWY3mnJCuWmER
R13pArQGCKzWK9qxqYoj+ioZ1+Xp+B20ws6An4no23z+bpQ8d2HOTbQxvVjh/OFVR+6qVjK87XlN
EeXow1ut6wi2qSMEJp+14f4TqKljd0gGCnecRy8vIzj0hRU+ircmsdq5u0/dHjrKPndkvANu9ciJ
1QRi4Ch5Ddgb0oTVOcXF2w/h1v0Mhu+kCXCwWH1W4AK47je3MCh3tyK5L0ZNksKK8xdkHfIg055z
h99lUGjFkinCJuPZjvs1yeJKWb8JxsJtuuumaWy7LBusagjBRjSarrdDm8C3uuVCzlqcihZxb8oa
hEnlaGRlB2Nc6nBKC8Vg5hsVN7EwbsP0VooS56LnANro6RJgcPwsBaNpaUJjDUebOeK4OPZBKjaD
RbE8mRfJ9Rh4pc4fyazYUhRntCfHHY9jBwy5cf3L3UPpKC8sVc1UbXWXn2RA9uS1CIuOTgkRvVXo
hGk1u+dx5ZE7M4KGlIerQ4OoGJtOyTtnGiM8j+iE4HDs7MnJ04jqC61rTsq1Z+YzQ+e2J+ovHSHU
f9D3qo+acpN1tCBT1lTOI9BU3oueCikX6N0WzkXrwD8oeBLGPmdPqJyyZgYwC849HPoYMRVbcOgY
id6WMWiUJP+63nGcC/qr841847pkklCDsUiL5N274fCbb98gBLqKLCNnI9VqH9WyfTt9ysgdSCc+
uoptO+UI7hVHbmPEgdcKrssYK+EmuM8g/Mky1M3XJfN1bseNGa05DRERuVq3WFyLQMG4CnJ6KvK2
uZdx7I7y0OZGNvx/8ObD8sJM2FUdZ9sWIOo4KHeLBXfwbQ7vxZXl17f/zRgferufiFEI00ZiokuQ
BNRmUrxHTCyIyxeGojidlmTFVJXts0ibH3DgWA4Cy1GP/fQs77zMoRpbGjSFPI9w1fe6pvn+zBEQ
v+Drd8+hCI8RGBgckYT48HVena3WdTl5xhkgdSRyzC9W2RZMFIOTT5NbCoqLj5eKNdGqZZBolIO4
eu6rtBTXbkYKDTtD2SrsyGrEZzvutb5HVXterNpxL4T4cGsF9x9859oxcehSVSJTJFzB1yOrCtZ0
xQygZx8J6YIB4M98KVllo+Elaj/b/L33MGRkbZ7idzVdeJvfZruQn//8k/hps/fY+uRP7DomaX4D
JhLJzALFQ3d3F3ucylLDvv2Rww60GwV2z827KkGlqcO4lLY3gDiJo5uHsDdOvBBNIMT9okzZjL4m
S94bRKZWPulR/wPfGz0SfCBNg6SGbnDT8CFg21ut2QZ+9PCbasRNxhF6/JGcGsjtoYkp6NSrs3e3
B3j1n/wJoyF5oX2kDHPB7DMFzr8jbovOyN4nP/zvH330ERwZZZHHBPYX5RxVEu8//mHxdx991FM8
+Cv65Sv4BV2dL6s5hngnVwVlK613GJ+sU8FV9IkqKL9oZn8gCWCgMAhGqDeyXxaYM3qO2AIkLZAn
ikpRKKj8kuGckyguWP8PjGGLaeV3zQ7kK/J312dK/bVu1F9AZOrPZVE35wS/pzzcdquTHQL2lPNp
tR6cwp5rayev/yk5EiAOA6yn/i1P+leiEIVtO53jrp1SBO5qbR/YdYNJfh8PzG/QgRCl5BzZbddf
oe+NHadxyik85FKdF9vC57zi7UM/eb8oTx67uelUmIGDKphFH06KbZzaQp78pkc7MGhghkS4uWc/
PH/z+s2TN9+9nj774emzb988/waTj3zSa41KAApyULQ4xkN5J1azcspY6g8jqvjZebWYT9erKV2f
Ukd/iZQS8YWLZ1m3r1ZHucHjiVeQ30J3dZgUfAv/bdcQu4ZH1RD/4bntlMsNB52bv9zX4PJijj/5
5sxXz978y5MXpp54tA5SBqL0rdKv33z5zXdvIsX5mEaKP3v1Kl4cjrIyUhPropRAa+Y1Lpg2/ERY
nJyPDG2ENvdwLXnUCvzX5clSm7b9tsqBpkNZq6HuwBBb7lGWR1SZY8DDqnKs9m7AtgTVoqC/QrMp
wairXNCGvTKkJKvhTSmrEUQQwirPv1EZi7DRc7okep7HH7LcSaL/CDnho9wmiKwXegxyff4jrP84
tykk88BRT9WblMlzRAy2f3Vi2ZgMF3ESvAZPVBSQ9ZqH4gsQHP5sbeweUZsY2uzsXbeKNWRDg5hi
Zmspw2FTB/c0P8DoN4sHtFVWTF9uMbhZlhuUz/G3LOucBVLcHSaBxaPxqeNei98r8yP0Sh/pENbn
5EQQCmq2u6p4upKT8FS8hGd1gRdMZEoOUdCKxS6bnuU6jSYS32sZiX40W6yb0hudjCz2k9oB/zeg
L/7qUeS7x853vKpmwE4sQbWFwVYwbWYj+EVZT9aYYqXaAuOyXcQozdlNg48DEoJgLbj8QLG5PHno
BpdYpcME4s3o++dfvX7+B3Qa/nJgl81i+61kL2bn37959uprqOzWS+4njx7/dg9niKA5sz5uix1C
uNPGQzft9oKdWsxa/UPy8Po3p76912oCBUx6lVP1ca/9ENvMK61P0myvOH5oYIpim0aPHtVlMQ+d
/kPKNQ10kCg+n4Q/oM+K4g9TV1KM5sTj2s79aK4Kcxn44zXXgWH4QRkJocNoZU9MEcnyFcFCDsxG
5LIDuQxNPRVy6TBz4ut0wy15BZWAIvqdiPFSleC2Bg5O/LxctKcsRLHCZq2Jgn2kmI6E8/guC8qa
1yKZuCujBGtZEidCOCI932HJPEcL5+CYD24hc4zoD/dHDwjdi9y1hAyvY0t86L3v//B36j2K8mxz
ubqavT/44Yv/+6OP8G0Hn4dPxVyA+qJ58hwT6aJKr9hqOPDXuxPlUPY9SJjV6uzpenOTfIst4la+
vlx9/1SawS8TSkZ0XvLO0IJDOfstuW6IzeYEPopLirlV4RE3L+rwuWkiGnoCaMazUcoeBqzq9Q6G
H/6/3kHytCDXcMogAnIgxlQ11XZdo5/oZdWoAB/8fkh4TlBngHmgsJJCZkUIakbvI6cKgb6qnEUF
hnvQY2edJWYwW26HTXFa/rzxC12TiwCTIqzZRpBy5P0kwoR8QpfOqrSeV1iBP+lDslk/W2lmGzkf
u5oOxCVvJJD4cuMfBCgCjcJ/feq+JNK+9B5jqhl8+Ki/b81wqmADcG8QD/kfD5uEk5gO1Bhy3W3u
9ZTZ031q1g+16ugMhF9zTDV7OTbJvGpmwFLQDcWiEqYJpZRhcDhFOYgd8Byz9aJvLOZTg84J9lF5
3lA9glpDT041IQJ4VMQnxjQyXTogfqqxSfL4IXp2lrP1at5IMlP2QyI8LrT/8vMFASTxRURGvIKI
nTtpUSP4e0qz5qe8roAcud679Ga3bSehiFYBySWqI+CVctwlNAlFK/gURoQwsF7TBAeNVK9DNmgC
gWxHhD3hokhhEQGMfoqTdLxseCr0G5Qdc9GFnoiun3OVoOAJiAcXXRIdTW1iDvcgdojDe5QWQalF
qRFPxyoD5BJyNPQobdOW2Xhrj5Ec3Q35gP2w7Tx6X7KWVD5XyWfBDt03vAFOVPwlpybKlIfMv1ou
y3mFgcjPtvXqpnVrLFalR5ebne/dXnb4yOd56uue5kTM0zUzI6EHuMIlJwzlC3Ne9npPXrz45vtn
X06f/vHJK3zz9afJ8MHbt5O/H/3b/cN+coCuklYuqqKxcnBR/lFON9jjN9BoA/cmpW1Cv5uravXJ
Y3G6cfu5D7+O+27n0z9+8xoVY17JJP2ncSqYPJjj41L5DQ/g38nRcYe9EQpginnL0hhLx3cpmj7R
1M+WUKScDfq4VsP3iXajt1Qnl8oxyGkkHQlS7KWC9x6l2dH4sRXlgi7gcnhCEP1LhTzUzIpNSYkU
4MW5La+VaIp/Clgkfau04zB6kPRGzvojQDqtv4XP69c36Lh/jyExb9/+feroorGQjImkeoS3nZ4g
HiIQRENt5Ax7X8p3E2fzlIIfju5MBd1a01EqlFHVFIvVbuk/GgIANSws0W+my1vqyFQ0kEJoW6ap
wax4UoZVgFx3vrnI6Nnxfgevf4T2h2v7RF3Z5TUcCrjDajhyILqe7ar5OrkafaHEqO0a2VvFco/y
HRs/QD+jR2bvsBxlDEP7heWhdr5GuxLUF7cq+EuR1YNUt8AD5ccQzkNMzHxmG5WLB3N9UUez9YJg
YR2YyUF0d7n/8IT6QKvQX7S+jNxrwkdvDj0z1OBx7OKVQdwtIw42/Xn/U0IevFXwifL7gvzd8eEw
Up8toKjfk6nefQrh+kptX6A3HsBNidc87JPr1wfz6AJ4gOOBK0a/sbyMsnhZb28GUpvNy+q/fSxE
8iA+OeANR4+7fnaroGx5ienkwKgWLBs9zUG1mi12c/7lcgin86Ss2x1H1NCtns+L5rxVRscfB3ZN
G3+hvLIQy/x4d5BrCOpD3NadlFW0ELQIyZMkhXGnSYB8zi+geYU+Mmhohd603DsKfNs4YR6uwkV5
g7mhrPARGsFNclnU1XrH/fLZGo973iV+vt1u4ODjkRoBuT3AW/oBVniAhx4zq3gV/tLywPuLgxcY
9RBo+99fVAvKI+QuNblfG5rxTtgN1II9Syex1smfUDPJsTfTKWyIkA18oNM6nWZ2YRGPL644IAu3
2Tzr3JLFjlinKoofVVn820Y8qtmCr6aXO5NFFdDWdrM/uUFvSC/6p69a0dWcNqAJN5dUqn4iH5mL
q3EY/WvVl0KUsgOaitaJc1SdkJNDKgTannkrHLCOV8LmhFcPnhvixDdwx+Quk2/P2Ia1ceS0mhH4
6K3WDMPfThqMURrVk29Z6AIZ0T9BF1dHZnXR4QVmwqV6pogzMNm7NFe0kLWVBOauiMhxECYztt3t
OHBREWaHYvbhQPXToHpig//odWx4azp2pr01r6rKxXLyJ/cuEgpmRot/BqhKWysGh/ia0j0wh6Ug
uNROMO9i3aP2i3yQ8TYsagpFMzySI6y4Wcx3xMMs5x6fZhkLUyGhWzLmtVCDcIh55DLcG3FbFo9l
j5Mzj3Zr3JFB/+Ls+UOZ889lzZ2MmTF9LEcN3q6Jc+WLTBoeBxtsBDdCpzzyz7KVTDN4VqlnOhXB
tKRySmNmGzctOIxFQdbDoI/Gw0fHWSRvsm5ZH9vOprkxbGsPy5v22pwkul6ohBCHTZp+TSD+yO5i
yooKBxrD8fcJQF1X5nMaJv2Jj/m2pjBPgLklj6rxcVStolbVuSxaEjJaq9t6l4T7hRfJrQ0G90z3
xPnqfAldyM35JcXXk5IUd1DuTHd4VrYvS/ot35tUKpjVLS59b2uOb9CoyijaGBTYpCxm5wmh1cQk
bzLtk3xNyXPQSZK6wpb8+Q2Mun3CI6IPiBQV+yFzHhGrsnUyMhSFa6gbMdXpEIo8HwGw0u8QFMTp
VTXQhhNKz6eE80yU2BSuvcQntztJdS/gKKgdO9k1B2zzOlPtFYrw8YiO1gDlAKvOcmfb7uApODhC
p27mYZoh+u7iFutQ/NR5CcFL10f7gKVReUIcsTjG6bLAxOFKumrDuElLFQ9PTaOVbQ/vJVvFerVF
g5YsOcfXq7eqGwfrxGBOVSfRE+QFwinDVezxSgYt9MRSnXKSFrdrNUq7c2owBvyFJa3sQtWPZazf
V2YNsIjOUy9QhPaikFm09b3M6B8jbMT0uiSVd3e3ZKfht6/I8VjJHsitnVI3PVFxNecUw06IIwxG
Z7Mx1kQq2LPlPDz4nrISy4gX6oHx3FsW10CO9swOPKqCEtVytzRmLlY44LyohSYZ2KyKQ8L4F6OU
OOBx6UyuSpl+4AYAkP1Tw99LyhSCouEFskZoGoQBDuhIkR8VayHmU/3gPHBugkt7BYDe2Zas5ta1
DpbVeajXQqOZbhyaOjB6okB9xL7sUZ6h1UgyclP+QHs9Su7vq2JxgX6SKJVoq+MQB6cuKzYX6hYY
lPORt4JaNX/gclNnXWluDrlkWVgJIW5I28JZiyIFTEojL9j0wAtxGzz/hm75PPE0/yM7jOkgblQ7
aDNIHriyhO7DArqZlwzYimt9pXY7cXabA0DoommlLsG4R81rWTdKi6k+Z66zOyYF8hhLlweVwZzF
dRS+cYFUIEJ4GsussbnhNMOjloQoDgSCnDvEJWpsuyudGefHfcdtBqzSIGXdo3z5zbOXb1qH6WId
d4ANWQYGZxbIjH+5RcfW0p8/H2eEEv71M8f4cwYVpwUrB/imntKtyMZZdSorbUSv/TdT7J0UqsTM
aWSUHj5wqpOe8RCoTU86RRqD99G9K7ZqZL32MOG8H/7r8HA5PJy/Ofzj+PDr8eHrvmtaw2rLC6pk
2tNOKN+CrPICVuXLarYdWOFnBF6T4LeY5pVMsCgTn5bwRCgblSFygH7Bry9XyqdLxbPBXbkofqwW
Nx1ZQlgEvShv7PBKVmiQetYpfIT4MnSXmLhLVfU4QH/WnNl+WyAKybZc6iahutWpEh+l81jhnhsd
vWtB23AEUU4TJMKo04g10ybsLIg9c0VXPvXXFC5SKkHCN4zHDLMKBuTF0+mTFy8mT5O056KDoOke
c+itQPxDS99udUGy0XpOCEbNenFZmlckCgUgjirLCH71frcmD7WqaYBCes9fvHj2hycvtNU/vZf8
JXmbPEjGyWfJ58kXydtt8naVvL1+eIL/mSVv61QpcDBlNTs1o5kLdtxpjCflfKUcPrlG1nv++vvn
L7/85vvXAodn+wzI0vQw2cGU7LzTedVckDsMauY2yFTr9L/DU2v44/Hb8du32RdH/318fB8t2FDk
eWbbq+n6t/AyMNPuGXlTOgM8Ei1Gs1Gigy1LwVz1iC3DNTel5paO0wDiypvDiB7yg2Zzmwk0pY1E
BaYk3NwWFPg3Wy/QMjfOpCuyq4ultNm4NnX8WmVBJuXsiPxVCB/aqiazuG1Aat20IwgG3kt6vRQt
tPgDxc0z696eT7frKSPMqpyR83mxtUOcgy3q3gKqb2eeuvSzWlLV9LD5p8OGxtRscl1Wws0XqqFI
rT8+e/Klquew6mbD04JTNUXP04CqeJ4y7mDiFvo1HsKSvU3QXwMaXFQnI/q2g9JY/zNpIScfxlcP
hv8wLh5v36KPxwOXTKmN0Vm93m0GQXJJ1VL64LCRNXXLRxq/HdOdpivDPsL80m6b2djW04Yilx6V
3Y7aXrtwR0HRuMWIyEzaEBJ/FxJTtDeHlKSmQ04kyY0fPHAbzyzPhCc7IJ4ga5XwATh7pFDaESIT
55vWfgnGQ7vjht81Za1QJJoGjdo5e59OsVHOG2ASTF+WUXxb1Qg6psif/nXPbdOx4D/dAqZLyjWj
PvQ8NEk9DMIz1p8srUlxUcLLTcCAfWF2Z+OzqJEauu2z31M/zZxsbFdzIynw2Dur1OU2iGjdhbjQ
UEwpptPhUA1m0gfhk0hh50OXEJ3iaLraUSM07XAdryGdecYsdVerq/UQiwypdBpvydqO7qZWQ6to
GkhPqZVhPdvby/szOSma/iaHTTIajUwCOk3oGfpFXk9PFkwLjiTxtrk3eDu/n9G/r+9nyWB0Dy9Y
cxydoIYOb6FN6BIEMtppSe41nPvtgau5W5M/5hUHU8AB31R25rfnhDqqtHJJUy2rBdzahFla6qBV
8vECyUoLf245SxtKc9BGXOx5tqgwF63jRs6uSyyquTYAdMugnBRXMw0/kxMjiiWJD106oG7WgrPB
LQIbWvgBcVCOfzQaLBQToWDrm5LLu8bomXBnaYskdKdU1eLjiFYVrpTdJV1JzA1yP5c3GSo/SPXC
+CioegIB3owM1guJZy7r8NfAmmHcCbTbmYHCIwQd023meaKJExn+pN+5l/GWrlltAIeTeMvkYca2
Cqc9pQ1zndk6jGq2PsFaHAuiBSsA59Y/avjm21z0qlMHnwvamEIpVT3+2pQYG0vtHglJY90tKZTx
zwNxKxyob0TZ7Hg1hQivuq3czoAzRTsq70o02RLV6H2Yq+H3TxN8GDt784GWAq2f3C0VwBVwDc/8
salmFzp1PKM20+6qJFtt9g691RTHjRKV1SPwv8he2/h5ErWlnD21Fv77p0Ocvmtn6dhwBX4lx1Q6
VpvsEHh3OlQK2xsc1hmxBsd7MjlIcrOve1+iyoBsHWyrKkpWyDJaRCsVncmiaHtUju18YG8P1ht5
0pvdPf4OUzY0ZSGYZAFUF+Nrrxi7mCGx8aFrCWU2vjQvlT/BrOWo4CiofzZgW2P0xucbuAncMsWr
lqV9KHQcYIxxH3KAkLJYmYZ/aZuxaRF9kXAGLoyBhpdMlRTotOrQiu04jvE0p9W1xC4mrLlChOoC
weOuMCwNPSCIeV4RkiLqKq3AZGL7E1vrldxP+kmfBbpFB/JhVMV8UWJQEurBvn72+vWTPzx7HTqu
YBpkFlFKhp4bRbV4ggkgZY7gd/QDTJ+GDXLUXCQExOefnXHjmPkIRhb3LAkHgmXv4JqCmaW9Rnqh
zj1qybprDgq0VFL0cIk6/BH6UNShSxaXGrHyHZ8EBMCYZv6D2pV6PLvAwE0PqXU0duP9Z48fwv9+
N+7/7LYx1sEZNxnu2QqiRp5YKOLuith1MNliMb+5a92rR7+CqTwe71uhz8dPnN7nVV1iorcbtRLZ
7Uvx7Ifnr2NLwSkyfCfRne0DcVWRujISpYe3JP+Mrwx2//ju1YtIgk3NxFMuD1LTEbR1bPFQkrrX
diCovCxA7olBXKryDMuOcgmMgZiPku6dYUThprBW9MayzOl2rLJr6U0Io/gy5o2HFNYdjRVzzqL/
ecFt5NOVPhp9EnN9xmFiCB1pmroSSFan7e12NHuISKpBAGqcO9nXajqs0RsbxQp2IosVIgGlhUpm
61TSeBgCkcSATB5IFCnp1zp1VEgoXM9+Xt/wpappVsd9JAPa1+HnCTaduRRE6fHwHofJ0QCOfW1I
qyIEq6pp9wNNSL91GXboYY6VbQQbC69OfI0If7GcpFepN3Uqqz2SSGLmE0LZFgisBQ5L5LwakZm7
czqy5XVxrbjVrw+lCs232C9PFBBKvu52lPJ83SyRXkbAfYsewvJ3s54mhJgG5QMJVrDQ5MCH2OSc
8A1VJAxqAiXgueqFSHABu5cw63R7dfp5eeF6K0ST4grwm7XiHCAVWXsuykH+BUuQQ5THlAc+31rJ
4ORGIzkXLmZKQZQCBwFmgN70ypO+COKgyB2Et5cOYerHUK10zADyT+CGMxgGnsETDOMytEG7GnXI
3KgdIpnyXpiUL445Y/0izK+cB4Dpug9MAjKIwmNvQqBCDidCPxjEgYo+ejYtJKFpbhMhuE5i2qj8
s337O3fYeshGPX6hfH/i51Rki3+IH1ihGvLIpYn7nrKdXrL2hgVE3pJNkJ5UKRVOs273W8n8E3vD
Y9T6eleTIlLUrKFsQj1BSTvTn5hz1WUzmzzKmWYnjwIGhyXlpKBIYBMzyHDlCEMQZymMAq0XlXvA
GPJ6Ti9WlSwPPy6uipuG/cIH6hm2PnVllBWUXdzgnUbh/OWyWG2rWYs3syiMYCQ5aRDwRYd3lgwf
ryT4jQe5uOnHTQbeIYoAVR+wz/R8zsiK2NGgWN0sYZJfAHemJHPcpcs9g3wlmYGH6gD4OF0UEbGO
NsozF2JByxhBRYJMkUQJ3C+c6HtUyRZRQXQQksDUzOU2SMK8Yf5FSncqMXIYWAu6ANU7bBRGjfLm
zzlQnnvKHOJEc0bXSLiE8Ra1BpTsMyKiCVmlu4wM9u8idg4Rp6SgXxMEIVzskMyyZK1AbRo4pNCT
I24ZNDQjEGELaRaEBwmRBiAdBzpxIaKGsAcmv4x0Vvc98w+VnORpRRRN8xBDiq393K3a5r9b8Qoo
v9XFDS0JNXTrpLnZ+LQRYNGOkIQK4zT7JVZBg0sOoI+jT8fOWw3x1nabuNaU2eHqhmbX8POsdZcZ
+Ko5Jz+8E/Rcv2ZM6w3yto8//viWNJ+y5H7CGl82ayxvdoZeU89Mehw0k4fM5R9SnBOlqm8cGc0S
ZUEYXixKktyz5DWDuolOWuuGQwf8AxVbCDt0gmn3gM0PT2AZKc6QvjnfLhcHGL8/Ox9+MkSE/OGn
o09Gj6w27P89fvzwEf/x6HeP1Zd/2i0J4MPKmUFL3HMjbHmGt9mjcGvkmoDtoAesLF6W9LutYP31
SvdDr60GM+L1u679g0ejxwqUphmbUaK2bjjki3Kov/V9YK3Cqften/lyycwpEwNonHGfzqWY9vzU
iiptDL4skZVhIEpjXC/kX3PfH2gIzGD9D4JJxGbsqC6YcD21BX9J9XddU7QKWs0GRwzuBCwiyIXD
S7gSrpeLhNwCeHh8OYhaO54OVPrKWfbQ0/GA+yOMj0xDH6TcjIz7bz9iIiVMWqVwH79/+tqwnmyE
jJE1ywNGjnSUcxGytNv64esXd2pORQ3oNuw3/OmppVWJqNp0bB4W9d/t7HBwVqAh0kQvoF5sII9K
PyRcPBcohAk7axFYYwo70b7FwZVt5VJ/WCdae5V13684K61u6lKFkuNIJ4gKBaThAmEYoPhYwBMA
dpOcQbzjOlDLlZM0gcDCkqY0DpBhDZ4GTuNx/GgaTsRoLRqVwTYJ9helBfTepS+c9yIwLynkGSiV
EjQjpFm/IlmocnijqG5kAhSzv9otSwz4D0/ZjyDHmJHlpn8fF2hJfjrisyOujrpiFssGt9wHCkMy
X1BOtEMKL0O5jh2DNF+XgNvb3T365fUGbmsQXNgPj+yyvBhBjsNLlSMEk6ss2d2xCR45uI+KlAfV
aktqVCeRbSzwGJNrsU8tLhmTlO/nNLr3lL7fliZwK2HPp5H4Tn/5zZsnL15k1rMHKwiLWDZnkzSV
N3Hw/qEeSUug0OUo3s6+R6VUExEDq+RsB5w9IWslvWu1XDhfU8bcLfocn8MT+YuPv+h53F56Hy4x
jXVfvV6Gi/UZu6w2ZzHnvTx4RQQSA7Z/HzpIhi/T3t7sP5I6dcH+jeQYQObePTOQkvzqCv3hKVHw
xmrj5bBA2aj6BIlqabxt3XjbpnRTTUoGmZjeCJ8xTvQtsncOsqDXXRCuSP3skVQjWD9U/81ZlZRS
B55eSOnFImmBLJ4p00vV1FAXgCFV/KTdZNq432nonUfAq2+NnD4Llqp9hRhzwxv3mR537PQTtgWG
8uIrEZavqBZ4hlbllZu/GVsCWmwfJ/xYbsufN1Ro4xcaqpX3lF5obVfvEtglvXFPvWhwJEj9DUdG
jXrP6WWAsgT7OZOC2pJzdGCVahYEe8Z5JrXWjvAY6BfJi9T95IisEIkKw1dx5SZ6pEw1y6SQdRyT
XoemG2C+pTuvL3XhI1CGxPS8XbWUOdI5cE2EF/129Gh8fBybghO6xuPmG97WY8EbqXtzLyl1rnJJ
Qe/I1ZkSrDQ1Vt5mzjESqmepMwN1dWyL7BzWogik3p09iq52S820847+HwPi7n8i3N0BSInsRi71
2A68nmUTiYPIooP42qvfYkXV5Tospj5iS6ud8T8pdssee2AMV9bkf4GlJY8rHYbDDtCPWlZ1lWAY
L96Vu9kW7bksX18SlCvmCZzbAUBRd1TVB5uZtAw6UuJKFnoynK73cNOTZ5TD+7Bq2hYMvofuZj/n
NKXKHBmnqm/FtEwb77p7aINcu19at/9Yzj21gFFZo+jLAMg4pxk2uk/U60X/l+7d9d4yD6nX//Iy
eTT6hOJGZI/WK5U+xkrHLUlnBozXAY8nfPt67QkZPvwYrT5rWNmTasvxx5wn7apE2+IOg5LXqrNK
deu1haITZ34ajQJ/Ka6hxQx0T0pjjnGG8JRPouV9WCTaPKkNDun+bnL2mnMfWcyfX+LqdUTQQJX1
5vuU3PZqIJLiBJGZgWshyCFmToERr68aOsuUlZPignCByD0Mnr+BD8Oe4N52ZA2ecWJsH/uc7e4k
2G9b3nE/ud95R/ZR3wojkFgWParcG5OnK/HfyhIj0QsesvCdMUAK5Ieku6yUE2Gzrredqs2mfL8r
VzOCUEJO0lhYktIoZ+RQMPwV+kJj8g5U9bHdX2n/TO4PHhaqcehpsvLjwmbn62pWtl9iVnwHzIXe
qH50boWeihKN9tXLr/HRX1L+1MzTruxW5Lmj/HVAtMEx0WXyArfgWwsyxYEHwbTulys70tl3M8Ga
GuUQiRKVh5ZhgR9Ojl4SXxHm4q2CUA21kdx5FqoC9r91QyUhTYfw8ZIB+SzDIsJ44B/4OuYVRASh
0AkDrwGgLXqlUjGmtSyW89xSunOiW9aoke9enbZe/1gWdYvIo7VcxZ6LWDfqNKfbN3BynuvebeCv
8QgauxaWj2IQuaqdFiiiCN5IzCeOgl1szJAo1I8CCwjFX+xK86z9MYQ0mJbqvgPkZuAxzNz3u87u
Ai70N5GVomJSmn08aRVO2sbrNH63+/gDegoFnj3BmNg55czYuZwsxvgZjjY6ecFwT+BdEjgIRs08
L9ZnzyQXjSDreCBtPd2TSoJGHwROX5Tvxkymg3op9yzbxmRsqr6ToMmvy17LiMoi0/AUXNhAZMww
1TWjmwqMEHoHi65l7qq3xI3McgfLEk6Je0ECZEMXXC3LALJEtbHDC9RiTBJnYcjrGrlxnxzZxbGe
f3dq40JMEmtJWmqWlnM9mw1VxxNVUpDruMlJEgsngV/XGxxsv9+pANLF0OjYjEXO0Z1q+nLyv+D2
SD21WTSP4SXNwuqSI6qggUs3wsq63wkU6ATEQpRJtKhvdg3uQemrJeLNtIXX7srPZ5hwoNUGr5VP
BtaA7rc4p8T/l7L31Jn4EShLfKIhC+7Umlr53F7OXE82v1tjLbFxbFyyZ3yHWewxyCSS/YJEDlxq
3C+d7nD0LV3qGGAYZZy8YxO7wvNvn7WWhV3ds+x5uVgwHIj+PfPyluYmNykNHHV/SxA4UfU4yMIk
p3aE8naNuYAGuqEbcqsWxgYi+RplZzsmE+TWar5e5s+uYc3oVsSnAWV/hP0YdMYalnhdSgMjCmJ8
zT4T3H3gb2L6uA0baSVmAcWbb7HLs/x7plOZAc/FvMJ8H5HbMF0CTxEIc0RwmC9BfovAIqhGRiv4
/c3NhmCx9ZfPXjz7GkSS6ctvvnwWRTS3DM3qZhio2tmtCuz/vwDk7pvKxhO53TeKjcNscs8qNx5u
EOEF2Go1SQap0vyneUou1Wi1huU7XVQztASmu5Vc0vhB+Sml4TFO2aRHxdAYNDUNYyPk4kp/kuPT
tLgsqgVChMWaqlaoxsDmsAbiUi6rhmzN+Fn82VNGWLjgv8TsPg9DbrNeGzqRQrxQLkn0fjEf6PKq
Y4AjIw/kY69Eo9w2sgb6I0TNIB7Df/TiqQ2opNo9/yFjw0aw3fnI9qctFgsrjIp0FSy1eWahuUnP
epf+FV6+AMFx8piLqyP88jjkCtisepWfBUPPWgKTj7AKKmkeOWHv89FFeePHQsEEPTvGCL8LA1gW
Cp8aFRisemxmaJYFYVe0jijylGmjQiYewzu2QKH2pNxelXCFaoQqFXB5INiW5/BYucScqPikJi0a
J5Qjay+3UXF1ZUfGnkhFukq3Cje75EDCEzbUwe/NGnPsAEut14jaPx4YjxztvechD91H/5u/DDP6
6/V9+nd0/wv498+P858UEJEiFsvRD1ON5+TU90HHJbDdKF6k/ZnRd5tSqKeWw63DNqMOjt6I1GDU
OMw2C8Phs+fkQ6f7Ed2zYA9wBLaFehzz+8LCSnkckmiQD5C2T3J3igyEG8+ZHcgLIQgfIcM7XuP4
89H4t8ds0T76rZf84kDeb7P1Yrd0XetnD/PZo3z2OJ99ks8+zWe/yq9/nc9+g3I99uA2g5mf7qXK
0u779KOMyMOnqv2cUrcNOGaFoHOarfoS//aU0wgO+RDbTr/44XlEfXy6konKwjMdPWpTLkBbqLD/
oiUXh+bJhjLYtnYKT43ipJk8yuLKAE1eI7mmlLDi4xs5BhkZzQ93GI3RJLbqsq3SnoXQzKIdHIq0
klYToW4yMml1p99l1s//ensgt7s/mvbT5tKsGiVS3b99nBIA6ac05tdphLwlDct6q7PQl3Px36zL
WVldolIUyF0O7eyhN5KlxZJGFgMWzzg+FPt5kOK4f0MjvdeyunResMlo7qJf8hx4MtptpNHK/dT7
Ac+4q7jzvbnHXZpBh4Wzq2qzVdwaagLDUEvyiwxObhsrviTNks9b1YksOlAII9nOMRYa7uv5mtxI
R6MRhracF5sGDZlXxQp/bWmo2fL9viQt3ra0LakU2CgzgXskxwTJdXV2vm1pC5Vt1ZbUZqzX2643
wwXIIwsTNoP+ghJJeVXNypaWBmu0WkF3ql6eqG/gTVovYX0S/U6gUJyspSUTZ0ojAnGKDMmSD7Tx
4nnutpcHyUVZoqvfjR8NEHfQ9oHZxVNbXc7ZXjrgQPDI+Zi2uF3f9XAeiDJUioo6tBe/Gb+O8I1Y
fXyZ4j2CWSTnaD1m33Inqphz6smOquc0knPoq24xDvXm62IY9j3yhBn0J/ThfpqMuxonOt235S/T
zrbksbpva0+7W1Pv5X2b+7fu5uwH775NftzdpHlR79vgq+4G1Xv71uYIV/xhu9TsiF/KHtDZaPQg
/sx7HOf9qPUQWWN0VBtd41QBfIRstsZHIMbuMQyqjtvjOINgJI9pJC/4cPyKPvxz97BYEdI1nm7x
4g6XfxwzFVs2PO0W0vH1I3FOEtWWxPiCpzuJ3PFGgBjvKftw5+bD7a+9MPyNZDb9kkZFO7pkzBhl
d2B+0QjAdsSd+kOOzC//KudLL8WW0mQAXSuIPe3QtWWnSEJ+2GaM7VMkjX6+R1/rTPf4tNZBjbkD
y9KcY7A7iRtjEiOsqnTrGBnAuBzmZLgioYPKnO4W/DuOtjq1YQbPS4ZeuirIIZnEEwoP0g8dEMjs
6EIUQtZ2E/OyWGi/FTK0UioLHDwsBz1QKL/FNhnyzxTOhXKW1YiJtMXzU9S2+CTRygUKhDAPS4yy
DUpGolqvWFEkxl1Le9Ks1QCTU+iDlCkVjv+vrz1RJpLk7jaS+XrWYiJBatzbQHK7W0Ig9GEAjh3Y
tkM3eoqGhjGhTeiZRFH+/uZNcYbpOfVTxUUml4pt4bMeG+HCmJQV+3iism6SF79vyaGjg6aRckGK
qdZxUaE0wIgi8VIa8HojLGIvbImGWy7cOpHermZDLgvPrYfuKhM1KxJTDXpdY5k0vDzsqhO0E2hX
jsgl1So8i09W/G17d/1O9IVBwow3XP063G+st+h/2nU/9vzi2p/9ND8foPXZey2UVeavsG0tKqEP
H6oxL/01RruXqN2uwpKskvFjFOEX8ZPEtrxEwH2tH+AmjCeexl7Th6H9TEti6Rfhj1r2iv1ImIgT
PyF0ZEf66jXRj/lFNs0eDwDRsysetl27V5BZcoqBv53dccGQ1ylRWtqJi5N6yVSx/XYxXNS04/cp
31t2F7HLi8q2dISgarqtcatamZI/ET60LgzLYnkMjDt9s2U692Gs5EBA13HcwBclmGAYt0wZW6GN
e9s5rL3GTyPyRt67O7voRVQy1hFAd2ZtLsczG1fXDCJG92r+9o4KnOAWhnEooy7afsNORDYdO84A
YTHn8rZs/mFJpS/hgsbbINY1civFztwCP1FskVnG3FpSf8m3lF1RWZoNt86d6WdBpc7HL8V53yb4
UKGQXdt1xWY+4NnmesGzD9dE/Kd7pNu6OeYC+I8f7ALEN1+UlMW2EXFU4Z6gM+RyTRrz07UX8Ky2
prmV7dsth5tmGoqsnSVJm3Kx66N25OW6W2COsGy7PpGN9tvRPCQb/1wTjx8CerHP6jGGXOA9Vq4G
0kL2AUqsX1LB4gdVjdtcgyTYykbPQieysQ979t2rF2MVkIwZMht46l+MVuUWMdgeYDAVBSZva+CG
D+ZVs7W+c1t6hZRXEev+7rvnX46T0/nD+W9OTh8P56cnvx4+/OTRw+Fv5588Gp78ppydlr/7dVHM
C6e+GNKSx49+ZeO54Q2X/HMFkzW3g/Xza7hk5rtFORZVifXTC/RveypXyBM6tzDZzUVbERgC9v7w
YVuBL4HkoMTDh58MYTaPfwN/jj/9ZPzo0+T+Q6iWDL5GTQ98/w1cZljM9j/+lvEVqrLhRr8jCp6r
9h7BEiWPPh1/+pvxp7912oPvX64vpb0uPyflC6KiBH95bxCT19X1fEjHKTo++GWhEPxXGyc1tEyC
h907aKpV+jdqIJ6rfBCXjgDWAnpI6PTzoxTzD+2JIcPaFsfG9rIlPqPvKct9RU2etFYVFX7od8f5
q3HMKKvhp/RYJRGX0FzSIhKYMkpZTslb1sPYnqGWlt+Ps/1WxmqCdGjxdMUOQC10Q+oaP7cx+bra
uYXJP9bRTaXomCqCGsE2oBopMiBGf5hPnbl5dY9bW5aXRVvjWHKqb323Yal63NY0SfBtDS8lGzZn
7b6a4X1PzrpuH9TGcQSjR6pbbd1LHj2k/31AArDpFEFTOFMcldPf2LnFrVG62cWNR3ED7QHPoOx7
qOaG62AGD4jv3jw1TsSoVS5Qt/ABTJRRzpRfSorugEP5/wT+fyz/nyWDo/vDY/prdA/4jJOoPPRe
Cc3qUoE93Tyks7bM59zNjxhoE5jOD9CIhi2I8KdLElA84iblTm5sC9ELFu/uWdSTeBZ1DM5YzYua
6Ods6WZSV8lBY3g6VzOUWLoz+vGN012mLq9dt86+dSOuV0lKTpzjfhaQlos2JMHDw89t9ByDNKSJ
zcDyGDie8GZEkriWXPXYiblVOeCfWrGyVFUrdfVRqM/goRVzwTh8QqKuC0bcbep2nw6BtBqSd2PU
uUOYryE6W2S3/LxxEzsUgdiRcqq3QjCsRZCMYYhn9PDYAVSGd66vxZfWvKWKXuu6Zx08LF8Emfx0
SaD2JToTnReXJSdTUuhVQEsfW9DduKNHvAgoODh4S8p8pFt1jgtV7fHJMDYhRiE5Ojb56umbgLXS
t1q8T6DqaI6WLWpIGY7c32m/a1Rsw7BUSWM56plwf8lqdhQxYB17Rx5HIU8HFbnS+mTQES3jXovk
oCNm2rSBrhFosaHQRS9uRzfSGbCDVd1oHfqmO1THqfiSyA81yXxZx1+UbvAC17aCitrVfaQFjvS2
bM5autLlTfvteju+3Zuzuw2qXb0caTeipWybFMkiLb6DdJE//M3w8e/ewEX+8FfjR49Gv/rdb3/9
yW/+W7SCXFh3nxgnnmHdCkslxaaeOjLJ3hMipIEukpDwJI8bBhEgcQqn/lrJ21ekBaS+2YPUWwes
mCi+9jlSjZrLsn1TZ6afvVAhd+iFAfKEuGAcNqTSgn8/DyM4FafI7ROVmz3DWK73f//D//rRRx/B
M2FarUdNcVriQN4f/vD//C8ffSQMb3PTk7+am6bXkwBIGjHdrpsa81Nv6kV14odI1jdck5RgtcRU
YUErQzX0+Yqd/emXEX6wxI1md8IF16fUF/3OuYEWFQGyCki/hJOruD0KILCuK4y7Xs0Wuzm5PZjE
8+SQKykDGr7958l8V6uckiCjLdwU0wwBuFHBRddtUjzWlGyx9tzwyS3bgJV7TptTDD4FulNtA+Wi
md9RGqFbw2yLqRhQMUROJfWaYVpAGKvXVwluwLxxsCRowLuQq/fTvtIV71rxCCJhr/30sEn7GRDd
LrTEpf30gxrl7J+xRjvAEroHpxUcMFGQ6ftvUzttmdDv4PpozFy4uOZnhvtERW1jg97abiFvMVHi
K64HD3NJbqvLDT/JHjx47HLCP5nSfuFh5b0wrVFWx8n95PoIB3SdDf80Pg6LNlIqHY1GlM3uiIeP
pQNC9ajPycocJb82Cre5gkvWdilJ/wZfUOM+MG+gJjlI3mDQHBzDy2JVLRYFDZMOMyE3mDxhhgmg
31IhfmfB4mDXg+sALNegl2hSAl5XXo87kk7GMnZQfnkMhD4J3Ih8tJSpyOLKfkRV0yl9PZ3aAa7Z
7bgvqhtSV6M3Ugs2S9fsWnBKIrP0etQDjSZP/uzosBnguc4Uc61WwpyPEX1AnM5gCx9eH15/Hs9u
oNYr1/0C/Rjfl5Fatjyp5nAwsu6UkuFxhjsjIrrGDjSUjB3ntiNNxf0DfaeT6p5WCl6XWxozgueJ
9DJ5/OlD69Y0+GA4ArooYeWxJq39UJQpCu6EUZl4K/iy+0qwjZVkkpzuVjO+I+FypehSA8RCAGMn
3DQFXQg+JuddQ+VJvTW3rPhYotZUTSWFY7Aqa1S8JFuMLZGnVU2w3CuKd/VS/8AtvqVlTpI3lLpG
xsfH/6pGmYtfaEVNr1GcJ96/D5SsomoUi2p7ozE8YUrzop4nj0e/TvDKdq7+A4otKq+syZAGSbGa
RoQiLdRk5mvD4plOCBvA/ZXVsNHfSA+IUcq/fmg/J/l3RQ1Z7/0//NATeQ4esLCo79Mf/t//+tFH
OIHlGm0ytOe82iQv4T7AblawCj/iZ4QGwe8wq48B72x6Jzew6yxgobYQuAmIk4pmEHVuMMswK/QZ
DPOiLi/KRa4+wtlelrDkq911Uu5GqED8HaHzGLES1W09BABCpin6eeYvn0+SwSf5Q8tleTdoIheS
lc2KJSjg8gRmFWRcx0zXRUNM95pYrtQArhvHiUNOo8tE8lxiQ1nPsJrWQWrhjtuQj+jMyX9pcAV8
FFEs99dwO9BXgy28RDJbUSAXh9YVeEHuEjx/NH7EBotpGtNzaZUB67pcFY+k/8TqekC3Kct0QQFX
w9N4AitUzOwsStviDDWtBg5PvphO7c5VKStzCpGmDQogZe48NxqduHBYJjuV8XQ6bbbV7OKGrxX/
kpeqRykcFiyBNqHEwVyekWUb92zA99bATFTBamS5aco2yISgC7NFiN4L3ylqeVOcDRDaS3afv8TZ
D/h0diPYRcCR9etR2R3EtD9QSMJt+qp2vOVmh/IfjFQy/Ix0TVcS03gQijZoHgbD2OrZHMn4q1kS
rfGRq1ZzuJcmohrG7cVLbWI30wtYCE9Kqj62eli4OojXxBK/41r/UpHJfaDcYFUD2QhVcj5wseIM
mBaVVcELZ5JxvYDIji5Oi5aC/Mb7nx3WeFCUrHU4/5yQvpi4KoEXhG7N4Z3EWFCqvwPxVGgrT4CM
2b8rtY5xOqbjmls/yCjhF6QC9YN9zOAngq/Lez9p9cAf4RpC+ob/t6+AO24QttK2LznLJ+Vys70R
+Lw77BSaz/ASw+x/iwIYEt2bK7VQMgv8cqBXT8ZprwoMV2aq6dOsC/xIiIC6Eq8xmkBRaX4E19gj
K7+BEZ/TIi9OTuq8mNXr1c0yL+ZzuMqbHBZgUW5zEK+K/CQ/ma/zk+osP4Eb+iI3Unx6gs4z73fw
ps5P1vObHFoCdrpdr/JZQcJPPkNZr85nIF3muCHwn4XdAnwkE0iO7pW41vN5PgfJYH66yudVDf9/
mc/h4zYvl/B/J+Xcrn1alYs58MP8dL3C/9TL/LTGRYSvzh/l54/z80/y80/z81/l57/Oz8tinuNC
201UeUVV8mp5llcrNIiB2JpfnMzzRXECI1mUZ0gLiyqn2SMbxThoq4llscmXRf1+V5Y5zGEH/9kW
+RLVTTjb1RqWZbXmwa/WPEC7/mrdzOpqs83lwECd9WbLy8I4dvkmB7k5f583uRS1qgMRYq1miY9m
IJ9VjkLkRYn/rGGkzfZmAR92J/D/m3yLfld29S3t3Haeo2KENnx7iqbD7Tn8H67YttpCjW2db7f5
Lt8t8uvlxiGCAg4k/oc3gRbzvM4R4XdeXuebAr7JmwIqXRY118uUMS9XtjwHb/U1jnjvq8nHDUYq
z5MbBgQakdtazFUG7ZrXxruFHtTDiO+be91iywbiqi6u3GGCzEoB80Vysr5mvcOsYJh0PPqFluhE
zqcS8jhi4xRrIOfiNbW4URFYbAS1NYzeUkDLKOB7diD+lgVIDJ5TOtXYfeTPJDGprS65CMF4oyeg
msco6RgRldTMFx69M1Saqg8WT/XSLTPYFXUz4Ubcnxjx35HK6HsaZMkuVohVg8425bbkRx8pfGk6
VlJYAUWbc8Jy/sPrSw0ZVUPqb9+RpiYUAec+YfW+niJexeYDQi7wi5c+Ir9OKJuYBUBnLhiQ8RDg
2EtlVMFJJ0Rs0upCgbTh0/MAj2JDF4wLBMcSJxY1EkG7Yo36Z59GJUfQuh9BM3t6ZuEGaOV8RQIp
9Lys1768HPbnonOpRiwtDr3YYrZEu53WhIrqfyfA7i72UCRHFmM65eMScXDSy4Reb6ZmL9LaYGWw
qzmKVOAhk1Pg5ifF7EIcUGqcDFwyF5REuLos5FBQ6oHicl3NaffPy8YgaaNAxymuzEj5mPIXytHI
4RoH5vSxwiA8lQNKY1cOlIyFxZws3sVVtGm3ZWFNmQtxH62oYhfm1JV62NlnPsoIjqSCi91GkY6L
ckmeJSd/irxp+OhhGWtwKGKqvORn0bHh951jC1E7izPhHsa3jngJxmz6Z+w53qaRU+Y04sTK+muB
A3TXAr7xlMxbfH/pVytNNvJ+CDQAFp+k9/dakJSnKAWgM8lAuoq9IZkk+m9X/eQ+SM9pAkLBPa/Z
zHv2xxwL9RDuT2zO3tYh9PTZYYOGUbQDqQHm5oFJqoGGli3LYvDstFfKdovFQkFENBARIHZDadcd
jaMLTtY1gwdqAmqBuxZmGF+YkOlhS/LGlnbvx5bF15urPUc1ILwf1quWbY/uxAPZCNV3nu2DwRE0
87lZEtOUXh0bm9LMJXawD7TSs7SK9uw4HUJLxzcWnZuRq6ZRBXwAyEUIl6ugLlWV1qx39NbXg9Gd
+vkIVchG2XRfgToO18m7t1C+StQlMAZ6QPAx2FMr4Y2Tlxc/N+1ayMfHFJE29dWQysVccSi7mRgY
6AiGLi5u01hIijA87nTohe67mUkkOyjcZ5F2oFRJgO/s+y7y9e0Eq+t5tyhn94wmTUyTw2aiQbhF
KUPNWGuuNypGzLbFR20Lg/03u4qehCytQQNwdF25MXRNx27E7RJJkD4bD2Tv3nKVrFH1uXUcwjWj
1uWkHV3fx7g8uCZu5J1H7yM9IPXaixrHjvBqoaIT40UwSP8RrhumYN0T1DfCks3N9NKGyUXxmTfA
uw9fcxnauhB+p67mwGlpjCLDlk0bOLN5IDi9y/351+qa01FYujT1Gow9ETNJi8grYquXEku/FJD8
ID2pSb9C6gVWCKBm5LxmVQkpVkiNEI/BTVkvQ6qF1NYdiHMDL9EdhlMkqPVKROuVnCRKfZGczNfJ
SXUGL4MEdVYJPdbnpyt4qiVUIDLCtEpgcgkNMrk4mSekOEreJw0mCdwkrKBJSEGDALxkEKouymhb
rLTBPUONeKKUMsl2m+wwT2Wqpg9kmx3/LJ5LVh8W7X4Gz+WykrGizQcnIHhS9lvkppT+3iycju92
JqUnJeIqoZwr3uWEtTTEtbQqaMrsvMVR138FceEg8HmHCbyzZIx//APqVf8xzXL88Jn+dqG/+1x/
d0bf+S39g/4diFAq9dHpSb7crJugmqdRmZ0D7Z1OgyAAaOgv6t635iOI2LaQNRUFm3rKk5t9iyWG
GzliBDpO4P3QYcXkKGQUXhJm6d5y8HQZyuoavwVX63bL9bbTNnh1Naue/KtNmOOXJf43cr21toS+
O9vT4W9TbYFWUPJpPAmn2YgRcIaBSonOS2uWotfThCX0CILn+8EP/9tHH3003dxg8CcIdCvWIb/P
fvi//o4M/8raX5I7CJ4PSh1OXQ9Fd+lm6eW2EstEv25sH1DlGHpxtttWi17POIqqP2U4PfHWRwjU
6Xa9IW+ygXUCSSy5aXQyNcwS6ISRI+sO4tikhiXUkuspln3gmGEJivVEMA9AkJ88ilv4oVDPAtqQ
tvgEgDQx2tygwSywjkNRF08h2vhSlENslX6xXl/sNrZJmmO6Loj/DNQiMRCbREOYHGOUvVhlDaMP
g4y8hlRp9WUmLkRKxhylPJsj1QE6Hh1dj+yMweZNS40cm6HB3k0lmsfePdUW5fcJdrin5PHHTT0z
Cl184Us5b/EwHxP5qDh2TqytR+2MF7Ud+ge4RefyDiMpk/DyZxfFmYvNvrkh2sd3u6rJz/T03mgT
Il9d4PFWe8OiudkhbisLntY0Yqh0TIRLOTus0cmOqIK8xsh60TSnV1n+VUEtfKpw4r/+FDquZhcL
GMqPim4J634i34/mu+WmUS3kibiqSZkf6UBId4L59zunBPcyohQ2ZeNjqEkhwrafE2sEgauZVVXq
TA9/73lzk/1oBhzaw1NTieo1fdhPZi8phJRV3ggxwnSDbdpWlQem+OKUOeaAPOxzRTYaJZEnHM5C
l+OzAPJ0wKimU9z96TQbAfOhVBlyrabsZ7YAoQofBQsYBZEf0+81xRTC1w5V07fwX2PV+qfX33z3
6umz1/8EN43ZoVjBZy/fvPpXLMbhRPYiQUHZKObZ02I+Z7skxyMqZ3y6slWcR42aA77E+/PyZHeG
gCvVmbj40A8j005/ONT3Uh/EVrLZTPoETA2fRSk+MR4DLBM020nfroevCZCRJ31cyb4pic7Fk77k
nTcLq64xro9eaCwugMSNENW0Tf3Mmfpsib58JdAL7BFPSc1ejYNIAX/AFSDpeGANUoe06q8s7+Yr
po9qPXpT1stqVSy+R82WHaYwLzfI3pOjFKgBndN4YJQAh/9yApwDb7nPksHj/DceF8NGlS6oD5On
HfSi5bdXIxKT++pQzNW6cfbYHUVKsNPf49Gvh/zXJ6NP7t/vd7nb6na/f/Lq5fOXfxgn8Q5QjeV3
kvSj78X+fEduoamaSiq5aoFj3YyS75ry9iaguiUVmeKU6nhbocZEk11dztmGaaRnRQk+B+m5D0cl
FuHG/aPc/69vGjjxz67hyhGRTYhuRESXZR4iEW5eLIGZTZAu09G/RAqLLtX/PbL76vCY4yT0LPmQ
dZNhhPHJeuGvmMrG7XIa7SajLPhS7M/paV2WP5ZTFbGTjhPvm5+UZOl+PTD2bfpXxcGb3JfiBctx
tbuG4WhhAWm2ZLDXsd6uyR6Kza6n3J/rFazzpJGJa4pHeopPGe4K88rqa4kK3m8tKcfdvcVU8rIe
P2iD1vkawqDZ8rS6nqSptwbPt7SlDemN6NnEk19TAidZkMa4M5yo+F3xTSCuKdKndKVdBxY3km/6
mYAI6B1vn1wy/NwhmSPF5UZsbQNmNlqVVxW6FsGcAgwvXRy9kEAcbP99Hf7oZ3A/dpYKZVP02ZSJ
ZkrBDlLQ2InuVqtN9yGVHU2n+MN0evTwOLe+FBcu9PJPeyGP1FmAjaiKAtBUuX5WWuAguZxfXCNa
W7WunMHKVQCZaqHRnmyj4fas+ellx6OTRKUJi4QymkdEX36DvlpqUe4ny44rwStKD7b39374L9Yr
1hACsBJNCO/v//DjH3RYIvoUl2hDV29O3LmmFwtaJH06otn1KLRb9By1EqvpEsHUF1uU6VGkBebG
RcMB4fqrihxFOC2vQcxaCab07znE5omqwJHiPYSc3J6DXHR2TuqWIBhHaZow8AKIujUk3PMWYdXK
9UwMm2LTYFCSjiAkKrBi5/MVeZ5z7/9SlaHPE37JD3H20+AT//w0eSqxVlaIJpaVrL1PB9dZwlIZ
ltrU6+sbHe5DMRksOJ8rF4bkmoI5ShQQd8Ui0Y0S1DdVl/P4lHO8UcgIH/CTHbpl3VNDuYfVnpKD
EEpazL4pRox43Um5WF9hZ9q7AkS6nXYeuuKwz4RCPXgUFAoajmfgzv4pZsFTy8CrTRjjPL1IS9ey
mNqMwpAO7D7Cc3V4MfeKcTHFbrvGENYZBc3UlM8FQxWouW84oe6GgjwwVmaBCumm3HLIh9UZtASl
kJAJE9x0gudBaNBeQwy4Ncsi+6W2j8hBob8xjIJujxeDonbYRQWYoh6IrAK2Za05a9YlpifcyukU
y0Iz291mIQu3FYgHfngADXGhi/IGyvGqwph/f6PeHAwjzx2hDsx0znmr+Ym6RkEIkWFOq5m738nV
OWZA1kNBJ1dacH+X5cSobKvGcarhDVYDKWr4lRK7lXPL2YcdBnhqFjGVuANfES4+Xb85HDoSvFio
5nxui/X6ghi/oVVuiMaPPejhT5IBXJA5SHDrRY53ZSYY+eipiAi7Ko0g6XvgN95W3QOKifEWK3S0
xgZztU+rhH4Qr1h1L8MaYbQXCOQUnwa0ZK/lUwn6wmWDI9tU87LGPEOUyNDyr1KnakGA1FsSVHiF
o+QlUs+8pvchkFex4nirgm8NWS2bU+V2FiV3s3NsQWx0HEKnSZDn+LqUPK+ya4nEr2JKb2oNncHo
VjZyJjt/Y0Oef6S90k7kwAq/Jb0gDY1W2onQmDfePYLJsjkEMKjt3RzqAE+clA3yk65EBfSnngtC
J4X10nRh+7txDrqyXg0CobsDalrYFNOCOh4DaC+qqe91xF+Rf4jtYGKx2YCPSwqJ8wo4NJz4G1om
5sB4dditAIfG8wU/7jbaXZi2KW2Q78s96Q9XGff0jsoXNCI96FR2Eu1H7OBizdDsTUeAjTRgOZ1a
LZCxxzSRM1ihsVh7QisLaaOqMbsw9vELG3Ll5YIwpWW9dncrlDelkkskyK2QENGNHH4PfHIk8fVT
KvjUtbPjWKXu05E6f8cR/xxopBW2QF51oa+EWT6PKPUKOmFVelDyqCxXu6X5dhCeSQ8RyJtbFJXS
cpoxE8753o28BUwZZEpWDSv87nxdzYznk0MpPo34gF1SN2prCafbc3FnMBM918/QQP4o2oqUsFG7
QrI6wHfECXom4ImkeBDk5VI31iw9LdMvUlk5PZAcePkeQV/KlHPYDA7rjJyFwulaTtT28cxEfeBR
x0zD52pvbsuX2xRE9jzueZ7ftvO3T3XUUhZ791kVe+ZbimbUgLry3kMpckCStH6KPBEMNsw0Tw8X
FWN+SkHZ8CZUsvNIvemt15kDlUkoYs1UXmFyusgJJVxzcaNR1cpLNEJGKxyQa/eQfLtxnQQbAUeB
1oNK3Pmb9ssOSyKmNbw6n+lKA3cz/fIjNDOI61v6GQ7v8zR27TGrvq3wbE0PVHkIW6N4Ct/8gTWE
63qAHWfIg/HrQRbnoLQ+I1qw2ToE3LBfxXcF22B9qryXvWA98aeUQXi8Vt7MNlkEX61rGbpG+pHm
bJ+c3eqOVED5NwlZfH8i+JokQcGxwi9eb5fbwZG9o8fZbSQBQ+3eZO5l/w2Wfb0uZ9O/ycbqRV8B
y7Q1Li1cMlTNDPw9zpxw+4HDdnSIpr7HcOWxnMM8xB0hSgOCLAyrf1hTMmws2wy0TvFssT6hL5CT
sz7Gdl0ISEJNjBYe+t7CO29gHS/uL/uFtyLK/6B3Hv2eU//PPdXuG8KZq8SH6K3Gp6752lmGCFab
FaDgupr8R16gjruQ3QBf6lllPDgu5kt17HWnjpTfVBfbJlqSk/wU7Xf1XodZiu41E2HHrmSixlNn
TrnoJEk5otRxOZV7rH0N15tQ7FYIJfJM47snpniXYMPZjnV7KLyds0OXETmaWCgdvUkdArJvub0j
6mgm/tLQt+Gd9Di6Ni172z9sEvq/fgskFC61rXn319fl9U5v6sz2p3hD1Kgcny7K0y12aH1FmbCx
e9307VBcruQRnMluPGbnf97YJjRjbvnDWqHpTHhtlDATwwlrZxJ3wwjrEtCsU0UDUgf4yWq+z+GF
YvseXEUCTRiZQwNQp3AVZm2KHnv3xHeeen8EygnDp92s105B1q73bj3BVuHICXZPb+TIpQMMvpCM
BmwItIef4Y9Zqrbqm3qfnfqm/p8b9VfZJFiWrj3qHaB+4zuGZzeGoMmkhxntiwXG/dM6k2WgUUpi
+GsDF09ZU2zJn8VqA6Iv0Br8b4x4FlubqeAfYmTHcs8xYdcWyw3+zSulPFt+MqgNFioEjfRJjZFd
MaoKKYtVCA58hE9f9nQm5s9sD+KJXO63UlBks0ynFhZFdPHu9r9uwrzbxWTG+GHXCv5hX05/+0ul
x2peIWt1eA1FZXIafl9FjsN+9P9kPhf6H/gyw/3gjs2sA/F6d9JWcdhZ8evdoq3ivc6KX1aXbRUf
dPe4bp3jYWfFbzGZUMtQ28ca5wO8R/8ujIAGHGUE+EsWlG1lBDTNeEu8AmHpuzAV68TeemCjbAcH
n+Yy4XY2snd7NANoUGZitffvyZdIaKZ9+vlCM8/sPxZ/s06KUWU9LRaLr3ar2V4vYCnrajvW69vV
OpZByFoqcT7CFrL05yov7nYr+qOY2G/Zf2c1iLhZRZgBw8PY5aJsoF02vixq/ObP9mE8XaVjboun
/1Nk/5zig9SRtQstaKOrwLgjgL5gffQ/lzdX63oekWUv+BckN1fjp1EU6Cccyx6h9aY1BwdFteOu
eWAR5CgyXl8dXlWOd+G4cr5xBHys3V+2cKus2XaoRW/XzztEBdhWdLD5LqKBmW7+AXa4tQt9i9QJ
Ie1UpA2uMIIGXFx4ywChCYatEv960eCBVLoChLA5LYZHyiKNJEVPVNdG2K1IkSmZVzkWXo1JwSu4
mwDJkE8IZRInrhXTtar+TOEHZs0/182+hbRTCMJmJyRAIYOXQE7XzalFOXCBITChVGHX9+QiaL3s
CboO8BMNw3dxITVdXIxgZoFc00/auxcXfhqMLS4qXotm3hFzmNpHT2mzLvB7VV9V5GHdXPlr7XdN
tuME2CKCKkC/dtDjSl3mNdRSKABWueXTdJ89RbWdfCwhvEPaM7h/hin3FI4/VcUIcp4Yn7tq/mlb
Xf1KZfYwJaCH06wICdqom+Nw9KD0qwryO6ludF9rBJWFhc6kP0eCgB/0etMYT5J4XmkLY3bLv49Q
Ef2Kv2OLCICEmI3IwBSk6p8DwALbRPKkFiy3ZJDsnOoD34bQCatUSI+VLaU8OBuIwZqY7tAqaFfQ
LUECpxtUKMLH98qslGrF0pa0r8SyWnUuuw8ESk0WHijagxhxn09mYcc6NXFMoHd3dmmK4cL7njS1
KENswfZSIesJJZcJysw0T5HXZ4gN4y4MSP4NEwpKyArBa6yMclntdsw2017DQLiQZREUQXO3ApeP
msxdRpaler3ZK7hUlqRCYE7nBCCItgiBgzeSkX+WgllLQXPzNXbDNs8YtX7KUFQp98XSUSQRTV+N
LvjYTmffsstmeZ8mmqFtYDZHHFJlw7Hc0JhvX3Mi9yuACPAtBokvu/ZCv14ppbE+jqxF9UZx9Hac
gX3Dcnwe9AW1FfPRXYBxZJabAtFC0pSnzy/pMN+21X7ZcOXfV1fdyRu9eZcbZ5RhEEOsHwvmNGwm
TbGD1eCGnNOGPuEq/j5hc0gx2VRhqvAh40Rql6i3/gYU8WUneWW1OoCtZDSorrR4hNT5ZDLo3EAi
23B94F/k3nfqKxWWQFcOutXYsIXVtk2F/Ntca/4HjN/QtR5uyPzLyG6oULSJtbzpS1BxVfPEy92E
iQ2Bs9xfBpb8y+oKwtWAxmMmsC7gqZOsWM2RQivxJjUSe6wSWBAeoTskpIpNW1XjywcR9RB3RGdg
Ec8pm7ElohtMMwpfbzBSWoIy3Qg5aQ864PBMgW96XK0IfxdCOm9riM4EBuMSEaBh+7kgNclSpBDi
QDMN8FY9Lcfjl/RCgT0v7NOVQykDPwuNNEb4w4whyujGAmwcVINwyzuESaPygnVh9SO98Gbk3+Zg
R+PR+EmQ1Jiig+zX1cvJyXYjlNMz70sFpKpO0DR7v7/UzqwjAXHGj0c+8qQ+Kyl3wXl2Y7ZCtT1d
GbKyEp7Z8h3IE2HkPwCKbTMqFQDWWIjX9DD/7aSIEziMxOag8cpFiUE49NV+FZNPhMSfqvk0t8sc
ssbeu1zj5V8prsBeOyiwOKTXQyLLqKsKOCcrjBnFEdRV+8c14aP0Sji+pwA/BFARQn3CKSqPF366
CeMJq2YPHT0SmRTBZW/agudh6lbLS1EPDPwkJiaRhHCFIdFAP4jsoSpblDmVsqIkkuEcVPFrdBe0
khwPs62actsRFND9olS8YQMmRjZ5gDDnQxU5Vt7jh4JTxk3FKDbdLrzaeZc/D3x4U/b0ACBMgz9d
BWC3HKkhEcUU487QPcVXCjNGnkkv/GpwlizyHPoIT2iCH7Udp2gjwxzJxCifYYzjjBPfQis+B+X7
VcXUzVLUqfasm0TXXS3XOCJbYz4MyP975UXcLat2AwjXAP3gHB6jHMQd9kkPzo3WdoZ9m1lvNF1c
xuLr2pYW/hslbzc28uUIB3ZljjXsaiCoVzo4EcagmXLVr85+28gVxS5IJl14prw3MaP5rCvd3D/5
rq94oB5tv7NZpenYip+suqH977zYSHPqPf/YGzM9yA5fc0xk6DA7iJCAH3MBZ+1JuKJyG8fyCZG7
qicNoJzmZ0lPuJz2MOFh+rLOsMN0AjL3yjKeqVR2nn+qz8CHMmVaksRSmtIHo1Jv+tx7LZeREjUi
H1nHk4TRVSmXWZVRtiOBVcIt1n4UvOrxhfU+SUxzYNeT4l5ysgNuseFQPF/Yo86nO3LASLnj2a14
Vx7a7uDGLF4Y7k5+7RGTAUyggixyiZSikug0M2A9wuKGPeWIv6YdW1Tj67F59l48StYVG8/myu1O
NAjohU7yA/hiixsKu+jQuOw1whhyHM6hs/GFqRbc1VO6aA3LnrOugCnKurrTt5jcJro6WyT7zn+h
ayqDQANtlFa/1Uos0HAIdGQrllHeS5XNBZgVknEcMKbXgGC7LHsppmWRLem8ba+TCe+c3iGkc/j6
OKJJ+EbqFMRsWZCBDqFmzH/dASE2CA5JeER2lwmG0NAvRBacymvmhFw73KczPY7QR/PcL24Y9x1w
UwAcSwBo2ZdJ9oXREaMRK26JBiRj03cFvOmOC+ZO4r+YqoYVAejdJCePfSfWWcXYbNVu2IJ6qu1J
zqZEil1GSae0e1SrFRBXHG/Eri/UnTDKDXjZLjY5yoxuGdRRNlO8LnCPaObCdx3f8dKaAwxaykfM
mxNyZlb4Hfn6ScAVc94iQZ10GNQkFk6SEjXmeF5X4y2jTcYBxNIlh0xZppTjWMuKa9EgqEOzxYcd
di6EJxsk9kwCGN6q/rouHK/XMVRwHPnvS1UJ7OBuLABirFzi69Qh85z3Y6PFfKFzbEPrSJ6tQkCJ
pyPbXpmce64rPccRGnCs96IjrPUaCGTc7NdLdw0LYY0//XoKOgg150IpJoz5CmFIpKGwZcrueuAW
Hm7ux8gtn55aANwzeAAn4HxIPDfg5mFKAzPZKlUZ3ybxPfQOKKx/iEZCQ/GaGHkXQ8+NhjUJ3jX8
0ZOa1Zxk2W4WyVQ1i4RNArpiQxp2yF0D/CsAnnFVmCLqr/DGWjken36GBWwHsYwPvNpzb9qx75od
TwqPfnen7QoWrjZhcOOt4i3ASMHT6uNLs2BVITwpaWhcemnpm1+RV42auG64Xg9xNwXXm+pzDPVs
q5EAZ8vrrjSjq9YAyb2/IV8xvqqHqcMezT5sdE0IFZjWxCELomevITbgZGzesJlGwylCchlokQ30
31OGGRUadYVkrq0c1dIZaWziFzB3I4eNhQfaPO+nNeSO1mtT1lk0LegjDYDciGjTzXfo/bRf4Gg7
+wnOSlr4hvPOlpLT22phxPd60XJ0xg5CdCgIFTGQuB8IOGqDAID7x4GC4RZiZVApgai+p5Rv1Drp
dtRh3lDqFjH0o4HMgi5Bda0SNYDKmT0BUbQNyCdOw0DCm0rdwk1XXS3fNKtl620EcmCwe0Yl4/5e
5Mtttao+g0MxhQND3oB6sQcUVeUf8YySMAL8pSQNtZXWVA+odW4vEZe3/kRuD4wGewrfnorpBxya
+VN+C7l0zNNT9MFdqt6umjihkllSc673G5cL1NNfn+r6bTLKuURHPMZd7zCa0XbCH/qu2VbaJ2BZ
6zHJ2SrhtQ44TS8Z7qTmdgPYuDxJNB4KTBVXO9tf8JXUOZXhpcy7FCJddj3OqnH0oTPEKvgatcHQ
9IvMAveHA7a93mP2Dxr2jxu6hnFuTlXe2ZZcy71BQ3C2O2fLkdv/WBf6ha/wYuBx19twIwch3MC2
GZHANANcDENy8MLMZkQWE87X08TDjrJwxSfLw4vOOHI/q0hol3DZLjrysItePZWPPaEdioK026pa
b1b7azPb5NYWRU0DIai2hipA0Y4y1BBYR1JtEHM7MwRjxsfTZssscnEFd5k/TD1Frk5eXpY61boe
75i6Lq7uYvPRAWT0LlfWUdcpHTbO0eim855Pm4LRYqQ5Qr6kfAAeVhZe59pBQlTjsOnzIHY7RxKJ
PXTWP/YkdU2UR/pKpQZzdrwfqIfgojNwAY3XzrTUs3zdOLSqyCPfTMoBkziIfaJ7fHJEliHvg9cd
6ck54kC5CZP8PFKf90FrqWrU9tYXgPP6oXq9PRqFtBOllSqldj3Luu7OCQvqUd9gWlzDV+oSA08Q
xliXLhQCjQ9l0Q1QuHjakwOHyxW60Z9jEA1tPGUHOERA01X6Lr/fR3YhGF15BE04Y1/eFDSF74nm
4EZPTshfHHljgICStIgufAJl0xoY63qRmXsN8h0ir8M3k6TbrmyFEnBxV3HkFpq5d5DLg1A/q1Vz
F32OzVuKJkhUSDVnzkQumCb43DdV0jNnUVYXRoogxfmmvAMbO1dFSUGo8vEMQpEqQa3520BRnUDA
IwkCApRaSUctt0789p5TJ6IjTQhEAcvgOEtwH64d4Klieik9bjFvOWf8Mqjo8p5ZF05uHzsaYRYm
FBV0xiBYXYyE29yngPRUFjHl4ZCwvG/Gy3qLv9Iqow2kGAJQufxxhw+rbSrIB7ZRKcASFctnpubx
oZpDTLVAlKTtEufjinYr5hzkHS5BwCqe0OxMFKTDs2JPEp8RLxkd7jnLPSV5EvARpBKFr1ZwByt5
nGisXefVG4c4HmjmSnErZlaCWEMbYwZI8kr+IqLKkcVwAvwgw5B5891PJJeW7NjW0YwOBjABTo+b
SG11TkKW9LSmd+ndU3OyS/4+vcOqLzsjAW7t7j2rJ/XXT9JBFNA7KY9ckfzBgYf5pOuIUBRykBcr
NaViXbETqLDfMc9FhnKetwpAfCDZGclr2p0OPBO/QeVNID94TWKiNK9XEKTobDtpAJkv55B9bizt
RE4xuokUOy1chOdezPc6KbGRJ/DwD4l9DiVeJTm2mWX1roOY2Rjahmev9aRiI9mMMbjEXLFyuQJl
x3yrug52mvK1Zvd38/uRTURvz1iXGR6WdLNBUxNf7BRBZ4a5xbCRbQU67pryuS+bqs0EK1bVgb6R
p8vaLMXnaush58yvQbmBySHm5mZTA9fe1/IpdARNC8pdu2U1DsUdw5ePRaQBOM91a54ruUqZO9wi
hpYNPf/CvekQA4+GWYOGh+93W22vqwIDaEC9UMaYzZi0mXNepW0eFKghhzfVqfRhTpUUk4mpUBk8
sZPp+OZesRwsmysUtE+fBAEJ8uoXU42A5PUlmAxXWecHqclLRV53z4sKeqnXWWdmzaPoYQI0qysB
Z7w50hBSHQavh2FieVYovbSSilPMEW3ZIR30y1mDkO2QvN8IqSO3uWijg+QvavatZ3930LyzKdrC
fVhtcRQLf98ZwiKLC5xyLWnrUro1iB63gW7RPgwMr13WS2+xyXHsgP6B1pCHruISikTST67wASKu
YEiiOkgCKMNgk4fUt2z15+bPchDzqKJbggSs9dU9maRp0+Bv7y5tIW4HwvA98EtygPbAHtqzCcVt
NdtltZ1RrVSf6oPPjzu+c9ZsZwQljVeAzaoggFNT9C/3pU3m+ntcB606ktk2bqVH6uv4Yqy/imQZ
KRRcXwBpbquz/K2nIu2VihPlUS6xT8ojuW058GazqY/9tmnIorSypSRbRjJGTTDgwfxexFV2Eu+T
hLGAIWVCXXvGavjC/ItwDxiUFEYNIV2rslyENK4+JzuML0kzNs5UsZgMrOPNqHiPUOlO+sWzEErd
lhWm7pcPvNlsH82Zme9XoqikygLQmRR2O1zgURfOA+1pnDOeah4J+bXZXkKoIc2WXkMWO07SRJz0
bMa7yKyeirEAgO49OC3ebRvlSBDvCWvEOm4zpdXAKkvBobXpDtx0FktPQpYzAcfjKNrMyEEscfFf
D7ksEMR+sfsy5W/l7+O+tjRUfozwouC68JJI3/lpbFZD+3lbcrqas/Oe6NXxzbydSQbgSZfT0qG4
1PQi++CjuPnWTSaNkdGRRKUrs+d2ie9RnKrRTm+2S6tTOOqtECOmuSqqLzV5Aeq+oI0YRHSEJxGt
j9nUiQo4/FWZQ7EKhuheIiA16/Da8aBLeXF2RYyQ4n9s/nhvDc7LzkWgWSjqUda1DpYijvnGmfRr
KZIOYiADdGp2epVANv08X79mwMPhyKutLFNKBJQGAnfeSN0VZ2g7jFT8QLQBZvM9NXe8t7kUatyY
XHV68rV2IFrBFxQPQ7PbxOgTbHSXXJTUvqkj36mIo1xGOhgfvckhJk9MToiYr35r/LPu0DJXvrSJ
dNmPHR2ixN1VZ+jG3UY4MVF+w6hCnbQNPQuzfPLH9R/X4KzVAnIQQRaB5XC9K8oSCtBb6UpMqzcU
tcesLU0FsFMyFbN4LqwLOqUKHNmbQVKpX9knkxDkqG5FUGy23gBDka9nMWJOpb2eUuaimfWwNn2s
QSFEq+3S1NYOkFJp+uudDqehpIhRJkTMmeuhxEEqZ8j092U3m+UJjHD8Ism2peuCp7mWx+HIw8PC
VyhZgJzyCByR4HtFHt7vms3LHSxG8lr0bFPdF+DD1gpV+v5aWa8jiDu5pWtoTjEmQyw59OYkgWUm
85HKhIlJVP+tNp3P6itJghNlgUL04gIHcXEx7gqbfml432q+NEwpBEYSsBspY8Hasq6cM9pjO46u
uuDrVcW6WdMT8JanqGf0MsTVrhCxwPwxjjZb9wngLoHjmu+vaSFgKA4J+ON5jZ2mDBuR7rk30MU3
+o+skcapOshFQgUOpvhXy5wGwjtxnam4mTgErMP/AEtTt0wh/oFMgF9E8zuuoM7j5LspU8ZWL5T5
mICenpBCBR7hBTjRJ+LNE5BCF87YSiRjO0g6tliobthZT6C+eFQP8HWZptxesBJZTFOJ/PQL2J64
TgVprZYYoL2Mw/5cLOXZ+YNBj4NKnAAhRVU0FDqbRZFQIcEmvXlQb4Lm0mYJ2980qVgXPMwHcQrA
pYOkJqxtboEKWrQRcZTafJdCOWF/YQ+b7GAaQIUNlfIJALA4TvulSroJFUgGL6LY0xqaDSd8wNLC
obU+cCYkfUeQaloebcAUi1y99ebE+b7dxRkaA+W2dwR6wJm8MLtuILME2BUSxhlO0exTdT/CvOpe
XLA/fZ7h9O4IvCtlK/lFL0BYRyiyBADA9CrT2+2967SagFTgdIgiFmDu+xspwYT533cq2mU+9Vxu
twJUmZxAEES9cUzd10ktHVR4UNOvcf+3Fdoiiq/Ml/HBTvNj/mUd9vnE2oodoCvoBFx88OreV7Pb
3FOsoRtE2gtTI+Lcgj+14cTABIyIkYKp5RBf19VdFrpDqg0SEK++LKYe9RhEKVLlGpj0AMQhXZSd
J6NLpkCPUwQ6NknXliCNEZMLMyUeK5cQiyZu6Obc7wP4zZMs7J61vWOOyrtm+8k6D+TQjdxUC6vb
BvXMW05oWS3H4R6O2ujdov7QMTGFTTA39l2AY10cfvULNWO9RmthidxfY9TrGmYx7HN5WA6yi/9A
8fYhzqIeu5KUuWzwfkcm5ggzkeC5NZ0ZQRIMyLE+Qy/vsjwAjZiuV+idX7NPPX0yciz0QWGxSSDI
CbMoCpIU/CBGGH4BGsIgBoXSYZkeJJTivEtWVQBewG2z+DTbgLIJU6a6IGQnlgjCyv72Ei79K8UO
oN/kfgPeSFCRelPA3be+L10QGrubhJ7VlH0zt6+9W4gVH/ZlNF2Ej4dtT3FlKViHbNo5PM9H8XOb
BIcLYIO2RawteYwFBQi0tRty60OPPule526XcYytaQSBhpCd2oyBsAP8j2l3/P2LH579+PsP557c
CA3aRkrWuaVEAk5l6sQCJayRkzK6MxDNg5xe4839sDU859Y5lJ+om0Mi1esWpdYgRN1cTSuFN+FF
UxEvLjePLolqn7stsPvL2Sz3joBXn/5zrD6RdVLyVKQm8mQtNwpKjuGG4aSvbwaWoKtO/MKNKln3
19P+QxSD8vrDGgSxCAHcVSFKTfP3dn4HP13dJYDxMlUBW076YGFe65kUGXYS5MSXM/vZbDaUQ4Kp
2WhcZuSqadij8qc7vK5IdIpIOS6DP3OzOjlVn/kHofsbU25gburVdbM1e/sWDye4OyJOyJpzl3ms
mcUYJ68UQxrrdnDC2NLKb18UAdPpN5nk+S0pFJrOLyXtbKU5UwcycMyZKCQYU2S/uEE/xCa7NUzb
LeBjOrqaA3gjFc0HJ+yigW2Ijq6lo59y0ZjIXjJXUTtzYPYDEa0cgqW5lObr66r4xjmPzgK6H9V1
ZmEswagdC24E6UXQXoG0BpVwJBnwH3oRLCcBZQoFOgZ+ppHhCgolZN0z+A66Be99dZuaptmcQGlo
ukYU2DbyB4lwpvEMh5/W1+uGvbr0t7bjVpNvp/y7aTzTcJLg0NDyZf+UfRsdEXw1sP45s2WzVugV
EW4xFSLBgT2BZi0yAvQbPSQR3odGYKr6Y8xIrepFpXdWx3jdhiiTHQg9iewk9rKEwRCSEy0NiwlI
FNV2vF0pRQGGHZNXNNI1wncHt2lPAajkJzVzUm3K0qfW52tvEMHcQAnx/NbFgjmEq8AN52vdC1dQ
1tCuJzMGPStIiKQzbz4te2Ak0U0NSH0oeQF5w0NNoTJ7wo0N5VkGyB94AlclsIBAujjXKsnNEMiE
gaX36Hz9h4o6y37ZG7aun/AkkAl8jUmwJHacZmLdoH1Ub3JoTH+tZktOYjDwCZRdehI4/GXEQnA6
MBfAr9Si0IpsxR9OOzBsEVvBDHWOo+E0JoMAg0cIpMPgqUNnZu6daJyD7iLrSvhmsY4Jv+sVPV3l
/GssSouF6Qruj7LD2RfpLo4VRe70MEMaYt5z/1WTaR1rgnSwP2J93lO4JcU+kf+ecnR4I9AfTRfu
1D4EowXwFFuJsYetl5a0vXuvOx7kZ61rxxoj5qY/veSGbd/+sZfABt9KIi1c/8kRpBkmXVT1/Z3u
VrAlludwlfrWoh9h2u9kafS39AfdmzmheOgl6vGnTG5J7eD+awW245mfSPwWAojrjKry1crXNTrh
m2gukkjCagAwdUrMKLTZ2RH5orO8yj8lWBU4+1pk8XREC7muLLj64IgATmsg9/fJiamOkOZ3lQXb
AspmWNZlc5vRtd1cOalJ+2Sh2nDNwTAKtNGaloXbgpAbgHi5rmyNqhI3k21DCtmqhavrlpMXABe/
2TaX88vVfcp84aGxovtXa9Hzk/hhoGlvfUh6Z1r4hWMej3TP1azndJp9MxFPcD82LAAiSHW+jF1O
dc1P8Fog3cpDqh1lKmPsgTaeYhuof/jZTfAfi1XgKUkOLnCN0RH9ApohyvZkXad5qUB1oCTspGd1
fRX7JMN3ZcyR8qmGt06dEzkzR1TAKyJWY+J+7S5Ff0Z4h5rMpTqMmMBTzr13hrl6b3Q5p/FUCt4o
P7PzlbtsjIj6EoCYtvuNugydOrVbiXuCWP8W0olCSAh7i8LkBXizQLfMK7MXPmX1LWYJiKDkT1zM
PGntEcaGjQaRs8kg5djHcKqADUDeGkbcxYJD/HM4yTA/OClThrT9zUPK1cxPpQXznB1dAMF7NPiL
vQsU9DYhbWjQVcDVkHNv+4GsggLldvgb/ubyMMf51OdJD0UNGhGkF1UpTvP9+tMadBqkm/CuvDXr
pzhtNR6xn37/8b/9u7/7u839GPxTKFD6p1cfX/6Hv/s72MuXpoH16RK8dlqziquKokrI6QUAnyhv
CSACtONB8bzM3jXr9X329mq+XreLm9t6uRtlv21W4Gryu231qVplp6fZq5cfMuDi1rDpYZfPZpBb
B+JkAHUz/2b8dLysPj/NB+YNmDHg6dnw5bp+jj0cjrLhW+gIDnp4Phg8f/Pq1YvXH57/9tm791DD
yf+ai/+OKxhBCiccc3x8QT8frP18fPCj0M8EKRIm3fZeWA/LFFRfAmx2Nmt3PWDM6H7pgfFRz1Rb
Xz8Z2dotJN17ClL6A+ptkaaYB91zJFkbU2iHFgApBXyk4ibcmKBfjfX696vUYDocxa+Kj9m52M8F
iycPK0NTD6snOf+rqcTstN3UTGN/M1ILt4FVycf8r6ub/9VewZjL3pARO2nAzva22NIitGe22XOC
CtLLYIh2vPz8ZZCkUuoLh6JDCNT8+/07evI9Ll0HQFH0e8G9GFH8On4eNEM3u78nXJxc4DWtXU66
qqENfkau4rLFLe04uLvpyAA2YdKBjs8yOCIjS0TgJZ+rdV1prwFD2BHeMA2HayqA68Kd0SMMu7vm
U7V2zvFIiNGjtrgKsYRTrk/4YgxxrhrKMhb9ks3AUAhWkwCQEVIrgu4iLX0Kzkz2ogLy0iieQDXt
RqElpGy4oLPC/kRMuOz5zmgx6hL5lUgbw3VjP7wxl5q5lBixcJjKlcfjOfO6dh5TazGhdCo0wz6v
/VlJy97JASz3ZhEgJbWt7NF2+MidsrIjkagiLvjj3F+kbi2AHde6o6Kf1XmsEntOq310t89kCcj5
zp1haopOcPr2Jl5JcQPqgvSublcpbX5XKdJfj4qzm8HZeYIYexpovdXTIN9h5RJoSLTIP5AIPwv/
ka6HUZpgqE76J8omFfUjoVgwperWSdJIbUxQx6GFespUV7hcIiqyrzsHw0AfsPeq283u3i4OfDxM
7TlZuyhRzzGT4NRI0SSw8D/vn4UHLAjPAnXpAfOwX4uDFJNY3bPElCCgv7hMdmiFocyMdgD+5P0K
v89On07Ok4jz8k33Qv/sMXS2Bx3ryNlMXR4+wlRuQ4hgsl/wvj6wHXC0mAnh6wzvy1F52MPIHWZH
ZOyR1lL35Wq+/pT58OEY8rFobkH/ZQlCCDRCWQIUWFRM5nszCfA+g5ed4XUjn9SdRMedMwycfXMO
cu7wbBhWhelIpRMR4sQCiKWW8PqzIRD89wK8LtKZEaQ7sFggd5/nk86crlDuCSzrqNOiSFMggxiH
LZ4EhNhGg/OUjOu23QA221FRToo58oY7nBqh+EnSLpRPcpjAboIZaRGSLn3ufYI3TfW1t7+mU8n+
HtHYMXQB9ytwGOYkx1kvRCFC0eqM/I6dTKxeknR3BpLSZvA2QlrS9a7OUNKwegGPEUVJrvA+LMvw
kJnPepy/7aY2xb7OnqSk5vBOP0J+7gLB5dJFHy9XPiial3vTJ2gHKgS5Xtl+1sN40zGQ5oNAam4/
0JSoFg+L6UnpWPWGZGSrq+kQlI/rAYentTqzUqx+YDwurxek4+68WBZyjCVbzNmEkflkzUNiPfjp
9cf/mnWMqCn+6c3H//LvUb84QJ95VCCvm1O2ey3IgsAoZy/fgH68IT8vSBEAn7FCub03c9ZQGg4j
jrGkz6q+9ed626xhCDp11TEpzR6140etoR2gSMusJs3MKVQ/m5lfpEeezQ46TUdfctLKB3+3bBZH
fJZLphZIBGXWx6VEacvy4NcnNonV2A8M6PpfeVAxiYnRzh61555S8kED+9ljis6u2dqzOwjZ3IJT
ysaaCp5OaAONX7x+YzYt1flt6uGTf7RPX3x8+V6eurK//vH9fx5BlBlKGwvDyG/rz4ZsQuSJqejV
i+9f/vgK/Gpu22y/xnw6NSD6UD90Rz58//IdVf/0m/Tjf/jH5PP/ZJ8+e/78xfsRWknX9+RPdVmB
WfRXg794h+XV/FOo9QXl/2r+pxqiZAgco3Xqb++AIojhpmkJaPftm/cvP/J5tK7u89bmBN8QtoeR
x6DIkA2SJcA7rlbkdCkOS61Ows29DQ712EueIokBXC6AGbbiLEN/Ce4NQs7pvTeYZ81neeqy8E3m
wZVhsRPQUIddEUptbcKrVoMsYhny3/Zk+taLBaN72bOQCu1dBSlvVYX0rS9X9IPXq8k7M98dCbth
+sBqftqGSPLFNbpARi3/kYxUL6AAJtDB54HQTB/iBO3uN1VBZLzg+kcZPRh10IA/Dx2ZHk4gIxXW
1+VWPWRKNJyYCwU4NyyNq/CXlGownBtJ3QQrkJhQ+86uDuJXV8sZXGsSeotxOV8R2IQNz/IhginE
3TmpY5SnSrNnrlFrQEZnpA2q3CCBmAcZfDA3fNfWoJD5oI/hzsA5SjFxXsAOFy7evKeVzDrubLWz
mEuHDFwcReLig1KIaRLVwF8x1UmE4fo965ohSrwebFI85SQ8+HSvUzCK0cC4f3+o18vmrk0N3qc7
r80JcGQumFD7MoXiBDi4W+KGzQ7hBkfAY90RbhjESzZApnlZYj+WbpoFP48Bk+oUF7vr1jf3Gf44
7xQe04TJn8FYOUrPTQ8gr9ejLaHYUDBeR6rxow4S/G8wqPTdRPdtOfjp7ce/Z8Z41VyPzf9/+t8/
/lf/I7HGEOWwyMyzawS84gM/N7LzPcaWLTlYYmuI3KLaPrYoGK0hLZDCagBBRGi0R0QYdJ5+9vbl
JDOc1P1llW32OwyhAK/dFtqByPr7Xyn1mXlouvx7fJV2BoeTNzX9h+6///D9mx8/pOnrsrrcXx9T
EKQOQ9OmfoAefIWHPL+pVqsGMhLeNdvVMveL8MeJUukhYe/5N7i1II80HY/HXXzl0cMIxqFklg3F
NQr/9apq2/l1ddAoaSRDM5Zly3swsEnKW4qswJ8RPgX7E+sryMiI4AjSIQb18d0KZRSB/brqANY/
w5OUT7g6r8NleaxvAxlrqDFAfBKrNI7A5ZF+y8ch5maLpZFuKrA+LUvYAPbkwKFgP8y2WptJvKU1
kehlPBoO1xJ93Idy2obsCjVyid7WpuZ6TQB36MzU7pbNfjdSUWlLQw4ouSQIr7vFOPsRDjRGGwB4
t+G6L++zt/dv70+fjJ8ECbN4y5jllF+EN244g+aOs2Qs9u2uudVQDjLjTy2dCPnhjv3Gv25R3cBa
ILj4fVZZLlr33RD1fOFdq3YqRYjJA8mPHSaK6NvaFGjkepc2JfhFLM7nzHuebtV+45c9Pjk6kwrZ
lUYIxtTo/mGwYywfJp4MZ8O0XvcI2cRu/mmgawi6BMYMShzbL41IfZFIIi885RS6dPH3XyW4XEaI
kpNooQM2wJpsIfOcBX0q2tJjbD3IGG/ZQP5wXwUTr2P4oYZOFaoDARDS7VdlmQQmSL+j56+wCx3k
vYucd51VyEYNSG99pDKqQ/vgY+yRrmRE0HEHOzJerKr5NkwzFxfbb5ZYNVbqddxbAUdnujKh2Bmw
ORBs8gj77XgQsNw10W6M9IHit01rZJJ9vcP8xrZKQ2Lv5qtPmLvXJqgAt1W/OsxHtPg0shEm7NvP
y+1isOK+zlOECF3kcXyG5BdXaDW6xiYwxypRDOtRpmbFj1NywaEQmmjncZR9M8pOnxxjS+rdLmfy
BEJ7zgcP4rIjT/f+jQl6gSEPeGgNDjN5r/ZP27l/Rnb2g51kPtHbiMDa4KFaRF80hiij7S3C+iDh
gWsqMLpauc017xtEkledue/BW4uu0OjeK30DYeJCtTSm51K11c6i2zKFQq22MXSxTN0jH+434lwM
TvyPLLYP5u7FqO0tfY6Mnq3SN03ZFQidPRBtwWEBWYzzxHomBHspZaYHL41O2V4NI40oCNgdNhkc
G7u4LxnCta6qU/Bwx0HaZgNpUBHsH2o9iAME0562c/KcxYdkiQmPQ+G8mmB/A/nhIpmbDMeB6juU
GNGs2LewakMsNHTXJ6hTiDcd41wC14+tfY2ptLE7MedkOuzdcQXHZeuz2nlKkzWOD3xq6+f7S91c
nfXpogPvDuUvfftizDlw4cHgZHCSPeeutOYvixK5qhIq9Oba7QmExSCAKNhNBW6nUvI694FF6lB/
igGT/X/lNr4jYBIQ7c45p7JHTAh9fOBzcEQdlhHY3wpTzXeybb5vXQ/TBlOQh7nisP5glzGMbzno
QNOjjwAnY7Vvb8KDrqrF90Xp/P93NwdWBqGmYB4oLhP71ZKkB1/3A3lKznfyA2L8zUFCd7Ca34PC
F02eAtN5uYfIN1A7hEk3GAIbKwXRHX8kZpGV7TCL8me4mLYRwOeU36GHj9+/1OTCBOHG1XKKfRjw
orcA3DX1RwFzO5wPEUX0zsGI+t7QMzejt14C0n/rjRncLAzBhg3m5YH5+FmbG1oLFujwnoYRk+Yp
uAj8zey2MFN1IPwBoTePugk9NPLi3buHNQLAf0ffJhx6cg+6x8MtYGAsls2W8+q2WTuFSMqHf1sD
usw9e+EGmVPlZVJdoD7F+f/9m9/MXr7+4U0QBeBKyc+//Y40bJGZwTENm/8pvOaR78RZNRMKvO0M
3mDewxevXrz7Tfbs9y/efciev3v5ITOrmf3h2bvXL1//BhL4vHz+IoNxZd+/+PWPv8mFD6WOUjXT
LIfRA7I6PoiDEUUVQKs4omIja/P0BsBvy/LomOOf3tngtBkkhgJ97U/vP/6jeWa1qYPN/bLexslq
712iWpdMdfDTh4//vfmawyXH7acab+Sffvz4/wwp4q3db7BmxOExrx9/AXBYFZcMRIyCOllu4K40
rXJLkZ/WNjBw+l/3CwF8Bypf1Hy5bNB0V1CYHa/G9bbZbwiluCVuBJ8UOcFkr5is4MOxq2N4errd
r7H/vtlzjkOZ5pDluJrttvsqB7Gr3U1z+SB3jl/xhXZTrTZYlBPpCqau2cP3aF2iVEAZ1VR6Q3Rp
W72ErSQhQIgRdX4sHXFbollhBkNaOXw3UGw3fDoD7cQa5kBcciFd5sTuUvrWEHUZoqmy9IOh1s2m
6LbAetHw6N7YbMbOLWKaffzBVFwto/MRtWw+5FuU+26WzVAwtErN0Ocq502Wu6nPYUPWVzBzy5rC
GHGPKp0IwkNb6zBgsZoq1QcKTz0nb2RMBztHmxVbMbPsBfyYM1j9BvkiwKEjBBldBVnYOSkRGRS+
7MbZiy9zsDtR70yHh3AZbFbzHcBvgTvFH/O7ev3t0z/mQ69HUJykFkq8fCXmyoZaxo8yqWgMjjT6
85vdbjN5/Ji3SLO9fgza8Hb3WM75+GZ3u6IPygdPPi6dm0r0+G3NMWL33/2aM8uhuCng63gUaEQy
Krs6qufzFu334r0KLSF2T7x+kDzAXy1VzZsN2QhX9wwiYY4jd5NSAaP7j3myYbw1dB8wPcczjr7C
rrL7Zi/B2bCz7uZgRGwEkDoezzh7eWUWyrZdL3Rt1isBFLawpDLYUQYNAcoD6tbQaIKqLZjFEcH5
X6nWAKpbVyz5RE0HwR2w3lFyw8uKhwlwGzBlQOZkYr1Je2820cN2jqZniGyh3bdFkKwXuwK3zBT/
axkeJhGFynIyDuOSUQWKkACU3Yj2BkCjUBI4UhQ6QliQEE/ACPiA92aeqxrpJsO6YAlRmVoR/gAB
67lLzk/Ty1vI4hH02rxJ2yKjpC/LwRd/nIpUiiHUnBMmO8220wxK0CupuGNE9yFwkwMxx/9M/g27
e9sIZeDr06YjJoXVrQUBtWTjVV2+bJpVp1EIXtLX1GzJU7pu1n+qtg1OplThKr2btwhM31FpLLgM
zYEbql4xsL0OkzMlwvA4ndy1GNKjYSS00PPefIqqX0oNYVoU+qi71sLZPJgzwPNN429SzuNd8BZd
DkgJmYvQ5fvdi9Sw5JsziBJ5f7/ezb90ufBQTP9ZnuXZV5n+cNxcXYFa+uvslygs/p/56Dz1tfA2
uWpnIqtrugwP82N8cKgn9iCP4Uad72aWTs+AlBdf2S5OngYONzpNU06+lxXzDYayPNoCydoSXoUR
+Abd/r3mfxB21VPkUdv98pE+itzbDWQZMjWyAwOIR92ewmFqKO0/ydkWw72JqJrDpkVvQTB138NP
dEcfEksxnCjCQY/+krRRYgGd8TTa90uxpgkyOPXJCDbh5u8SpuJaEGqWK4pMtks1B+mzKfDzRMGC
hrQDl8xfGYdv28/RpaQjftsG2Maed2IIg9W2QQy9NarjvQGJEf4ZdIC4eUQLtpNOVwDuV2T6KLth
0xQOO0KSIyMX7olRRrUv0872/cBsMo0IlE0XbV72h1M6Gxc4hzE7aFgl9kbNEHFfchbH6Z8TR8eR
AGDjBJ1RuE9mQ95/eAdah/xojPn8DvxoyE4BV6ER61pg4ywj3I7z8vDU410LM1x2rXNf/Gy8CyPH
vgdvLfjfpZmVT0dQ6J62AxAovvKp7HCUhaTMBXdBcT+mq4Pj4ZNEIjDlHdef+pUDczpfz4m990kF
vPKZCtwSw1QauQZxoFa9Bj/L3Wy2SVsfe97kR8yvlLWbapLZFIOylhFxhA4OBgPJfgzi3diQbPQA
0HKBkarwX4IoQ7BE6gHQARTapz7Py7CCQxKZhxY2SsqPheVRSjKnxSpssWA5BCwfaA7SIJICOhon
ZRHLxkATZvjEMKcwIMZ81MPc3ONVgl7s9FsNNVGDKsNJGDtameisAopiduuH9OCC4epdZh8mJlRv
RFuMdu0etizCnkwGHWSQJK/87PWbD+9+fH2Om8mrJliX1Ha5BddglFuL2ezWHLSaVMki+sBfFgYY
kyOgCIf0FjQw8xa5IIRKJ0R1c4m7uNh2v1hUrL9S5jVe/bjg0KMOGxRNVK/Gkp3KO8em4BjpN8TN
QMF8EufmsZl0toa0g7BNjsg3zR1PSVatIWqkzb5ECRqxgXkri813H51ejGGj5Y9GU0a1NPvdokH5
ML9CcTSPQ/Y3eicWMB7gh0EosEDGuNnVCwgEUByE3h9K1i/Te1cly8RFse86aaYU6JTI6qss7PfP
6G+c5VQdS9Nr1EcVToUcwUkds38Ora/XbdKAAUJaz8KiCkevbGJ1D/ftEHWxeBybMTfYO12dVCyi
UlbypxQYh8hXYrknXcyPqktrB+Ip7mVze09Rx50r8/4whveYhbWeTjiKPjL0wBEckYVsM0jVrXZy
920wOFCp+nNwIjkvLu8zw7/fAtoX6zq34G97DVK3U/J61ww+nKHZxlCZfVvQE3fdujww8BxCKLj7
uX/+6X281zVj9YXnEuweuTV++A5pXA+V7KgGLgCq5qP5T5F/fPvs/Xvz68/Dewj0uDPCNtzMfymD
uUGdcTA/QuQeU6J4PTlSdtbubw1fd1+EH/MM7IDDD99pIrDbjunx4ma+VYLoiUYAkNnJeAoz/lNG
q9FfvKph4YgjJ53HN2VQlPpIVnc2Z/z25esPE0zTOzyFmAVSwoKgVoEm3lzCWR5XIlYZmo6Mro5L
8CTADZSX8Se+bMNX0EAgLVoHlYW4LjdzyuvQMV9wiqEIHOAvweltDacw4ymMFooAONrApdLW9TFZ
F837Q+qCVbz6IVUZgaV21QU+TNhxc6PnPzx7+XsIju9qoH2fbIA2zUNH/uJndZYjZ83n7969eec6
y4gjrVbajme7u3FbbYp8CgfWNLCl7ZLpbZSXETAaDDaoTdXoYCjY+++YjsNJGWWk2JS09nTI4vOr
jhX8sgMMCRN0F7iFep2laNamaREtazMGtN16GQEttTZ7LnYre1RsQIVY6nEd2tbU5JdjxmJpsB3Q
l54RfXnwkNi4OPVuuu5B5x9lu4MXLYw8zh3E3F4S4slVlFlZnU1M3gQeOMs8gVTq0AQyPXYTSA/S
Exi++9tPINx9AExCeBq4efwZ0PrGUMvoSdXKhxoLkaiBn2u5T5wr7EsHHZkCJo+Kn0Hj54PeyIET
5Tpjn6F/cvbdd86TDC5/rPP0tm4B6BijdH09Dfy1IOdDyclmWLsN+OnRLIBX4TSH7ikq1NFrU8kX
5tsKqdjT1vKgvzAq/RVozZZInduCabS4Jvs5vsCmToBEIQdFSPn4frxq1tcg0YbuvxzyUQJR/9bM
CpYOwHiX4PfM6roC8YvPzkvZR/gBlV35lzKlBP6MORUgNUOcBmHlEBBNN6hoOQKrlW1fgKY0sT5w
Yx3irYRNmvo80JCfDx2wMejFw0k9cfyT2faG/3EfhuyTzzrFLE7+yPFsyAeNrLr7dEuYKLcNJl4y
95zK/GkrgFmTvRGwUIEV9YocYqbpfeUFreGrQM9iR4L38QzvY9VxuZEDTToyqftbc3G2Pjgqkyq4
+RKtedR7jPFULbgRFPl7Xows78I0cDQQfpz9LwnISJ8Kvv/dy7fZ2aPleQaQ50tGF0pWXvSMxUz/
T//ieQTSP9X2pz98/A8X5BFYLOsWokBQphD1t+cnCJ+Aro0VV5hqkH6SkOE7CnLmyIS3oHMhBDqz
sH9tK+tPCBYZ8R9kN6gBZVpYY9SZ5FrgP20j+0sjGKLqr9MREatxZi6pCanwjN7KLC1wf9P73zbN
p3fgOz7wy0BqJinznszAo+zFx5cfZm9+N1BpdvaXq3pBbjTFynmrvAFHIoXT15InDCCrgcLaUC3Y
XCCLzM2Bmi9h/veQC6qFzllXFa7h7AsuFGZ4WeElDrg+v0Bcn/PBX+WDublf1ZfdHpjr5nTXNKv2
tFmfwhY82hPTUCv4rlnDV4fdMYt8CZhtn8FzGr4Dl7m3zz78FgVeyN/QmPluKIDS9PL6Bm/H2yVi
uXH3j/LUPMk+AJLEbX19s0PTIcnbqDmDBBGwRc2Gx/XbzT9VLkCS4tjZV5C3yexqv1rZvAgeP5Es
0Q1xEhaHoIKWXILnl3j6Ra8pjsECDpo3i1wR4kRF4SMQV1fg85X/zzCL4MAExNVQj8DbldP+zmZc
gyT09n3F3uK7Z9tr+9rGrsibTo8ov0JlNqQ0LFMp4NnrbsyZ3VYLzKfE9cAjVYkuYSqhQ84fwLvx
bKOyf+nSRPpn9KfZb1wc/gt0ry2THYVDg7D19Z+4BVufeVy3N67CiPnSpb2kH8vnZm92zhwDFoO7
eBsHWA2RJA0FNpKKBXEeM3DyM/uIPSu4qo5CAJ08hEdRrFUyK0YXtIBzr5CKg9xqy2qVLc+4++cx
JIGbGcM+F1999WhbfueQ+Rjz025AvfDdGRPwlrudr+fXXrwmA5brt0BB9d9h9vbVqg3h1BHolbsQ
oriFe83tZdprsQ7eWTlsKcM/g+upxMAGTIotBv2yf5x70p88hp1ii0xSSUptSQ1H6oYX4F3BCtDs
w3olGK6ZOqNQVczIUSAVJtOG9j6b1bcjL9MpzgL0u18k0O9C/3bVyZHKwTFGeyasKl2bBXWkDM09
dgyqniIFu2Ynyq7EOZI4PvzxB95mM8TmugburpAvgqMIlVpqZ1gavUc2t9NElaMgdgjwaaY5U/9c
eUmEVCwFDmsnIt4YZIdJZaNLjbNuZaTogstj7cqR4X+7Xx+aJdUrgYNwlCFYcI6AomV3HWCSR88t
wJbOZ41Qb1v749Od5YDi5Ovmt+ys8Lwj8BGl+2tlM0Of8iifK+T9gyMKubZDOn2FR9a1S7o7abq3
t0G2M1DBg2HRcE6LOfxhnolnPaTdzcx8AuN0v9F46pxgzbB0d9tmfU0JeF3uwxF5SqkPMITlsUSw
UGxnSxBgIJ2AV3fLtepmDGd5d1NttYes6WoR6lHywL0GFp4yeZTtowInq2wP+FTZTUREXyS7Xg8x
d3UV3NxWLnAPQAGktkepF6KW4DeSIkjDkyDpcQxCcIWo6wPfpnAfSFqh+9zCOyRBoLHEJIa7X7UJ
CtCbmgXJ+mLVdgjYx3qcxenkYD44WQueJ8orXK9J9VZ04NOHZALTpuqFWPmXPI4aOBIZ9iK+7sSk
O+uZPU87ha4y0dL78GcAkW2RURE4Rs3gQWi/2jz4RsG9EvLMFANZCqnPvod6aOexbzk4yF5BVtji
STm+mgUsJuWM51pSWWHQcwo0hVQEGcxvYlVOvV6O7OS6jDNums/qyXmPEwGzp9Oe/AGkIijy189e
vXj17MPz3+YWpNZbg4T7QYGjGKnJGXGzzN72uB5Is89/++L57168k5bRAwirLY1Mdvpd3teNfocD
O7A3/W30NpFExPGmIvsarpYlAvIfd3z9zsXznoNyJNmr9IA9R94FoqDgaYdePdryVmNMTLX7PCDA
ZqOwbIMQnYNHiVOv9+3Tso8edO5PEIf0Xj/v8rSA917+BWT4w8lYkNw0Mp2cuNkoz/0vx4jktyzO
8oxtXWCo+lI6JZTrkoqq8FbBhi+QeryMrqn0TGtnW7rMfEQIZUBYofngyUiWNEEtDcs0SCs0oFfA
D6Q1Gr83b5/D2w51CLx/BQpK5DI7KuAC6Rrgr/+XvbdbciNJ0sX67phBsiOZTLo4MpMsO+twkclG
Jauqe7tn6zR6hk0Wu+sMh6SRxW32KdZiEkCiKocAEkQCrMLM9JjpUm8j0xvoKXSne72F/C9+MxIA
ObNre2Q7ttssZEbGj0eEh7uH++fj0v96NVvIC1xgs8WFV8puwpTtdJbF3cD4AC0tUSd5O/4ijZK3
t1+kIM6LSraev7T8xoNWGVAnoUK5iAZJS/4ar5e550wnZhC21azcx6oKNGbJn24BVTPjTwcKCPRD
36ZooupKG2URwcEtq6r1yqqBIKCi/GnxAhTUhJ7BNI7qwn9ZYN5XlQhDd6ux00EgRy/tuqu8BJob
XTVHZNSzidajMW6WZhIYqVp915IXqpknr3FPMspJld9eEVV2CUWvSI9Y4bXE1vT2OjLSLOO/3gjo
vH6kUrUqG5y6BFVYkL4hh+80THmUX3g/iuunFHEcgzDQZFnMKtBtVP4/XDYcYTpb4FYVVywjKNUF
vAl0jMvfgAozBcWhmNdrIFYBbCaWnWy1LCqmqkJHKTeUTQ9N7/joaB/wPOl6X/U2m71DRkOtsl9C
mQbTKCkkekkhsWeeeFcEoPnQPeA/PHwSvngK2dJA9EQbtsxJs8DoBgeCeU3zKcZUR/RgD6stvVE/
049ASTUrHI5WAkhNrBH2XCsK1R7iKQujNDlD9DyeNnIpwC+TRSveFDpEVNMx9CCMNcXvFLmsFQ8i
B6xKSW1RM5AN6fbDokDlMx+ruG5v5iyDHVvrSJHAHktdyl7XZJBQIJDudW65k0MJHJHcfTR8ye27
ZE4rMHFucXVUDPc5bZHLSdwyHQ5o2Hyp790+VMM/hPGEKb29iqn02oR3eC4O/5B5uaYb36MZr/1r
fNuGr8bfm9uLbkqdtYMilmQaeknD0nckOCCvUObdqdhMNfMvZdq+Z0chvzZ3CMZXGN7bGSdwoXob
h0FVrIltrOdw/NUeOwOpOx17pkENFYY+SaueWPoaQCu0zpUmLTFoau3bPWsGenKqNHb+0QGbqwpx
SUimTlmoboZD54uk+YlC8wWW3lafy6bhMFCAbDafbD999Ggt82OiqunJcFwTVii1rJc0mWr0vNC8
eSYykG6QzYvbBMbTh/9PW4lJFsBX9IDTVaQNG+BsoyiDMi+G7yCwXbxeTQ5/FYdNDDZVy3qImyFp
s2LV4Vm121J/tPrT14HxDdbzUjzGjNZ1aQZDoqMjtvJ35JRaX6V7rz+3uQZyKELZ4yRx5ZJeLeMw
qERREc4XhpJzXX3M5/KXznwZKr4QSDYpi/aCYRxwxFy1RxPzOlw0mc6qY7Z8aMe3oys5AA/60yab
cOtH3k9X64JjzRsmXCuWX2wmgsxKn/XlA7dSkK73rY9q68LuLeFMXVV3zQoxQSFM+eS6rUq9NXXT
UoXXJiwprIiFYZPQfpFedvkY6V65w5CxWkDrH0H4brbYdLeSfnW3+qvqh+/bGxAZTriiXDTDg34A
0A+9NMJgfuy/YfG9TkMQNHwXZRx81EhC4MiTyr5slTZUeafO2IZhxiaCsGCjMjhVEKJduBKzUqxP
za5mjQj0MFEP2SHFX0XclvhkQYXyl71e0VFZeqC8g90EkrX/tXISavhRnJ5qJwqkGFbjDMfF7XNe
39FoqfqMHfiyIQIvFFN252m475KriViK5EMSYRP50Zd/bZrUViOLYolXewPBQEgu764QzHBOx6kA
hKBJbHezfN3rt4sssJTIq774woWSRDu8A8dqzQctOd87Wy0LMrwYxy0q+xFztovgTnX/P6Y5N643
wJR+Ble/eol5nGtrHzkwFP5NF3+DO0J97ge+UWJxMSIrQuluyUe704sv1/RF27FzgLfRoolC0TnC
4+s0U+za7AjklmWZq/VOqQOD0saWK7pLVhB+4idIreGNN6G6Sm5J+pC7YEKJoB6qRt24+jK1em6o
wGPoNz/FLvPbpEk0fi6QBAYuC4U7DMvnUTo0hENOPCe9g845yNXh61FpqtQauwoQ0i4XV2HoMd2T
5P50Sw+PW/KpSOonatJN1qdFVVKv8Pow/bSRhIcginFoEFYhUJpro9Rq+0CdxB4ywhTjWvFVHLzM
kM9URISq+YCh7R7g6nuwAmKPq9t5QFPGwpfHVw3yerzg/hbJppyLizUN0yVM9/BQ+BuianWvgvPY
0ga1jyOyqGm35E6qHtLIbsIur/NB18pzr24CwYxctqX0cesRb/FHPKrThi7YcCK+U6JEELVlpKUq
4TCemUg+TdvawQ4KheU2gREzQlonadvSHG97z9YhVPFEyZBFVfn3S21CjaShr1s+8dbKMPSXf9Pm
9oH/2qef7JPmdZGzFUZ8s+cvCr2SXMOQJTRs5yGX/i0mPnfPNv246adG39omRdgUZDwvZosQlI9r
8m7eWxPXUgEhpqq+XLhacrbCf+6a5qw55xAFE9rgmBK1eOW+zwZAQqCbIhvvJtvy7vl9MZAqZr2F
UzFfmZOSwErHlULcBC3mJp9ODhmQ0+vMgcoVkVNio3WNNSODjmpCi11RVosIznC168aSzHO6oXmZ
b6y6ECsmSgg79ZYyX+YLWCUYIcB33vVKcFQxSAGTWDPaGiH7uqDJrHYMrHYDfoputGHmelFGsXCK
kJko9EU2rpz2fEm29Tvpa32zXuFRkFi5IdtvNpoDTMNhj8FNhf0L76tW4Z0xqBuyNgzaIvEeHRcE
7EA9zmTtHI0W+WgYWgxCcRF10H6sL+fjgBXXlhrrhhhB928iDweEYfS6Q+AU5eah2mzF83IgYIUt
gnKPCX6oGceNje8pTt/OEYGSW5fI2oAJUjVt5EBWDBp0ql1CuWkx4JiyaMIdGCEmbNjoIwVFDLnk
Cq7cNq1K3AniecS11E/guGEQ73LVSDby7hbvubebrE99K7B0WFkqf+mErC6W/EhZI+2zVnUncIdg
faaMGWiePYi9Va930E49GIV2TQ3rmtjKU6IMH8rk4eq7GtJ2XNhOhHTiD4YbopLyd6ZJalhvPOWP
S2WDWTGrlPYbcHLiD7Ltbk5631JhxyurUCIfKBxyTnFCUp2I1EvnWcw/cPBUwfm4/QgTeHzZffHz
xY/Pn2GEWffKhFrVxYLN506us0t3fvFajYJ3R7dj4D1ovv/AsbxWpb2o202vLM707vayCwWpNfi3
sU10hGP2gsasRrsTc5Kp0XeJ0ndo46jUipdLA212TtJ1VBnrFmxrDfIkJGvJK0v2PA7d6MQ8Dmuf
LE7aysEA7XLiT4gKMUgBsbVg4tF6CV8ioodrrrPmZ3JMuxDjVjNedGgfPUb/yNvYuw76lY3/cRL6
7mTndw2vDJSt+hR+mOF/vPgR2giaPTgrRK2AybGe/clJL5D3vKqLwWQMrNRJZ4AhBZSYIG7EtZDo
j21lt7nrBV4SSr/n+32cURtezycnjcdbaL38RFovP4rW7FIGHQYZHfYwe7nTdZjXefYngyFsK/hX
UoP323g9Wwy4Zt7HOsnRlpK0xQ2+RIPZa0+/RHn0kTNfz15mh7Dw7A1uVc/7XFz/Jott6OY7YHDY
PVTfKMpexYKcgqw/WTTgoF/zPeMZXSAGIKGpEnH3Re2E8TZWFaKLqPCUaqJXBfnlTBa91GWHi82w
VBytHoEksGIRyZd1MXZWeJuGh6Zv8WFivkR7CeUpD3FVYqqqJg+r26rM60wzDNCRdBoIhm7odeqj
V6JH2y2mlwCtqppLO/BzgiiKGGDN2ZxG1XKJypqcohip04TBHOVz7A+SOk4YcB5RBxFvoMcVp95N
r5kadKKbgxLDGVOkG6BAFtl1FuW4XEhjhRpDsGp+a40g515KqVDf51t0cBuBNpaxYF4gkLVJoW3G
v5NubmbGW0qrm8pdS5jKq6C8eh72KuXxojee9GivL7y340IBBPJAOBw1qdKVycU5evPxc2ZhaevC
9Akq8+euUqtLLaDqn7YscdyxgZT4T83rzXvL9D8pV3RMj0XXjCY3VrplmqnyBjRqHZi8wUjfmsxA
5R47yi49aZsfxPbkEnsTOD4cxaYlvzfGJ+B+wEzvyDGkbgzm69kQIyYHeLWqQjl1XYdxz0u6WyxU
7p+qQufFvu0f59vObGtX/17dhYlY9KIGsztoMaIpDEAyJM8nQAL0vlKXqnchCEDXfnbgzuoB62l1
7T6xOmua6WfdXqOjljn0zgtrUK/ceLk7jpW7cnZwyNBq+rDoagvp5dHVroOBD6JY2F6spt3yKFjk
t/OBszIYxb8nyYgGeKCjJHh8lB39zXamwyObDBHZGKMtieAdLbg3h9TfBtSiWkSeUM8OAbF8ayea
nH+o3pFvU6TyYeQLFPx6oXNYk8+O1gNBGb8HscBZwwrGjVvo6Z4psoY5JY0KZe8G1d2f/sQZJXbX
dAkNjNmd2WK15ClQJAL+8WV21JJvllJ5LDaLzQADFDAdCsI5dRmDt/v1V7T1dCKyWY753htWT2fm
sbLDr7+KhuWKRRAGENLIcmqheZnSYtBR4JiPgzUL4rga8LgqOLn2bbV8h/JJCXySZBSu5Neft7fl
IDZNlkUxrMctK3nvVnU15sqwupYErgGFFGc5I92VlRPXKQ0ozAkAuVl7HUmtffl3L8MoF2Vdwmsk
k3XFeRyshdawkmJpDWUEWhXI2WPUPvZIY4TJV/kL7drXIr8/Llrkd+U/f/7s4uzls4dPcRIOUXM7
5Ir5kMSL71GOadB4V5Ktk/ZtOJZzk5VVVueTgmIccTA9jQbSdHI2SZUDYSkepowNseA4LFunhoEJ
USgzW0oZPAcSpfaMkHOzXFhO1OorTzDdGtjYWpn6yqtsR4g5JX6T2DvMhgfql4XXos7SOiJwLwZz
Ujna6BDB+jInh+yu0XJ8oDomDywPEMxuq9DN1BV5wJPfGUq/9dbeXAiToVNd5TcoeXknWMCeUOF1
13F0BFpIzgS5+MUyC2Bn/dgS2j6pgz10Ypn79+PWNOVEH6ni9ga4CTDEFW43hZ+GZzz1EebNnplA
DLygiLp34DLkvr9WWkOTRMcl7HNK8KjyMXyugND14xBcqREI2XVCeU2QuIhrTvlWHEbTYtUFOe96
jnhwKxx8Z+/o5z1yVjVkVHVvoEamYlugR3Bc42MV14Jjxd97hlcpdCC1cCIKnFc/KFBRobcKokN8
ehq6q9ToA/BBw69gGkhG56ItuMHG1s4zO/7e8hRKOLsS0zbBtH4egkBM9DB6buiERAB/Fx3v6hVf
ls14jnM4uuaFveidnrXendnE1j2KpmlLzLHFZiRnp8tnuh+1jbsBPoN7bdeeS1U6E8ZFbvarGJt0
WfW2BJL2OLrhLprKkfW3Rc9qzGI7bE9Do9r8RKFD78djnCmLQz2M2h2yTpuhA+28pZy4+5ZyfcVq
3+5I1sCVBjeZRjsP5uDwaLW7hhAUtqFr8HvlYE197KmmevKJfWkIJGybYLU+cX82EAOclWEfgnzP
La+0pzD2o3+ke4J/cl/6R06L+VT1Gf/W/cYfehkF2vdv2dXa7Isbkqq2UVAvWFOyAWorRVX7piQ/
sbYLg1B5ZGwKPAwAcXrFW0J/beFdhapoBcdikbO9uQbgl5aiFThCi9zs94DF9rJioxUI5xfF3er8
uY26xbQaKESb5u3DiSe8CHEpNJULsI8uVwD0Ttg1dbpJ9YXEcRbSPnItqFIaCQYjoBMjs1vcHwnk
Q67dI9TIjaUlDZMmU0qODYDjlqiL4p39locELUHF6kh3wvmaQBSMxIFYPQyuK2QWAtuzKx+0KkaR
D3Mgm2su8EFz2x1dRd4GIqd1tJr53jM3b10JroGOX4cQrWSFNMIA1ci31+d+1FY1/6Gj3lwkC60S
YRkrGtyeh8ESJMJq1jZW3ZA2ermz13oPd9K8p7P5cfCmThJjINe6Q1FSOoo2UAom3A6jxMoC4mUL
0BB9k/6Nkb2ogz6qF/D2xXpF6Gnl3NNbhcz5ZKXDtidzbzgC46MI6MH4tIB7cC3E4vFfi15yvzlv
IZotYFHdl+UXxxZ2+EcPWlpqWWFtS0tB3KMX6DF5Vp80DGf+za0pKve3oUvofdes5mUWJSwyzIEz
M4XduGGLYTtS4l5gTdv2yLwiqtFQSXZoZu9ldDPueHNirR7L0RMAN/M2mvqmJXYY5ycu7vKRKCyn
n7S5tISoVqhqdevG5sblk30a5g+QTOXKfKDa+sjetgGciVLqTtYW1Ebqk5TeZxDBldCaQVcPGRX7
fYZsVq+SxXXhjwU8WxbomI+O05r13ltKGk32kkJzKU0zZgx48+a/++yzz/AKFeQKdbK///nN//IZ
JwwgdM4HzFUInB2VY5SB3v+XN/+DfEknIkthoDq+v3zzf/+3n33WzAiw2HQ63zPneKgKc6p1J8Bc
/q0zt1CnQ8griGWF0h1v/+iA77NWPfY9NxDuw42SC5cFztdyIR6L+HmJGRqqRc3m7oHOFm9y2Fl/
Gxh/wornwgg9GlmFmMs+w3fT6QYNGFExGxZjREeZF7fMmLDfRT3KSR+BJVIQwD3FleWCU4pxuUWk
89bXp9Hb+Z968J9fyEb0dv6XLIouFAh9tLqtqFYc4Rza4ky1BJw/x9QPY7uPNa8CZRSlCnNlsXIK
msQu7CFfR0n2AaSh1YDwStNexL+0vStJU+kXiI0rMqqUphYyceEzqw3KmVpzem0gpUbcVyBj6PaB
3/HpVYJQm29w6Y3LyaTOOrYkvsxvB4rz2xOHh2+3K25aSfftXEBwD2QSzMTwCYrZ93jlAMWZ3n9x
s73pli6PrkySmSnjLalXcGg7UunUCRP5UzfFfrkPfwk9/Es3JF4q/jDd5uLAHTk8vkIwxu5bGHn0
hUIL1YGccrQe80CgZQLgVPE59HtEIAzqUeOkbA4V8y94ow3EznDN2L+ASQMa7CLpo6i7T1pxLM6h
mV3PMRda0cQibRvH7fFeuy9Iq+PgW1XNURqKbJWXQGKkchf6fZ8apK/Tw2O86kf/GyQOUsxLZ9eg
2S8+zVQ0VKBgYMhu6KMzjOYrHsIllUAaEBFUR7eFK7V06S/d7SQC2hjSNGhihX1JCR34JdIxbmJW
EmU6O0pTdQ+LJHjOtF/u+U41wc+z4EfbEXyCdxocFFCTgz+jCJBLSAvAmgZegaO0uGvRhgLfbm03
/vZyuKzeAV9WgGaYfQht7/eO7sbftdq9pa8GVq8XlWM9gm08aWKp3E8IKNiHDU73oJyK8p1wReje
x89C0rSULpnBh6FfWptqNClQNujptQItEF0gE1ZQMAeTFn/Yty+Mz8tzeg4n9V3LDVGjVUfJ2U8W
dnuNBhfu+RgEoTH2OtVhLtsx/Z49P3t2sWUSgn074MC/ERzqoChVUTUarZcIq8m3bSyLLdFmTngG
JBuADBSoZ1QRvhtDZNco5aG4EH+7XM+/i7NOcLK3Lnqr9URHffVQWl9PxwMUnwOT13Lu2FvJ3HSN
C073xgnTUFCEQX4Xh67nqIYmw9CVK88x1RI6Loibi+3FE30XJV+avLkNjsXRVBxwpCRrlU1KU2PA
yGkx/uNPEWce+ADEQRcMBifcfBl3DF1Uri3WAwZ6J2BtOk2YVAn7xG84PCoY+Ukv+prkoqTVt+cP
0LFYwfe19QPEvF39sEZjPXWXDPW18/7tm//JKEwquR952S7fX725PCK1qdP5EVQlC4OCU+lhMeVU
LV9yDgNc5yDVduhTK5laz0qjBuoTEP6d4K3b1MJcEECwL9uynjH4G+GRdToU3TGARgcjTMxQa4vG
qJpWwIlztbFD/lscGnLazG+lU71xHXZ6q3N65fG7BstVFVCfXOtKs8OO4r2tGX3pLpoeejaVwLkJ
2EV5uGkqwYxUdW8CotK0h/noR+wkRTDZcOTg86ysRqtpctyT0tnF+fNHP/x0/uzVf+nFb4+OjuL7
v2L+cVNgUrHebTnm+EGqL1vPFyDYJFF8A/8TTPMojS5PT65sOUc+juhrK8GczupNL5I0MBPOpxjr
7Y25s590YaQKX5o4iJ48fPr0+4ePfmvg/aUtDMGwQu0oDO7R86evf/fsVbcX/epIjnjfexdjEND3
R0UAOD2OZvkG9fphdb2G/cBeGnU+xxSVOTogukGY2JFvo6+OTr01xB381ZFNZaGuS1RGGW1QGrOx
Yz/X1DDFAwxEl6dNDl0E9ZEmKoeODzgehXcelBPDKLtYi95Jhw28mK7rGyeIFU1yaFFsoIApd0Nt
XuXph0YIKObOAOWpeqBpYjBeuijqEWeIskIl6xFp0PCu8X1LkJhmXlmJvqYb22lUupV0RWK/7L69
Ox5e3qtn6C8OPFoMMmS2hnau0ij6oglNjLU0H3NdRzNQnHkNPXz26pzZD8WERTEoCMUq1imCmeRe
70g7nnc7/mgbLGfLMOGzYxmBu+aGfKg2jIRMZiJ+Yrub43NM5ojkPb7aJkpLza4hGj6nSAKMfU6Q
KqfRk+cvz354+fz1s8eDn348vzjrBdyi5mSyCicb/fK4lzq1vDx73As6Vy2LcUsVJ14VP7w8O3sW
6sj1sijmLZV8Garkz42OHUSbAo04LbV85dXy/dPXAZJgUNK0xa6bfPn3gTqaHYE6FuslTHBLLV/v
qEWIBCLwJm+jyTdeHa0zfHsDsklLJf+wbyW0m4KVmEh5tCWim7EsRGL/xGj8BhydGRezZ1zAev7c
tz9Dr2DY4Bc/64KvLh4Pnr++ePH6YvDjw2ePn55By4fHx877s5cvn7+0X584WYaFxRpu6uXKI2R2
+O6HYvVqNf6RfiZ+vdv2aXsNTs9TG8CYWFjN3zyC46+aFucg4iVcV5rdaptr3fEJlpjv/y46ujua
WAarV7o69Lgwhluut0d1pF6+HjTEIJ9Ex7ovT775+leeRo8MkePLsNTlKZXx7BjW4XTJdThBM/h8
a637j0APPiRkNGrVBy2evl45eqayfYh2OK4Gs3z5br1IyCtfH9S+uPPi5wFIPM9fvuqS9ap73G14
OugTYY/Pj5qfG7avhEWJ88CO9aIuH1ES3eEcWtFbJ10vvPabvzh7+bsu+Zt2x+vZsNv8AgWJVn2s
i/qYtAxV62w23Tm5BEoKCJGsfiKdyfW7H1inGKZMSoZTEJf7Xx7hFdq4DwcSnxN9OFeE2ffhdAib
QZCN94HrCy/uA/MmhtoH/stcsQ9cNPzt99TuV9DuS2j3K2j3B2r3K2j3Z273qy9bv4V2v4J2X3C7
X0G7j7Ddr6Ddn6jdr9raRW7YP8YLWBDi+9DYEMSWd/2/Rz9wUPlW/W+0cztKo2NCv8pXRaQdq1Qi
vTZTpyWHqm8kDbMJmW+i0oblUYOGRfW0WDIsny8RYD3Xr93mFvMdcOrMRMO7pnRLnUG9CtcR71az
ct2a3Z0TE3+IubT3ilhC7CcKkG7RSn5SSmtbEbytsCypmTdnHNqu+ERp1MGG1cvsIagcF9VPKLby
iJHoRe7Fm6juEOiI/AnSp3JgFf6hXoHG1gjmYd8NaR3/cd+gOXarEuV6GOY1s1RUaYJc1msX7xan
BHxxZDmGiRYmy1trXO761XqM0/Bp6Kj6mygsRj/x2Te+t+M9aLR2330Am3rU9HtWaQPf3TY2I71y
EvIqnrqPI9V6jrBuc+nXqWRG5Lxgjajzd7ecEqNZMTTpYCDqPvAH4WBKPZNMB9aQcCbt6MlCEQv+
Gt3khF6x0hxNL0D57dESGZleokFuZq9gd0l7yLDXVT7FKlSqU3xGl6OIMVdHU8xsCv8uqrouh1M7
/+l6PuZrf/RQHZdkY2Xf1WIu8Pzf9qNmu7vscaYFNIQUgoBRzrlreC0P7GI9m9tYCwJni84AXh1o
NOXxrFfio0D3KNFtvsFBw2FUTjYP5sWaPFf+WJDZ36skIUi/DS1HggvErrDfEtVNA0y9j+pKjDxA
SZhyNAMhfeFk+1CV46iYLVYbuZKXwbF3Sss8Htq3uihx4nJpvxM60ElsT6IvohO6rgReNKXbSpSH
8fOWGRLqZ0WGf5nvZamm95/tXYn6X6MC+xsKWtK1HUYnLZXQV0n7Z2n04EGUuE25s/Is+isrQBLS
lqKX0f3oWcNhQaKyTSg2fiPbm8SWrS4PZt62EKxlpjyqwljsOkIdNeNIWr+zQTMpzQbw5bpcrdnF
xuypZVXNiKmTRxHsJl27QGsuyaDr1oYRR+VoPYVSvNuBl9QlM5Z8xfZJU9G0fFdE8SCKnZs2YGRw
AExRCqgXubkoZXs4klI6UKA4P9EsrhF4RTzrC3vw2g7pLvKmlwqa4PxvOk1PcnGe9fDLBNuEUerq
6xC7h8enweA4YwulLxO6HeEjOJQOiq/6KPEJ/JkG85YbeQYlDf9oplrotdz8MzIgHf+m83vIwnYl
8F/Xzo0UwXSBiZHUeuYLJyxrrnHA+91ug3b0vRDY7xvXTUlTcVsmfjpF/owdrgyOSWE1aU1nGM6o
RUQL5XTWmEPLgho+lKNldYNh70oEjYN9tJdV2+hcL0xngMtuu4iqd4Ylqlok02Swfa/LyeSETuq+
V92hVZ1FLvPBd9FRAHiS+xnDvr9vympFnNQFMamIMu7q5pa/zL+SrWYbyBreB/vvw482zP2VBjrL
uT5fT1eD4TXnE7ONdUdPjhrlxaJqPgt5uby7JceuLpoOYB/7aKh7W1a31b0sPqHql2ePw/7musew
jT++WjSWb6+XTEQfXzFZ3bfXzCanT6z6zztp0+bRo2r0l8zRN81Z+zhD75ZDI3DM6fZPP7pZyzqr
2I+2muyC3qAOzjCp4LjdSNVm4fAYtFUXFLJ+hcWKcb7KA+BJqnIvVQAUxu0K/6hMZ05pRKYsQH8b
FQ2TitWRhNq0wnbIGh0MmuuQS5B/W8mlXFaOVqoGu++EHS/Ii0QuPuUtNMHvxfaorkVHiFGrMAJ/
EqVyxG10dl3TNK5orDsaZ78/fPRbGnSfF/0RXdGhuzLZUhrFX59FdvFjFHTRKKNuhzmulMzDmf81
bVT765OWr4nHND6HbR05jX/V8jkw1MbHdDdmf/yNX0Iza1XiV271Zc2RAHU5KVUD6KixlZJAyOsl
hY9xJUTa5qcuVY+DnwZoa9Xh0/Zkex0Wha1KfAp/tb2SZYAMPp2/OfJL+HT+VbARn9q8qH98/vIC
TbO0Q7LRoL6BDcR+RJxk5Pnzl48Tef2KvIIQgt9sbeC/xXRco8feZdJ9A4cN1ZmGbwyS7s+6xJXV
zKvfPXz6FKj16GL/tp4Wk9XO5i6qxc4yL1Fj3Vnq+2q1qmbB3j96/uzV86dng1ePcM0Mvn/95MnZ
S5iWJ8/3H8349lX5R5QviOKtvRjfPlov62r5oqrJFrfzA0vA6/Y0Z8x+2vZNvWTmiIPVE7OlS7/L
78rZesYfOcOQYP+BLbma5YZ2vek0e1cs58X0y5PMLtX8Dn1MlUvdpR7IYxzJVaD0sqixBB6bqiwz
bn1UOeL0u9JBzBRL76BZRqXwCwoQ7WNr+WBbZeEB8yC8qbzaWk+AFN8/f/7UzI189WqETOz79WRS
LFFLgA/MjWr7nLV8vav2rcPbCR0v3XnxnCDYkvYtmF7t7EgbfayFEtCdLDmLabWFDRgBaks/tPQp
YxtulsUkwcqbUD341IUbaHqKfpLuKGMJD9kyxr1aD2uMVebwOU6pfiM2cpCfdWobujkglgWSar6w
QU85lQ4Z7FAIfjuPCF2ZgxW13RzlNYmoy0JUyJhzZj/3nJ9vosPomH0ijMYAugKrCqfWrTSaOhFl
sI5ieBXjQYnmQ1wQovubEiS93N6UoxslWlazxbTEO22BglYm0wy2I90SorlztaRbiAK97EdFJFmI
f910hz3Ay43F5uQbDnccVh8K7YZN9qHKAlcXu+m8qTwcyA1ltK7XFOFJ+JBotVX5lmEwdX6NoZLl
RCZBdJGydhIVLci/ocJMMDZq/FSsNVrv2gP4XA3ua0yXNMND12BXKmRtGRmDP9biBe7e7LeoM8Gw
GKejSsux60iDQSW74NvFsK1GwemaMCIjalKTxih03A2x1upQoN18wkgMFpEnMOMo/Z2qwFF02lbz
jqZr2LTF0jSrbbMm3zUXPuTv41SjZeb1qCzjrevA7ur7f6JwbZW/a1mMbvPl/P3gzf/133C4tkbo
ifAF/Fur0BvCWNOxv4w8TbuKYg6a0drqe/E+13jq8xGcMhg0Rk0nAkhqgqRfKiidn6QChbCpER/5
ikKyZdImr3H7oO4LuivVcz/6/e/R4IJs4LpabljZ//3vTzV6Cho9pYsugKT6JNMVMaQSfU1/apAf
M0Q+64voZrVanD54MK5GdcYY4Fm1vH4wLYfLfLl5oD7IblazKeODmDAfqgTvVqVX0pGy8MKV28Nd
vrGOmWo65rQzeJjrduWRDYih39UlhmpLrpqu2Ay7bnIzcuMcSC0+wm+gFTZTym/rrAiA0DpVc6u3
nPXPXwiy5YK14CeZ+pk6zvqU5s9aigRqh/dWahxS8E9d7RU1HqAfUPc08p78whV5TxNc3G2pv32g
KfwAZ/n3v8evEv+j3/9ejqLy+hrnMI8eS2Mw50IQd1lYCJpAcpwLAiQoR+WKvWnIR0fNESJE2kVk
nulwtp8n7YnMNbqjG8bqN96owe5ie8+8Dn1yP5rNd5y16hPK+d0sKiWacgKHewXnUriwl9blI3qw
tRdNOM2dkDV8C+ZFPGNKuHHJbm7AW8drYLTNNYfePLTMU2dzYQicwUWmbTqWL1qtsiLw9DTHZd9C
TpiiwmK9Gy6+9OQP8Yjkv7wsaFIdan7yp+dlpoJC+7rB5o3xvBKIg3nVfCmvtCXaY1B7YtnRMeK4
ZinwJJnsZG8SORCGTgNqc3iz0ozO3n860j0j1GEHDraPJqhTtowwAGbDouEF6AWtEuG3ER/DJ9nX
n9rJRoc827tXg33cWo8D563zjV3URttWSP/Tui9T56EXQhmSWxjQRRAXx6qZnmx2jQij+EVxB4uj
zmw0QsEruw2CldWrJhJHWdfrIW2B5DYz9IK+7sQnqxlNokz351pu9koHtIy9DKHdnqlf+TlrsYXB
Kpcbey8eODNSgXCpSmUi9QWKDQbypyo7GOjSe+BwYueayJtKZvE/aVsugZXXef97gl1S0r0OYM6s
KOT3+Zv/7d+1oCuRzaD5Ncj5U3WREwS26KxdBKbBqmIv1X/d2Bqk/b5Q7AGtAiCLkKMEfdzvn+CW
OLUWXnLX26TtdTg2AtTybvNN7VbqpqIWxzMyGjAEBqjdShGhxtiLtQFWYOprQFYTT67YebPv5ohp
V2fdD+wc2/uAkKwT+T7In88U69lRR/Aw2IUzEvzoXqQ65OGMqG7+G9DIvwGN/BvQyH8NQCO7wT12
YHtY0B7Nk21e3Bqwjv0hPlpqg+N4/9reD9/899ZZrdwM34/e/J+/ZVOceiSJDcjgzLZMZCZkj6ME
WZLot9MheMESbQXXVTVWq59Q16rqHZYVf9wP+bKs1rVVMYJ1QxUoDWpxABuwhIOmxIAGeNe6l4/H
FbH6hHJOq0Mdb70pzRw9RMZET5K4OcSYfVz0DwLY7cfXxRykUeWESV9nVmvdww+YI/nwEJbJsKrx
tjMnO2U/Jnj5uKFpjKG7/ViKQzti7kJQeASp7MflfLQscpCcuUy52mSxXLgGmn/Pzb9fl8Vq38ap
cKjpcfExTS+7Vv2q4XpVLXlY2BITFA3NtdUeR7jMgMHBeoC+8mtTF3cG5UtxKycLcL2ezXJQrmkj
wpquF8WI/BcQ95DqiJJJSqj0MJXeyOPkLCWWDaJUnWpo/eQuncgXyZtUgPOT21TLvu3zPmXCYy8p
eWHdSg1QL9aGJOYDiyIcxhikAJelgBGlpcBAx2WNlwg0dqkk3dLZQ8luseeEtc8V1BiYqsTYI3vk
0Hy4TLd0ZjXsWlXWq820aC7VcP9WQymuO9jN16uq2/h8dFOVwJ36l/I+6mJgE/5Lbi34x5wfw1FE
v+Ag+lB0rxo18RA18SU6aYY3KAnW+YAqfID1POBKHoCmvm0ykItSfbyE8Oeh/N5KBbOQwqvG7u+4
wlzCozWw//nGXTr8tYSAQYmtfSWHIXu66MG+0yWF95+sDbmmyNRQ4bYJYU8mzccF5zeBCoD8D/BT
Gpd9SHDCyPWySPgvOSQkjySPORPOjNFX7gvimmJ6pJSB6JqofBVfyiOpuZn3WupqSZymaoRBqxGp
R12NaON2Z1wM1xQE6z6mmeZHLjT6bMNQX8kqRxuxp9qay79mfkoq6qNjcuf4xo/gY5P40mr7KsKM
vY4nrvSTCmWYqzUDcVVkCLQC8ySdcjFYM6rDqQaR4lZhoO786cdWflz4RakqIn8WubAGMZBiVmKf
BkZ8/PjsxcuzRw8vzh6fCnOjC8Eip9gixVsjacDbGq2o8sHWlTEMKEPSEcLiSyGVHaMXynelyvfV
X0HtRfsBS2GglpyELWl+LMp+gYXjsI+5XeEdn6f7VXgXWxPIR3h4vuidmjTrkZvygCLGOEtYs4Da
RFhGgoxNb0JGQ7un+JV766DeOgyGHw8oVyxob+taNrYB3eDfjaRMU6Ag8ZM4izuarlK4kUTJlK6b
pf2ESabwk9hefFLczUjnhbnpTydxc/SZZCDqSbme9zxbQ6+XJt+OGJGZU7ba3NBmYd3QcLI3FSNd
ozBE50zIhZ6cGvpUw0dd23Ab8Jb/8K5sdJtoUtE/Oj4miDoAtgyMDwaNYtHsPhbQG8B9qQVyZYEN
Hlpe0h0QH2+AS9EUerV8h0gEjdKTmtIk7lkahR/WBclNzP/G/wQTe0smM8pBcNTxcwTlxMH/9Evj
+RLzX+uRr5f8y8kdnqT7gX64QHQ2EAd3cnWr2rlV4QcK88MBf3FRHlR0hTsrLKIgigyKNYFrMmjN
wZJwbh121Qky0h5VulBrioDLYr7Sk+3YqCRXr82QQyfvFjyMRi/MVQM8wjpkQ2CosZWXHhkzzH5X
TpDuadS9I3md2R/+rru/ENoPwye4Mc46GTCfAf44vCCZAY9+wGDcCmCGniEfqz3oBSbV5/0AAQNz
4BOY/3Bt7+5WU2s8G1JCxym+S7hIGp5jErm8GwH58NMrZf8t6doXKAqmnZZCSyfHFdO0mNcgXA9A
FZyUdyr7Hf3osSKPiWBDjhR6qTuEA3Lz16d7kyBIfq5k24i5hLOdqb+n2z6iEr6zxdaOHFpJs4RW
gu7o370FCfJXkoG2eTBUDGNOoCSOhXesfzSZQTeL+osgECvdqFTsu/bFHepBd6skkL5H3B6swrok
LCu06dT9tug0j8qB1ezE6DeGJIHZu8ajsBVWy1DMtaFe/HYZS+6ALRR0kGJclJi2Cdo9Tq50oVEp
Aq3XnEBVQfpwOfimH/fjcJR9uGIH6cBaDqHoa3ceLDQB44nAojXZr+H4pQmXCou7Ed7MeYnPVKIP
f8Go0naawbbIc9FoVUp3ivX7LlJz1zh0jhvdnVbXyq/DlmuNANmQcQM37xYjX9WoLYsZJYlVCZiX
SwuTyNzSO8J2go33/R70rc7sDLGQzvZ9wVz63nQaEOcf+d2cTzaEDJQFRKNg8fMQK9xt5rCMGC+e
vv7h/Flkqj9V2CzcQM+/TGckZxAZKYE7Yf/c0BjUvZyXhZuuIJqgNRQPEK3xZmu1nuerYrpBh28M
96qjar2MdD6qZfvn1/lyOHXLM1QKecW7i3TrAnaBM4Ty4wKKkRCu8rSvilndTHfqrbmu+a5Lqy6D
TYVTzJ83mrHyE5MgovJG04LpRWrppXZMALMxdlPF6yI6uWBOVOKyYTHh9Nc2FMwxZpMq2C86j/gy
L+LruAg64bhA6cOwmUTcdj1Qa87Xck7DiDqEdyHjIborQfK+HmXbNLkiEx8Mcewl2vH0s1am5cq0
qhNY3ZbJYRlZZscxV4gZhG6FtdGMH9aeGoq3eNkOI0jfSdIMdDGGg9tqOaZm6h2LkL7CtRfK+yxn
CbaLWUgD+hS5q7JRQ+EqYtO+c85iWQ3zIexcuRByt35nS8SFmiyjBn/b9/FNpB8KTJGSb3dxPQr8
6NbZ3jrjlC9bZp2Hufc9vZFR/A+bH7n5ZZGATUR2fR7QW60c4s89urQjdbnBEiKFkUEzTnGyfwkb
KLclEvcqQ7yQXVW1ZjX36hLMjUB17ayj5iUh85gCD8FfYT6yZSVtXTUhpsMTZaTChq+Jt0y2ld+y
zloF1QDM6PbxhMTry3s1Xjzco1nCD7NrOIJv801myygtX++igN9YQBzcW/vTF1JkDlNC9y5EKcMe
wldW3/V9lzwbS0k1BrJMlmWY1Zgge9mdp6Vn2w+HdqNzOwePSYCPfS6OFXf8jRY0fm+pWl1mbK2c
xBV0gbUSGEhbkmDQ5W93PZWl9By+TK+2GTW/YPAskYha0Z2882YbPyfiCkvvnnUDcoQct9KJxFFe
nTcKRBmUqL4PAhQWstW6+tY/whSGi54w0sIFOsyaHELmppcsMlpnNU9VyyfK5Od9VIqzclAMi800
xDsSTFqlKSdsp4HrJ7ne3elNySRGk+vcpPDowwiBcfQgujeWIsiZ+C+H8KEV7n2vVjZUIH/uu7rC
NHObAE04tK5soDmLVbTT1lc/PBC5BtNDz7tysiGKBh3WvRVssarfyJ7EIYMyuMGoSr+hGnRVjIG3
tA951DBA2EVRs0MYN/gnw/+4NwuaK5vbldNtEqFFE7SUkCklZn8mblNUlThIY8Sq5duVOPOv5Ru5
tE6/tDeLKMIadPdeHR0eisO40oVtD8aeaiwI+U0ZvbqLzWIzsNv0D+Zd/W1U4HVadRyX5SUWPrxX
w/9dUW+l8raavrxyB09LG4ZMlfAfSPdDZQfYZANdwUAzeetZukWs/y46IhfQJtPUfhlOAgIVa9j8
AMkKi7cYlgjg4l0M+mNBkaPm6cg4EBq9wEIYj5YBwN7weyhuvKhdp3gu3be+7EXqeqHvXDZ4zcEW
Rnram8i20U1g8a0EH7JOP4WbuJ22r1qt6tjmIysz6Imj7FHjkjV9e+rN1x6PdkPo1NiwCmXHIjtk
y+f4PwkGHFNU0rJCMLmBExVocx547rgZy3qOW4RjqRv/ufzm9KrJ/3XCdiiRBngYMg/seq1sZ8i6
eFNPt7J1OHBKAX4LsdzWi038HtSgTeiig0xA+ihWRwZVnXnylmPBMUKFXCumWzRg4tCf4zDNaAjL
Ac13cVPOx+lGC4m6b9yrNTGrZBiqxFfKiepAG6KDF7gjD4/+mnPpQGLucO7y5XUg8k6mx7uqOwga
yulAIz3nnrK0JuUXx7p6Gwi/ZSpDlsgD9FBkf0tEAmHRXaBiyiV6daOtVq2cpY1Wgkgr+TsEVVgS
bMy0QKx7rOVcJHuo6V0eJTaQjUIeAa5ULCM5CGoCmr+uJP0lBlUtSwUrPUP3SlAwP1WS3lX8MBBj
Rb7dnqeEE0kJZML5JHJt5Q1YInNMod3TUziFjlM/EMttm3MhEC/Fn7TiOU71CIXk42C3uAB9waji
SzRBSw28CtJdej6vtnvIkUgYTqxK073SsexJH+USZFFoe0GhotxDokk5SU9PkZWgcXmPUfk3KSFZ
UmW6tyzVFK5k/B73GOO8KMaIG6F3jRomhoeObnJg7ylmNkfkIHQiRJj3FY3PxdygRFvUpaABTzrb
b7Z3eWrypF+F6T9cFvm7Jp9uSRPPyU2mON5mY6at00BjXKUchPBVwPBFnrfTTGWEipM0DsV7s5QE
wkMgA5+epcTKEH8I2yy6DyJdFHd2LngRWrkiXPDTtHkEy3noHL/FXSlm+F40GMzW01WJnn4g3dqw
DdZzkSvbL8vt6wlylFANIP2TI+AeveikF30VEuckhmPAWnDIa0GVUKfutjLqsjHo/dCUb3VOH/k+
8V2tSZJtGxxM/UlIMBEB9F2xGVb5kuOwluvFyuvUuJjKB42Sg1kx86Ae1Aitu7o0XIJEjqS5FlQr
A9MhfWlOWp6vAoe7hXDE/EHGbmDLRICU6hbLIbzVbvZNo6bWJ6kv3ZZWu+lHUtoIF62FvL6IN4A2
wu+YFdam5SPk8svRMq9vsqBDqaXuozDpKGFAg/i30ta5aivGrRNCoQ/IBjp4JJC7SLq3U7IMn43N
0QUrMrR2Ly5E9SLGO1A2S30dJTHA42qWl+6lr/sJouiBaMYggnjtOpZLcVg5UR7FD+JDTvxa/rEY
R45fnXHUo4Rk9Kc5kN++xcP4QUw5At02PQ9RZbDHnC7fHipbSXhkabP18CUxZikLuN5hKgQiSeBo
QoFMPhWyKSHtspuGi2Mcf1//qQffzdCREkS7FAnOoUHyLoJ3kpYln9XtRkKQYkCyg5ZZAaQm0j3s
rJdqkVzFvtcM141ehi56pbB+VGKslYVXvUFWwtdEqhmfdYQXIN9p62u1kHbFBbeMUCnJtjEvZk+S
X7sDos3UOpRGaL3CK5GbQHRAcPdl6meQ1/CugZD5YOR+SxPp5enfe4L/Hi3YxJCpPBDwEj6lStzQ
ZC2z5BOrnBPeo3RBskpYLnbNVHJ3Ab2b9YHLq3TrnfsdHj2L8RB15XlIR9e2kbuGr9fUcpjz5ZAW
L05xe45v4xbcOtfRC8dhPLyCV77q7ekuCMhWE7RyFZMxxE2JmtB42hvyhMKfnARYtxSMscut7Daz
/eFu1WHqZM3zxcE2R1nXO4yjRCmuZV7Fp4HYNU1za+Fpo02I5FLo0yn+5OH509cvz14FSC02pNYm
to8SlRKchJaYK8fXwOFGjvfMbtPr/vAaVp6oEEPfp12i2yAOZPNwF2C1xunTIj1V3Vg9om78s64d
vub851k65H+6ZeEY5nEp/bhqy070cZMS9DDBkLE2w6YW57asCx5OZN/Acqxou8cNRalRvCK6YsWn
u2vPV+y2hXa6/atXPl57tqC9Qbc0svea3mM9+29D4sRO0zKfxCNlOSN/fuE+mbiA1y2hBjSAw7in
vm+sdqnu8hDtR0jQtyFSqkb7uvzp4fHVlugGKRbY2az+Nu6OSc4YjNdLFbln3eZGhy2XvsafAZRB
ApaJ+UDQfoDitGC08qbpRgXARnfykTnIRXp3ZwKa8sQYbDxpyi9YUOJWsUSTqvhUiS3wt2lmkTPL
8sQnablZGTd2iuKb6/OsuB+ljdXA7FC2F5XXc/HMnTU69iGfNgUc+opuO0NM50PIYUGPRY0yvjdW
Agfa1eCjtIfdSVM/MsPcXFEFjVvye2S8upedUBRENR9LreT74C8nK82j5exHScxOI9fXD4aihAp3
lvGCWFi1+8K7+/UcE6l6YMHBxprncdNJMljBtnvufnt6PnNASdyG7znXrK7fb9xoBK6sw9EpTZNY
8zy3ndS30TVwdfsuHFkLy6nhtIobZIvOEB++41X5znNsCLeAIy3uFku/idnWJmYMPRnNGjx4errz
DCKhAjYP+Urb7Gy4kWrDmHiur9alTe0r2HqxuR5OXe9CcUdbDhgTVW7Wkg92IDRu+2bM7gcJ1rVd
PYIYyKR+3qsz+j92G8GBfLAzEiQasbVnpeJspnpBvfiDdFu7KChAL6V5Tp2V53oMJlNyAmCv6LSJ
77CxnR/uArLUBvMBRZste5tL3HU678dv/t1nn3222GQ4ne+LN//PZy7KJoai42mNCGd9UHwHaNgY
DCR62EXdotVCziVwUmkfK7wSSi0YMlN7z/+Q4y9gbUh/ulcUu0dlOgc0+BmeFA9fnEdwpC830QJW
zAqx6zhBSJcWZRf3PMbPo3WG8garcPpssem8n7z5ny0IM6T7OJ/CulnBkYSXt++v3/y/QJSDz6MH
63r5YFjOHxTzD4LI2+kwQBnDhcSd37x6/vrlo7NXv2nJJzDM6+Lrr9SvP07LoQr+f1yOVuczQX7Z
lQtQGm0E2ui+yF8OMutY0OmU9+Z6OmXBDa1aPoyAJInXN1X58pqAz2J2HN4N3a+vzm8xwzNDnyFb
IJzQbxcabDRhk+wCoSVxblRDflAVZmCXRO0SiRNhLnVM2o7ZPYCSlnG3XCGNKMYJA4CykE3MCVW3
h6t5PdMwbFCDAsGPv4i6GsW1+/FV2X0zcY9V3jJ3jvGbnRtiqzTeGeuyRmAje7id2PB3VBoZWrsl
0RiWeCyXql736CvrweLddQMsQJjmb4tNwOjXWrVLzZaGKDDHQgYiVWC2wJSatb3Ku3gZaSnVEt3V
t1mN7QJuvjTk0Q9Tvx7oJsI4MPLjvfqByJLqqTVlzQ9xxoolfeqvKx6lJ4mp78i/m/K8mbkwOE3V
AAcM+k7PfDEGJjMYpLBY5tX7vHFUWTzXq1HMqgOeobBZ1QrlkonUZtTU9fOvw3gaWypwV0LDkIrR
Xv7hFKvDKT7dmnHEgvPklQpEiykxhkXBaTVKTyPnJ8b8mmHJyVeO3lnpeBr8WKfCkYw3Jk7RzUhk
PuQaM1widYLnBSXNmS1gxHXCp4mk0aEaEnUyyAJ1z3qV5fQFVYrAh16Hdw0etxeKK9XIGf2ndBf9
QTCi2fTXEgcIK8U5EVU5IxwUq5x2QAbCUrFcobeA+ljqYpkAVsJvzp5dvPz5N6xgq5HR255gIoJM
ojbF+5s3/6MlEAzgQKJsXCAQvC/f/B///rPPEGE0x3AsPM5vDuUt+feBSKIOMI2Irb6m/YyC3RR0
uA56l9T26+/6R9nfZ19THXw8Rl9mJw++zL6MEhB/bScyVAM7q4oOdRJ0Zvl1OYqYpHTyDR6+/OHR
89+9eHp2cQZ0+JAhXCpBM6MPLSrjPTzIOxSmPK4KxmzGPjEWJqjZhPjc6dgD0AXr9UJJetjPk+zv
oySfYq756xvOP3STz6/lqgZP4A6e27MSFPsU6nyiEjAh7LzVgAJcVWnGhgVlmEEzSbkEgWi6kbDd
Duc4QgDZKnq4vF4jPPQLBmalOrBWmFTCky0IqKdWjRAZbjEseFOtI74AplRmnNisQJGCAoYlFWY+
RRpQCxooVN0zIKYaZXUSZ74Y+okc6EHMmK/84NuLh99/F3csoDesX2Enqm+imNM+gSonELP5eDxQ
TSePWNHj86RaAitfIgdG54nu/e6uoPs008Pvu9TodJ5jUj1YDgvOOEdxBDLMXqTL1WpSrokFrISs
nXGloN7t7tLk1Mhh4M2GFxbQCCbUQK3CS9OrTq7zieKL22I6zaLkfOJuoFrMVrKJegIkrFbTTWGq
UboAtHqLadTwwyGFvY9xWbx6cXb2+PWLTp//R8uEhFTYR3aT9WhZLla0jr3N3kkeFKvRA3w6ME+z
8QPeFIdWLVl9E6UdzkGo9sxiWV0v8xmlIdQw1mQ/XC9Uq0ZqBmV6US6yzs+wZBHrG6gIT6GkxXww
nSCta8awnV/zxCCWN1e3wpkbTdfkfXcQvfj54sfnz2xOMXj+W9BniKi0AxoDORzdFKN3h0Vebw5l
Fg6lcsUbOtZcy+a1OZWlp6mNQFhrHI5YcKK7HBFFO2TMGEfMR8Od7Zw/e3Xx8OnTB4/Pvn/9ww/n
z35QE2r+17nQwxZqCPNCZWNOGSWdtU/NoxJBFktQEc00dHisIMcf4mqn3qOhSaXhCHcS3STzJVO1
WqhRk+uArAWLKFCz6qy1y3SiLZu52TDezi4RYGuXt/Y8Logf94SHuiWjP6zrlcJHsBukI0OwKYln
Ag06goNmsVdJhG74Z6T5Z9dhEknaOUdicIK+ymRtXFL6w/w23yClMUQB9Lpq7mxMio2hJU7rIyH0
e4RzBcmVYe6ZM41gMBUakg0nE8oVd0QnmK5HA1o//WNoDZRLfv1Rq/8/ihoLC8nIu8Xopor+I6bj
JA5EP49UKkc+ssmmjIttWBRzTgXTi47lrgzHrlOElisaXa13E6wpmAJcPRwxua51nip7BfaPnRGq
HnYUQIhw9dXNEuSZ3GhoUZdqOH921lU6OqFnVIsC28X0AMBnGD7jeloNeZo9vpkyGnvAEFLVnKgF
P1UrFv9W5pAneb16guvqkZo2HloXX8ie0CuOPum2GUtYdqAchGxD9Kwm1nuKn9G/rMuyATu0ekBk
AfgxGC6GnSjWB9Mzr+ayZyxmXdspixC+tWb8wZovYbkB9Jw+Fb8WfWlBbwaMWog2VPWlSBEKfwwU
JqvObd5VTo1HliKtDxbHvo2TNKRo20sH5ASEEHWnxDUSs+j+2nvauB6SoR4e0yWf1edTsR+h5Aid
WM+R4iYB8LyiA7kh+Ej/FKIM/hSakA55v+v59qt3/cgWo9prSdOA4xDHH8hHzfs2NaqyBioHbbTc
gQddz+jFJnr2nV8oOUt6nEzLdwXRoMdgJPgTZw2Oq9SLuNPSiXJFujSzfnrVBD40Qk6Huw8qRLmE
7ykeyWYvXZ2Dyt79rMmoJLXwFI5WzPdLrNooDdnXp2y1eoAvHnAKjzZl/fL05Cr6VnJmWFtYWZiP
06YtXxiLLYqZY8C25rOe6dmmdNWHxzoJrX3Q9QNcynYgbtFuTAN2vxBl3C9pmcm31WfS/TY6SIbE
939wsnT8AbbS6m42ff/uzcn/qhLmsshQUPI1BAwgW+x/fg0lD9/87mnEU9PjXM9Kt/1xPa7xVIYV
iuhoY8oTci335NAznD0Qtb/Pa+aEZIMXzZLllpcVsPqn+e202Hh5O6xzQv4Cra8te8eBikU+yd5Q
d76Ef9WyK6flahPKwIIOrt9GX1piFHZspK2icNLNrRnA7MU3SFP4r37AMMGwPzl7aUWAaSjGyTH2
n5HWGB8M9M6eqVytsgRo2rD3PxVaQ0ItkhJ+r4d4xIqwWM4/5NNyrJtcsnadj1Se41UF1eBEHWdH
crAj3+SvSJDEw5uJRHOVRdGPBaYhviXhcrTGq46OZBMab+AooRiOjQ1PfpMvxyr/9AoqQJ0J1Tnu
Dpag9qAWhnst69PoEWKonp72o4O7f4j+DP99SP99DP+9PLg7OTqEv7958uSKf58dHeGTJ0+ePL7q
HIQUWSp2fMTljo+g5JOrzmBaXOfTgQKZTY7ujv6hF8F/H9J/x6kqIXTry50kFDw5wiLfnElqEnjy
K3qCnTLPsF/4FDtmnlI38DH3A17ohmC6B0tcGpcyZRh4g9eIKVqFeSkl0+oWZGr5cQMybxryTsMt
h0V7ERWh+yt7NIFP8OYWpLlv2VCW30kfrsK9u8EjyRxmNjGv4Mx0vumAIOtWsWSWiuZ2NdTu5T/d
q6+6MNStZgldvJvyVa/TEtAC41zs3tgPZOzWE+kgXbQOyzn95kzjmGTOuktDb/WEsmVXwz/YV16Y
53k51q8yzu/jgoaUhMIFCyF4l6OHdHB37+jkDZIgKnf7fTuffWV/JgWEgSzz28SdgAz4BDp2TXuq
jDXkNP1b5zWyc3s4qT3UmcJZSOjXIf/0knl0nDgmzOsBxVA4iq28IPLbyRtjfyhZO5ZFTtqqNBax
s6WcZaQigCZ/XX4o5nTFme3oPotE9gj0ky2DsHLPLHf3GWokiU5kOEyagucEZfHGzWcGw2CN+2Ud
ERI2Eh7I8466H2Q8SjzXkIfD22l1jQdzPUUvLAzLrKPkbkx5R2W9q6oVxp1y31TIDV36tpxDX20/
TukHrlLo1dPqGs6mROrqeb20iN/I8NGS48Sq3iWQHTHmk8hK0617P+BFapzHeMh2upOpPZjt/VvP
dQ+5Z9Q1eHs9LQhMkOaZLrSUzsgzjxBSJrwoW2xiwlo0TFkWCHQOnYm7SSq3ofScI3XUn6aeB1BL
N+s6Oa6plJJOZF52eRvAKsHdpPReO5KC30RGcSvuFrBUQERcJu4j2CN1IuVTLwCxWQ2a5QljXD3I
hxR801aD7KcGQDe9ZNckR2Hlj9irUl05ahgpL9mAOGFKMY1PdWT70sKWwlZGIOq2IZypmW4sB4Hm
asKIWrvBMAk10Y6bq/KOC+nY5lvrpswqnQaIpfREEl8zPbJwvX2Nz2OaSt14DSrmHco8DBiFVxZF
+r7aqEwbWMbKYbKLt7W6vIMFwU5pgpA7HjADbZsNivPOF8rhI+lCcWT9ML2+PzqIwcbLOI47bWAE
tvczTijdl4sTdBI/kn6hswo5B+zESQ5iJnCXAxYEq5dfaGdo3xBhCgWy5+bXFoekmUf2vgF2Njvs
fqFabkmfyesF6kh82Us3mdqzJR/w7LhymFmGuDpU1VjG/3yAO7hthu0t/oWdr5g9tsOrRSH8+Q1J
hENbWwcSEKFXrBtpp1OmZQT074bAuOGHtNrj27wm3+8gqrgQpDERvFmFjakU8H12Ij+UyxQBV6nx
Kmo9B85MHpfTTZyGYuOFIzrUC6Qahvph8Ny8IpRunlqUp37UCTxVo/FWjUfFUO/kQ6wk8FaY9l89
8SpU18aK/5tOvz0WJiE3tYMg+8OsB/CHYv/4lEONqNVGAbWw/kVooBr7q6kQpoPCQPTp4Kx4nxBb
l8A//zRKbDBeD3/KNO6Yvk/lQvtMl6pnr+lyxqwYVORyka2JzyWHtQnTxs5hKtpqruHH9QQ0QBJ0
WQce7ZXgakbBhHl2/R4FzLvLf7AN7R/JzBGm6J7GTku2jXEnkdHzsx8rd2f4Lt71hZoO00jjg60b
6ZP478eiy7cnxHPf6vg8TlIHerHEG21BZrdC6mxB34cDDkwsSyjbQIlb4Y53t7Vf5j2/R8yXQvVt
wdr2xKBPQ1nePSK7LbX221bGvrDSJmo2uDI+mtZt4O92312p4dNo7Z+7bXTYL5tN43zQBo58lTf5
lqpgi27YabKrsKLoluFuYqs9w+fVGNiXIW7hYZbGyWNv4VxcRNUZt6NbGTjfRiTFmgKc8P0gAOK7
Gy0rbCRZqKAnbVLp3oLWSV7B5fy6312vJoe/svybVT+qxWALljCXwoeDcTGl+fQ/PAyPy1gn1jNl
JnEUJ1ua7vhDEjj47re/RvOa3K314+PsKDZjimlM8a+/s4blfm8WD3Uvae5PehcwKYTXgSTyspZ8
r6F3IABE3xqbWwL3nLxW+d1dGwX2p69oFrBfxPeyLycEse1NjSmbZsrsz0hs/aO0SaDRtKpDC66B
PyaJtjwUMhsaxntlx/BhGHZsnA1xMrF1LW/YSxY6+X7q3Cfj7FG42OzN0//A98njsh5VsB42ZDdW
8ToVIvmND6flcAldjmL1YSyWe2YvFEfFXm2DyRqFhMFAX+QP62q6hm7zb30LrJQL+4a440ajmZtl
FTsGX83rSUEZmUg9XopPkhqY3JeaRqj4QJV1DM+LjeLXs/xdgRiIiQYO7ImJyJg5DsjnS42fnb/E
eX1MJCtrLI2hTesh22qBeOUKc8TWq0fAYn8dcHbgs66s1TdoM+k5UR2G5FeZqiltxXYV34gz+gd2
th/bKEPZFDX5nE+LVbdWUAzQWdvsjI4EqsFERZ0h4mpfUynVeXCdwjIXj/CNsplXCjdOwn+wFzwr
NLGT8g6XDRpa+UJLHuClf5K2CbsH7IoLXVBpozBn1uimnI6X5E3oeL6rHrTWZqXCA7nSPxNGU81p
YZLsKdRXFFNESR8M1IyRHDAYwGnhJ1ywyIxx9h6mP2uMfa9eePp6QTTt+qHzpXRZue8043R4RKnF
WiSo328FXzyGF60N6S+3tIUS0XhMiQfKP2LOefnGPv4wObS9atjAnWaqp3oy7AQWtjUYt73ejrLh
sbKnFJq1x7Rh753BxwMy8Q8Qo8PzOgzBsdJRIxjUA7OA6VqJbmYnuXglCqtXvfVQ/jmWDPeF7n0S
inkTyBnehGqTcaBb6oyNfFLJDfWG0wG6gX3KAM5hQdgcVqvmgTxeEkUvjwR31npRRSSozD2u8VIP
mKEpjZhb5WyAL2BV3aUeSJnLpxP5vhfRvDAJ0kD8s+q0ik5x+ZSL5xmkjBuJKIvCLeUtAlZug3To
wkvsUTcEbYHaiHzbunPIxWLlxTcSHtjqFu8ox9lqWeZTfXzHWzA01isVsYdgE6ppUDjXK32OZNLh
MNhGC4mtUTp0tnKie1/IUlW/ZU0NBFm0kZs2yH1NdjfUVozIix1A6vM0uCtR9ERnHbr1yK1aI97R
xim1S3epbwNcGdU4DFOqCmd20UR3JW2HQ40Hy+L9Gqe1Bf+UXlI40VTYjQP9qrjsDtxVZzzqm/2G
5JW2R2VmDxUVXAJavuWP7XwM5ESnLwHhqJCloEwC+W0TpvYA2OftMl9EiwpvptApsVBSTkTrKCEM
Ad4oEW0UffQPC3TcMrKQbsBaGeYhHuLmV9fpUHu0talxsSH8u0xLYec4tlAlIq5pXIo9sBuDDxWo
iTHZs/fnQJMouR9q30UXMXfN8bPnF2en0fnccgTWo4leFhgcCpMgIXVxqzEyBq1iMc03FPoPpT8U
083p27kTiep4djOnQP0uiTWaCKFM4MgkcVUIupbwPazPDfF7UQNwty0tQ17WRUvlp7v7S7hgpyAK
v5ujfNRCvC3EWjp0BToRGIu9MLeTwkTsu8MNIM81RirfBikYxAreutZ9HixR7E5C2YHZX1a+Nr1E
zWXIeHxm2ckM8wqyCW7PYin+spcqnzi3w39NpXateOnQrJKN/FsgXy3LfiKlO+GJwSJtE9PoJTvD
jwasRHkklWua3XRQA+jH8R5joFsjSWNJA2kbyd22NfZRQ3mtL8VfrUejQrsX+DPgDoCbMBfqNX9L
9010X+LMa7Di1iMNbXgfcwSKdLZd5knYy19JW/r4XMDXhWb8rTDvnhCWbf/OmmcpYEHBW6dMOV35
yaKm+Ww4zqO7U5Rx77LJEhMXTQYccCbpcYyO3HV9dXXVwbM1s3tjLEgdPx3cpFzWq07gSgoNPXL7
gPYeDqY0AFQWihO/9kVZV6qitBmKjXkeXFBvZrikU5JSWBylu496cmTEinjmPwWYmdfZQVgsClFo
sawwqGUqBAjljPh4QnXD6guD7RhVx+2+rwhd6lrYyKf8VjyQGWFoyh8yIoVHHmo4EneuMERUbzO0
U04LCn0dUHcHyvzaSOmACSJG5F9sds/gAwKLsS8x/aa7W+vnsOlnrNzEUKL/x3xZ9wNp4ISSuoUw
HIunMdhirstBPwLNV1rFcWxvVJYll+zTpXWi+5vu289EVWGRU5Eu3V8a9paBP0FmblTd7Tfq9sxY
f7eKoS3S/Cf3LPW2RWM1M2umhbg39vuupC/sl0Ex4yEbRagP3sDY1r735hrDuupr5HZKjGAvNMv4
Rpa/P1aLIqP7wgmmMVAwOWiAONdtmOIuAyqZDcpH5y+lDx19Y2mqSHzO1jPlRaCEfirp9TjtvJ+/
+feMxzcoq0zWy/vqzd2hhuUzAXJWWJyJn0MUO8Lp6OgJpDGUlerxK0KNOX/eaQuFpPKqVOOrUGQd
hml+2VOoSsqQc7c6f56o71LbxX4S8UWgxGvnqzyMuW+dFHyBLDdoAfYm19rqio2L23Yb1MvVHSU6
/r6+eII3r2h4Y/95D39bdTxrdNWKEeRBQruGPG1k/x6Pyi1UZ6pJqU8n20eQjPQ4w3CSGEmeEzJV
RNzklAE9uaJ0b/J0FhhQBWsE9TYEkD06jbr1akxJSo/5b/a+PuEf6IP9iwYCeCye09xhvF2UBxFQ
elU9ILrmEudT1YfT4kPB95fjglEIqiVfKrbFN2BWwWI1QTgZ3ixyxILm3WeMWzWCRrJr7E6NwTOq
jsg0quAyCoQonBe33g0yTjheg5JvAIa1F1kUneNCV91AEBTngDPQOXpfZxfyR5IyysSwoCYLD2Z2
Dl/crdD6XWR27z0HDzWKvh6QI1DrfrEtmNInqk8+b8D7TrAau580YIyXTrq3wy+8DaYq70fjNY9o
YnlGxLQ/fQdifT/ujkLXJH/5GZphwmiMVZ2NxTqsh+5oEGregy5D03FN0N6aqxCEqbXWL1WdV17a
59twCvqlk34q6JzSOIVhCBNE0E3soTXU5OevAjIE73eDXZvE+PXY2zrEeCmuN2Sbi8flmHC0CEhG
xoAH5Kj4ddxEbzbrCxaLjimzJ81bQTJL5DgzLj7M11MJIYOHzwcvHz9/9vTn1CcIzOlJgtv5qPGK
18tk3LhO8ZOJ8eyGM7y2TbczxKte9Liar14W+fgJcKhzjJBLtnquqp7b5MjI1bNC7J2WVfrP2H+7
I9bCRBnFX5fIB9dzqo2mdTQt8nm0XvRE1qo5qN3eneTFGknsWZAlOQSRpd1KBz254Y3QWIGfh9HI
Fd3ronhn69V7k/gjySu1NCA47M4Y0tPhWi2vw0c+TgKVIEjcpSSZRcJXy/KaQnmJ5mZzB8lOLHvR
zrvTrdwoMGNcoV7KaeMghJcsNrDAsFVlkPLaMYoUA+vEwBNOzu/hesLuPf2jHnE7/JNOcOtgsdRj
RsIg8tOBzWe3tUzJLaSLIG7QIAGGEerBpGNfsNPxqN+PtXMAyieIlq861aUgrWU1NYc8v4vq8o+w
PcS2LbOIcDfqy5ShdWREXVsipLTstzcFo5rN7Vutmhn+mMGxcrznMncKHdt9Xo0WISPYDRAj5PDO
mVvEF0JGDQOoV4B8usIloCUSpmoh+1EtNmf5EKuf6EXiUrTP/6BvD1pwis4eVivT38Cx5283Bt2F
ObflAsU/2pFjj1zTlFpU7Zfy1mhMnO6Qo32dgnqylV+B12E898d0LFKned1bS96scPR7gL0yGVue
Iu7hM+Ehb60v/bhxSi/PCO52/MST5tIm+UXatz/YFZY8AB5X5DOrWt/azQVgdPKX5w2tRtHXNXic
Nhie6CpV5KsR1qnYVwCBJAT112nVc573q4QOe9VpkJ8W6YFaw7sSxzHAJoQwWfeKnZ5hGIm6kMC/
MeWArWqyMt2VhI26RCBVqbBwa85EPNbT1kCNphXgulSomWVfClkbovY11wXKnux0ha4+veg+Oua1
obMZDo93ID3EWeihx3DKGRaQO3qWL0z0jgwXCkX6VpWPVYpdeUA+xvLDtequaQdrgFK2jOGFScO+
T/WxDquLKwDh+gEPxI1KRilLFGGnH9IoOosG2nNUPsJKn9bbTnTG5MYuJT5Zt5/Tiq4Efo2XDoWd
rsjgdpsZ6Gha9Nn6IY4hdK1i3W9RVQH5k14Qp2aDwgM2JbALsgCbqSBsphgizbKgJG7HW6Q9qt0X
9poq1C2mlp3C6h1vuEOxvxe5mx5Xh/5wPAB0WHRXxiYkWRvkKP+K3iJvTMeHgybItWkFS36z0DRu
zBNyKSmBHfcMt87HiXsOSGdld9Dfn/cbzcurYPM8BFUi0LzzcXMF6cVjJfepdWh5eOuP1kv09Irq
eb6obxChjpcFegcWM5CV/1ho2ddbGNCcrGmcHewuP0nS/aYy1H/qvfC4V6uxsLknjxP5y5JOLxCd
i0sSADmhgSouQPyXnjx5fEzEf/L4pGPzi1m+QaFzjhJaHj17/fSpWJ/wk6MoyRmZ5oONHIBjlBzK
srUweTBZqvL5RgGUHfWOeye+dmEYVllrCLNy5eAxqx2ZOZJh4LQHQokxDuglf83Ku2IsEr1paz7w
rXb8U5nzGmIC49Agn/uTs/RiaDM+pSXmPocewHP4r/ec+gNv6F/vHXQL3sB/veeqk/Gp7q9XAvoN
L+G/5vkvAYtWsq+RCU9L+4Smj303v/lAJ3gU8lx24aF1P8qMwyuCdlxTxNoqpghad00RIlWzED22
iinSNEuqN10HDQW6uof7G1XFQ9VmZvRYa5qBvTXmymWyytQfQT+qoAWuIdYx7z5tsYs6GWu8Y4r4
YJdYQMjoYyqpPBmlnTA8wYYwx4Yw8u/WwGFDtl0kauxEWUfa1X69ynxL7qeQFlZeE5pVhDtacNvo
tr0nU2sqkC/sNRVK5tl+bb5l/tvnjneembuTf8G5ow1uwSV9+ty5tnDkW9u9kTEsAHhUyO8YnmsT
e/vX1XoV/BrnfffXAewgTYOGeR/dcGZFixwLb+Rk1ADn2oSHkkkUEE1aGby21/boqso/Ay3BKPFl
F7Y0oQea6U5DIDKrObROW4jsGv+tfdUmpPrb0BOnWmbDbcZajG3CqN0MFt/ZTGDBeUwDl16zGpyK
0LWSd5QbMdGZGHsdaRm0XeLVki67+3AgpqUi6cndpQeRSJSGNAil3IC0KI0l/tRuvfxYe+hazaZR
6kpD2kNb09Z0b2taaVE+zS+VbmHlwGq20oucQLLGxYEo0L5qU8xH1tUhDU8MQ3EgTg7ehdTyxSZD
MN9VOc8GmKf5bsU2jQKtILHYFy0FdJKtlhi3uCrsHob6bLJqNRSTT1dLjGr+Z1l3IBUfksq1iR1F
hTQEVq4Fs4CSBZVcD5LS00iy6BzU3Pl0Y9uN8a68brb6Z/oishPOQJM6f4nexdV6dBOt52PMd4Bs
GI2S0WOj3UQJImHbGptOd5R+kjpjNBdbsdmit0zHlXZs5CE2S8gW4RK2xKFLAC1MiXLeCUhHNvIn
S5q4qbpBRiCOPp7ZQ5sZdglkLYIYD8MXXknusju3Q/biWsI91JaITkA9gP9+mna1RXJRdPFq0nNp
nXaNzxqUMxNsnV6NzxpqkZ71yDqt8O/A3fW/IolCL6mGFBE61pskla2zQ8YImsECF8N6+TSEjS29
cXTj6bihCzREkaBVrK03Wya6b+/8j5Ez/qXk1X9WiYYXQePIb92O7j7EgANqMEnDSkLofHW1iPbl
0yra4Era1jDJOdsaJgUkfMYb+6mkIfb3vT7ltRMAUHo95OMe7YDjgvPn4a0yr7CSMhMUeHPckwxB
kbU8THI/vhSGyQNGwqlBqrouh5hZCs5/unVS9sGOXFhMcsxeOF8pzwY4jYtiTskIytrOGygw0Plw
StfUmJmQVd5hMcrx4EYRYL2qZrnGlwMSUp6lEtMxrSSV1WiZ15jTMOe9gnnVIgRsoYzSxXTTPOlJ
0mPueN+7f2KR5vy5XBZgSbogIlmGaHd7QxfwfD2BUpWMP1YozvmYckZRZNHYeVbbD0HcWElOWXpm
JYKmC3ZvZzUvMqBNyoQExHmlZnVRF+txxQwCAUzmlaoujU1AXIlH8abRAq83gx5AN3faY6cRzqRt
G5bbl1zN84POlvt/Tlqjkld356uu7Tdr19d99vppN3An7pV6AL8f4INu5/3izX+w0G/QQWJJd2zL
guSO9+/f/O9/JBicl4V446gi0cNXF7imFstqvB4V0bwciYCroLBqO11WbmBroNC8Uj9wZlcVOo6o
B7OF+nOWL+ubfLo1fQrwitEqlEwFc0JbuDkuGo4ZhRRYr8opgelwAViqoxtYhEyGFYLub0Y1p1uH
v/DlYJB1LL0O6iHcodVglV8r1e7Fzxdnry4GFw9/QLFrtsjkfYIpreJDfh3bCeQtRRF9ruLFZrEZ
2M4attaIMQPI+rFQ3NEyJB6PmO0dI50JHfAP+Yc8bn72B4oWisO5danEaGEV+VAo4dvuT3OcnBwE
/iPDQ9AlrLCHNVweXfG/x1dqkxHYFuW7hRl48fOjwdmbC6wGkeOBTMlgQOnxYPujSBKPYnSdiYEQ
VPji4flTKo1lrX7gD6qq03l59tPL84uzwbOznzAF3KvAKCgf0+eU9z36Jg2nhac4gOgk7Tx89ej8
fHD+avD47MnD108vBmfPHj1/fP7sh1DFnJdHnUgP1brj7QT88seqeud6AcCueXH24sujE/Gjj26g
iOTBkW1Zyzass3ZX8AYKHAOy+AIovZNAMrw0+qVxw8tJAAZ4pmH+brzz5fzFjmpQaOA4JUzzL78b
E3Ss5KsPBoiR5AOwM6DzScwrjjLD1v51sxmC/GWfBHMvrbwCPFnd+FFjSkxR1bXk7zbJ600napPU
W3qg363QgY57psY0kFnCV25BjtdMYqvbKsveBPMVMlaZi75hQ+ZnS4HWz+JedGwh5uQMxK+R9W1g
/QWxf1/9oFOh1ffpAA7MnPKlSOpSlcz5y57+Mo8GOhvUC8xrkPZ4uXo1kahCaRdBjpnVypkQiAU7
ZsUpo9AVJ/Nchajb+CLBvxvewpg4kF4grzsOWN/peywBm9EZOXe/OejgVQT6kU3mPboSFm5uLzpF
eSZL8NosmJdt23qTfk7G24FxJuOGQwGNAt1esbeXJ1d+lfiOx/Di5wGmvzt/evY4GFfpnm+88wd4
lg7oFIxbECMmc6FR44tkMv+YIEuqaDJ3U0Toow7G8bkex6vnr18+OmtWc4CKQHcVIfoEJ8umxGBl
ne01CwFY5rmV3YO8yBboxs8bc4HbhYBFYHOmQHs86vEss4yUcxAiZF0vNlwNpW53aHMQndfc01wg
o4En/tqHZCJ+gw535Ypyikzmqb+Df8KsrB8KyWNMSIajfFlgbln0DzNKBK3PWqdYo+xso3zuVQcS
6vU1euRGo81oWmQB21H4qGnfWpglPqd0E9YZ0Rp0q8kHBSm/FW7/tD301uG49IEG4Ub9hOLAIiBc
L23F04uGoHi8+4hl27adWyNHAwfclhW4Y0gJem6ZCCvM1jojtSqVsLfWcXpkOKC8eAKwBDXJUTUF
iaTmJJ3W2iTgRJFQKKneBW8zqzZKFlzVK84YnE8RKEsnBoSDARXtU8JeY4WYmEdPV6r0b65MTlcO
TdPfGJGdv85w/VNCZWI/bhmrNnUKV5ytOmf6YRc3I6pjxpmkMZXzstB9JiWf0jLCI6s6ONWUa5ao
FDI8rVXgmap6TumwQcMAmlVR/qEqxx1nw43ebSKcbKx2TNwFhn3LibdXuUquXeF9BflLzj/kyzKf
r05x/uxe5bRSoCk6uKe3+QbZi8mrzX5NNOTni4LT1NCFRblSBLBnYFXNSij64vmr8zcYS0C/YXwk
g0CtnAjsBoa5MXyC57LP7Avk5XE1XwmSOsZnjpRPPFtaUNXifL4OxzVMQJIFE0qh1s3ceC2qfI8z
HlqYvcO0trrZj/N/KRpwAqDnZKzvhtAD8BCmt9nZ2ZvzVxdhXnIQnZUUA4GTbI1Rp3feaC/N4q5E
3OPE3iieEFbNZsgPEHUFFakSY0CGcPpgRpXhBoNFqvkhEhxzOGXR+TxqrwyUTEpZTVgyt0V3OlUA
9CVxc5lVXE4t7kZ1HWCqRBr4/FJo8+z52bOLXqR/XTw+f3nVRqvnc7M58YjFtQ1ceA5kuc1ZjMoN
5XrExayrNrcydTTC2JbCFv5YsgU4PCK1tl1gSW9oZtYfPnp09qoFqcJm8XRLSLm4dc+Fk0eNjZB+
UsdaDzInwoRXnmgYpuUhLAB8CFLal1eieqNibmHsjPwNrLdYz6rVPnmeVSs0KRF6Lbpx0kY284Ak
OXRJ0ovOu7PousLgB4t9Voy1QGJGbrFAOaDQkVQcONHtZ5ZPsyyzcuNhyAU2hsvYsB3EGxv17Fly
OA58FlQtvFlV50CrDCLiM37W033hXcXASFhfz0xFQztq6wjT5gXoYjnZqKNXGzhJ7oixRXKgkKns
YzSVAJ8l+y2iCw30oUuElI474yOKjqp0X8GH4EjVhDqmOprrIFFte8clLrkryjRJjYfiFo2JAc/p
ponByRPZk4VuN0KgRv7aPgdtur6Bf0Zk4//DmkyOU0LeyKghyUM7JhEdY5dWCCEOT4Dvo7l6VXVs
NoUGGJQwFnjBT1xba+onX/REjYD6b9WdAp7JWP2oWuLusTjgAR4DTh8y2xHBwx5SJMSOz4tbRSB3
wI3DFkplejhI/2xUDVQmGE/feIaAjSAzq1EoEzmuaxxdFqiZ1wTVbc+rKcDwvgPlGOwBtmlPFAw6
GSQ4sfwZo/X5IemnHR+cqkGh3VF7zU86HesaYrDIR+/y65al1yDwDmuFOz277BON6449DBMBo0TQ
ICF1WwaJ3/4wgPP97NHF85c/MwV+Y0XQ2N5L7ZbJ0dSLlNJ/n81rdOQRakb6E1R0cV/QwhCPHFpv
AgQd/Q6OiaGy0pkNgVGtGGqFWRFQFeHg8ErJzhQtMMbbRsRmDYcuNSZPXVrYQ/I2Bd9JYuSrtR9Y
rEPxppiAgIGxrozDv6QLLw72cpe63QSsb1wXFgjYfgujY7GOVyXmYReNh5NwoLYkmgYaOVbFWJ0v
pKtyUsaep9tFjzliGA6oD+WYB5AvkMGBXrOyhC93BHpNyP4mqC2KpnPLebWr0Gsjr6rzCbe+f0bp
vAcXxehmzqnlSRsbk5lJ2Vr4X9RiUVySQwl0dVgj8n3yKGUuDW0Q3aA0zF+MmmCs7n/nBV68EIeH
sw1Ys/AmqYNnOIt+rG4Lukch9JUuauerFWWQpmRe4+JDSQNCjfM8uoEFKhUQnDdmVWHzEN4x459i
EPIOI7TQiqI9y6LkVaFqwXGiaiFZQ62zEmqsPhRZGoiSXqg0MnTwx7dD0drUsnvuLTnj/rVNwXJE
BL6QVGIWTYLk0sYUZPRhn4zt3AeMnbTPaZhUUWgwPkhw9ZAK2h4hVgXHJ/yAJ4FW/gw/5EnAVYiJ
S8jGczgu5iX6FNgK3bAgo4BVEfW2WFn6U4MZezSV0FV120iGFhunYCa5d8o5SI9mZWf03JKTMHCs
b6/9jB79HSWM5/81W+Xr2Ay5axJ/O51iInSst0f12b3gq91svJ4t6HydLPhlw0Bm4SNYw2d4+pfP
8Orv7fLtPFZxyZKhJ+3wq8CLzqiq3pUoj5I/QSaLO1nG/3QZvV29nVzdP8jus//q5Wn/Ch9e3b88
fHubXX0B33///HeD1xdPfoVX6W/visnbu+EQ/n/SFR4Sls/N5d4F6OvK8ZPDOXkx3Z/M79vxnbyl
xgrBIAsiDWD9pIplBJxjzR9NHL8iJ4546W2vs/mHclnNcct6+8yS8HtGzocDv/Xm00Yjp0LIr3EA
AvxgRdlryfQki34q0ZllRRaosau1cfISwdFWX7AvLVmr0OgFnIXc5jaE61Xbx4pVE5SpgcnTK+7b
EpOpZNGrfKylymEB7LrEiGNxnQUqrTC5vSNnq6VClpMcs+pgNgt930UJu9nThgD44CfHVnP3ctsS
OoWhzQ+PgYk+XEVT4A9UcqNleBHQoRsIvlGuhEIRL946S6MLu2d0uC/pwIHRMNdTY6KziRUNOIFG
hd1zxeqJMI5tlTqupw3OHQZBJwbldiaLXgMrXq7W8xydiHqkstuWVbZ6rhcUjo/ZuYbFEk2dN2u2
YqoTk9V0WLgg437gFPEwuZRrwqoO0wmFp1QtAJKC5pUD7aDo6Cg6kiFnnEVP0G0F+Tzld75b4f11
QemHiujg5Ot/yKKfQf5DrVLpVd4V5wH6v8t9zbK8vrFkPVhGx5qVkhiexA7QOxQ4CRTo8Zdf2Be8
QJKE7KZc1srrqdgSeS+4IfmK32V8W8LfXh6dYvVXqY2Ats93qlP4+Yn5vAnXpz1ZmAfGA3Q1Q44W
ujQ80LNVm+xRa9g017DT6WaBpSvhllvNEr2mcYKFAt0DP2S91SLcCqMqE8DVJXFej8oybkU+fc2g
Go+p9BYI1IPoaYE+UZyHmtKvazjObNdFU+8jrptISXVp0lH2U7XExY+lFuaG0gwuvZPsG763QAUE
fQmRGRbv1+geW4FGfmxk+YOI2MwyYizxOro/L+8QmYZc/DJ1rPgOOqfNU0zIrWBmXj6DwaIT1bPm
kbhaFviFYXL8rXP2WcYu++h6Wsjd8KJaIL+ixae4SnAqXHtUroRkavg0bIEKz5iSGuRSK8FRBGRm
MgGqYwjL9PjsX7JNMkaDRbzPUM/R7i8uqmJktY6OUkXJg3IACse10KBx+YYarrZispXqXDYq3bZX
wMFhLcPhBvofubkW0+lO6sn4PpJ+bEURQ6nIYh9pePRlNJROLcMicQ54fB/K39fymOdx5+Ravi3n
sZOq5SfYbRhTdA0HFoGajKbruoTjTxR6qF0DdZlYHq1W8gWbVd9ScicDfyQjoly+zsWjeKKB0gzR
91RyA26B0H10jCYUDkR0oaGgzYN85uXWr6z1teBjJJ8nxtoHOfYyFysAX1aP2BWarBWVWDjGUh95
ZgsDwOtlYwKAogM2vOJ/vxBnQIIPIlj9RTlOXDT9fWggtaYNVDgeaKIKCLk6OqSOq5WrayrAiUk0
/n/k4rS9UI7irBLo+2IYC0ojwIJQhnGXYcZGl5cquM1ZorgiOS8D+njAdqbjnR1Gsi2oZZ4+vtyl
j/s3AeQhNllsMUs2lU/WO30gPUf7JMXTx5hnFCdQDkm5OT5pWDN99QZF2sYdMDKerpm0e3V62vzw
Xt2lxKEyo4XX19B9yEH0SF85KbAQhlVCljhGV0Fli8gc2Q6d2Qh1Cv2ajk/wI/x5efrVlfJ0srT6
qFo2HUlYBUcAS6OEUx1fnV5RtYmjku9DkvYhIGXs83UbURqrgQ4zZQlAk1GirAD2PGrUv30n0K5R
zrNPmMImTDeyCHIrzx7BLkNs6b2IxzfOFgv8GJoplJ9KLJS+mDCrxoaRvDRuMnI+8wlLeO14hOMl
htr6nisyZnHArHdUozKH1vkEs4IwDJtu5ocCB4TvCEKMDvm5AkukS1YtG7jxAMK0Lm7sMAI2ipJe
maAXvknQBZx+TgYA4A7kg1QuxQ6uJEixWXKe77Exom7Y6KxdeEY3OUzNSlKqqrNDXSyZzmStHWC7
AJsK1jJKDgWCUxcEJSIEh9sYoapS/DuqjFOC6hN3mkOB0L8Kuo7RzvIaetV9O/9Tl9Ex385/6dKQ
2FlBhq3Q28QW/J9fPX9G/VB0VjNNfSNPw7LKnClV8osde432c4q9dlKur9wIbSwSEBC8UmT26DhB
2IulluFXrAXDnoI/8C9cduEoDZoXdS3SnB7kDIPA4wPYe+9zWcvynuuc1dfumn7CpnkSVnhGzQqV
5cuIdkJbtBcI2hqvihrjF7R8UpslSocizOBfuiikadNI+0ojG5dlzylmaHpZkQfIhJeNMohoTYo7
VJGXEG/E2p9vUtTXNWFiL/Fe31kjBzQ303KItywFMFB9g5GT/M8q4KzICf0YfTUnGpi3oz3ecK3X
jDdLpp5yxVYR2aC96IZvKdiyo1gGXmfnUkk2wMQXCLCYSpu6Wk1RqFVb6aB6VXsmVfyo2gD2w35T
ZA+6hf/LOdbOJq6qNtuyF3Cl4NnnvqLVnbh4mjWDZFpYYQPZfcZe726ZesvOdGvQBotwJ+u/3W6l
IIfwPp3/JU5Tk+yI+uYOxv0w9KVS1dQhQ/dcPO2cC4uuIS3KygdO4D7iisbqwjI22CjWgFBjwOss
AQm1ucCwqqbVIrE2HvBeGE61dJuMEwpBojccfkTBR/wf+p2yN5NdEwV3pbG0SIlkcHAEUIyQn4u6
F3HiKmT48CH+k49uBmbEdEz1GFISS+Bi/iPmwEMzJwmJUEua+vVs8wjAOHdxulxa18ntedGtjxxb
WSMDjesfTRl2gbfZYw67CQij7TfL4+guyytDFu8Hmh/dPARS1y4MXy5mzzDSDc6d9TxfbkCsXmjg
PLQfPatWpxGlDrlXxz39+JwCVuDNX5zHr1+th/Dw0H34cDyGh1/Aw84vnc6wnFeLRjvfl6vnSyj1
Z+tDePYGQ4Hif3IfPpxjfX9nPXz66qacYHe+/dZ6+lI9/e4766n0xnoinbae/A5WFDy6bz16XH6A
Jw+sJ0+mVbWUx/bz31XYwL17oHQcaMFsok9LS9zTn5y9hy/6fasSoDs9/Nx++JSG6Dw4wyd2mR9o
wM4DLPOdXeZFdYujs4d3XsOT0pnimueeF5Qz9/h07naWHpK2MKdZ7qiQOHJ4oPTbhECNB828QmPD
dFBNJlDCSCCv4CTDUpH6hnL3IrHYyDZaL2tKE5s5SCyT8m5X5bJFYi4QIy8h396BQY1xNwu95dIU
eYV/ODWZJvatzXxB9kv1w0m9Propp+QbglSliG96MsAKahqkp2/R4KlMcPQdXaaVQO7xMi46LYGa
oB3xVI+LfwSNcKXPCZiLh01LaKSzeeWMWKz1HnTYj+D5crOAY4P0BYIXApGPtK9UgJLnajOhB4XS
QHnNkbhJQuLqhiyFaCET0OWm1scXyodaMZypG80F+77wTeWsGKN7CANQiNEN1teKTbKePgfSpnL8
uFmtFqcPHiw2Q4xKzYbT6rpeVKtsWDw4OTo+fnD0zYNhcQM9PKxHBchuh9XkkIX7+hAEr0Mj4d+s
ZlN99KFsCCz2Q1mQCRmkRhl1tXxna5FEyIgpSR4XUEaRkYRHIhNOH2U3QKlQKNXR5ud5MWPl2KU5
kgC7gocOc06PtFCtclgY1zKDH3BtqDnECBjs5VyeD7gGvHPN3wGRuTty54a3hqD7gfRAqiNtBzhS
aTFh9gY0oVuzSv4DbAlVg9EYJf4aOEWBWS2BWjv2itquZ4DXVrVzXRgLrF6q5cS0xoZADlZGN8fa
aUQMAvyhWWC20ibaLMnsdAV9h+K6CtSdoBht6WC0LHz6UpgpfvuB9yoGAsBg5jg8euSsAsQxIBaj
NHQz25nENhXIYWgl8Oc028w29EE21xVxKikyey95UukS2h4kH4awPh4zjAgOwyxJHOg7lMooIA6W
CV48CQ+IbDZLn5yadXEaPRRGgH2x1ou1G6x1I/PSsW9jZuSmyBVTENEQ84+qUC68GFtPyYdvuAG6
SwG15JlE8lBVSzonlCbjh+mSKMNsrkHny5zTa+ARAfPEN9K6b+yMMNZGFF2P39fBqMKErSA4PeI/
GPekQLLiRlrPy/frQncS1cpibBw6hxu77ij6R10OxViuB++lcdLs4Vt359rvj/AXoMPxbxYbsTsc
xaq/GOPO7mQUo7VlwtDAxVkoKf+KdQ1nthWvU6ivVugx4n9DZkerMtCmBTLDIqzya1OGDtrmvHlp
LalmyByuBmAt54EKNVziCowoNd6kpGh2XCV2SZDu85mx5dOquXdo2QBpcd4QiFStNlK5dLdSR+su
ijXU2mWR72pRBaRN5zE5NURtdFe0UosUvfWVkiiuIOwkQzUCz9IV6L03eoeDpj/U8gzThghT65g9
alD1I1us65tmw0iAcJfkTp4QCvVc6m7O2RSkKYvGUZa/GZNHxYsZOs0Lfa6iNUuxIQb4R8ZNccN8
FHkHIfHEgp3ZkI4d2UjkCE39ED5sGybtFN/i025s2coop0SboDH7PpS/73jQkVeqB/mJUjmavYfV
eHPaiCwg3yK66a2ykGezdV8+5y7ghbgy4aqAaTEcIg6cLABxfUVTcT5Z0Tnv+B1VI2U8xP4PBpP1
ihLEqipNZ/JpmdeE+3CJ5xL9TGyrDf9rnLd7zG/U8zgNgiObuuJW8J+Yq6Ls1/kyTi0sdHLnGPx/
7b1rjxtXtijmfEkAJkAukOAEyKe6FAQWZTYtyZ655xKmBxpZnqOMxxIk+Y4O2g2KTVZ31xHJoqpI
qXscnzx/Qv5T/k6QX5C9Xnuv/agi27Y85wIxZtTFqv1ce+2111p7Pew0QhXBtoIB33dhKUSWuO8x
/JAhW64l4vWB67CoIwBXlGMbhUhDC2LTJ6dF86qMOaEwyr+7epjKuVotJD05Fh43KQ/OPkec+frZ
d69mbMaCIpGp3mbh88rhB2g+l2UDZ8gyZVHRZfITfWIgfzrFiBhmAMPsJHuQsEiK1i72CURHxTy4
/3LAJocACKvGPqQGSpSq9KvsfupSMqMyPO1/PwXVneB8yhrMIgw17Uug7T75gHdm/g96vieF3T00
7vwUMf9MJMJpLBhO7ye9KFE2gbooL9DOPNPOUYjGp2YYE/N/douCAeiL4cocjJCzgiP49LScjCM1
7bhGiV7jt1isRn3ekn3MgnSikO0STUOQffXEanyTlKitYtcuOjO/QRIlbyDmTDCjPkuH7yBFphXt
i81+jdIYNdwRtcEfBkv5jjXvqMnbrJ5vGmTBCNLjzvJmEmOy+KVwyyTJYa8dUSHa3WZ1u5wCmxrr
LN0+55evDkzYYtAR3UlaR1KLEJaYgQ7T/sp5jA0ypFZ664KukOKKTZI0w0YWxsj6D3Zyy9PaWLRb
wpnvsvQwocthG+ZqeBE+Or7EMvdx3E66AAe8l0Ie13En+x69sNyts80XA86paFtpJOzNjtnCHYd8
wDCUVx7ZZ09rJUGIZdMG73wQVQPJJ3R2taKOzNR3/hPtFzhe2smbvspL4csQhD4AwGLtHry/B4CA
uDcaADxqr/cwQrGSsnhYvLehXyTWEFMqJ+Skc7qqwU7ojAcUzSBR51uwKRnqYL9lYzjzm9aJPQZu
07+Ow2jdnh7Cm6tOcXpVrLYFph6mqn3uwvXPJXSEsntxWjccxZwL23A8dHh6nRusYFjbyWuOzQeC
Zf3M1ucaNtxkzi3BnRpgWAzCBLBhmDm0RsdDjhMZmZNgpIwz1eSZJU17sKL5ivV74aISE+0eFL7n
TZ1LJOfvmN+Dc3AQ0A2Osg4cisXZNmx6ofOvQovLVlmXdI2mhbHzpqRI9KhygEv1g1VDI12JP6Oq
We+GYnw5zt68Abe3+8PmzRtSVepmvRj22DxoQLCAdeD0e5DWrWqn3KnAEehdi2I2byigYJwusEoL
uQoWXrBlkagp9uXtKWJaPD+1j8C4wYxDhOnfxetg1z/c8zZKlk0J8fHx8Zi0UYQW8eJ4GFCEsLVG
HKBG5eS3WrP4YU76YdsGG3EITDjGtNML8FLGKpLMyLzuOhlxUXSy1AabZC1lcefLpejGbL2U8ges
VcgvoUW3wXNU2Qus66yoMYycrZUbop9ueBBOsZIeQQtitSNHIjQmanzk8OqoqkhHYraWdqxmMQFR
VkkpVQ4oz9JbMMAh27wKKs3XHUb8dbplqx1kD3i1/DACTzcnutAWDZ/oIUHr6EjJUmJ0CeawRT2P
vpmXS5Xte+yUpB6nqRwkhe67ObAGJoRWy5IL+KZ6TX1pyoYLRVVfIiB5G8K4Wl4YzrfFjRVIDfRz
83uI29k8oA8/DWkM5XKldGHEQe3qlG/lFzuo3vABLDVpN5hDa+hV5lp/LDfPyPQFUWIkt17gxqj6
GCaZUCpwe5LrVvL27B70+fPYvctiU9TlYkbSHPMdvtRrkOGfrKelFU58Z342LkFkNYPx+BBWTSrh
g6QqK3sEyXMQGsxe41CSaWQjxHKMxlh4WFNQz9W7A6Op0thmrVxWcOfgXyrqS0+ZW8icHL5VVDeK
Kh/jz7pZDBwuf+7tIp5iHmFrv2FMHhAKYp7awxe2Or7JHiH1FkU/H+O7hJC0eJtoSdHF+GPyJkEJ
p34kd/HCOd9vFleUesJdrbkDeDuzkQdHHkBFzMMdxgiH16a6S+Z4gnsm1z5ozWxmCjs3R4CKS+kM
TarAVsuQMDa+AbrlDfAoSfPpRS7NjrB/EFx8JyGZzbq59Mm+tSGWMbNYNwhtjAcj3UgYS1ADsf/D
5iteECCu6mNHMCw9DjlN+v3Obg70AflNVhTI2h0VtpuRmJAByKVD1YyO7+CAk2B6pBt1Pq13ATD7
sXE0RJPQYDTbPZb//NvKFgmwuF5wJRRipSGDA2YgZ7EMq3mBRIT3r6bZ54lk2jPu4wX8MIBahM3F
S9pVL6wNWCsoTfW8Pbcq5jUuKCU6cZQJ7rgLEjnYIAgbHkccj7v/98bo0biOo7tdXynKaRvx1zbp
a5EXOIdppngFW3Kk1hyG76/0MJnRKqAD2LyG2TfltR+jwLuuanZrF6NYGV0EXSmrP6jhSIBcM8jv
0A4tVCe5LsJTHmedVKPcyb4m9oD5bHLPQXEb4X1BAT/naGW4clKhurO5026SPQQ5cPO24UYWYDOM
6k9301ctGr2zcCKi+Oljp3AliRusdZNtdCtsESzkBpWN5RKaIHPMHFjEU6ihLIKXFRvN+3q59LT6
oyDSPjIyTAHN4cxHzbMayN4pjW7EXZxpmrIVXH168eR6m0MzzMoJz4b9ONopk0lqY1u5wECnSCjB
AyWkIPN2L2FNM3s/rzuUsigPgBQR8Ki4p0C4gNVKbjCzmSJ1om1tZI4c3GwthJgs68khVPHQNIFx
taVaz7Q8IcwCcObTaKfYDWt4iJkuk+Ar8MoSEA0uTLlL4kf9q9Oj2Krn7LCMNlAY/22zHIG8Xe9O
FmW92JNh6AUbHPmkpRxl7/3rMX840c14mYiiDjMuNxtkLBPXceihgxH7wKQDY6huawh5s6qqLRtJ
ApdwXqyqDy1ZtJNSnGGloOWRGgExU+KAdaAt8OO1NWO63Q14xm52k/C40fd+KX1cJoVP3iSR/EmC
VshU8ZA6uBzNIQUNoNpTkUkzLAMy1WwHeTywGmAIkUP7wygWbJl9yTgfYw/ixtRzDfFuJWdtIUu4
YgtjDl9jRqADS7tZdsIzhWKH6ptOmF742BUXA1rRO4ChXhlcLcUwp5hX2sX9UaYIIkJnvyanoqFP
gTuwyeuuXRviITFT266TZBWdJLKKdJSgY4yX0IxMaaeZdZk5xSdDrCHwKljPz2ZuXZkSznCPyo94
r0oT+L1LZ6LxTCqNdC/DtEZFxn03y/UoRvFBijIPn6PgraPd+27W5xWM3LrxnOJTy9xXxcWOdWzy
GEybasNHNWqIHsXV7HOyHn71EUiJevndJsP/DTGIih3BiKehWz8EcQKKmo9MWzVSt0Bey5kBrC2L
OEKKqCANoYQv9pD6Gf5NQADKj+GbYkTqSywY6F6gKbiajN++/ZB+DxFcwLRGioSJTNAMqL7M+IJy
DK1MjjmRTEGfYsnYrBhn8NcX8WVOUsJfLVYbf6jqpR0N/z5uRFyY+Ix4bAQhTYS5gq1ovqdOyGjc
qjxcm01FA+F78cH4AfoxRGVdUpPwqh0YR/9ee88020kaCq0dU62D3Sb65Qb7d5tcdqnF9lE2MP8j
51LbmgIyjCvkItymEbwaqVUcHTJUYvjaGp2qa+7eK+Nt06O01/aUgBS5MwePHzY/3oUu4eknBIw0
P8rcU0i5HM1x7UUKciujsI7c/Pb95AJbIigwXuyu3Yk6TLqW4uz8Cwdsu+fUJmhzin8SBA37CbZh
mhi7KYgZK1QlQ4ykhPaLlmaLPK63JmNalhCPoWTLSrlJq9G2HxfROcFKAD4qzK8wrWC3kIAHF7be
cg5Dk+ExTIyU03QFkjPJbRijzzplYzPoma2YAAiFLLn7nLDx3lsrbOw9BfxT3SrNPogj7c2ItHKw
nRIW0/ckd4Mm0I7RQR2fyB17Dqb7PtUKrzdu1sEb9kuHlwL8s1DiBe4BbhBnzJDBsMtdSH6veeXw
KV453UAkFcIQrNUbtxQIUjdrj51LM3IyR30QvsQcreuEZEjbhXivBOulphLXjbpICI+8repAQ+aQ
3MwC5EeZ8dlRylAtEitsOy3PzuxOroORpPdVYs0C28hkfJcgYsAF2hCBmvG9Eb18LSPKQnz6eWJX
GGyhPzpoNAvzfrXfgqGnWWFfbrpFZbfNf3YTHAriZ9a2kSCSRwDnybR7PfsqTJVJZ02g83y0wbse
N7nOuwtswZXV6TaPvdxuE3xZofGufv0Pn3zyyfZmPFuQ07mgkGFI3jWv39zHZNU99Bzy7pkLOIJ2
4kxPxKncJDyLdlc15Fe3JoiPXr4a93RCdrmmdn1Xq+V4e4NJinZ7sGsa93Tqa5WVGrJghzmp7WSi
pNSJmEaj7I80Lv+aq9frdYWhxATQ7WmNH46y30FmY8lr8LIoxLP7fG/Yf0oUPa7qy88wIvKDL/7j
fyDneqAJsOh5n5AGXIZQVoQHFtPh8VsMfQhPqBqHBzBeibZnH1TNQNCgxJ+AmYLDh2v8M1how8Nj
2dwZmsrGrbwg49v+d/s1/DF0FP5Yvgnf7c+bRV1ud1gObwFSY4GvuL/4WnUGVzo0428Mpw0L8HVx
gSOBQ4OfycYCZ1lAXj3sHWlr3Muj/aV8yvrPa4O08PANXl32/wreHwQ2/GlWE9uH2764qVf1DcVx
wVHXN2xlx72D8Ti0hLjlnsCLJ27qCcSRhTXAaxJ4Ar0iDtFME5cZvG9oNTYGH/cWQoATM3FmaYpd
fsnuBnPQfuENiHcBSEikwHuryrgeQxcPo2zMviR9ROjgouI8uEPejoB6jRqC9o9vyA1fJXU/clwH
TIiOHFSyFSg/tLEtviGlZm5j/rhYFhTwkvJqkeeRDWQPtxdAC9szjnuqnmm/H/LolEovTODRan1J
RrhOdYRBVexwbCzHi5ruyuhuD9S0U/Rn494pY1U6GLSEB5hmX0OG+afStuEksNVE/GWuwswnxkpU
UZQYshPvFor0x1HWknDhDKJz6Zz/2jhbam7Reuf9XMVSRIMnisOMxsFgSjVHO616j6EWs37EUvS3
QHQoN0NkmDXCaGslxdaC5iH4x/7y0rBvYCg3SbUHTMbeGRgp86rz4gJi7KtIJWZEgBQnJ5uKg4EM
+xyrCxyDq4uLAl2+Z3B5zout87lDFqqaww77UZ/oNUY4w9P1mxovzc0yzADU+OuBH5HMIVeyXx5X
YqEmvVj/mji8pbzGcsZENKieCr7QVhEkQSO4moR9tacoGJ63ScaYFR4/+AassHr42ufYKHigqKiU
5RB+AMuWT40sk335pUgypHBoVTlDuhXUWGF93j5gtY4SkaS1z/1Z3j9TvssRZEAOAINmzc74vM9E
cEkTHO4V/pw++P3kTC8zhsPr9ShHV1Wno3FlyXBcWTIeV5YMyJWlI3JliZBcWRyTK0sE5criqFxZ
S1iuLBGXC97dVa+evMuSgbeyOPJWFofeyuLYW1kcfCtLRN/K4vBbWTL+VpYOwJUlI3Blfgiu1oBu
WXtEtywZ0i1LxnTLVFA3PlejQ6Q1ZJQtpILP7KrsclXM10APIWz3Elq7JLJs44CNs67jl6hUcOoK
HcS/tzKNvgOO4atzMEHFw8Q3ip7rMAhesCsdZ72D7fFP5yf2lAjMom2gaJyNpCJzR29npg6S9KQy
gmAMkmCus4weDoNoLfMs79RlZKn6wnieNIhkrGdPsqNyPc/5tovj8wFIcavyUyh0dgz4/GwRnWDU
0OOMmL8m+NpzmLQYSAYhaUDvQ3iJhvyUzw25X20gb+dOyC78o4FEMe0DUvRjbtpW4cL9L5WQLpsY
l++rPjUV3SejPUbSQ1BfHNA2RAdAvgMImvmLTjzrR0rRtonpUBqqCxYFjrUnTCCo4JJPQcIMt6BD
KLXlIWV/1Mk/JLdL/25NOQPBOJDcvzC0L4qe5gXyDGwqp3iPBFaHUdUjZCarx25y0I3Ld2y2Kk57
gUG/DfYl+4lSpiIjBh+Td0gyyZAUdFCL29yUgJKW08fgZUdkqOBaF82uWm9aZ/+SBFBPqfElTG3H
NUJCve91LMrjqGPuwqcc1XZmbTI0P9ehxw9Y1rSi3g7HdnBYFds2p1RC7wwjoMB2h8N9hgtjhqFe
oVlFn8xVZAgHqXoqq8Btz8VRFgxsqnDheChEc5mqlf1lh0crbJNT5U1HHadN2OJAMjykPxc359W8
XiK/Vu+3u5ahJepOjghXc2jbde4O56GElx1K0tUXIJr24LuUyh4/KDzTw/D6jCwK6yJhSujvMG1R
eDwd6zZntlbDXRbDqTMsuKUk8mUvHz2b20kb1QjPQSJPyStI3bM24EliL4ETG562GVz6OMS+Lx3R
yVttwyTIQ3v08tuv2ccxHAzIsbXzi1agxYTQn3mLGaB00MldqLqaeBeoGB92cB3HUlt4mPpTPJYt
OYJC3mLzdZhA3oJV6DRuVGPAV+0NBFaO6fO+ywzzSLtIO6DbHfjRjBI5eI8+66OD/ucwuR/5cI8O
dr2Afxd8bTcjteZ0tgV4c7QlafrUwN8/6jWF2v1sgo3/pFvZJ/yED1uPBt2P8EVqg0T2pFBQ3PXV
+Cx7u8kNgQ/4yE1zKtUg8InrK2x579uLcJ1h19C90slVPt6C9WiodNi0fjz4cKczJdU207vcsDJ+
HXZDNWjG1JW+R8PjIJ1uwTvkjrW5PRriBwxwExCHwv1fAw379xjGt4WTV/EAeFKGwbcDTpuRcAI0
bz8sm18JND8fNkcAByZE3zCWsZnEKOv7BsuWp+y1St+x5bMGqOrA7zjsjmZ+4OiFLmx/aBL9EQ/a
e/f8ef/C09Cxz5F5tDoxt0eon1sZYlMaTtht4hbu2PMY1NU6kFpaqajnUpO99cap+1Byce9FLdiP
11aN3WrOxKX2Fy6s08z+LC0hN0Dqv38zFuic39ZnduldkrtIWKRH+3fMhCLVdsLSO7kvUe0tBMfa
sv82TDDKXB4QfsVNG0ErsqFH0KU37C10beElzS9q5riFlAy3rOPHoJKSNRwC99iIQW3qfjHsCtZi
lA1mGH9qNhuMsh9/Su17xdr8StgCw57ZEGIfEWnCjsLMat73EKO3R14uHnNWfOyTIB2OyovbjGFK
vUbwTZIUQdUgrBBahvgteFYiSHzN7PFvNmX71x+i7RA1YjggbuKHTRicPCyMNib+q9MHv5ucPGxV
P7CxCpO7CAaR2Y6CyeQIxT4SpbSN8K+uc0/hgRpuAhlcUGHyZ1LQdS5Cti161YIPpvaBw2k8HiPe
O8ulFkhHMYQi7Z4yqm+npxxNXo9NYr601lEh5716qdgw6NSHhfyQOFzRoD4kW5XYtf5oDo2jfTyp
kSSsDuzYzjopcdLuIMLkhL1BTH81dnxktZPuqvdu9/rfffLJJ5I246qq3oIlzLv969c5eh9k8Eqi
xC5Yg3SB+WMxssx2tb8sMeHo5n31lkNtYloY8CIAIiW+B1xyzClT7mQnv9Z/PchoUu7K+ar8G9nX
/pqNo52kxKFeLhFEOU1mbYB6WShbKA7xPBe4oANJBhnq8ZIdEtxA3FBJhIZtGYIyhwDrFDyY0tnp
1s3ReAlpcWvqmSJnD8l0WY8N3VS2kKvWjYfXH8NsVhe4mU6+4kzFlHd7PV8WLK5A8nBxT5LMZcAO
8Urb9iUaKmJG2Uhca7aHNZtjDcuOoacqRFu6M8OIy2C2zOFicAJq/Iv1EurMyLTZAwGJz9G0Sll0
DL66uSgvOQvzCDvimGA2xPESWyGrs1SfY8wlbSk0cjPJARpmAsdIfYaDy5eGlSkWEE92CMSlvLjx
geJUqQwygpKMeRxBxmAMFcmx31oDglAD2sRvJ83uZmXhThlmNiW/ZRjRlaDOkHTB3h/Zem8W2mCF
tYKjmMrzbFIXF5M3jNRf0t+qXhb1V284jU1PQsFgUGzM7UbRz80QNxtJX0XhqOq9pJGcgPKYZjXJ
XlUYVDeFQaQMt3Rtsr2ZwKDNkLDu2IHIHJTD7EshaDTl8fOg1FdvHC/PvQKYLsqVDeKJ+zHVjymI
ndgG2jszRaEnLPmMlwTiS0PUkxrjFi8gzFKxtI5aEvIcOsa7n8kbXrWwl8f4x4BfUB5SipkHTKvK
2fRO1AS4mjlVGVDkf5Ju1hUzHdj422bb1WXxXofNxjVNLNi4awAAwUO9I+goiit1ymOY23jd86US
0ATFYQVVTk2PKmDKvzmCe16jVwhg/HxzQ9ZvkG5S0s0DSYZJvnnDI3vzpifpBlDdAdfSHClcBrjk
LCCmEk1KappBXUN2IKgjC84uG/PMoxZS03baTSnhlGUi5EiC7EE8o4sarG2FEGK+Vp8ULcgQF2Bl
OpiLGta3guXMo3jAcyI+6BOmRCcYBryFMLBmcNAJBOFK0lnkCw6QWfS5ZNIOfpMXmGGVVJxZMa9X
NzMhvCE5dOOmlYCmmPJwg5k0iJhiRn5l8IhSjl6k8LidKFsYtK4AJUtLUTMyuzsvig2fiF5+Akzb
QcySEPDU0LE6zLDtQDW77uAYxYkFWTkO+ma2SXENSfDGcZu8yrDARvZBE+fjUY8rY4w2rJMLiDCL
FDr9mMk2V2YveOijek1iz6/LS6rhIHp/NG7SdZSAJQOQ95sdkVmkXWV+I5Dh26Uh+KAupWwu3qaz
tQ5yNrbkjBgWyFTWyKBGmew3fN2O6A5sc0ZPngS4ca3nN4C4UNKMvC5OkH2wTCY2bVCd4vKP491m
R0go0o59R4wGtxXOtAQsDzszgh+EDOAmwFTmSmCgumMmFIBJgcHRZRrTDpRmZpIJF1RRV5QD2a2H
Y70sE22qmdXh3QMkgAmUGemyNCfErjKnTMNRDjnNtiSnWMMeFhmNkNaRhOSkjsSImXR9w1AAM7zN
rp2O1HP0x4bkhnbYN8HsaWYpVHXdHTu+C7ZlSg3Nmr3bjsF/BbAP8/H6GwjaGGePNpQWFNOnk5qi
WKI7i02E66SJN2+oS3PCQ64OHsCYpdtVRQ4vdEL6EEjMBJV9Of/Q7jWZfZeRQtC2kzqVYBvx92KJ
SQ5VSx+K7F+AwbcFhB/HxIRZy54z0g7kGaQ/yXEJ0e4c2bJoCjWsJn1s2OE0masAQZ7F6iBqdz1/
a7F6xmNNgVHo6Zs39utYdvjwzRsO2s85jjiv4QtszsPURHe/wZH0nBKt0ekvwlpdUPLqj3tIbW9k
tjB10j4c2HHzjPPOOxTp2HQBLZQ00QopMArter5bUIJbAAIn39QNFCnaQG3aXSxZm8qL7KbaY34h
EW84rbZqnXYtkWFMAbWsKAMvjIS2vCodEdwU4A6RNb8O7AOHy6ILNTJKgshRVg+o8ZnamHCUSJY+
EYJgfWg0I1mWccfAkTYcHDYaPMCS5fQcUB4Upo3EVRju3a6hIPLY79w2dajXSwqpUcxIQFgXuzla
vrlupUSWr00bpZENhhnGZwGRDbVGphvKWRgM6SOoKPky23LBH2/7BucBHS2U8BYONu1yDlBSIugI
Hdh9rtsIZJfYxHDcJg7MhCuVXorrXYABgWQmFRuDvtvPYBk+2xnZbll98Flcyx8SwbDHA+ibF6s9
SneL+dbsAXgqRFFOfJNmkYhU2wNZq5+gvYlrmrORQUZyNVI3KmDphJHUzcikJ8QmLMyBaPbY8mRX
nZwXJ5ggzvWRC0HEK+eySd1lcNb7AnhVw0MZ9m+ztKmoRImolBqYoCbVkNJi+csmIBcd1YSpCfgO
FPPNhJhbvLszW6PG9OfEsHr6gUZl4uJrgIgahphyaHOHyIcukGATI/kIFG7BTY2RjxHsUBZkeVN/
Q2wocKOroovR8ZAxT5Aux+C+CUGINA+rQN7BtpZdqahhzCRbLPaU8p2G+eYNlO1qUFaufcN50lBy
2G/e/Hz0Fdx1iJFCPFcBIk5JkzEO64SKaRQW7q24nsPVBc8erp3G5vTijU7cbbFBTeoFxAsyRL5O
7i3J6iizNptndeP4BDyvEB1O5Eho0koWp/Z6WzD3SUuijLc91khpdkX5Cbha1ONX5pk4TlHq9sSo
2FFAVZ1rPwVUsSnl2psHw/Knm4vqTevedHO4xe5sEw9Em8Qna4rQUx06CClLniX3Tpmdba8gU2N1
wduENjVO7GNcJPJgKcTLb8Nhc29E5mK1BlMe0fCg0nfo7t1iGsElfT3JCHV6EBpur0WvD1eV0EYM
qkOSHAvnvzZslSiM7Gppr+jYjsTwZVfV8uNBmrrBlCnsUmjv8MDXDtxH2IUnumvU5hgkzEP9soHT
r6RgRYiZYZgg4TReuLAAFPOrEp7d9wkDUvoB7vUtwYBA+6KhpvSPDSe45V9WZAL7YZKWN8UHiqgD
wZPmG/cCG7pXbu657KNSu2gMI4VK3+8qPIwgRSl82tNFHpIEUmPbPJh44C/pnrBZlZe7q9XNiPR5
5t1mJ2nomISpJkgYM4PYr9fg6uaI68fCuXJzsdoXRjQxc3bcYO7ZLTDJnEHoKDg/hh8NFWkEM7h+
KGqLhUgDlmWdOjhcKlEDOA5vTJdO1AiGVmElCo3eTTPSp3L3KDkhTYgJuItyaQ6BExAPLqv6hrPA
rIodXjsb3vR9UZ9XBmHR6+UCdbu617YODx0xMokZY0guL6gl79oH7m/huhXwbQ4H90LyI1pQcCtq
cPFZbk6zD/Ma2Macs/qNME8miU3AfF40CfbTnXRcW+kIzV6cZ7axOaXdpJXkEDzcNoLSNZ/l54YU
wJYnSQS052bZbTwRtlSVDjnKxK4khhJ1kbtyNa+x9c/soD+GXLusFnh+fNxDknth44uCAqpuQNWH
f2O85VUh05AdZxU2kCE2iptTCNrSwW+g3KNEnxjvBY8QifYKaOsCJ31kLR/eN87EnqRYsgEO3NsE
Nk6c69uz/8ouMSqBVE7opXlCOFfIAgiHMPAkC6BaaV2wBQJWauJG33JEA2odQhrknQ1K+cyWj9u0
KgRq1EgbHHoS2hllEZUEu7uiLlEWXmG+ASA7D8dfDKVnCDdFW33jFBQUyRAM+Ja8w0EG2lZ4aKIp
1jlvdB4FmnVwUKCltt2RCyQUYLjL8qKtM2aw5zshGfONgrOtsCrfFlm/eVtux9b4sJ8WgODwr2fb
5XkeA32/hRvL5fkYEgnuarRNg/rv3r/+LzHyMbTw7sPr//e/+OSTO9nzf371T8++mz168afHz/7y
/Nsnr57Mnv2557boJNtvyh0RShbKQOqH6I2SQJz05BSxeDaD+NmQfvJ0AEzz4AzjCkt4PghMMJih
RcJsNphkdzBmwX5Dil0Kpwv6UUPBB6xBPVnzpAccW/gD3HiVl2Z7sKkT2Pf1Ic5TP7MZXKEFSuIN
5n9IrTFWrRhaUPhkaLbnrDpf3pidtH5iWPacpTji+4dAfKAD6BtDi2MDZmF7moFhAxJpHuqOsu/h
EELr6VEGZA5sB6z5JltoeK3Y0M0zCbI86/WCmjkkRoPrz2q7x9xRPIB7zl4QhG2Iac4zGQKnYEhZ
79316/+a419z6Ld3N6//n3+goNfmLQ4BVnt7syrPJwYyW3NOynrjxSjk/6q2qEOAqOumUbxy5duB
PeZU3pbbt5dgjAokYDX/282JmIU0+3NR9fdQXwhycyHcNQzJ1DwB40lANcM88ArL8d70yKCSLJLW
8+2W2HHDwot9FGs25htk7B1MeqpVjHdhMQFvNHr5Ypj9U7UCzPpzXbwtVjhfbMQs1sP79784eXj/
wReM6HZ5DLIPHoy/GD/83aAnAbntKhIkAIFE+2GAjLTVrJa4aTRkp1ku3qIdqA77DeFGGVynA6k6
ACdObnr8aFXOG7ai7ksJcG2ENZYfA6pnMFmqMaRzFzkT7KymPw64gNmd/PQTOnGYARkC1Ex/5G1I
RlVsYQs89GYJVN4gjV1eKDhodkvT1GA8Mw8T+DFKNrCtmvIaDp1NBRH7G8ELaoRGj83g44RejIgk
D8zWMfz7IMMCM7iTgPNoQm+pvwEvFzSyvZmo5RuMzGAgRuEHGD+KmLvy3EiWu5seD5X20MnD8X20
a5hnF6u5ss0dAV0AHf4clsKbPm7Tt0WxzSoM/H5hsBQXXPpZ0YkzQAqX4eDwceRey41p62eiIv5n
7p0OKkjQjXrOanuC6fu89YLkvtQcs2/Q0o9WgQfeVNXCwMlqtgHKXHQiH51jwMCMB7RGyfJj/jiR
QqreWyPgDrQGXdeDj/A8wVKq1jeGrhVLCKU+iGtd4EdUw6tyVPsnwR7Gd3/SvD0G0iIVmshrNYBH
25J238Ar6V4H3Zkm6KAIeny6KR/zezcRW3jiPqu+0eoVT5dBqo76HAwCKN5Ry2zKpda4eb/5sBiE
awVkFL9MXr7f/PXxY7LCew59+XX3tVppr675ApVbqqIrb7JbCon3LfwbVjLNPdrDdNvHit99EN2R
K1xDpLck234GguQJ328Ck/Ho+VMCJ3w4AE7pGIomdw35tyTLj8lTcMJlvHov8dMg66rHZVStxzjg
LF0Lh4gl9D6DG9xBVw0qoapYLhY04INUFb+EqvoK+FYgyoO23lwJVQ0iazfkqTRoA4Yu41et5x8W
Gi5Bj6qEqrcFwwRxWW8GiXpBCVV3v4lqB3WjEqr2zPfOG3g92yijk6CUbqAubAjSmTmfBukGwlIt
LQxCoCVbCGrbMHMdtb1iunocH32QbiBRMNzsgEnA49Altui1GlAGCexpq/OvY4gnF03tdpumeZAo
7z6qGmDpCFbLg1QP9qMmsyiiBPtHKvBHVXy+uYmJiBSHj7qsf1AHZf3zubGYkRqGjxCGef5bsWl8
XJKy7qOq8ce5OeCEiAyCGv5HVUvlhSl3YS3/o8Y3w4sX1y0A5Y+aMEBShFlLcf7obwaUQpPraz/q
Crv6hgSFQaKC+6ixDlwhBy1rQR91B+cGfqQ2HcQdqI/eoCoIjt+yD/ijLl8258CypmctH/0KHR3w
R10e3FrW4A6agpL7GFQBlhGkxUGqiv0YVNKHR1QpPDe8EyOskCL3sD4XikOIFg8/ao7C7EQQJ5MV
7Ec9pNaViJZBr4FXUsHfEddys93vTqr9zvxBrYQYCA3K6jDbJGxtlSKky/32ImCbbPkxGSkVEymk
GQwzzqfPUiyQqseFNL0BQIT1wmpSSHNPXz+mj4OOeq6Q5u92y7hqWFMVSlb95uvB4aqmkAcgusT5
a12ao3vgV5Ybng/4cRKU9U6VppwhsUuMPmhFlfX5Mnct9aFcIiPf0kKirD6J5iB9b+tBau3k48SW
CpG4WYOeAp0Yivkmu16vPrvarVeZkwcIpc2HI3Aa+zVFTe0UWkPLgyyBnVwFv+vVml8Oso7y8F0z
E/MPncXhuyr+nWg6Buni7rumV3i1PWjtg78HgumqCuRiZ9RvpK0sBx3Fcr8w3M4A12KAPhxr/L2A
JH5ouw5OfhIDcHjEQpguUqsAsjzc9g0S5cd8DTixhbRczoNMVoTObAGfU5LJDJKVdAGPLynwejda
HVWPvgfnVWely0QllKxTaGOnFYrer75+9v2rQXsFLuBXefLiRXcVKKCr3DSINu1VqIBDtZ+GvXd/
e/3fqiAMfBP57sfX/9d/TzEYlmWzqN7zfTvcUXCRhrMk7MVTyLkH4k0vKfNnF3ugqZDVjRM3njfV
ar8rZnwRwa93Isb2vFuJkU3uKAMUbQSV+oa8W18U7/ZYGvRa/A5E6SAvJF6Ac02h1S/wLhD+/cYM
/Vu+rD7GBf6yrvYYLZxcrg2i4Ju8z3pKjsuBL5WPeP/khCF4wtBTSQPJJ3bap7yxu3oPOQvZOZay
sbmywEBM++GKAHGGC4e47aUpMu1zWfl8cIwQnqFtgGpsfSh8b7y7hoSIoHl+P6+nfSO098MB28Gi
i4T1rOCgSbZFMormFtNzwKENb+WaxVdTU/GXot9i1YDKMMx3BUFvtjd9L5gcu4mzj7kPx2QYua+p
yF8Sbis9m5cmt52axcsHMF/MV183uwEFWuKhig0eGK6VO6iFVYdDuMP5QYcEvBovIKJYfrFB6E6d
8zveC+U+/OI8Kjxu4PpiOEqOJtwx83LFhXO9n4aTtsRKwFGIQQhF1mnCNEu6CEa9dz/9gpK2zdmW
QYcgW9FIuMvdhyDDC5qAlRvVSBAO6gO+xdRrw/axjVVPpg+bu4rgAYaxuTKSbQcJOdYQdEcZ2ctO
KYPPEqprjwkcyB4SNKtuKA/w0F5k5rrFcALYPIAVH/yP2J35hn9dzCm2t8XhRotF5r2QYdA1obLi
oLmjJPmT3D3aSkLdQzNW6i3HZeEuT4VxzLnk2AAB7JY5CtEx8fmk5vcbMJ0AHsnl7gwClHFRl3hS
BoObKIgBNgefBACdV2nMH6LAZ3FJb+aIqWbncTgtHLH8DjMd4EdS48apG/2AWqYpLydUOi+UV163
/qnM07144KfQIlMzBardzbawOQnDGHx6l4fnby7TlfBbI2k9SN0ChA4RWlb2GUrXj+l9kB7rxZPn
z168mn3/9dNvvlFV9OtoDYTKGCbCXbDY4Q3HEAuE8kQu6un9YZwV0o9ZLinE7YKBHUx60UpTcz2/
ztUajPDFfQFKdpI9uD8EI4jXr1//IWpAEzs7ldNyQpXP0ilc/EyWd+9/vpTAb+WnD6jjIPaxG++n
0wAj2rHLdjF48voRGNxk3z57/OjV02ffZd9/9+fvnv31uxGCprkisxUMUIVsQ0X57AUZB/FgyCoY
zBG++uqrQSdYBKPpTsTL+nkEeAZ/+MMfKAPCAAGE/XbDyA5tPB4PQqxIk7s0tRu2wBUWgXfFmDRN
s2V5wfftOc+3nVQGZOmyMvPR+2NIMKKIjkesdQnnw4wnplLreDdeuUdabXbfYfsMT/vff/fk9fMn
j189+Tp78vrxk+eAOpwXtfMcwFif3qg4+OFZe29WOBnbGxQefX7vmKEzXxWyTClmqCthHwcnbjv/
vbNWTlnvKEbjYwPu4CT3gjQ3xO0RA9I/ZXw4YyKAZfAY8tkdyyt642L+Bwi74n9a+IkWLuBO1hgA
NRc32Rtf2HuDYSuMiITe9ONxkKzTCIIc3vxHl0jiYi1xFIWbnnF8JLY0VYLGBRr6hXQA3B39nqQF
RvCLNTDa6h2acE45r6hpcQr/3DaW8GLVME8oE+Pc3G5q1OOsJuhAyjwPXATutuIwi9WKX+qUx5II
2zC76sREEgHL7Qeb3NW5QqLhiIXQGTlZvS9CCZayEwH6X6zmZkrS/JNvv336/OXTl6OAyzL7EGQW
U7BcYEJ7HvA0nA1fThg40d4exbErZ9WGbKimYFY9Ek8COMJ91BbxLYnLEsThOFyG/NMOPmO4OiIW
bwq5vinAki92cozQ/crGUmZhzganGlNf8tNbgA5i4jfKA9ryXVk+/DjbL7VfAo1Nnhu0OT3DgNU/
F7fFmp4D4QPe3hJf1PTRjtOZxLO/gQ0BQfFm1sXiar4pm7UaMgQcUFtGTm98rwBsJTJbEKzuX+Db
3CLlqHOf+ElgyHp2w0MYw59cYi5wJAhr7HiQDOlN54M1IIwiijCH0YBRM9hwZ8V6u7uxKqqow5uy
WC09qRmbId6flQYIDIqIPOy9+59f/wNb7lKSPmvgUK2W7356/X8/+eQTq0NUJqQjsWAipSDbENXO
9rVuCgxPDOcWtHxZbAL9oUpZS5WS4aFHqYz1vR6cHBxpsbgmTogvC8d0aS50hxm8doWBn7wYN5U5
7kcc1hkTy9Pz7hw2+E1j2ZI8UAdAU5AeESIOce//qSw+5OQB7Gzq4WUG5IrMUdkF4OlF9hhEGHCk
O6fahjmHsuiGt8ke59dDjm2CMYrq6vpGQoFQFETrc8tvr9n+eb7Y7SHqnDSKoWCxOotMjynyPVw5
cYIx9DbM7slQ7kG1xwZ+pKQhdyiKqUAB7FbVhzEF4HxflRTDdd+IkukDxYDJ3sPEaRQYCy0eT+7P
/jEkghAwDHsSVo+nl2jpmoFpk1k0fLeG3qg41wuMFwkOZbZXsJOe73cVJDNfoMsGxqQ0mHlNlPgZ
mnxTZjc04EGviqbYkf266gwsrm1sskp1ApuBcVDDsMQQlQIWXi9ZPkQHG8cXd49tj7O+YwOzmakx
m7mBqLiZCuYUhIkdOeOlnM2grGlmt9/aWwgOYkr00uAQFXpb3JhyBFUz5j/eiJ6ZPEZttCfVOYXA
4Bhn4JgtMc689QYv5kYNZU2ep/Eq844xUnuzX1y5RtBF1SywDGRem69ouAvxANCzBEGI6jyamkIm
9Bj4BuIUsWRnNh0wNENyrWWnV7B+Jg806Vaceh9TD3b40wziy1Km0FGG8WwRf8HBx5D4EpiaArI0
4/EC7r09StPMPVTooZtqsQTRGBocyTptMvzAMWSFhzAwAiuumx1eCwD/qWH5GNDHkC4JWwfR7sjv
8JxiWPCyyq5a4cFNHkwE4SR6savEskYHPXCcZKeJOR0ZDC1NqfA82eGuuwgWewQtwJ1ZXaKvh0JB
miPk9DalrRJD4hVDuJWKgrJEkZ3wARtazI2A7zgrDWmdpX0GoeRneV1VOxwaQnqU3aNgpvfuQXqs
4ByBZJA4vXFUOzg5ZANjhfCTrYQF7K+eSvFm49k70OSH4tjbjJS2soXGqWnhrBcnHcYD+aimCBdk
e0Dyt2FKQNbw5cQr9mAO8g/d0WQ2ouMbYBFW2VVpKLTZ8TcIJqLAcHToVuoC9xf6tEl1WqZBA3Sf
z8lwuJIXRq8XD1LPwsG/Qy3A1R3cdAt4y+SaGMH6jiyl8W54gIQQFzYuGwfpSZgBEJh2KWjmsa4r
f0VigYYr+YjAjpcANPjektjo9DEWRJbCjRrGynUfj2WPnaUTDafygisH9hCkp2cKfAHiWQh6Vz52
UFMMyZCDaax7m8f7bujLBMHcItY9SJTlJjyiszWhbXRlgPCoGiptxVVVLnDQPECFKSGOhNlauG57
fi5vugoxQLtebHKuPwTh+kGyFS5xev+sA60gIu/6HINGQCRhjuQmdVPNwp1HPvjDgCFnB0Iirdo2
qK9r23UDSOhXDwc2rZU3XSfAedtzyEkZAuxYWGENUAE/oKmA+WNquoJAgv2rUvPN8SmbsF1qKUAN
kudUxZ57uwAnSZYznoolPHCKOXLLVtx4RNKYOQaKAoUTYinm2YU5ulEiFf4YLU3ijDV4zWVB7VLp
8e7ChCgxzIMkc5ivKl3hjuFLVqsTUAkjnPabtxsIKuRCr+Cwm/YDDUpy5pcntlLuL2ZYfqzuBAdf
wvCC2w2PVB8qvCB1PEu6ahTg8WKNv8HFvB4CDYbX+TBNQbtT+3mSbyKNTFC+K9GMr+jhO3keREBr
WS7WaBG9gjCVLgOYam7oXX/fEgvQXgz9go9HAtYyko4XXrzcrXf5qV7Rs+EhlDBD7V5k6uX4BfYz
B338hbVA35R+5rwWKplQtOThIg8tycGUVB7d4RbVQSapqzzqQXe+aSSwufQGlI1PsmyKosJm4xx4
1yeHMmYS5E3fYAOWq/3FGfh+5bVIEkDTO43+yKn/5z3V7iPCm2uQUDXIs+qBIY8JtFNtjv203/+W
AdRxGNKF5Hd2VkMaHBUL2Tqctt1SYVNddFvfOj7mAGpHbGYuetRMmB77rImMp/bS49XJSaIGRHRu
GC6gfmhNzaptzHdfzRsro43AuQ0On0GC275Doa8We1LgAfd2RemiVbi3+PadgO4jkD7m0kYO50ao
fBudNQ8j0ODb+FB6mIRNy9r20X5CbEx68f0DgNqLUhfAd5xMPteaIhfi7EGH6hUG3YPubdOH00P6
rEe0J29xvxuMbYozppZ/Xis4nSnBJpXU9DCRiAlFG7E4xKGpXYUDkg38aLM8ZvOaYsduXJsi3be3
whDuMADZhciSJdmwGLcT/FYbZqeSv4e4m9gLFoPUqvcO7mBVOLGD/d2b2HKDfJB9mrHfCCV+93LM
w8fhQJbqWX3MSj2r//+F+iiLZMDStUa9O6Dg+J5i+bjbnum0B9FW5isIYotwRvV/I5pg8yRZig28
f+SrGcP6GlwjrxHAuiBRKDuOQLmnm/cQatSUy/81KDUU/5IxJfIZcj44wiYc6aMakgKnsCrGLNIh
eHY5IX7p6Uzd4/AI5Ekc7gcxKLFYrlOQJCGJUD5IA+92/3Uj5u0OJjfGn3esYKpYjdS/+aHSIz0v
o7VsXodRQ94NfywT2+E4/H+0XDL+5yHP8Gl0xg7Vhni5P2+reNJZ8S/7VVvFe50Vvy7ft1X8rLvH
qnWOdzsrPq8+FHXLUNvHmqYDtEZ/F0KAA04SAvgyjMq2EgKcZrolgkBc+jZERe3Ygxs2SXZg8IMR
T7idjBzdHs5gMJKZqPb+nnQJmWZcp1/ONNPM/m3RN7VTnCoL4umD5dxREjCX9bUdVXVYraNuhBSo
2MIIWhgOfqny4nanYjiKqZZl/85qELalShADNNjyyiXJQDtv/H5eoyWl3owXEMIP26Lp/5RYP694
PvB47blltMEeIFJMqKWfk0L6z+RHnuBl2cMc0G0cuShRnGn4BGM5wmPAtRY5K80j+M79XTpvpa9m
kqImV1C5uwQdHVwXAoj9GvDmlKud4QTSXL+Mt9X5itfj06kdhOHdR4OUqiOSTObtZDsNPdfZ4G4z
vduMUAnJYxzJCIZHdU4tBA200H0xbYYg8rMYo+zr9A6xn4fpWrdcVqg36FxM13JiURUM74EQ1r5s
SahhHTX01AIKuJYt8FoeANiyBWLLnwsyMAbqBtnyaJj9LKBhpeUBsKX1h/ndZhhrD4nOas0h+lXH
orS/KuQIZsbUwJ1zbgYf6qeFvNLD6eTkwVkvAYaus/GQ9tDw0z5B+tg3qaxmQpipuxBCn6yqPd09
8g4p1X1tJxPfpx5gdg2t+WHz411Ad3j6CalObXjNUZa40CMm6E9s4HQED8RFf5tbgOQBjKWJmtKp
a4bTfT12EEmOEs5/kzv4aC15pnmsvvcmrx0tOKuEy3K/qNaQBlNsiIUfGZEFMka4Ltk1g8JyxwuQ
D+SCJYDVCO4EwCtkNuvT/d0gwYjyvWa4ilIzWsuOqzyYxkymYJdT2OLYR/V2q/3rLnc4Vsw74ek5
1fe/EwVARc+L4sQG1GSDDvDaozjcO874OJ3aSwd00zjq3gFLHmMDwt78CWJB3iq6XJJY3MkaSGhY
XtxkA07ghDIHR3Kn52nmuQtD2BB2trEwGVOCZjq0sJaBptSmSCJRGAKvvjvyAPhhNISwuMQGUK9O
H/xucvLwTM0Mg5hJSjUgY/Mms7P8UlVVVis+1aMccgcNe6RN4CHCYfU6r1JUB+GMU1oMooRJt5/f
Xl1gsbq83ByJ1abkMVj9y4/Ag3cmqVU0SA5/MNKEf26kbK5OIPQRI9dOjPDnOMe1uEqQ6rLxAOCE
clIEo71PrK9XRHVdLbvNtEwXZ375LsOsI4yyTAspm6zEsaINtP7OLIH4zZbNYl4fdb/LRf/tomSE
hzxHXPYjJgjljpkdGtuasl23n/g9goB5OYyKYUAenj+ZBHMKNBsegPsOZovdjiPju2HPBqiwL5N3
uiCPgV/MJOvTJvb2r6+xCKqRGS84Kza7ZbXfjTGWZW7EO4jqABwBingYYkGZPxfi8OirJXDOFAWr
YWjrOEpyWRsrbeg1eU1ijFv4nT8YRgUkkAVGe9e4xoiKFszQ+QCQkaOU8DGllGvEjPpmwFQ2qU9E
fHT6RI8eJBWL3Xvd7XMu5/me4vvIg/RUibsRViXWOXnsKsVlKF4TBhR1LRjgvG4pGMEkE2xowVbA
+Ts//z/DW0KIzc+yJxsD32xbGSYGkp/9/AYRGy2najl6vrOi/NUIRA7qMOl5Lvo+CjBicRsYDnkw
VDhxh7iuPmRFoyb64vfdi5W7PAaDzQboGK+pIZR+ZR6Hk+PR3kNF9l1TVOiX4Jh4M4VodivUjoIz
uIiT7nSVMC8UltxUdKRPrVJkhpz38xBJR+iPDOnXwGcSc6AVyLAApe9H/F0feyTLPvByLZYl5LNC
2gae67tsWS5pH5nmx1n2cn95CVJvtTH0MdEeuLeDEB3lhJWUvirzLZium8P85GRTreeX5WLYT+3j
2iWRJYQGv6zmMsh652MXf4udiI4MCMQYzUiKQZum0O/uHCP07c51gS7svMOjd5sQGpBzmE5o9Ja3
kcsEF05Fvef0frva9Dy2IuZwDPHft4wq1+i5Fm51Uz6x29FRt5d2wbqGhKzanZA4LtiWKG7kfdsL
L00BCLI54VStd2s8L69HzJcyAK7t2v1CVgCgwIevGqREPPMEUwwYZaRKFaqLQpHdPwNNaT/LvvxS
DEDlPB+28AnQDOlwVbykHcbszJhXSPMJoToZ41Vc7zyx2RfoJrI/Bp68f01y6fXu9MHvJ2catvCS
uS1g9H5jvqP7uEidFB+RZIdsQVf2Q44rzq7QLuzFRR47fPzOfr1MfP3cfr3KrxM+dRtwLCc5jHjD
vukjuwdtwZh+x3SPvyG1zYfxy/yCbf6hniGe94MyF9Tcpa0Lkfy+0CVK+B61DfeQ5iVWvu9/UoTh
4aeff/qFwa1VNd9BAz2OIpP3kfT49a5lXq4UIzXPzuBFVW2bAVejEubwGmUL09qDUfYw/YUGr7uC
yISn0KKZ9xnO4Qt/LIOrYrWqBqfwHVHgyut1cLl/S/exVwgF8+3dv77+7zjUS5A37d3/8vof/isb
5wUSBMrj/pyLukDSFMXFfREO4Hm1LTaj7PnT509YYKDGc/M3zum7h3RihiXn9A0VSEPFYo+HNuR6
G2D0ZSO9mR2HuSh5xD2VWZM7GCN9wfqc85SV3wtK98U8xNhGR1HZVCGqg2n6fbmEzI6YEnDgKc03
kPVPtOFYGTqTrBNWnc5djXVGZwUipiYQ7AFRxXYJT9vCDH5joGG6+UzgwgnhrRhniBjHYpCi+dDm
ezf8hmGRSg7laHsprkuIvzP4/tU3J/84cLEPMPIaj2yqhjnGJYT1goPVoBcF8/LYITM+SAo0X80k
D32iEMmcU92ywQo4gkESCd8T1poKIwAuEGXBTgNUgIdZf3VSGmBIlkWglXDsfZl9DgGSIEPtzecQ
bcZAKwsgOaLPDwEy7RoghvFMgMyEOQn9O9m/UPR0yOgN8L6avzdn0S7UDNlYNInwCS39sdBu34LZ
Mq2irW2+m8I8yRyhJ3BL1A77UQwBglzaMb+8dkBwPNwOpYFXK7etVivN3MDnkPd6Qvu92nyD2zOn
UqNM/iIaClZ4XJd5KZqjsBGbBhVh3R4m2naGyXnTXboBW2567DUTaMMsFOgh+Kg6okW2P/2CZgzm
u/k3UKThMpl//deEBQgQN9Nm1+4J3w8gZli8ZYYqAGT1cjWTkR3QyI6BkhZfk8jnkdI9RmjDWBzA
WkFW2eCUoTUZ9HyyPQ3XsGc3ZfABIh5QBERMjht3MGipJXxSNuAR8MbM/Z05cuIAww3DU777X1//
Nyqlw8KIe+/+t9cPfi/JjfHO6jmmUP4Lhb0cQXCvcoFph8v5qvwbsdsYp8cIQmbbUFprPjirphef
vBJdzR27dwxhh0hBXkYHTHRLEbfKerFfzSUCW9PrMWuCSZltXloJODvuD08nD8+yryCn9gOw5fhi
cDbKQA4yq79a0ZANgA3XuxaZGLzLII5TVUHiWYg/v64MyQOJd3tZz5e47AOUGPxeh1ad/Wp+iTkW
65atGYZim+3mlw9hnROxDyk/T2jaRnYVEhn4vtsYl8VOBYmPt4Yd2sv9ORek4O9D5UrNLtIcPluC
4s/hfEH7IT84izL7Mx9B+B1h9MTImQKCMKBFW3OqDWDYwqiRb551TGyyJu0YQKnrkc2OQNHP+iTr
redbQ21rHrEyWxGgGRmybxh5BcleJJOeej33Qamcnd5tzlCXaIM3c++jrD/hzgFWqs+znqe1J29e
NkfY0IScxXtHZO0G3pHEyrptGYJqchgFIaJsB3Z1maAct7oaB1XocUr1GNmgCdwooKePRaqH+F6f
esgHnqB+MLiTbJtTaPss2UNnXKetdQ8AuDTFjodBIKEf4V61+5EevOoM2ar2gGrftu4bKufrbnDa
c9wVFBQGfghhm/SHXYGN4jsD6oFUar0uIDr+pqojcmZoRnuajgrifiPihwkfKgwEDH/8DzK9+WWj
WoXYhLbVewFG2gbHCSwehwjAy7K+CRcmtSSu6Wglx+FCHk1u/chhue1jpCDwqaLABHDvoPUjayYg
D4nK4RxtorQf2Dm0/XCLLaZC60JsKJAFgwhxrgBVDczL6RPn8Up9Q2UoGAIJ7uR4OZ33qTmKnd0f
JvuaLctmx7reqM/mam84/w+bxDfM2D7N/sn8eVGs5je5hQyc3hAheLueBlFDltWMwiO7bCP02wva
0pLjXh0/EGDZa2voUYZZXVxChs0a0Ruak874p+pOaVt8X/++bUVqRQAMCwCXzY86rDsVks2AQCcM
5ojh27qAI8e7W9IHgkYqXFY6fxD/QE9zkgyf5a4dSS+Jf8zKSNQ5GchAuNkBEsW8XPKnYcJ6vGxk
OsUy11MZpqwP/hOckKwMp12WzVcQB/Umc83A+Tp10sIBdxNOncM9R+BRQ77jdkbel+7M/umPvEaG
7Rv4VCyr6aeiNpd0y9GNLbxCPS8Y3WV7ShHz0QdpeEPLeJI6m5lsCPMSTi0+tPxq5v9geHc/Boow
smBhaFF6v0kiNaPzJtCp8wyYKCa9MCzBxGEZyNILRPTpJrYSsQMncSGab4Lvw6h88XZiRjCOlU0V
p7L4scXXsli1oYyD1Hy5tDQ0dykGomNDysgCYiFlDL1pIBq615Jq48MVZGrzWwrMkEx7LpKmdLat
tmE8KcyocPyZpEG5WBXzOqrsnXi2jPJ0UwSlhUImSKKPIsOuTeXwV2OHWV5ryuPNgm508xghBEen
ULc1nqS/Uczy46koKiJzOiK5vyivp33OiNcPkQFqjGeuqq5Ef4aeBMrL06KdUXjPBfVZ+bbclhfr
soEg+WnWSocFMgekhnoAIq1E8Hc3qTmgM+FIIBoThNClntFUxN3z4v2Q7aqL4bNOkiFCpCDUPr/k
hV94pX7Ij7KNFhwlG7W1QVycKIrAZkud/ZLwF4xhAKcNNHbmBDckRG4X7jgutti8t6gAZLEO4AAb
YppVzdgULOuKeuZiiZMM98+BTWfKyN3yaBAdKZoKSKBoGGWIx2h5Z/aA264ylf7zf3715OWr2fNv
v//T0+9e9lNxqggrZ4IBpp1Ut4Zn3G93Bpka07hZYDIpCkeS9BnItm8vzdlPdgT25gqEVWpqRm2N
wOqSVINltfmu2n1jo+sr5HiKtdvxA/OIGbg3e7joYYHAj9CCAYij7vMBodCDB6G3FHOHxXacSl4H
7/QNfIpoJfjM0/8wiQLxcg9JqpvxeZ38GPcEKqByE+T4S5qDW8bC9L6q5ss86TSeWpoje01KUnKK
56bXJV6YRdypMnhlTso79gJqaBEVWECyF6CA1r68TtHNdg9G5h8Ma/Y3Q3Epmjjq+h5MzuJzDSpg
gpuTbb8lKoDrHsdo2sqhh+QAbQk7Qp/YmhceQm2qST+Nkqbk6ecxHt3u/LdzUDyrYlM36TgPSSng
5EEHPx0PDGbe4XMcEyeokIColziI5GWX1zbBGtlp+iV5wv7L8QwT8c5mKdJpR0Blg/ZSQ+WCNFCw
kFditmEBldBkPhqZiYkJ8yVGYMp9kROKHoqEDWVMxRVuM1K5JWgTd0+lA5ZXnS9xh8cdJMFHmX9w
xMaKQi5D2sgOJlfaOnTkth9V5CFB4wwaTXAo59W8XqKZUr1POhYcfXCZuXA/xx4jmhXy4coNweEy
vKU3RLc5VDf7yjgn2iHJez4AfnZgfW+1UeVIs7zOcHJ4jIORr+uztxHQ0rZYZpZ9ZkO63OFQMV43
l8PhQanfEgjchBEWtO5/2NAO5UWuU6kVtFqladUANF0qABeKnn8HKSly2w8EWCNVvRVrDnHoShTy
pFJMSXCbSwx7AeOLxJgwxHvzAZjxok5E0hNtyEYmfoRvG4RUVzRUGIbYufVIS5FWpkbFPYU+Da6D
KMqTSQU/lXkKukKtMBhG3KiZJKa/SbUIsLx1a1Ap2ZhScKUaS+bATfe+gkzw8Br6SrwWSHSqQRDh
JFPEKr5BVEcrXM94x4qsOgyLNhzoyj02kJv5C+RCgNhCOcfin3JCdd64rgXZsqf0cDaMg/5QJ1P6
M6K0ReQeQkbSY7K1Q018zx2JPHQ2IIBDk4bJLJ57n7R3xciwgeyrgo/x6TAjyhSeH4baE+FqTs3f
s96h00q1p4bb1awrdiZ3SRbmzjqRAWPkNUxYZqSxyqWp2JK5F6jd0FXgM0mb4DIKUcN/NSL5Jdy1
/JUQLE+dKtDdHG+oZEc6y73zAm3JFlVdF4vd6mZs7fZaLrowC6pN6u1vkBbrpbBSfKawHhL+xB9d
enJ+6rVdwjGUBPd9fAyvUbhtStfGm45/BruUWjNl6CHlmZjSgqrOYW7u1+FkItZ6q393yT7YZjZ3
Kb0cGcqD0bEewHCU2VcyjQhN+19aTDR8gsBperf+Ci2ytDEWkw+lbOadHAx1YyickRY2YH0UHWnR
eaVU0jzI9HmGm1wXSyilHf1RNwT0ghdymAqsrc8HFDfUIdZvCaxd7hD7oEIOCbhiAwoNDnUYl2CQ
Au+ia5+QlYR6rWJhV+uQ8esa4mF09XCrXtxNHUI1ojEMuYMR4PoRtWErZ0ONMLFMPz3YGs1XYKx5
O5AVzlm9iiwpfE8kck+f6K7HgwtMeaO6tQohYZDB1UXTuvghuWhfGpeeKiWLHG5HS1E8QJ36FjJS
TiIxWMB6QZxpDd4YTbHMg2VI7J0kv8p95algZngOv9xV26c7DgKbru/x20es76+JzYLEa3LHm28w
eaIgtHfDwNRIn04KSpZ46UznEOKPQ3CVG1HIW4J2hDhArXpB0xw9t69TQkFatHEKOpvzejZDToWM
hPppkKeHER5IfKIia2jnqq5ClR+HKbin5DwwDJTxOCCisEkji6mc0rFG3grcqSTzLWSMNJhQXQDf
RSGFISpRXxiJvpw+XppKcmj4xm4FSmzZhwn1wWHcDEiZ5pWbxWq/hKx/mxU4PtxUe8rWOsdbLc4t
a76eY15B7hAzZT1Hvu/zLDfn5OIKjirKO7pFjpKTFelqTFOEZxP7ITkgiO9yIZNcem4PdSSxGgoh
/ZksRN/jjmPssJswkR0wuM6O70gcx2eZxdu4M7jbIoujjWUc7LVu0qPdDVUQh0aL5pR+AT4Rgsmo
8fuAHojh3IDv81ODS7fOzkYfFhQ9QLwFTfv8UhXzgAnOY1zEVLFrdyrNT9RHs4UWBnl23rq2Qfma
8pJGHcZYYgpe6xatcXsCVZw7K+1Rayd22LIOLMc6r8ljLbCqC/x9sIbOMs0wrfZHaMvglQo/btew
WOu02d12TY9k0IhXttBY3zdIkfaB3Nu+UlY59upfSgkwdHbCwLRAA4/KTtqmJ4yKfeGGaRotAHUp
qG5sTSM0E8+rRrWQNqhOXCBGg3OMghai7D0B0/mB+mz2HIl4URtXCzZIBLnHGifR0LWEqJ6HCW2/
1mtS5atFXE5g5ZmWWOMzivGQ9yWh96b4gGvUb7HtwtYO2dDZEAOQ2RlavMtYlaFKOGk+RwDXCDJ0
UaodrDp3ZQ37NgHIlOUKljVgsc9BQnt3SXysEJ3ccn5/9Pro3J39L93EM5GJbeYvLweobx99L9K1
Ob1CMCJDMcaejjzIJeZZfCwh2ZbV01mVnvIiwO9aA9j8GsNxqr9IiX678UmJpHomRBOyyVSDCMUw
V4ooKnuRfDrN3BXsGvZ6pOJMa4RCBOu8KkABcaH0mbFg2HJFyLamZneWzVV/pJPJ9U9OvuqDd5Oa
ZVIMa5n6iZ66kg177/731/+jciWzUU4sr/Xu/3i9/x/IsYzZS7p/MLULCmiyNAQP3NoyuBgDJxL0
d6Y4KfDdttl4PmbbG+1jhqYwMop1tXlb3Gwhl7Dcq6lXflkXl4VL7nflinXIfPQb2lihtpFiJYgb
wWVd7bcSyKmGYxffGDJZnO8vyTacj2D8MHbtDE5OqN+WpC9z5Ban/WZnRL9+uszSDG3Kzvprw3e1
FOPswlOwQUYnFoMG5pFig8DzdmVkzP5o2NYLesdOXe1kMYP+c3MkT/t/efb1k5YyV8VqOzULCGJt
Xa3cumYIsEtYa7SBGmfZAEc16JSVjYgM+IKSS6ItaEXm2d2QlDJ43cgluYoPOr/YkRvoDbvddza2
q5wrvB2Uiq6jkHzc2dCAIT7IchACeSGGGb+WkXa2oWYBaeHBu5IvDzKH8LuqG8zebNqmApJhK65v
qlugO8b/6ndjI/JfXdtiU1GHnaj49ZPnL548fvTqyddZ8W5fvp+vgNSZBZTdOaW90TEvDIcErqY4
Sf710ed42+H78W3NMF4CXljFB/5CquxiPqGrLJmA2pugBHdGVA5v8eMrD5Iw4U+ah2JHmlB0sQPo
D3067NxtPKcd7odbM02gCXWuSaP1248KWURBQ4rEZ1xTLfxxd0w3e87IhFRWQic7VBK87+C2Prtj
qNe7+dHmLNK5JeCdmen/WO2usv+JrvhA5/CY1D7Zw/Hvx/cptsOjl68yQzAbirOxnr/FwF9BOw4l
aHZAqfGWbz1fiWpjHPIpOVxVGiDtgEh4UZT+xRwVaLiS1mmH0TAmn2M0mPzhKPv9KLufMq9Kg0WW
5d/b1VK3rWCNyZYlTb72ErCsEWstv6BYMMaPGbgRbIAa8BX9eL9ZquzX67EIcdubMYfKl78QaMkP
I5UkGOpMGvvFhx7SsBectRDvRkUuzR9cwy9kWUEuyYcRWuKts5nSzADkShkzQHs0bTAYms3Pq/1u
xkb1M4s1CrwCQPqGx5MZj0+Xco+otFUT/z/401qGOWJLOFj7iRAySwTEksOgwSuSTBXBMaidJjnc
d9uwZCWwWOhHTS83PkwnLaBm/yKCc0AMjfS44ITmFBVUAmXtrkyf0ne1Wd2gdhomD9EGPS4A1MXc
EvM1d7Km4lybGG5LklbD0bCeg92WDbRTyWBOrsHKmKuzSlldxWHzXr9DDUUe/vi20PTRGrbcjJuy
EPFBVu83+Bet7XPQGDnl/0t4h5MUph9HgWL8gjJEE6phhVcQE6T44ALswcLyrrKwxVBGEDcRJIox
agSkpZLuJ0oOaATxJ7DkYm/YgzWHRwMq+76cW3rcPbAxjMpApwEmFqcCNwvYHm8PKFs21cae9Cj/
eGp9Mfk5LzcYnr/ajjA5HKd080098HL/MMQQNmQ3XqhQ7y5IAg7bzJ4uO1BMlnDwNhiVbWgHKEzu
aI13Ul0YNKvMily6Md7LnpmDxKC/+fe8ago/gRYMZ1lXaNWIFyjZyfv3GIgTYk+NVStP1ucFKtsk
PhTWLZrFHOqapQN+jFaZoyDEARHFqswgjG766QWDxKIOegLBCLJCur17sriaG0q2A7MH6Jpz72lB
hMSOHWw07MTQNBrLDmOzKGDbWPuLOcT6lGKmn/Ob9klAsZ7S6e12N5mEJB1rrPA25cwq1GC3jUtE
1FZ08ZWHRA6mWFFOhGo7FZScKrycBtkUUWFcfMAZiLZ7lrqpRksFKpe8i8yb/RoNYLZD0lpAa1ID
3Rmyr7J/vH/vH1slKMBZNYUxCQ9jwcgvs4ctliC6F7gQUJzEbFdBJMl2V+XB1ymVipE5NhB4bDlC
WtM36N4HvGmuqg+D4VmvbQTQO6C9TfnY/2EDioMfzB8CC8ZdVZA566XtHhJzMG39a39IcUCketIe
QgMxzeO38DzxMMy/bi53YSp37yYsRAITiJiKTzWtTB81u2Jeo8esOm1SDVHQXN0CH2GkzAuO+F/7
4CRx3htFgj+2Z+W387+VhqpaMcYJB0B6IGzCshgLKbhcVefzlVC4kWZse9aHrVUZp0p7TGk3m3uo
TSK20h5N+DADO4mvKHU+Fu+q048TG99ORvfFHbNBrnBbLMoLNIRAM3QnJDcZ8VuAR8KqaQ6v3+u2
NeF4i+XlpjL8cb/Tet8fRF8UtNBfgyEDiXGBLC5sg9DveYytH7f9r49efPf0uz9RLgCv8U/bB903
o13MgYAlNHUMA9adL7sm3zenHRy9GORtdQPzYOnYYlxRdzaQQ3dgc7GHmYop7cmzPwx/INXFnezJ
9RaoPPJipE0ZoIvmCgdMZFm4sQN8Z/vnKVGU+Evv3f+5H/9/Ve8cJw==
"""
import sys
import base64
import zlib
class DictImporter(object):
def __init__(self, sources):
self.sources = sources
def find_module(self, fullname, path=None):
if fullname == "argparse" and sys.version_info >= (2,7):
# we were generated with <python2.7 (which pulls in argparse)
# but we are running now on a stdlib which has it, so use that.
return None
if fullname in self.sources:
return self
if fullname + '.__init__' in self.sources:
return self
return None
def load_module(self, fullname):
# print "load_module:", fullname
from types import ModuleType
try:
s = self.sources[fullname]
is_pkg = False
except KeyError:
s = self.sources[fullname + '.__init__']
is_pkg = True
co = compile(s, fullname, 'exec')
module = sys.modules.setdefault(fullname, ModuleType(fullname))
module.__file__ = "%s/%s" % (__file__, fullname)
module.__loader__ = self
if is_pkg:
module.__path__ = [fullname]
do_exec(co, module.__dict__) # noqa
return sys.modules[fullname]
def get_source(self, name):
res = self.sources.get(name)
if res is None:
res = self.sources.get(name + '.__init__')
return res
if __name__ == "__main__":
if sys.version_info >= (3, 0):
exec("def do_exec(co, loc): exec(co, loc)\n")
import pickle
sources = sources.encode("ascii") # ensure bytes
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
else:
import cPickle as pickle
exec("def do_exec(co, loc): exec co in loc\n")
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
importer = DictImporter(sources)
sys.meta_path.insert(0, importer)
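# With DictImporter first on sys.meta_path, every import executed below is
# offered to it before the regular import machinery (PEP 302 import hook), so
# pytest and its dependencies load from the embedded `sources` dict instead of
# an installed package.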
entry = "import pytest; raise SystemExit(pytest.cmdline.main())"
do_exec(entry, locals()) # noqa
| 231,272 | 74.976675 | 77 | py |
introd | introd-main/cfvqa/engine.py | import os
import math
import time
import torch
import datetime
import threading
import numpy as np
from bootstrap.lib import utils
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class Engine(object):
"""Contains training and evaluation procedures
"""
def __init__(self):
self.hooks = {}
self.epoch = 0
self.dataset = None
self.model = None
self.optimizer = None
self.view = None
self.best_out = {}
# generate_view will be executed at the end of each
# training and evaluation epoch
self.register_hook('train_on_flush', self.generate_view)
self.register_hook('eval_on_flush', self.generate_view)
def generate_view(self):
""" Generate a view.html via an asynchronous call to `self.view.generate()`
"""
if self.view is not None:
threading.Thread(target=self.view.generate).start()
# path_opts = os.path.join(Options()['exp']['dir'], 'options.yaml')
# os.system('python -m bootstrap.views.view --path_opts {}'.format(path_opts))
def load_state_dict(self, state):
"""
"""
self.epoch = state['epoch']
self.best_out = state['best_out']
def state_dict(self):
"""
"""
state = {}
state['epoch'] = self.epoch
state['best_out'] = self.best_out
return state
def hook(self, name):
""" Run all the callback functions that have been registered
for a hook.
Args:
name: the name of the hook
"""
if name in self.hooks:
for func in self.hooks[name]:
func()
def register_hook(self, name, func):
""" Register a callback function to be triggered when the hook
is called.
Args:
name: the name of the hook
func: the callback function (no argument)
Example usage:
.. code-block:: python
def func():
print('hooked!')
engine.register_hook('train_on_start_batch', func)
"""
if name not in self.hooks:
self.hooks[name] = []
self.hooks[name].append(func)
def resume(self, map_location=None):
""" Resume a checkpoint using the `bootstrap.lib.options.Options`
"""
Logger()('Loading {} checkpoint'.format(Options()['exp']['resume']))
self.load(Options()['exp']['dir'],
Options()['exp']['resume'],
self.model, self.optimizer,
map_location=map_location)
# self.epoch += 1
if self.epoch > 0:
self.epoch += 1
def eval(self):
""" Launch evaluation procedures
"""
Logger()('Launching evaluation procedures')
if Options()['dataset']['eval_split']:
# self.epoch-1 makes the logged epoch equal to the resumed epoch,
# or equal to -1 when not resumed
self.eval_epoch(self.model, self.dataset['eval'], self.epoch-1, logs_json=True)
Logger()('Ending evaluation procedures')
def train(self):
""" Launch training procedures
List of the hooks:
- train_on_start: before the full training procedure
"""
Logger()('Launching training procedures')
self.hook('train_on_start')
while self.epoch < Options()['engine']['nb_epochs']:
self.train_epoch(self.model, self.dataset['train'], self.optimizer, self.epoch)
if Options()['dataset']['eval_split']:
out = self.eval_epoch(self.model, self.dataset['eval'], self.epoch)
if 'saving_criteria' in Options()['engine'] and Options()['engine']['saving_criteria'] is not None:
for saving_criteria in Options()['engine']['saving_criteria']:
if self.is_best(out, saving_criteria):
name = saving_criteria.split(':')[0]
Logger()('Saving best checkpoint for strategy {}'.format(name))
self.save(Options()['exp']['dir'], 'best_{}'.format(name), self.model, self.optimizer)
Logger()('Saving last checkpoint')
self.save(Options()['exp']['dir'], 'last', self.model, self.optimizer)
self.epoch += 1
Logger()('Ending training procedures')
def train_epoch(self, model, dataset, optimizer, epoch, mode='train'):
""" Launch training procedures for one epoch
List of the hooks:
- train_on_start_epoch: before the training procedure for an epoch
- train_on_start_batch: before the training procedure for a batch
- train_on_forward: after the forward of the model
- train_on_backward: after the backward of the loss
- train_on_update: after the optimization step
- train_on_print: after the print to the terminal
- train_on_end_batch: end of the training procedure for a batch
- train_on_end_epoch: before saving the logs in logs.json
- train_on_flush: end of the training procedure for an epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Training model on {}set for epoch {}'.format(dataset.split, epoch))
model.train()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook(f'{mode}_on_start_epoch')
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook(f'{mode}_on_start_batch')
optimizer.zero_grad()
out = model(batch)
self.hook(f'{mode}_on_forward')
if not torch.isnan(out['loss']):
out['loss'].backward()
else:
Logger()('NaN detected')
#torch.cuda.synchronize()
self.hook(f'{mode}_on_backward')
optimizer.step()
#torch.cuda.synchronize()
self.hook(f'{mode}_on_update')
timer['process'] = time.time() - timer['elapsed']
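# keep an exponential moving average of the per-batch processing time
# (alpha = 0.2); it feeds the time-left estimate printed below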
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value(f'{mode}_batch.epoch', epoch, should_print=False)
Logger().log_value(f'{mode}_batch.batch', i, should_print=False)
Logger().log_value(f'{mode}_batch.timer.process', timer['process'], should_print=False)
Logger().log_value(f'{mode}_batch.timer.load', timer['load'], should_print=False)
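# accumulate every scalar output of the model for the epoch-level average;
# tensors with more than one dimension, lists and dicts are skipped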
for key, value in out.items():
if torch.is_tensor(value):
if value.dim() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if type(value) == list:
continue
if type(value) == dict:
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value(f'{mode}_batch.'+key, value, should_print=False)
if i % Options()['engine']['print_freq'] == 0:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
Logger()("{} loss: {:.5f}".format(' '*len(mode), out['loss'].data.item()))
self.hook(f'{mode}_on_print')
timer['elapsed'] = time.time()
self.hook(f'{mode}_on_end_batch')
if Options()['engine']['debug']:
if i > 2:
break
Logger().log_value(f'{mode}_epoch.epoch', epoch, should_print=True)
for key, value in out_epoch.items():
# Logger().log_value(f'{mode}_epoch.'+key, sum(value)/len(value), should_print=True)
Logger().log_value(f'{mode}_epoch.'+key, np.asarray(value).mean(), should_print=True)
self.hook(f'{mode}_on_end_epoch')
Logger().flush()
self.hook(f'{mode}_on_flush')
def eval_epoch(self, model, dataset, epoch, mode='eval', logs_json=True):
""" Launch evaluation procedures for one epoch
List of the hooks (``mode='eval'`` by default):
- mode_on_start_epoch: before the evaluation procedure for an epoch
- mode_on_start_batch: before the evaluation procedure for a batch
- mode_on_forward: after the forward of the model
- mode_on_print: after the print to the terminal
- mode_on_end_batch: end of the evaluation procedure for a batch
- mode_on_end_epoch: before saving the logs in logs.json
- mode_on_flush: end of the evaluation procedure for an epoch
Returns:
out(dict): mean of all the scalar outputs of the model, indexed by output name, for this epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Evaluating model on {}set for epoch {}'.format(dataset.split, epoch))
model.eval()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook('{}_on_start_epoch'.format(mode))
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook('{}_on_start_batch'.format(mode))
with torch.no_grad():
out = model(batch)
#torch.cuda.synchronize()
self.hook('{}_on_forward'.format(mode))
timer['process'] = time.time() - timer['elapsed']
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value('{}_batch.batch'.format(mode), i, should_print=False)
Logger().log_value('{}_batch.epoch'.format(mode), epoch, should_print=False)
Logger().log_value('{}_batch.timer.process'.format(mode), timer['process'], should_print=False)
Logger().log_value('{}_batch.timer.load'.format(mode), timer['load'], should_print=False)
for key, value in out.items():
if torch.is_tensor(value):
if value.dim() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if type(value) == list:
continue
if type(value) == dict:
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value('{}_batch.{}'.format(mode, key), value, should_print=False)
if i % Options()['engine']['print_freq'] == 0:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
self.hook('{}_on_print'.format(mode))
timer['elapsed'] = time.time()
self.hook('{}_on_end_batch'.format(mode))
if Options()['engine']['debug']:
if i > 10:
break
out = {}
for key, value in out_epoch.items():
try:
# out[key] = sum(value)/len(value)
out[key] = np.asarray(value).mean()
except:
import ipdb; ipdb.set_trace()
Logger().log_value('{}_epoch.epoch'.format(mode), epoch, should_print=True)
for key, value in out.items():
Logger().log_value('{}_epoch.{}'.format(mode, key), value, should_print=True)
self.hook('{}_on_end_epoch'.format(mode))
if logs_json:
Logger().flush()
self.hook('{}_on_flush'.format(mode))
return out
def is_best(self, out, saving_criteria):
""" Verify if the last model is the best for a specific saving criteria
Args:
out(dict): mean of all the scalar outputs of model indexed by output name
saving_criteria(str): name of an output followed by ':min' or ':max' (example: 'loss:min')
Returns:
is_best(bool)
Example usage:
.. code-block:: python
out = {
'loss': 0.2,
'acctop1': 87.02
}
engine.is_best(out, 'loss:min')
"""
if ':min' in saving_criteria:
name = saving_criteria.replace(':min', '')
order = '<'
elif ':max' in saving_criteria:
name = saving_criteria.replace(':max', '')
order = '>'
else:
error_msg = """'--engine.saving_criteria' named '{}' does not specify order,
you need to choose between '{}' or '{}' to specify if the criteria needs to be minimized or maximized""".format(
saving_criteria, saving_criteria+':min', saving_criteria+':max')
raise ValueError(error_msg)
if name not in out:
raise KeyError("'--engine.saving_criteria' named '{}' not in outputs '{}'".format(name, list(out.keys())))
if name not in self.best_out:
self.best_out[name] = out[name]
else:
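# the comparison is built as a string and evaluated, e.g. eval('0.2 < 0.3')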
if eval('{} {} {}'.format(out[name], order, self.best_out[name])):
self.best_out[name] = out[name]
return True
return False
def load(self, dir_logs, name, model, optimizer, map_location=None):
""" Load a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
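# checkpoints follow the 'ckpt_<name>_<part>.pth.tar' pattern, e.g.
# ckpt_last_model.pth.tar, ckpt_last_optimizer.pth.tar, ckpt_last_engine.pth.tar,
# or ckpt_best_loss_model.pth.tar when saved through a saving criteria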
Logger()('Loading model...')
model_state = torch.load(path_template.format(name, 'model'), map_location=map_location)
model.load_state_dict(model_state)
if Options()['dataset']['train_split'] is not None:
if os.path.isfile(path_template.format(name, 'optimizer')):
Logger()('Loading optimizer...')
optimizer_state = torch.load(path_template.format(name, 'optimizer'), map_location=map_location)
optimizer.load_state_dict(optimizer_state)
else:
Logger()('No optimizer checkpoint', log_level=Logger.WARNING)
if os.path.isfile(path_template.format(name, 'engine')):
Logger()('Loading engine...')
engine_state = torch.load(path_template.format(name, 'engine'), map_location=map_location)
self.load_state_dict(engine_state)
else:
Logger()('No engine checkpoint', log_level=Logger.WARNING)
def save(self, dir_logs, name, model, optimizer):
""" Save a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
Logger()('Saving model...')
model_state = model.state_dict()
torch.save(model_state, path_template.format(name, 'model'))
Logger()('Saving optimizer...')
optimizer_state = optimizer.state_dict()
torch.save(optimizer_state, path_template.format(name, 'optimizer'))
Logger()('Saving engine...')
engine_state = self.state_dict()
torch.save(engine_state, path_template.format(name, 'engine'))
| 17,179 | 38.313501 | 121 | py |
introd | introd-main/cfvqa/run.py | import os
import click
import traceback
import torch
import torch.backends.cudnn as cudnn
from bootstrap.lib import utils
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from cfvqa import engines
from bootstrap import datasets
from bootstrap import models
from bootstrap import optimizers
from bootstrap import views
def init_experiment_directory(exp_dir, resume=None):
# create the experiment directory
if not os.path.isdir(exp_dir):
os.system('mkdir -p ' + exp_dir)
else:
if resume is None:
if click.confirm('Exp directory already exists in {}. Erase?'
.format(exp_dir), default=False):
os.system('rm -r ' + exp_dir)
os.system('mkdir -p ' + exp_dir)
else:
os._exit(1)
def init_logs_options_files(exp_dir, resume=None):
# get the logs name which is used for the txt, json and yaml files
# default is `logs.txt`, `logs.json` and `options.yaml`
if 'logs_name' in Options()['misc'] and Options()['misc']['logs_name'] is not None:
logs_name = 'logs_{}'.format(Options()['misc']['logs_name'])
path_yaml = os.path.join(exp_dir, 'options_{}.yaml'.format(logs_name))
elif resume and Options()['dataset']['train_split'] is None:
eval_split = Options()['dataset']['eval_split']
path_yaml = os.path.join(exp_dir, 'options_eval_{}.yaml'.format(eval_split))
logs_name = 'logs_eval_{}'.format(eval_split)
else:
path_yaml = os.path.join(exp_dir, 'options.yaml')
logs_name = 'logs'
# create the options.yaml file
if not os.path.isfile(path_yaml):
Options().save(path_yaml)
# create the logs.txt and logs.json files
Logger(exp_dir, name=logs_name)
def run(path_opts=None):
# the first call to Options() loads the options yaml file from the --path_opts command line argument if path_opts=None
Options(path_opts)
# initialize seeds to be able to reproduce the experiment on reload
utils.set_random_seed(Options()['misc']['seed'])
init_experiment_directory(Options()['exp']['dir'], Options()['exp']['resume'])
init_logs_options_files(Options()['exp']['dir'], Options()['exp']['resume'])
Logger().log_dict('options', Options(), should_print=True) # display options
Logger()(os.uname()) # display server name
if torch.cuda.is_available():
cudnn.benchmark = True
Logger()('Available GPUs: {}'.format(utils.available_gpu_ids()))
# engine can train, eval, optimize the model
# engine can save and load the model and optimizer
engine = engines.factory()
# dataset is a dictionary that contains all the needed datasets indexed by modes
# (example: dataset.keys() -> ['train','eval'])
engine.dataset = datasets.factory(engine)
# model includes a network, a criterion and a metric
# model can register engine hooks (begin epoch, end batch, end batch, etc.)
# (example: "calculate mAP at the end of the evaluation epoch")
# note: model can access to datasets using engine.dataset
engine.model = models.factory(engine)
# optimizer can register engine hooks
engine.optimizer = optimizers.factory(engine.model, engine)
# view will save a view.html in the experiment directory
# with some nice plots and curves to monitor training
engine.view = views.factory(engine)
# load the model and optimizer from a checkpoint
if Options()['exp']['resume']:
engine.resume()
# if no training split, evaluate the model on the evaluation split
# (example: $ python main.py --dataset.train_split --dataset.eval_split test)
if not Options()['dataset']['train_split']:
engine.eval()
# optimize the model on the training split for several epochs
# (example: $ python main.py --dataset.train_split train)
# if an evaluation split is set, evaluate the model after each epoch
# (example: $ python main.py --dataset.train_split train --dataset.eval_split val)
if Options()['dataset']['train_split']:
engine.train()
# with torch.autograd.profiler.profile(use_cuda=Options()['misc.cuda']) as prof:
# engine.train()
# path_tracing = 'tracing_1.0_cuda,{}_all.html'.format(Options()['misc.cuda'])
# prof.export_chrome_trace(path_tracing)
def main(path_opts=None, run=None):
try:
run(path_opts=path_opts)
# to avoid a traceback for the -h flag on the command line
except SystemExit:
pass
except:
# to be able to write the error trace to exp_dir/logs.txt
try:
Logger()(traceback.format_exc(), Logger.ERROR)
except:
pass
if __name__ == '__main__':
main(run=run)
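# Typical invocation sketch (the options file path below is illustrative, not
# a file this snippet guarantees to exist in the repo):
# python run.py --path_opts options/vqacp2/smrl_cfvqa.yaml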
| 4,750 | 36.117188 | 113 | py |
introd | introd-main/cfvqa/cfvqa/run.py | import os
import click
import traceback
import torch
import torch.backends.cudnn as cudnn
from bootstrap.lib import utils
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from cfvqa import engines
from bootstrap import datasets
from bootstrap import models
from bootstrap import optimizers
from bootstrap import views
def init_experiment_directory(exp_dir, resume=None):
# create the experiment directory
if not os.path.isdir(exp_dir):
os.system('mkdir -p ' + exp_dir)
else:
if resume is None:
if click.confirm('Exp directory already exists in {}. Erase?'
.format(exp_dir), default=False):
os.system('rm -r ' + exp_dir)
os.system('mkdir -p ' + exp_dir)
else:
os._exit(1)
def init_logs_options_files(exp_dir, resume=None):
# get the logs name which is used for the txt, json and yaml files
# default is `logs.txt`, `logs.json` and `options.yaml`
if 'logs_name' in Options()['misc'] and Options()['misc']['logs_name'] is not None:
logs_name = 'logs_{}'.format(Options()['misc']['logs_name'])
path_yaml = os.path.join(exp_dir, 'options_{}.yaml'.format(logs_name))
elif resume and Options()['dataset']['train_split'] is None:
eval_split = Options()['dataset']['eval_split']
path_yaml = os.path.join(exp_dir, 'options_eval_{}.yaml'.format(eval_split))
logs_name = 'logs_eval_{}'.format(eval_split)
else:
path_yaml = os.path.join(exp_dir, 'options.yaml')
logs_name = 'logs'
# create the options.yaml file
if not os.path.isfile(path_yaml):
Options().save(path_yaml)
# create the logs.txt and logs.json files
Logger(exp_dir, name=logs_name)
def run(path_opts=None):
# the first call to Options() loads the options yaml file from the --path_opts command line argument if path_opts=None
Options(path_opts)
# initialize seeds to be able to reproduce the experiment on reload
utils.set_random_seed(Options()['misc']['seed'])
init_experiment_directory(Options()['exp']['dir'], Options()['exp']['resume'])
init_logs_options_files(Options()['exp']['dir'], Options()['exp']['resume'])
Logger().log_dict('options', Options(), should_print=True) # display options
Logger()(os.uname()) # display server name
if torch.cuda.is_available():
cudnn.benchmark = True
Logger()('Available GPUs: {}'.format(utils.available_gpu_ids()))
# engine can train, eval, optimize the model
# engine can save and load the model and optimizer
engine = engines.factory()
# dataset is a dictionary that contains all the needed datasets indexed by modes
# (example: dataset.keys() -> ['train','eval'])
engine.dataset = datasets.factory(engine)
# model includes a network, a criterion and a metric
# model can register engine hooks (begin epoch, end batch, end batch, etc.)
# (example: "calculate mAP at the end of the evaluation epoch")
# note: model can access to datasets using engine.dataset
engine.model = models.factory(engine)
# optimizer can register engine hooks
engine.optimizer = optimizers.factory(engine.model, engine)
# view will save a view.html in the experiment directory
# with some nice plots and curves to monitor training
engine.view = views.factory(engine)
# load the model and optimizer from a checkpoint
if Options()['exp']['resume']:
engine.resume()
# if no training split, evaluate the model on the evaluation split
# (example: $ python main.py --dataset.train_split --dataset.eval_split test)
if not Options()['dataset']['train_split']:
engine.eval()
# optimize the model on the training split for several epochs
# (example: $ python main.py --dataset.train_split train)
    # if an evaluation split is given, evaluate the model after each epoch
# (example: $ python main.py --dataset.train_split train --dataset.eval_split val)
if Options()['dataset']['train_split']:
engine.train()
# with torch.autograd.profiler.profile(use_cuda=Options()['misc.cuda']) as prof:
# engine.train()
# path_tracing = 'tracing_1.0_cuda,{}_all.html'.format(Options()['misc.cuda'])
# prof.export_chrome_trace(path_tracing)
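# Typical invocations via bootstrap (illustrative examples, not repo documentation):
#   python -m bootstrap.run -o cfvqa/options/vqacp2/smrl_cfvqa_sum.yaml
#   python -m bootstrap.run -o logs/myexp/options.yaml --exp.resume last \
#       --dataset.train_split '' --dataset.eval_split val    # evaluation only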
def main(path_opts=None, run=None):
try:
run(path_opts=path_opts)
# to avoid traceback for -h flag in arguments line
except SystemExit:
pass
except:
# to be able to write the error trace to exp_dir/logs.txt
try:
Logger()(traceback.format_exc(), Logger.ERROR)
except:
pass
if __name__ == '__main__':
main(run=run)
| 4,750 | 36.117188 | 113 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/rubi.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
class RUBiNet(nn.Module):
"""
Wraps another model
    The original model must return a dictionary containing the 'logits' key (predictions before softmax)
    Returns:
    - logits: the original predictions of the wrapped model
    - logits_q: the predictions from the question-only branch
    - logits_all: the fused predictions, i.e., the original logits masked by the question-only branch
    => Use `logits_all` and `logits_q` for the loss
"""
def __init__(self, model, output_size, classif, end_classif=True):
super().__init__()
self.net = model
self.c_1 = MLP(**classif)
self.end_classif = end_classif
if self.end_classif:
self.c_2 = nn.Linear(output_size, output_size)
def forward(self, batch):
out = {}
# model prediction
net_out = self.net(batch)
logits = net_out['logits']
q_embedding = net_out['q_emb'] # N * q_emb
q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate through question encoder
q_pred = self.c_1(q_embedding)
fusion_pred = logits * torch.sigmoid(q_pred)
if self.end_classif:
q_out = self.c_2(q_pred)
else:
q_out = q_pred
out['logits'] = net_out['logits']
out['logits_all'] = fusion_pred
out['logits_q'] = q_out
return out
def process_answers(self, out, key=''):
out = self.net.process_answers(out)
out = self.net.process_answers(out, key='_all')
out = self.net.process_answers(out, key='_q')
return out
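# Minimal usage sketch (hypothetical option values, for illustration only):
#   base = SMRLNet(...)   # any backbone whose output dict has 'logits' and 'q_emb'
#   rubi = RUBiNet(base, output_size=len(aid_to_ans),
#                  classif={'input_dim': 4800, 'dimensions': [1024, 1024, len(aid_to_ans)]})
#   out = rubi(batch)     # RUBiCriterion consumes out['logits_all'] and out['logits_q']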
| 1,733 | 33 | 105 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/smrl_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
class SMRLNet(nn.Module):
def __init__(self,
txt_enc={},
self_q_att=False,
agg={},
classif={},
wid_to_word={},
word_to_wid={},
aid_to_ans=[],
ans_to_aid={},
fusion={},
residual=False,
q_single=False,
):
super().__init__()
self.self_q_att = self_q_att
self.agg = agg
assert self.agg['type'] in ['max', 'mean']
self.classif = classif
self.wid_to_word = wid_to_word
self.word_to_wid = word_to_wid
self.aid_to_ans = aid_to_ans
self.ans_to_aid = ans_to_aid
self.fusion = fusion
self.residual = residual
# Modules
self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0 = nn.Linear(2400, 512)
self.q_att_linear1 = nn.Linear(512, 2)
if q_single:
self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0_single = nn.Linear(2400, 512)
self.q_att_linear1_single = nn.Linear(512, 2)
else:
self.txt_enc_single = None
self.fusion_module = block.factory_fusion(self.fusion)
if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
            Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]}) "
                f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
self.classif_module = MLP(**self.classif['mlp'])
Logger().log_value('nparams',
sum(p.numel() for p in self.parameters() if p.requires_grad),
should_print=True)
Logger().log_value('nparams_txt_enc',
self.get_nparams_txt_enc(),
should_print=True)
def get_text_enc(self, vocab_words, options):
"""
returns the text encoding network.
"""
return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
params = [p.numel() for p in self.txt_enc.parameters() if p.requires_grad]
if self.self_q_att:
params += [p.numel() for p in self.q_att_linear0.parameters() if p.requires_grad]
params += [p.numel() for p in self.q_att_linear1.parameters() if p.requires_grad]
return sum(params)
def process_fusion(self, q, mm):
bsize = mm.shape[0]
n_regions = mm.shape[1]
mm = mm.contiguous().view(bsize*n_regions, -1)
mm = self.fusion_module([q, mm])
mm = mm.view(bsize, n_regions, -1)
return mm
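    # Shape walkthrough (illustrative): q of size (bsize, q_dim) is expanded to
    # (bsize*n_regions, q_dim) by the caller, mm of size (bsize, n_regions, v_dim)
    # is flattened to match, the fusion runs on every (question, region) pair,
    # and the result is reshaped back to (bsize, n_regions, fusion_output_dim).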
def forward(self, batch):
v = batch['visual']
q = batch['question']
l = batch['lengths'].data
c = batch['norm_coord']
nb_regions = batch.get('nb_regions')
bsize = v.shape[0]
n_regions = v.shape[1]
out = {}
        q_words = q  # keep the raw word ids for the optional single encoder below
        q = self.process_question(q, l,)
        out['q_emb'] = q
        q_expand = q[:,None,:].expand(bsize, n_regions, q.shape[1])
        q_expand = q_expand.contiguous().view(bsize*n_regions, -1)
        # single txt encoder: re-encode the raw question ids, not the pooled embedding
        if self.txt_enc_single is not None:
            out['q_emb'] = self.process_question(q_words, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
mm = self.process_fusion(q_expand, v,)
if self.residual:
mm = v + mm
        if self.agg['type'] == 'max':
            mm, mm_argmax = torch.max(mm, 1)
        elif self.agg['type'] == 'mean':
            mm = mm.mean(1)
            mm_argmax = None  # no argmax is defined for mean aggregation
        out['v_emb'] = v.mean(1)
        out['mm'] = mm
        out['mm_argmax'] = mm_argmax
logits = self.classif_module(mm)
out['logits'] = logits
return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
if txt_enc is None:
txt_enc = self.txt_enc
if q_att_linear0 is None:
q_att_linear0 = self.q_att_linear0
if q_att_linear1 is None:
q_att_linear1 = self.q_att_linear1
q_emb = txt_enc.embedding(q)
q, _ = txt_enc.rnn(q_emb)
if self.self_q_att:
q_att = q_att_linear0(q)
q_att = F.relu(q_att)
q_att = q_att_linear1(q_att)
q_att = mask_softmax(q_att, l)
#self.q_att_coeffs = q_att
if q_att.size(2) > 1:
q_atts = torch.unbind(q_att, dim=2)
q_outs = []
for q_att in q_atts:
q_att = q_att.unsqueeze(2)
q_att = q_att.expand_as(q)
q_out = q_att*q
q_out = q_out.sum(1)
q_outs.append(q_out)
q = torch.cat(q_outs, dim=1)
else:
q_att = q_att.expand_as(q)
q = q_att * q
q = q.sum(1)
else:
# l contains the number of words for each question
# in case of multi-gpus it must be a Tensor
# thus we convert it into a list during the forward pass
l = list(l.data[:,0])
q = txt_enc._select_last(q, l)
return q
def process_answers(self, out, key=''):
batch_size = out[f'logits{key}'].shape[0]
_, pred = out[f'logits{key}'].data.max(1)
pred.squeeze_()
if batch_size != 1:
out[f'answers{key}'] = [self.aid_to_ans[pred[i].item()] for i in range(batch_size)]
out[f'answer_ids{key}'] = [pred[i].item() for i in range(batch_size)]
else:
out[f'answers{key}'] = [self.aid_to_ans[pred.item()]]
out[f'answer_ids{key}'] = [pred.item()]
return out
| 6,297 | 32.679144 | 129 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/cfvqaintrod.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
eps = 1e-12
class CFVQAIntroD(nn.Module):
"""
Wraps another model
    The original model must return a dictionary containing the 'logits' key (predictions before softmax)
Returns:
- logits_vq: the original predictions of the model, i.e., NIE
- logits_q: the predictions from the question-only branch
- logits_v: the predictions from the vision-only branch
- logits_all: the predictions from the ensemble model
- logits_cfvqa: the predictions based on CF-VQA, i.e., TIE
=> Use `logits_all`, `logits_q` and `logits_v` for the loss
"""
def __init__(self, model, model_teacher, output_size, classif_q, classif_v, fusion_mode, end_classif=True, is_va=True):
super().__init__()
self.net_student = model
self.net = model_teacher
self.end_classif = end_classif
assert fusion_mode in ['rubi', 'hm', 'sum'], "Fusion mode should be rubi/hm/sum."
self.fusion_mode = fusion_mode
self.is_va = is_va and (not fusion_mode=='rubi') # RUBi does not consider V->A
# Q->A branch
self.q_1 = MLP(**classif_q)
if self.end_classif: # default: True (following RUBi)
self.q_2 = nn.Linear(output_size, output_size)
# V->A branch
if self.is_va: # default: True (containing V->A)
self.v_1 = MLP(**classif_v)
if self.end_classif: # default: True (following RUBi)
self.v_2 = nn.Linear(output_size, output_size)
self.constant = nn.Parameter(torch.tensor(0.0))
self.constant.requires_grad = True
self.net.eval()
self.q_1.eval()
if self.end_classif:
self.q_2.eval()
if self.is_va:
self.v_1.eval()
if self.end_classif:
self.v_2.eval()
def forward(self, batch):
out = {}
# model prediction
net_out = self.net(batch)
logits = net_out['logits']
# Q->A branch
q_embedding = net_out['q_emb'] # N * q_emb
q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate
q_pred = self.q_1(q_embedding)
# V->A branch
if self.is_va:
v_embedding = net_out['v_emb'] # N * v_emb
v_embedding = grad_mul_const(v_embedding, 0.0) # don't backpropagate
v_pred = self.v_1(v_embedding)
else:
v_pred = None
# both q, k and v are the facts
z_qkv = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=True, v_fact=True) # te
# q is the fact while k and v are the counterfactuals
        z_q = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=False, v_fact=False) # nde (question-only counterfactual)
logits_cfvqa = z_qkv - z_q
if self.end_classif:
q_out = self.q_2(q_pred)
if self.is_va:
v_out = self.v_2(v_pred)
else:
q_out = q_pred
if self.is_va:
v_out = v_pred
out['logits_all'] = z_qkv # for optimization
out['logits_vq'] = logits # predictions of the original VQ branch, i.e., NIE
out['logits_cfvqa'] = logits_cfvqa # predictions of CFVQA, i.e., TIE
out['logits_q'] = q_out # for optimization
if self.is_va:
out['logits_v'] = v_out # for optimization
# student model
logits_stu = self.net_student(batch)
out['logits_stu'] = logits_stu['logits']
return out
def process_answers(self, out, key=''):
out = self.net.process_answers(out, key='_all')
out = self.net.process_answers(out, key='_vq')
out = self.net.process_answers(out, key='_cfvqa')
out = self.net.process_answers(out, key='_q')
if self.is_va:
out = self.net.process_answers(out, key='_v')
# student model
out = self.net.process_answers(out, key='_stu')
return out
def fusion(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
z_k, z_q, z_v = self.transform(z_k, z_q, z_v, q_fact, k_fact, v_fact)
if self.fusion_mode == 'rubi':
z = z_k * torch.sigmoid(z_q)
elif self.fusion_mode == 'hm':
if self.is_va:
z = z_k * z_q * z_v
else:
z = z_k * z_q
z = torch.log(z + eps) - torch.log1p(z)
elif self.fusion_mode == 'sum':
if self.is_va:
z = z_k + z_q + z_v
else:
z = z_k + z_q
z = torch.log(torch.sigmoid(z) + eps)
return z
def transform(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
if not k_fact:
z_k = self.constant * torch.ones_like(z_k).cuda()
if not q_fact:
z_q = self.constant * torch.ones_like(z_q).cuda()
if self.is_va:
if not v_fact:
z_v = self.constant * torch.ones_like(z_v).cuda()
if self.fusion_mode == 'hm':
z_k = torch.sigmoid(z_k)
z_q = torch.sigmoid(z_q)
if self.is_va:
z_v = torch.sigmoid(z_v)
return z_k, z_q, z_v | 5,384 | 33.741935 | 123 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/utils.py | import torch
def mask_softmax(x, lengths):
mask = torch.zeros_like(x).to(device=x.device, non_blocking=True)
t_lengths = lengths[:,:,None].expand_as(mask)
arange_id = torch.arange(mask.size(1)).to(device=x.device, non_blocking=True)
arange_id = arange_id[None,:,None].expand_as(mask)
mask[arange_id<t_lengths] = 1
# https://stackoverflow.com/questions/42599498/numercially-stable-softmax
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
# exp(x - max(x)) instead of exp(x) is a trick
# to improve the numerical stability while giving
# the same outputs
x2 = torch.exp(x - torch.max(x))
x3 = x2 * mask
epsilon = 1e-5
x3_sum = torch.sum(x3, dim=1, keepdim=True) + epsilon
x4 = x3 / x3_sum.expand_as(x3)
return x4
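# Example (illustrative): for x of size (B, T, 2) and lengths of size (B, 1),
# positions t >= length get (near-)zero probability, and each attention column
# sums to ~1 over the valid timesteps (up to the stability epsilon).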
class GradReverseMask(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, mask, weight):
"""
The mask should be composed of 0 or 1.
        The entries where the mask is 1 get their gradient reversed.
"""
ctx.save_for_backward(mask)
ctx.weight = weight
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
mask, = ctx.saved_tensors
mask_c = mask.clone().detach().float()
mask_c[mask == 0] = 1.0
mask_c[mask == 1] = - float(ctx.weight)
return grad_output * mask_c[:, None].float(), None, None
def grad_reverse_mask(x, mask, weight=1):
return GradReverseMask.apply(x, mask, weight)
class GradReverse(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
class GradMulConst(torch.autograd.Function):
"""
    Identity in the forward pass; scales the gradient by `const` in the backward
    pass (const=0.0 blocks backpropagation, as used for the question encoder).
"""
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output * ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
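# Quick sanity check for grad_mul_const (illustrative):
#   x = torch.ones(2, 3, requires_grad=True)
#   grad_mul_const(x, 0.0).sum().backward()
#   assert torch.all(x.grad == 0)  # identity forward, gradient scaled by const=0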
| 2,326 | 27.036145 | 98 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/cfvqa.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
eps = 1e-12
class CFVQA(nn.Module):
"""
Wraps another model
    The original model must return a dictionary containing the 'logits' key (predictions before softmax)
Returns:
- logits_vq: the original predictions of the model, i.e., NIE
- logits_q: the predictions from the question-only branch
- logits_v: the predictions from the vision-only branch
- logits_all: the predictions from the ensemble model
- logits_cfvqa: the predictions based on CF-VQA, i.e., TIE
=> Use `logits_all`, `logits_q` and `logits_v` for the loss
"""
def __init__(self, model, output_size, classif_q, classif_v, fusion_mode, end_classif=True, is_va=True):
super().__init__()
self.net = model
self.end_classif = end_classif
assert fusion_mode in ['rubi', 'hm', 'sum'], "Fusion mode should be rubi/hm/sum."
self.fusion_mode = fusion_mode
self.is_va = is_va and (not fusion_mode=='rubi') # RUBi does not consider V->A
# Q->A branch
self.q_1 = MLP(**classif_q)
if self.end_classif: # default: True (following RUBi)
self.q_2 = nn.Linear(output_size, output_size)
# V->A branch
if self.is_va: # default: True (containing V->A)
self.v_1 = MLP(**classif_v)
if self.end_classif: # default: True (following RUBi)
self.v_2 = nn.Linear(output_size, output_size)
self.constant = nn.Parameter(torch.tensor(0.0))
def forward(self, batch):
out = {}
# model prediction
net_out = self.net(batch)
logits = net_out['logits']
# Q->A branch
q_embedding = net_out['q_emb'] # N * q_emb
q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate
q_pred = self.q_1(q_embedding)
# V->A branch
if self.is_va:
v_embedding = net_out['v_emb'] # N * v_emb
v_embedding = grad_mul_const(v_embedding, 0.0) # don't backpropagate
v_pred = self.v_1(v_embedding)
else:
v_pred = None
# both q, k and v are the facts
z_qkv = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=True, v_fact=True) # te
# q is the fact while k and v are the counterfactuals
        z_q = self.fusion(logits, q_pred, v_pred, q_fact=True, k_fact=False, v_fact=False) # nde (question-only counterfactual)
logits_cfvqa = z_qkv - z_q
if self.end_classif:
q_out = self.q_2(q_pred)
if self.is_va:
v_out = self.v_2(v_pred)
else:
q_out = q_pred
if self.is_va:
v_out = v_pred
out['logits_all'] = z_qkv # for optimization
out['logits_vq'] = logits # predictions of the original VQ branch, i.e., NIE
out['logits_cfvqa'] = logits_cfvqa # predictions of CFVQA, i.e., TIE
out['logits_q'] = q_out # for optimization
if self.is_va:
out['logits_v'] = v_out # for optimization
if self.is_va:
out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), v_pred.clone().detach(), q_fact=True, k_fact=False, v_fact=False) # tie
else:
out['z_nde'] = self.fusion(logits.clone().detach(), q_pred.clone().detach(), None, q_fact=True, k_fact=False, v_fact=False) # tie
return out
def process_answers(self, out, key=''):
out = self.net.process_answers(out, key='_all')
out = self.net.process_answers(out, key='_vq')
out = self.net.process_answers(out, key='_cfvqa')
out = self.net.process_answers(out, key='_q')
if self.is_va:
out = self.net.process_answers(out, key='_v')
return out
def fusion(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
z_k, z_q, z_v = self.transform(z_k, z_q, z_v, q_fact, k_fact, v_fact)
if self.fusion_mode == 'rubi':
z = z_k * torch.sigmoid(z_q)
elif self.fusion_mode == 'hm':
if self.is_va:
z = z_k * z_q * z_v
else:
z = z_k * z_q
z = torch.log(z + eps) - torch.log1p(z)
elif self.fusion_mode == 'sum':
if self.is_va:
z = z_k + z_q + z_v
else:
z = z_k + z_q
z = torch.log(torch.sigmoid(z) + eps)
return z
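    # Fusion recap (as implemented in fusion/transform): counterfactual branches
    # (fact flags set to False) are replaced by the learned scalar `constant`;
    # 'rubi' masks z_k with sigmoid(z_q), 'hm' multiplies the sigmoided branches
    # and maps the product p through log(p + eps) - log1p(p), and 'sum' adds the
    # logits before a log-sigmoid.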
def transform(self, z_k, z_q, z_v, q_fact=False, k_fact=False, v_fact=False):
if not k_fact:
z_k = self.constant * torch.ones_like(z_k).cuda()
if not q_fact:
z_q = self.constant * torch.ones_like(z_q).cuda()
if self.is_va:
if not v_fact:
z_v = self.constant * torch.ones_like(z_v).cuda()
if self.fusion_mode == 'hm':
z_k = torch.sigmoid(z_k)
z_q = torch.sigmoid(z_q)
if self.is_va:
z_v = torch.sigmoid(z_v)
return z_k, z_q, z_v | 5,174 | 35.702128 | 161 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/factory.py | import sys
import copy
import torch
import torch.nn as nn
import os
import json
from bootstrap.lib.options import Options
from bootstrap.models.networks.data_parallel import DataParallel
from block.models.networks.vqa_net import VQANet as AttentionNet
from bootstrap.lib.logger import Logger
from .rubi import RUBiNet
from .cfvqa import CFVQA
from .cfvqaintrod import CFVQAIntroD
from .rubiintrod import RUBiIntroD
def factory(engine):
mode = list(engine.dataset.keys())[0]
dataset = engine.dataset[mode]
opt = Options()['model.network']
if opt['base'] == 'smrl':
from .smrl_net import SMRLNet as BaselineNet
elif opt['base'] == 'updn':
from .updn_net import UpDnNet as BaselineNet
elif opt['base'] == 'san':
from .san_net import SANNet as BaselineNet
else:
raise ValueError(opt['base'])
orig_net = BaselineNet(
txt_enc=opt['txt_enc'],
self_q_att=opt['self_q_att'],
agg=opt['agg'],
classif=opt['classif'],
wid_to_word=dataset.wid_to_word,
word_to_wid=dataset.word_to_wid,
aid_to_ans=dataset.aid_to_ans,
ans_to_aid=dataset.ans_to_aid,
fusion=opt['fusion'],
residual=opt['residual'],
q_single=opt['q_single'],
)
if opt['name'] == 'baseline':
net = orig_net
elif opt['name'] == 'rubi':
net = RUBiNet(
model=orig_net,
output_size=len(dataset.aid_to_ans),
classif=opt['rubi_params']['mlp_q']
)
elif opt['name'] == 'cfvqa':
net = CFVQA(
model=orig_net,
output_size=len(dataset.aid_to_ans),
classif_q=opt['cfvqa_params']['mlp_q'],
classif_v=opt['cfvqa_params']['mlp_v'],
fusion_mode=opt['fusion_mode'],
is_va=True
)
elif opt['name'] == 'cfvqasimple':
net = CFVQA(
model=orig_net,
output_size=len(dataset.aid_to_ans),
classif_q=opt['cfvqa_params']['mlp_q'],
classif_v=None,
fusion_mode=opt['fusion_mode'],
is_va=False
)
elif opt['name'] == 'cfvqaintrod':
orig_net_teacher = BaselineNet(
txt_enc=opt['txt_enc'],
self_q_att=opt['self_q_att'],
agg=opt['agg'],
classif=opt['classif'],
wid_to_word=dataset.wid_to_word,
word_to_wid=dataset.word_to_wid,
aid_to_ans=dataset.aid_to_ans,
ans_to_aid=dataset.ans_to_aid,
fusion=opt['fusion'],
residual=opt['residual'],
q_single=opt['q_single'],
)
net = CFVQAIntroD(
model=orig_net,
model_teacher=orig_net_teacher,
output_size=len(dataset.aid_to_ans),
classif_q=opt['cfvqa_params']['mlp_q'],
classif_v=opt['cfvqa_params']['mlp_v'],
fusion_mode=opt['fusion_mode']
)
elif opt['name'] == 'cfvqasimpleintrod':
orig_net_teacher = BaselineNet(
txt_enc=opt['txt_enc'],
self_q_att=opt['self_q_att'],
agg=opt['agg'],
classif=opt['classif'],
wid_to_word=dataset.wid_to_word,
word_to_wid=dataset.word_to_wid,
aid_to_ans=dataset.aid_to_ans,
ans_to_aid=dataset.ans_to_aid,
fusion=opt['fusion'],
residual=opt['residual'],
q_single=opt['q_single'],
)
net = CFVQAIntroD(
model=orig_net,
model_teacher=orig_net_teacher,
output_size=len(dataset.aid_to_ans),
classif_q=opt['cfvqa_params']['mlp_q'],
classif_v=None,
fusion_mode=opt['fusion_mode'],
is_va=False
)
elif opt['name'] == 'rubiintrod':
orig_net_teacher = BaselineNet(
txt_enc=opt['txt_enc'],
self_q_att=opt['self_q_att'],
agg=opt['agg'],
classif=opt['classif'],
wid_to_word=dataset.wid_to_word,
word_to_wid=dataset.word_to_wid,
aid_to_ans=dataset.aid_to_ans,
ans_to_aid=dataset.ans_to_aid,
fusion=opt['fusion'],
residual=opt['residual'],
q_single=opt['q_single'],
)
net = RUBiIntroD(
model=orig_net,
model_teacher=orig_net_teacher,
output_size=len(dataset.aid_to_ans),
classif=opt['rubi_params']['mlp_q']
)
else:
raise ValueError(opt['name'])
if Options()['misc.cuda'] and torch.cuda.device_count() > 1:
net = DataParallel(net)
return net
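# Expected options snippet for this factory (illustrative, not a full config):
#   model.network:
#     base: smrl          # smrl | updn | san
#     name: cfvqa         # baseline | rubi | cfvqa | cfvqasimple |
#                         # cfvqaintrod | cfvqasimpleintrod | rubiintrod
#     fusion_mode: sum    # rubi | hm | sum (CF-VQA variants only)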
| 4,667 | 29.913907 | 64 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/updn_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
from torch.nn.utils.weight_norm import weight_norm
class UpDnNet(nn.Module):
def __init__(self,
txt_enc={},
self_q_att=False,
agg={},
classif={},
wid_to_word={},
word_to_wid={},
aid_to_ans=[],
ans_to_aid={},
fusion={},
residual=False,
q_single=False,
):
super().__init__()
self.self_q_att = self_q_att
self.agg = agg
assert self.agg['type'] in ['max', 'mean']
self.classif = classif
self.wid_to_word = wid_to_word
self.word_to_wid = word_to_wid
self.aid_to_ans = aid_to_ans
self.ans_to_aid = ans_to_aid
self.fusion = fusion
self.residual = residual
# Modules
self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0 = nn.Linear(2400, 512)
self.q_att_linear1 = nn.Linear(512, 2)
if q_single:
self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0_single = nn.Linear(2400, 512)
self.q_att_linear1_single = nn.Linear(512, 2)
else:
self.txt_enc_single = None
if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
            Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]}) "
                f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
self.classif_module = MLP(**self.classif['mlp'])
# UpDn
q_dim = self.fusion['input_dims'][0]
v_dim = self.fusion['input_dims'][1]
output_dim = self.fusion['output_dim']
self.v_att = Attention(v_dim, q_dim, output_dim)
self.q_net = FCNet([q_dim, output_dim])
self.v_net = FCNet([v_dim, output_dim])
Logger().log_value('nparams',
sum(p.numel() for p in self.parameters() if p.requires_grad),
should_print=True)
Logger().log_value('nparams_txt_enc',
self.get_nparams_txt_enc(),
should_print=True)
def get_text_enc(self, vocab_words, options):
"""
returns the text encoding network.
"""
return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
params = [p.numel() for p in self.txt_enc.parameters() if p.requires_grad]
if self.self_q_att:
params += [p.numel() for p in self.q_att_linear0.parameters() if p.requires_grad]
params += [p.numel() for p in self.q_att_linear1.parameters() if p.requires_grad]
return sum(params)
def forward(self, batch):
v = batch['visual']
q = batch['question']
l = batch['lengths'].data
c = batch['norm_coord']
nb_regions = batch.get('nb_regions')
out = {}
q_emb = self.process_question(q, l,)
out['v_emb'] = v.mean(1)
out['q_emb'] = q_emb
# single txt encoder
if self.txt_enc_single is not None:
out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
# New
att = self.v_att(v, q_emb)
v_emb = (att * v).sum(1)
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classif_module(joint_repr)
out['logits'] = logits
return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
if txt_enc is None:
txt_enc = self.txt_enc
if q_att_linear0 is None:
q_att_linear0 = self.q_att_linear0
if q_att_linear1 is None:
q_att_linear1 = self.q_att_linear1
q_emb = txt_enc.embedding(q)
q, _ = txt_enc.rnn(q_emb)
if self.self_q_att:
q_att = q_att_linear0(q)
q_att = F.relu(q_att)
q_att = q_att_linear1(q_att)
q_att = mask_softmax(q_att, l)
#self.q_att_coeffs = q_att
if q_att.size(2) > 1:
q_atts = torch.unbind(q_att, dim=2)
q_outs = []
for q_att in q_atts:
q_att = q_att.unsqueeze(2)
q_att = q_att.expand_as(q)
q_out = q_att*q
q_out = q_out.sum(1)
q_outs.append(q_out)
q = torch.cat(q_outs, dim=1)
else:
q_att = q_att.expand_as(q)
q = q_att * q
q = q.sum(1)
else:
# l contains the number of words for each question
# in case of multi-gpus it must be a Tensor
# thus we convert it into a list during the forward pass
l = list(l.data[:,0])
q = txt_enc._select_last(q, l)
return q
def process_answers(self, out, key=''):
batch_size = out[f'logits{key}'].shape[0]
_, pred = out[f'logits{key}'].data.max(1)
pred.squeeze_()
if batch_size != 1:
out[f'answers{key}'] = [self.aid_to_ans[pred[i].item()] for i in range(batch_size)]
out[f'answer_ids{key}'] = [pred[i].item() for i in range(batch_size)]
else:
out[f'answers{key}'] = [self.aid_to_ans[pred.item()]]
out[f'answer_ids{key}'] = [pred.item()]
return out
class Attention(nn.Module):
def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
super(Attention, self).__init__()
self.v_proj = FCNet([v_dim, num_hid])
self.q_proj = FCNet([q_dim, num_hid])
self.dropout = nn.Dropout(dropout)
self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)
def forward(self, v, q):
"""
v: [batch, k, vdim]
q: [batch, qdim]
"""
logits = self.logits(v, q)
w = nn.functional.softmax(logits, 1)
return w
def logits(self, v, q):
batch, k, _ = v.size()
v_proj = self.v_proj(v) # [batch, k, qdim]
q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
joint_repr = v_proj * q_proj
joint_repr = self.dropout(joint_repr)
logits = self.linear(joint_repr)
return logits
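    # Illustrative shapes: v of size (B, k, v_dim) and q of size (B, q_dim) give
    # logits of size (B, k, 1); after the softmax over k in forward(), UpDnNet
    # computes (att * v).sum(1), the attended visual feature of size (B, v_dim).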
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x) | 7,498 | 32.627803 | 129 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/rubiintrod.py | import torch
import torch.nn as nn
from block.models.networks.mlp import MLP
from .utils import grad_mul_const # mask_softmax, grad_reverse, grad_reverse_mask,
class RUBiIntroD(nn.Module):
"""
Wraps another model
    The original model must return a dictionary containing the 'logits' key (predictions before softmax)
    Returns:
    - logits: the original predictions of the teacher model
    - logits_q: the predictions from the question-only branch
    - logits_all: the fused teacher predictions (logits masked by the question-only branch)
    - logits_stu: the predictions of the student model
    => Use `logits_all` and `logits_q` for the loss
"""
def __init__(self, model, model_teacher, output_size, classif, end_classif=True):
super().__init__()
self.net_student = model
self.net = model_teacher
self.c_1 = MLP(**classif)
self.end_classif = end_classif
if self.end_classif:
self.c_2 = nn.Linear(output_size, output_size)
self.net.eval()
self.c_1.eval()
        if self.end_classif:
            self.c_2.eval()
def forward(self, batch):
out = {}
# model prediction
net_out = self.net(batch)
logits = net_out['logits']
q_embedding = net_out['q_emb'] # N * q_emb
q_embedding = grad_mul_const(q_embedding, 0.0) # don't backpropagate through question encoder
q_pred = self.c_1(q_embedding)
fusion_pred = logits * torch.sigmoid(q_pred)
if self.end_classif:
q_out = self.c_2(q_pred)
else:
q_out = q_pred
out['logits'] = net_out['logits']
out['logits_all'] = fusion_pred
out['logits_q'] = q_out
# student model
logits_stu = self.net_student(batch)
out['logits_stu'] = logits_stu['logits']
return out
def process_answers(self, out, key=''):
out = self.net.process_answers(out)
out = self.net.process_answers(out, key='_all')
out = self.net.process_answers(out, key='_q')
out = self.net.process_answers(out, key='_stu')
return out
| 2,042 | 31.951613 | 105 | py |
introd | introd-main/cfvqa/cfvqa/models/networks/san_net.py | from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
from torch.nn.utils.weight_norm import weight_norm
from torch.autograd import Variable
class SANNet(nn.Module):
def __init__(self,
txt_enc={},
self_q_att=False,
agg={},
classif={},
wid_to_word={},
word_to_wid={},
aid_to_ans=[],
ans_to_aid={},
fusion={},
residual=False,
q_single=False
):
super().__init__()
self.self_q_att = self_q_att
self.agg = agg
assert self.agg['type'] in ['max', 'mean']
self.classif = classif
self.wid_to_word = wid_to_word
self.word_to_wid = word_to_wid
self.aid_to_ans = aid_to_ans
self.ans_to_aid = ans_to_aid
self.fusion = fusion
self.residual = residual
# Modules
self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0 = nn.Linear(2400, 512)
self.q_att_linear1 = nn.Linear(512, 2)
        if q_single:
            self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
            if self.self_q_att:
                self.q_att_linear0_single = nn.Linear(2400, 512)
                self.q_att_linear1_single = nn.Linear(512, 2)
        else:
            self.txt_enc_single = None  # mirror the other backbones so forward() can test it
if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
            Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]}) "
                f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
self.classif_module = MLP(**self.classif['mlp'])
# UpDn
q_dim = self.fusion['input_dims'][0]
v_dim = self.fusion['input_dims'][1]
output_dim = self.fusion['output_dim']
att_size = 512
self.v_att = Attention(v_dim, v_dim, att_size, 36, output_dim, drop_ratio=0.5)
self.txt_enc.rnn = QuestionEmbedding(620, q_dim, 1, False, 0.0)
self.q_net = FCNet([q_dim, output_dim])
# self.v_net = FCNet([v_dim, output_dim])
Logger().log_value('nparams',
sum(p.numel() for p in self.parameters() if p.requires_grad),
should_print=True)
Logger().log_value('nparams_txt_enc',
self.get_nparams_txt_enc(),
should_print=True)
def get_text_enc(self, vocab_words, options):
"""
returns the text encoding network.
"""
return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
params = [p.numel() for p in self.txt_enc.parameters() if p.requires_grad]
if self.self_q_att:
params += [p.numel() for p in self.q_att_linear0.parameters() if p.requires_grad]
params += [p.numel() for p in self.q_att_linear1.parameters() if p.requires_grad]
return sum(params)
def forward(self, batch):
v = batch['visual']
q = batch['question']
l = batch['lengths'].data
c = batch['norm_coord']
nb_regions = batch.get('nb_regions')
out = {}
q_emb = self.process_question(q, l,)
out['v_emb'] = v.mean(1)
out['q_emb'] = q_emb
# single txt encoder
if self.txt_enc_single is not None:
out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
# New
q_repr = self.q_net(q_emb)
joint_repr = self.v_att(q_repr, v)
logits = self.classif_module(joint_repr)
out['logits'] = logits
return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
if txt_enc is None:
txt_enc = self.txt_enc
q_emb = txt_enc.embedding(q)
q = txt_enc.rnn(q_emb)
return q
def process_answers(self, out, key=''):
batch_size = out[f'logits{key}'].shape[0]
_, pred = out[f'logits{key}'].data.max(1)
pred.squeeze_()
if batch_size != 1:
out[f'answers{key}'] = [self.aid_to_ans[pred[i].item()] for i in range(batch_size)]
out[f'answer_ids{key}'] = [pred[i].item() for i in range(batch_size)]
else:
out[f'answers{key}'] = [self.aid_to_ans[pred.item()]]
out[f'answer_ids{key}'] = [pred.item()]
return out
class Attention(nn.Module): # Extend PyTorch's Module class
def __init__(self, v_dim, q_dim, att_size, img_seq_size, output_size, drop_ratio):
super(Attention, self).__init__() # Must call super __init__()
self.v_dim = v_dim
self.q_dim = q_dim
self.att_size = att_size
self.img_seq_size = img_seq_size
self.output_size = output_size
self.drop_ratio = drop_ratio
self.tan = nn.Tanh()
self.dp = nn.Dropout(drop_ratio)
        self.sf = nn.Softmax(dim=1)  # explicit dim over the flattened (B, m) region scores; implicit dim is deprecated
self.fc11 = nn.Linear(q_dim, 768, bias=True)
# self.fc111 = nn.Linear(768, 640, bias=True)
self.fc111 = nn.Linear(768, att_size, bias=True)
self.fc12 = nn.Linear(v_dim, 768, bias=False)
# self.fc121 = nn.Linear(768, 640, bias=False)
self.fc121 = nn.Linear(768, att_size, bias=False)
self.linear_second = nn.Linear(att_size, att_size, bias=False)
# self.linear_second = nn.Linear(att_size, img_seq_size, bias=False)
self.fc13 = nn.Linear(att_size, 1, bias=True)
self.fc21 = nn.Linear(q_dim, att_size, bias=True)
self.fc22 = nn.Linear(v_dim, att_size, bias=False)
self.fc23 = nn.Linear(att_size, 1, bias=True)
self.fc = nn.Linear(v_dim, output_size, bias=True)
# d = input_size | m = img_seq_size | k = att_size
def forward(self, ques_feat, img_feat): # ques_feat -- [batch, d] | img_feat -- [batch_size, m, d]
# print(img_feat.size(), ques_feat.size())
# print(self.v_dim, self.q_dim)
# print("=======================================================================")
B = ques_feat.size(0)
# Stack 1
ques_emb_1 = self.fc11(ques_feat)
ques_emb_1 = self.fc111(ques_emb_1) # [batch_size, att_size]
img_emb_1 = self.fc12(img_feat)
img_emb_1 = self.fc121(img_emb_1)
# print(ques_emb_1.size(), img_emb_1.size())
# print("=======================================================================")
# h1 = self.tan(ques_emb_1.view(B, 1, self.att_size) + img_emb_1)
h1 = self.tan(ques_emb_1.view(B, 1, self.att_size) + img_emb_1)
h1_emb = self.linear_second(h1)
h1_emb = self.fc13(h1_emb)
p1 = self.sf(h1_emb.view(-1, self.img_seq_size)).view(B, 1, self.img_seq_size)
# Weighted sum
img_att1 = p1.matmul(img_feat)
u1 = ques_feat + img_att1.view(-1, self.v_dim)
# Stack 2
ques_emb_2 = self.fc21(u1) # [batch_size, att_size]
img_emb_2 = self.fc22(img_feat)
h2 = self.tan(ques_emb_2.view(B, 1, self.att_size) + img_emb_2)
h2_emb = self.fc23(self.dp(h2))
p2 = self.sf(h2_emb.view(-1, self.img_seq_size)).view(B, 1, self.img_seq_size)
# Weighted sum
img_att2 = p2.matmul(img_feat)
u2 = u1 + img_att2.view(-1, self.v_dim)
return u2
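    # Two-hop stacked attention (shapes as used above): each hop projects the
    # query and the m regions to att_size, scores the regions with a softmax,
    # and adds the attended visual summary back to the query (u1, then u2);
    # u2 is what SANNet feeds to the answer classifier.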
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
class QuestionEmbedding(nn.Module):
def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):
"""Module for question embedding
"""
super(QuestionEmbedding, self).__init__()
assert rnn_type == 'LSTM' or rnn_type == 'GRU'
rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
self.rnn = rnn_cls(
in_dim, num_hid, nlayers,
bidirectional=bidirect,
dropout=dropout,
batch_first=True)
self.in_dim = in_dim
self.num_hid = num_hid
self.nlayers = nlayers
self.rnn_type = rnn_type
self.ndirections = 1 + int(bidirect)
def init_hidden(self, batch):
# just to get the type of tensor
weight = next(self.parameters()).data
hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)
if self.rnn_type == 'LSTM':
return (Variable(weight.new(*hid_shape).zero_()),
Variable(weight.new(*hid_shape).zero_()))
else:
return Variable(weight.new(*hid_shape).zero_())
def forward(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
if self.ndirections == 1:
return output[:, -1]
forward_ = output[:, -1, :self.num_hid]
backward = output[:, 0, self.num_hid:]
return torch.cat((forward_, backward), dim=1)
def forward_all(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
return output
| 10,169 | 34.190311 | 129 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/rubiintrod_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class RUBiIntroDCriterion(nn.Module):
def __init__(self):
super().__init__()
self.cls_loss = nn.CrossEntropyLoss(reduction='none')
def forward(self, net_out, batch):
out = {}
logits_all = net_out['logits_all']
class_id = batch['class_id'].squeeze(1)
# KD
logits_t = net_out['logits']
logits_s = net_out['logits_stu']
p_t = torch.nn.functional.softmax(logits_t, -1).clone().detach()
kd_loss = - p_t*F.log_softmax(logits_s, -1)
kd_loss = kd_loss.sum(1)
cls_loss = self.cls_loss(logits_s, class_id)
# weight estimation
cls_loss_ood = self.cls_loss(logits_t, class_id)
cls_loss_id = self.cls_loss(logits_all, class_id)
weight = cls_loss_ood/(cls_loss_ood+cls_loss_id)
weight = torch.round(weight)
weight = weight.detach()
loss = (weight*kd_loss).mean() + ((1-weight)*cls_loss).mean()
out['loss'] = loss
return out
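# Note: `weight` is rounded to {0, 1}, so each sample is routed exclusively to
# either the distillation term or the plain cross-entropy term, depending on
# the relative losses of the teacher's two heads; the CF-VQA IntroD criterion
# below keeps the soft weight instead.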
| 1,165 | 28.897436 | 72 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/cfvqaintrod_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class CFVQAIntroDCriterion(nn.Module):
def __init__(self):
super().__init__()
self.cls_loss = nn.CrossEntropyLoss(reduction='none')
def forward(self, net_out, batch):
out = {}
logits_all = net_out['logits_all']
class_id = batch['class_id'].squeeze(1)
# KD
logits_t = net_out['logits_cfvqa']
logits_s = net_out['logits_stu']
p_t = torch.nn.functional.softmax(logits_t, -1).clone().detach()
kd_loss = - p_t*F.log_softmax(logits_s, -1)
kd_loss = kd_loss.sum(1)
cls_loss = self.cls_loss(logits_s, class_id)
# weight estimation
cls_loss_ood = self.cls_loss(logits_t, class_id)
cls_loss_id = self.cls_loss(logits_all, class_id)
weight = cls_loss_ood/(cls_loss_ood+cls_loss_id)
weight = weight.detach()
loss = (weight*kd_loss).mean() + ((1-weight)*cls_loss).mean()
out['loss'] = loss
return out
| 1,135 | 28.894737 | 72 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/rubi_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class RUBiCriterion(nn.Module):
def __init__(self, question_loss_weight=1.0):
super().__init__()
Logger()(f'RUBiCriterion, with question_loss_weight = ({question_loss_weight})')
self.question_loss_weight = question_loss_weight
self.fusion_loss = nn.CrossEntropyLoss()
self.question_loss = nn.CrossEntropyLoss()
def forward(self, net_out, batch):
out = {}
# logits = net_out['logits']
logits_q = net_out['logits_q']
logits_rubi = net_out['logits_all']
class_id = batch['class_id'].squeeze(1)
fusion_loss = self.fusion_loss(logits_rubi, class_id)
question_loss = self.question_loss(logits_q, class_id)
loss = fusion_loss + self.question_loss_weight * question_loss
out['loss'] = loss
out['loss_mm_q'] = fusion_loss
out['loss_q'] = question_loss
return out
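# Loss recap (as coded): loss = CE(logits_all, y) + question_loss_weight * CE(logits_q, y);
# with the default weight of 1.0 the fused and question-only terms contribute equally.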
| 1,058 | 32.09375 | 88 | py |
introd | introd-main/cfvqa/cfvqa/models/criterions/cfvqa_criterion.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class CFVQACriterion(nn.Module):
def __init__(self, question_loss_weight=1.0, vision_loss_weight=1.0, is_va=True):
super().__init__()
self.is_va = is_va
Logger()(f'CFVQACriterion, with question_loss_weight = ({question_loss_weight})')
if self.is_va:
Logger()(f'CFVQACriterion, with vision_loss_weight = ({vision_loss_weight})')
self.fusion_loss = nn.CrossEntropyLoss()
self.question_loss = nn.CrossEntropyLoss()
self.question_loss_weight = question_loss_weight
if self.is_va:
self.vision_loss = nn.CrossEntropyLoss()
self.vision_loss_weight = vision_loss_weight
def forward(self, net_out, batch):
out = {}
class_id = batch['class_id'].squeeze(1)
logits_rubi = net_out['logits_all']
fusion_loss = self.fusion_loss(logits_rubi, class_id)
logits_q = net_out['logits_q']
question_loss = self.question_loss(logits_q, class_id)
if self.is_va:
logits_v = net_out['logits_v']
vision_loss = self.vision_loss(logits_v, class_id)
nde = net_out['z_nde']
p_te = torch.nn.functional.softmax(logits_rubi, -1).clone().detach()
p_nde = torch.nn.functional.softmax(nde, -1)
kl_loss = - p_te*p_nde.log()
kl_loss = kl_loss.sum(1).mean()
loss = fusion_loss \
+ self.question_loss_weight * question_loss \
+ kl_loss
if self.is_va:
loss += self.vision_loss_weight * vision_loss
out['loss'] = loss
out['loss_mm_q'] = fusion_loss
out['loss_q'] = question_loss
if self.is_va:
out['loss_v'] = vision_loss
return out
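# Loss recap (as coded): CE(logits_all, y) + w_q * CE(logits_q, y) [+ w_v * CE(logits_v, y)]
# plus the cross-entropy between the detached total-effect distribution p_te and
# p_nde, which pushes the NDE branch toward the total-effect prediction.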
| 1,918 | 33.267857 | 89 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_rubi_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
def __init__(self, topk=[1,5]):
super().__init__()
self.topk = topk
def forward(self, cri_out, net_out, batch):
out = {}
class_id = batch['class_id'].data.cpu()
for key in ['', '_all', '_q']:
logits = net_out[f'logits{key}'].data.cpu()
acc_out = accuracy(logits, class_id, topk=self.topk)
for i, k in enumerate(self.topk):
out[f'accuracy{key}_top{k}'] = acc_out[i]
return out
class VQARUBiMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
def forward(self, cri_out, net_out, batch):
out = {}
if self.accuracy is not None:
out = self.accuracy(cri_out, net_out, batch)
# add answers and answer_ids keys to net_out
net_out = self.engine.model.network.process_answers(net_out)
batch_size = len(batch['index'])
for i in range(batch_size):
# Open Ended Accuracy (VQA-VQA2)
if self.open_ended:
for key in ['', '_all', '_q']:
pred_item = {
'question_id': batch['question_id'][i],
'answer': net_out[f'answers{key}'][i]
}
self.results[key].append(pred_item)
                # Disabled: reset_oe() no longer initializes `self.results_testdev`
                # or `self.logits` (its init block is commented out below), and
                # `logits` is undefined in this scope, so this branch would crash.
                # if self.dataset.split == 'test':
                #     pred_item = {
                #         'question_id': batch['question_id'][i],
                #         'answer': net_out['answers'][i]
                #     }
                #     if 'is_testdev' in batch and batch['is_testdev'][i]:
                #         self.results_testdev.append(pred_item)
                #     if self.logits['tensor'] is None:
                #         self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                #     self.logits['tensor'][self.idx] = logits[i]
                #     self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                #     self.idx += 1
# TDIUC metrics
if self.tdiuc:
gt_aid = batch['answer_id'][i]
gt_ans = batch['answer'][i]
gt_type = batch['question_type'][i]
self.gt_types.append(gt_type)
if gt_ans in self.ans_to_aid:
self.gt_aids.append(gt_aid)
else:
self.gt_aids.append(-1)
self.gt_aid_not_found += 1
for key in ['', '_all', '_q']:
qid = batch['question_id'][i]
pred_aid = net_out[f'answer_ids{key}'][i]
self.pred_aids[key].append(pred_aid)
self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
if gt_ans in self.ans_to_aid:
self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
if gt_aid == pred_aid:
self.res_by_type[key][gt_type+'_t'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_gt'].append(-1)
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
return out
def reset_oe(self):
self.results = dict()
self.dir_rslt = dict()
self.path_rslt = dict()
for key in ['', '_q', '_all']:
self.results[key] = []
self.dir_rslt[key] = os.path.join(
self.dir_exp,
f'results{key}',
self.dataset.split,
'epoch,{}'.format(self.engine.epoch))
os.system('mkdir -p '+self.dir_rslt[key])
self.path_rslt[key] = os.path.join(
self.dir_rslt[key],
'OpenEnded_mscoco_{}_model_results.json'.format(
self.dataset.get_subtype()))
if self.dataset.split == 'test':
pass
# self.results_testdev = []
# self.path_rslt_testdev = os.path.join(
# self.dir_rslt,
# 'OpenEnded_mscoco_{}_model_results.json'.format(
# self.dataset.get_subtype(testdev=True)))
# self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
# os.system('mkdir -p '+os.path.dirname(self.path_logits))
# self.logits = {}
# self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
# self.logits['qid_to_idx'] = {}
# self.logits['tensor'] = None
# self.idx = 0
# path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
# with open(path_aid_to_ans, 'w') as f:
# json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['', '_all', '_q']}
def compute_oe_accuracy(self):
logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
for key in ['', '_all', '_q']:
logs_name = (logs_name_prefix + key) or "logs"
with open(self.path_rslt[key], 'w') as f:
json.dump(self.results[key], f)
# if self.dataset.split == 'test':
# with open(self.path_rslt_testdev, 'w') as f:
# json.dump(self.results_testdev, f)
if 'test' not in self.dataset.split:
call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
+ '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
.format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
Logger()('`'+call_to_prog+'`')
os.system(call_to_prog)
def compute_tdiuc_metrics(self):
Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
for key in ['', '_all', '_q']:
Logger()(f'Computing TDIUC metrics for logits{key}')
accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
types = list(set(self.gt_types))
sum_acc = []
eps = 1e-10
Logger()('---------------------------------------')
Logger()('Not using per-answer normalization...')
for tp in types:
acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
sum_acc.append(acc+eps)
Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            sum_acc = []  # reset so the normalized MPT is not mixed with the unnormalized accuracies
for tp in types:
per_ans_stat = defaultdict(int)
for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
per_ans_stat[str(g)+'_gt']+=1
if g==p:
per_ans_stat[str(g)]+=1
unq_acc = 0
for unq_ans in set(self.res_by_type[key][tp+'_gt']):
acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
unq_acc +=acc_curr_ans
acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
sum_acc.append(acc+eps)
Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,030 | 43.384956 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqasimple_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class VQAAccuracy(nn.Module):
def __init__(self, topk=[1,5]):
super().__init__()
self.topk = topk
def forward(self, cri_out, net_out, batch):
out = {}
class_id = batch['class_id'].data.cpu()
for key in ['_all', '_vq', '_cfvqa', '_q']:
logits = net_out[f'logits{key}'].data.cpu()
acc_out = accuracy(logits, class_id, topk=self.topk)
for i, k in enumerate(self.topk):
out[f'accuracy{key}_top{k}'] = acc_out[i]
return out
class VQACFVQASimpleMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Options()['dataset.eval_split'] == 'test': # 0430
self.accuracy = None
else:
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
def forward(self, cri_out, net_out, batch):
out = {}
if self.accuracy is not None:
out = self.accuracy(cri_out, net_out, batch)
# add answers and answer_ids keys to net_out
net_out = self.engine.model.network.process_answers(net_out)
batch_size = len(batch['index'])
for i in range(batch_size):
# Open Ended Accuracy (VQA-VQA2)
if self.open_ended:
for key in ['_all', '_vq', '_cfvqa', '_q']:
pred_item = {
'question_id': batch['question_id'][i],
'answer': net_out[f'answers{key}'][i]
}
self.results[key].append(pred_item)
# if self.dataset.split == 'test': # 0430
# pred_item = {
# 'question_id': batch['question_id'][i],
# 'answer': net_out[f'answers{key}'][i]
# # 'answer': net_out[f'answers'][i]
# }
# # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
# # self.results_testdev.append(pred_item)
# if self.logits['tensor'] is None:
# self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
# self.logits['tensor'][self.idx] = logits[i]
# self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
# self.idx += 1
# TDIUC metrics
if self.tdiuc:
gt_aid = batch['answer_id'][i]
gt_ans = batch['answer'][i]
gt_type = batch['question_type'][i]
self.gt_types.append(gt_type)
if gt_ans in self.ans_to_aid:
self.gt_aids.append(gt_aid)
else:
self.gt_aids.append(-1)
self.gt_aid_not_found += 1
for key in ['_all', '_vq', '_cfvqa', '_q']:
qid = batch['question_id'][i]
pred_aid = net_out[f'answer_ids{key}'][i]
self.pred_aids[key].append(pred_aid)
self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
if gt_ans in self.ans_to_aid:
self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
if gt_aid == pred_aid:
self.res_by_type[key][gt_type+'_t'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_gt'].append(-1)
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
return out
def reset_oe(self):
self.results = dict()
self.dir_rslt = dict()
self.path_rslt = dict()
for key in ['_all', '_vq', '_cfvqa', '_q']:
self.results[key] = []
self.dir_rslt[key] = os.path.join(
self.dir_exp,
f'results{key}',
self.dataset.split,
'epoch,{}'.format(self.engine.epoch))
os.system('mkdir -p '+self.dir_rslt[key])
self.path_rslt[key] = os.path.join(
self.dir_rslt[key],
'OpenEnded_mscoco_{}_model_results.json'.format(
self.dataset.get_subtype()))
if self.dataset.split == 'test':
pass
# self.results_testdev = []
# self.path_rslt_testdev = os.path.join(
# self.dir_rslt,
# 'OpenEnded_mscoco_{}_model_results.json'.format(
# self.dataset.get_subtype(testdev=True)))
# self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
# os.system('mkdir -p '+os.path.dirname(self.path_logits))
# self.logits = {}
# self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
# self.logits['qid_to_idx'] = {}
# self.logits['tensor'] = None
# self.idx = 0
# path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
# with open(path_aid_to_ans, 'w') as f:
# json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['_all', '_vq', '_cfvqa', '_q']}
def compute_oe_accuracy(self):
logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
for key in ['_all', '_vq', '_cfvqa', '_q']:
logs_name = (logs_name_prefix + key) or "logs"
with open(self.path_rslt[key], 'w') as f:
json.dump(self.results[key], f)
# if self.dataset.split == 'test':
# with open(self.path_rslt_testdev, 'w') as f:
# json.dump(self.results_testdev, f)
if 'test' not in self.dataset.split:
call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
+ '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
.format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
Logger()('`'+call_to_prog+'`')
os.system(call_to_prog)
def compute_tdiuc_metrics(self):
Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
for key in ['_all', '_vq', '_cfvqa', '_q']:
Logger()(f'Computing TDIUC metrics for logits{key}')
accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
types = list(set(self.gt_types))
sum_acc = []
eps = 1e-10
Logger()('---------------------------------------')
Logger()('Not using per-answer normalization...')
for tp in types:
acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
sum_acc.append(acc+eps)
Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
Logger()('---------------------------------------')
            Logger()('Using per-answer normalization...')
            sum_acc = []  # reset so the normalized MPT is not mixed with the unnormalized accuracies
for tp in types:
per_ans_stat = defaultdict(int)
for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
per_ans_stat[str(g)+'_gt']+=1
if g==p:
per_ans_stat[str(g)]+=1
unq_acc = 0
for unq_ans in set(self.res_by_type[key][tp+'_gt']):
acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
unq_acc +=acc_curr_ans
acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
sum_acc.append(acc+eps)
Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,348 | 43.995652 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_rubiintrod_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class VQAAccuracy(nn.Module):
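    """Top-k accuracy computed from net_out['logits'] and net_out['logits_stu']."""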
def __init__(self, topk=[1,5]):
super().__init__()
self.topk = topk
def forward(self, cri_out, net_out, batch):
out = {}
class_id = batch['class_id'].data.cpu()
for key in ['', '_stu']:
logits = net_out[f'logits{key}'].data.cpu()
acc_out = accuracy(logits, class_id, topk=self.topk)
for i, k in enumerate(self.topk):
out[f'accuracy{key}_top{k}'] = acc_out[i]
return out
class VQARUBiIntroDMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
def forward(self, cri_out, net_out, batch):
out = {}
if self.accuracy is not None:
out = self.accuracy(cri_out, net_out, batch)
# add answers and answer_ids keys to net_out
net_out = self.engine.model.network.process_answers(net_out)
batch_size = len(batch['index'])
for i in range(batch_size):
# Open Ended Accuracy (VQA-VQA2)
if self.open_ended:
for key in ['', '_stu']:
pred_item = {
'question_id': batch['question_id'][i],
'answer': net_out[f'answers{key}'][i]
}
self.results[key].append(pred_item)
if self.dataset.split == 'test':
pred_item = {
'question_id': batch['question_id'][i],
                        'answer': net_out['answers'][i]
                    }
                    if 'is_testdev' in batch and batch['is_testdev'][i]:
                        self.results_testdev.append(pred_item)
                    # this test-time logit caching relies on self.logits, self.results_testdev
                    # and self.idx, which are only created by the block commented out in reset_oe
                    logits = net_out['logits'].data.cpu()
                    if self.logits['tensor'] is None:
                        self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
                    self.logits['tensor'][self.idx] = logits[i]
                    self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
                    self.idx += 1
# TDIUC metrics
if self.tdiuc:
gt_aid = batch['answer_id'][i]
gt_ans = batch['answer'][i]
gt_type = batch['question_type'][i]
self.gt_types.append(gt_type)
if gt_ans in self.ans_to_aid:
self.gt_aids.append(gt_aid)
else:
self.gt_aids.append(-1)
self.gt_aid_not_found += 1
for key in ['', '_stu']:
qid = batch['question_id'][i]
pred_aid = net_out[f'answer_ids{key}'][i]
self.pred_aids[key].append(pred_aid)
self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
if gt_ans in self.ans_to_aid:
self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
if gt_aid == pred_aid:
self.res_by_type[key][gt_type+'_t'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_gt'].append(-1)
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
return out
def reset_oe(self):
self.results = dict()
self.dir_rslt = dict()
self.path_rslt = dict()
        for key in ['', '_stu']:  # must match the keys used in forward and compute_oe_accuracy
self.results[key] = []
self.dir_rslt[key] = os.path.join(
self.dir_exp,
f'results{key}',
self.dataset.split,
'epoch,{}'.format(self.engine.epoch))
os.system('mkdir -p '+self.dir_rslt[key])
self.path_rslt[key] = os.path.join(
self.dir_rslt[key],
'OpenEnded_mscoco_{}_model_results.json'.format(
self.dataset.get_subtype()))
if self.dataset.split == 'test':
pass
# self.results_testdev = []
# self.path_rslt_testdev = os.path.join(
# self.dir_rslt,
# 'OpenEnded_mscoco_{}_model_results.json'.format(
# self.dataset.get_subtype(testdev=True)))
# self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
# os.system('mkdir -p '+os.path.dirname(self.path_logits))
# self.logits = {}
# self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
# self.logits['qid_to_idx'] = {}
# self.logits['tensor'] = None
# self.idx = 0
# path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
# with open(path_aid_to_ans, 'w') as f:
# json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['', '_stu']}
def compute_oe_accuracy(self):
logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
for key in ['', '_stu']:
logs_name = (logs_name_prefix + key) or "logs"
with open(self.path_rslt[key], 'w') as f:
json.dump(self.results[key], f)
# if self.dataset.split == 'test':
# with open(self.path_rslt_testdev, 'w') as f:
# json.dump(self.results_testdev, f)
if 'test' not in self.dataset.split:
call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
+ '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
.format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
Logger()('`'+call_to_prog+'`')
os.system(call_to_prog)
def compute_tdiuc_metrics(self):
Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
for key in ['', '_stu']:
Logger()(f'Computing TDIUC metrics for logits{key}')
accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
types = list(set(self.gt_types))
sum_acc = []
eps = 1e-10
Logger()('---------------------------------------')
Logger()('Not using per-answer normalization...')
for tp in types:
acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
sum_acc.append(acc+eps)
Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
Logger()('---------------------------------------')
Logger()('Using per-answer normalization...')
            sum_acc = []  # reset so the normalized MPT below averages only the normalized accuracies
            for tp in types:
per_ans_stat = defaultdict(int)
for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
per_ans_stat[str(g)+'_gt']+=1
if g==p:
per_ans_stat[str(g)]+=1
unq_acc = 0
for unq_ans in set(self.res_by_type[key][tp+'_gt']):
acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
unq_acc +=acc_curr_ans
acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
sum_acc.append(acc+eps)
Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,000 | 43.252212 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqa_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class VQAAccuracy(nn.Module):
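    """Top-k accuracy for each CF-VQA head: net_out['logits_all'], '_vq', '_cfvqa', '_q' and '_v'."""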
def __init__(self, topk=[1,5]):
super().__init__()
self.topk = topk
def forward(self, cri_out, net_out, batch):
out = {}
class_id = batch['class_id'].data.cpu()
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
logits = net_out[f'logits{key}'].data.cpu()
acc_out = accuracy(logits, class_id, topk=self.topk)
for i, k in enumerate(self.topk):
out[f'accuracy{key}_top{k}'] = acc_out[i]
return out
class VQACFVQAMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Options()['dataset.eval_split'] == 'test': # 0430
self.accuracy = None
else:
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
def forward(self, cri_out, net_out, batch):
out = {}
if self.accuracy is not None:
out = self.accuracy(cri_out, net_out, batch)
# add answers and answer_ids keys to net_out
net_out = self.engine.model.network.process_answers(net_out)
batch_size = len(batch['index'])
for i in range(batch_size):
# Open Ended Accuracy (VQA-VQA2)
if self.open_ended:
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
pred_item = {
'question_id': batch['question_id'][i],
'answer': net_out[f'answers{key}'][i]
}
self.results[key].append(pred_item)
# if self.dataset.split == 'test': # 0430
# pred_item = {
# 'question_id': batch['question_id'][i],
# 'answer': net_out[f'answers{key}'][i]
# # 'answer': net_out[f'answers'][i]
# }
# # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
# # self.results_testdev.append(pred_item)
# if self.logits['tensor'] is None:
# self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
# self.logits['tensor'][self.idx] = logits[i]
# self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
# self.idx += 1
# TDIUC metrics
if self.tdiuc:
gt_aid = batch['answer_id'][i]
gt_ans = batch['answer'][i]
gt_type = batch['question_type'][i]
self.gt_types.append(gt_type)
if gt_ans in self.ans_to_aid:
self.gt_aids.append(gt_aid)
else:
self.gt_aids.append(-1)
self.gt_aid_not_found += 1
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
qid = batch['question_id'][i]
pred_aid = net_out[f'answer_ids{key}'][i]
self.pred_aids[key].append(pred_aid)
self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
if gt_ans in self.ans_to_aid:
self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
if gt_aid == pred_aid:
self.res_by_type[key][gt_type+'_t'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_gt'].append(-1)
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
return out
def reset_oe(self):
self.results = dict()
self.dir_rslt = dict()
self.path_rslt = dict()
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
self.results[key] = []
self.dir_rslt[key] = os.path.join(
self.dir_exp,
f'results{key}',
self.dataset.split,
'epoch,{}'.format(self.engine.epoch))
os.system('mkdir -p '+self.dir_rslt[key])
self.path_rslt[key] = os.path.join(
self.dir_rslt[key],
'OpenEnded_mscoco_{}_model_results.json'.format(
self.dataset.get_subtype()))
if self.dataset.split == 'test':
pass
# self.results_testdev = []
# self.path_rslt_testdev = os.path.join(
# self.dir_rslt,
# 'OpenEnded_mscoco_{}_model_results.json'.format(
# self.dataset.get_subtype(testdev=True)))
# self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
# os.system('mkdir -p '+os.path.dirname(self.path_logits))
# self.logits = {}
# self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
# self.logits['qid_to_idx'] = {}
# self.logits['tensor'] = None
# self.idx = 0
# path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
# with open(path_aid_to_ans, 'w') as f:
# json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['_all', '_vq', '_cfvqa', '_q', '_v']}
def compute_oe_accuracy(self):
logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
logs_name = (logs_name_prefix + key) or "logs"
with open(self.path_rslt[key], 'w') as f:
json.dump(self.results[key], f)
# if self.dataset.split == 'test':
# with open(self.path_rslt_testdev, 'w') as f:
# json.dump(self.results_testdev, f)
if 'test' not in self.dataset.split:
call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
+ '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
.format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
Logger()('`'+call_to_prog+'`')
os.system(call_to_prog)
def compute_tdiuc_metrics(self):
Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
for key in ['_all', '_vq', '_cfvqa', '_q', '_v']:
Logger()(f'Computing TDIUC metrics for logits{key}')
accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
types = list(set(self.gt_types))
sum_acc = []
eps = 1e-10
Logger()('---------------------------------------')
Logger()('Not using per-answer normalization...')
for tp in types:
acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
sum_acc.append(acc+eps)
Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
Logger()('---------------------------------------')
Logger()('Using per-answer normalization...')
            sum_acc = []  # reset so the normalized MPT below averages only the normalized accuracies
            for tp in types:
per_ans_stat = defaultdict(int)
for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
per_ans_stat[str(g)+'_gt']+=1
if g==p:
per_ans_stat[str(g)]+=1
unq_acc = 0
for unq_ans in set(self.res_by_type[key][tp+'_gt']):
acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
unq_acc +=acc_curr_ans
acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
sum_acc.append(acc+eps)
Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,384 | 44.152174 | 143 | py |
introd | introd-main/cfvqa/cfvqa/models/metrics/vqa_cfvqaintrod_metrics.py | import torch
import torch.nn as nn
import os
import json
from scipy import stats
import numpy as np
from collections import defaultdict
from bootstrap.models.metrics.accuracy import accuracy
from block.models.metrics.vqa_accuracies import VQAAccuracies
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
class VQAAccuracy(nn.Module):
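    """Top-k accuracy for the '_all', '_cfvqa' and '_stu' logits in net_out."""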
def __init__(self, topk=[1,5]):
super().__init__()
self.topk = topk
def forward(self, cri_out, net_out, batch):
out = {}
class_id = batch['class_id'].data.cpu()
for key in ['_all', '_cfvqa', '_stu']:
logits = net_out[f'logits{key}'].data.cpu()
acc_out = accuracy(logits, class_id, topk=self.topk)
for i, k in enumerate(self.topk):
out[f'accuracy{key}_top{k}'] = acc_out[i]
return out
class VQACFVQAIntroDMetrics(VQAAccuracies):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Options()['dataset.eval_split'] == 'test': # 0430
self.accuracy = None
else:
self.accuracy = VQAAccuracy()
self.rm_dir_rslt = 1 if Options()['dataset.train_split'] is not None else 0
def forward(self, cri_out, net_out, batch):
out = {}
if self.accuracy is not None:
out = self.accuracy(cri_out, net_out, batch)
# add answers and answer_ids keys to net_out
net_out = self.engine.model.network.process_answers(net_out)
batch_size = len(batch['index'])
for i in range(batch_size):
# Open Ended Accuracy (VQA-VQA2)
if self.open_ended:
for key in ['_all', '_cfvqa', '_stu']:
pred_item = {
'question_id': batch['question_id'][i],
'answer': net_out[f'answers{key}'][i]
}
self.results[key].append(pred_item)
# if self.dataset.split == 'test': # 0430
# pred_item = {
# 'question_id': batch['question_id'][i],
# 'answer': net_out[f'answers{key}'][i]
# # 'answer': net_out[f'answers'][i]
# }
# # if 'is_testdev' in batch and batch['is_testdev'][i]: # 0430
# # self.results_testdev.append(pred_item)
# if self.logits['tensor'] is None:
# self.logits['tensor'] = torch.FloatTensor(len(self.dataset), logits.size(1))
# self.logits['tensor'][self.idx] = logits[i]
# self.logits['qid_to_idx'][batch['question_id'][i]] = self.idx
# self.idx += 1
# TDIUC metrics
if self.tdiuc:
gt_aid = batch['answer_id'][i]
gt_ans = batch['answer'][i]
gt_type = batch['question_type'][i]
self.gt_types.append(gt_type)
if gt_ans in self.ans_to_aid:
self.gt_aids.append(gt_aid)
else:
self.gt_aids.append(-1)
self.gt_aid_not_found += 1
for key in ['_all', '_cfvqa', '_stu']:
qid = batch['question_id'][i]
pred_aid = net_out[f'answer_ids{key}'][i]
self.pred_aids[key].append(pred_aid)
self.res_by_type[key][gt_type+'_pred'].append(pred_aid)
if gt_ans in self.ans_to_aid:
self.res_by_type[key][gt_type+'_gt'].append(gt_aid)
if gt_aid == pred_aid:
self.res_by_type[key][gt_type+'_t'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
else:
self.res_by_type[key][gt_type+'_gt'].append(-1)
self.res_by_type[key][gt_type+'_f'].append(pred_aid)
return out
def reset_oe(self):
self.results = dict()
self.dir_rslt = dict()
self.path_rslt = dict()
for key in ['_all', '_cfvqa', '_stu']:
self.results[key] = []
self.dir_rslt[key] = os.path.join(
self.dir_exp,
f'results{key}',
self.dataset.split,
'epoch,{}'.format(self.engine.epoch))
os.system('mkdir -p '+self.dir_rslt[key])
self.path_rslt[key] = os.path.join(
self.dir_rslt[key],
'OpenEnded_mscoco_{}_model_results.json'.format(
self.dataset.get_subtype()))
if self.dataset.split == 'test':
pass
# self.results_testdev = []
# self.path_rslt_testdev = os.path.join(
# self.dir_rslt,
# 'OpenEnded_mscoco_{}_model_results.json'.format(
# self.dataset.get_subtype(testdev=True)))
# self.path_logits = os.path.join(self.dir_rslt, 'logits.pth')
# os.system('mkdir -p '+os.path.dirname(self.path_logits))
# self.logits = {}
# self.logits['aid_to_ans'] = self.engine.model.network.aid_to_ans
# self.logits['qid_to_idx'] = {}
# self.logits['tensor'] = None
# self.idx = 0
# path_aid_to_ans = os.path.join(self.dir_rslt, 'aid_to_ans.json')
# with open(path_aid_to_ans, 'w') as f:
# json.dump(self.engine.model.network.aid_to_ans, f)
def reset_tdiuc(self):
self.pred_aids = defaultdict(list)
self.gt_aids = []
self.gt_types = []
self.gt_aid_not_found = 0
self.res_by_type = {key: defaultdict(list) for key in ['_all', '_cfvqa', '_stu']}
def compute_oe_accuracy(self):
logs_name_prefix = Options()['misc'].get('logs_name', '') or ''
for key in ['_all', '_cfvqa', '_stu']:
logs_name = (logs_name_prefix + key) or "logs"
with open(self.path_rslt[key], 'w') as f:
json.dump(self.results[key], f)
# if self.dataset.split == 'test':
# with open(self.path_rslt_testdev, 'w') as f:
# json.dump(self.results_testdev, f)
if 'test' not in self.dataset.split:
call_to_prog = 'python -m block.models.metrics.compute_oe_accuracy '\
+ '--dir_vqa {} --dir_exp {} --dir_rslt {} --epoch {} --split {} --logs_name {} --rm {} &'\
.format(self.dir_vqa, self.dir_exp, self.dir_rslt[key], self.engine.epoch, self.dataset.split, logs_name, self.rm_dir_rslt)
Logger()('`'+call_to_prog+'`')
os.system(call_to_prog)
def compute_tdiuc_metrics(self):
Logger()('{} of validation answers were not found in ans_to_aid'.format(self.gt_aid_not_found))
for key in ['_all', '_cfvqa', '_stu']:
Logger()(f'Computing TDIUC metrics for logits{key}')
accuracy = float(100*np.mean(np.array(self.pred_aids[key])==np.array(self.gt_aids)))
Logger()('Overall Traditional Accuracy is {:.2f}'.format(accuracy))
Logger().log_value('{}_epoch.tdiuc.accuracy{}'.format(self.mode, key), accuracy, should_print=False)
types = list(set(self.gt_types))
sum_acc = []
eps = 1e-10
Logger()('---------------------------------------')
Logger()('Not using per-answer normalization...')
for tp in types:
acc = 100*(len(self.res_by_type[key][tp+'_t'])/len(self.res_by_type[key][tp+'_t']+self.res_by_type[key][tp+'_f']))
sum_acc.append(acc+eps)
Logger()(f"Accuracy {key} for class '{tp}' is {acc:.2f}")
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy {} is {:.2f}'.format(key, acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h'.format(self.mode, key), acc_mpt_h, should_print=False)
Logger()('---------------------------------------')
Logger()('Using per-answer normalization...')
            sum_acc = []  # reset so the normalized MPT below averages only the normalized accuracies
            for tp in types:
per_ans_stat = defaultdict(int)
for g,p in zip(self.res_by_type[key][tp+'_gt'],self.res_by_type[key][tp+'_pred']):
per_ans_stat[str(g)+'_gt']+=1
if g==p:
per_ans_stat[str(g)]+=1
unq_acc = 0
for unq_ans in set(self.res_by_type[key][tp+'_gt']):
acc_curr_ans = per_ans_stat[str(unq_ans)]/per_ans_stat[str(unq_ans)+'_gt']
unq_acc +=acc_curr_ans
acc = 100*unq_acc/len(set(self.res_by_type[key][tp+'_gt']))
sum_acc.append(acc+eps)
Logger()("Accuracy {} for class '{}' is {:.2f}".format(key, tp, acc))
Logger().log_value('{}_epoch.tdiuc{}.perQuestionType_norm.{}'.format(self.mode, key, tp), acc, should_print=False)
acc_mpt_a = float(np.mean(np.array(sum_acc)))
Logger()('Arithmetic MPT Accuracy is {:.2f}'.format(acc_mpt_a))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_a_norm'.format(self.mode, key), acc_mpt_a, should_print=False)
acc_mpt_h = float(stats.hmean(sum_acc))
Logger()('Harmonic MPT Accuracy is {:.2f}'.format(acc_mpt_h))
Logger().log_value('{}_epoch.tdiuc{}.acc_mpt_h_norm'.format(self.mode, key), acc_mpt_h, should_print=False)
| 10,313 | 43.843478 | 143 | py |
introd | introd-main/cfvqa/cfvqa/datasets/vqacp.py | import os
import csv
import copy
import json
import torch
import numpy as np
from tqdm import tqdm
from os import path as osp
from bootstrap.lib.logger import Logger
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import h5py
class VQACP(AbstractVQA):
def __init__(self,
dir_data='data/vqa/vqacp2',
split='train',
batch_size=80,
nb_threads=4,
pin_memory=False,
shuffle=False,
nans=1000,
minwcount=10,
nlp='mcb',
proc_split='train',
samplingans=False,
dir_rcnn='data/coco/extract_rcnn',
dir_cnn=None,
dir_vgg16=None,
has_testdevset=False,
):
super(VQACP, self).__init__(
dir_data=dir_data,
split=split,
batch_size=batch_size,
nb_threads=nb_threads,
pin_memory=pin_memory,
shuffle=shuffle,
nans=nans,
minwcount=minwcount,
nlp=nlp,
proc_split=proc_split,
samplingans=samplingans,
has_valset=True,
has_testset=False,
has_testdevset=has_testdevset,
has_testset_anno=False,
has_answers_occurence=True,
do_tokenize_answers=False)
self.dir_rcnn = dir_rcnn
self.dir_cnn = dir_cnn
self.dir_vgg16 = dir_vgg16
self.load_image_features()
self.load_original_annotation = False
def add_rcnn_to_item(self, item):
path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
item_rcnn = torch.load(path_rcnn)
item['visual'] = item_rcnn['pooled_feat']
item['coord'] = item_rcnn['rois']
item['norm_coord'] = item_rcnn['norm_rois']
item['nb_regions'] = item['visual'].size(0)
return item
def load_image_features(self):
if self.dir_cnn:
filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
Logger()(f"Opening file {filename_train}, {filename_val}")
self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
# load txt
            with open(os.path.join(self.dir_cnn, 'trainset.txt'), 'r') as f:
self.image_names_to_index_train = {}
for i, line in enumerate(f):
self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'), 'r') as f:
self.image_names_to_index_val = {}
for i, line in enumerate(f):
self.image_names_to_index_val[line.strip()] = i
elif self.dir_vgg16:
# list filenames
self.filenames_train = os.listdir(os.path.join(self.dir_vgg16, 'train'))
self.filenames_val = os.listdir(os.path.join(self.dir_vgg16, 'val'))
def add_vgg_to_item(self, item):
image_name = item['image_name']
filename = image_name + '.pth'
if filename in self.filenames_train:
path = os.path.join(self.dir_vgg16, 'train', filename)
elif filename in self.filenames_val:
path = os.path.join(self.dir_vgg16, 'val', filename)
visual = torch.load(path)
visual = visual.permute(1, 2, 0).view(14*14, 512)
item['visual'] = visual
return item
def add_cnn_to_item(self, item):
image_name = item['image_name']
if image_name in self.image_names_to_index_train:
index = self.image_names_to_index_train[image_name]
image = torch.tensor(self.image_features_train['att'][index])
elif image_name in self.image_names_to_index_val:
index = self.image_names_to_index_val[image_name]
image = torch.tensor(self.image_features_val['att'][index])
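        # grid features, presumably stored as (C=2048, H=14, W=14) -> 196 regions of 2048-d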
image = image.permute(1, 2, 0).view(196, 2048)
item['visual'] = image
return item
def __getitem__(self, index):
item = {}
item['index'] = index
# Process Question (word token)
question = self.dataset['questions'][index]
if self.load_original_annotation:
item['original_question'] = question
item['question_id'] = question['question_id']
item['question'] = torch.LongTensor(question['question_wids'])
item['lengths'] = torch.LongTensor([len(question['question_wids'])])
item['image_name'] = question['image_name']
        # Process Object, Attribute and Relational features
if self.dir_rcnn:
item = self.add_rcnn_to_item(item)
elif self.dir_cnn:
item = self.add_cnn_to_item(item)
elif self.dir_vgg16:
item = self.add_vgg_to_item(item)
# Process Answer if exists
if 'annotations' in self.dataset:
annotation = self.dataset['annotations'][index]
if self.load_original_annotation:
item['original_annotation'] = annotation
if 'train' in self.split and self.samplingans:
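                # sample one answer id, with probability proportional to how many
                # annotators gave that answer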
proba = annotation['answers_count']
proba = proba / np.sum(proba)
item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
else:
item['answer_id'] = annotation['answer_id']
item['class_id'] = torch.LongTensor([item['answer_id']])
item['answer'] = annotation['answer']
item['question_type'] = annotation['question_type']
return item
def download(self):
dir_ann = osp.join(self.dir_raw, 'annotations')
os.system('mkdir -p '+dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_train_questions.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_test_questions.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_train_annotations.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v1_test_annotations.json -P' + dir_ann)
train_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v1_train_questions.json")))}
val_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v1_test_questions.json")))}
train_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v1_train_annotations.json")))}
val_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v1_test_annotations.json")))}
train_q['info'] = {}
train_q['data_type'] = 'mscoco'
train_q['data_subtype'] = "train2014cp"
train_q['task_type'] = "Open-Ended"
train_q['license'] = {}
val_q['info'] = {}
val_q['data_type'] = 'mscoco'
val_q['data_subtype'] = "val2014cp"
val_q['task_type'] = "Open-Ended"
val_q['license'] = {}
for k in ["info", 'data_type','data_subtype', 'license']:
train_ann[k] = train_q[k]
val_ann[k] = val_q[k]
with open(osp.join(dir_ann, "OpenEnded_mscoco_train2014_questions.json"), 'w') as F:
F.write(json.dumps(train_q))
with open(osp.join(dir_ann, "OpenEnded_mscoco_val2014_questions.json"), 'w') as F:
F.write(json.dumps(val_q))
with open(osp.join(dir_ann, "mscoco_train2014_annotations.json"), 'w') as F:
F.write(json.dumps(train_ann))
with open(osp.join(dir_ann, "mscoco_val2014_annotations.json"), 'w') as F:
F.write(json.dumps(val_ann))
def add_image_names(self, dataset):
for q in dataset['questions']:
q['image_name'] = 'COCO_%s_%012d.jpg'%(q['coco_split'],q['image_id'])
return dataset
| 7,952 | 41.079365 | 111 | py |
introd | introd-main/cfvqa/cfvqa/datasets/vqacp2.py | import os
import csv
import copy
import json
import torch
import numpy as np
from tqdm import tqdm
from os import path as osp
from bootstrap.lib.logger import Logger
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import h5py
class VQACP2(AbstractVQA):
def __init__(self,
dir_data='data/vqa/vqacp2',
split='train',
batch_size=80,
nb_threads=4,
pin_memory=False,
shuffle=False,
nans=1000,
minwcount=10,
nlp='mcb',
proc_split='train',
samplingans=False,
dir_rcnn='data/coco/extract_rcnn',
dir_cnn=None,
dir_vgg16=None,
has_testdevset=False,
):
super(VQACP2, self).__init__(
dir_data=dir_data,
split=split,
batch_size=batch_size,
nb_threads=nb_threads,
pin_memory=pin_memory,
shuffle=shuffle,
nans=nans,
minwcount=minwcount,
nlp=nlp,
proc_split=proc_split,
samplingans=samplingans,
has_valset=True,
has_testset=False,
has_testdevset=has_testdevset,
has_testset_anno=False,
has_answers_occurence=True,
do_tokenize_answers=False)
self.dir_rcnn = dir_rcnn
self.dir_cnn = dir_cnn
self.dir_vgg16 = dir_vgg16
self.load_image_features()
self.load_original_annotation = False
def add_rcnn_to_item(self, item):
path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
item_rcnn = torch.load(path_rcnn)
item['visual'] = item_rcnn['pooled_feat']
item['coord'] = item_rcnn['rois']
item['norm_coord'] = item_rcnn['norm_rois']
item['nb_regions'] = item['visual'].size(0)
return item
def load_image_features(self):
if self.dir_cnn:
filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
Logger()(f"Opening file {filename_train}, {filename_val}")
self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
# load txt
            with open(os.path.join(self.dir_cnn, 'trainset.txt'), 'r') as f:
self.image_names_to_index_train = {}
for i, line in enumerate(f):
self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'), 'r') as f:
self.image_names_to_index_val = {}
for i, line in enumerate(f):
self.image_names_to_index_val[line.strip()] = i
elif self.dir_vgg16:
# list filenames
self.filenames_train = os.listdir(os.path.join(self.dir_vgg16, 'train'))
self.filenames_val = os.listdir(os.path.join(self.dir_vgg16, 'val'))
def add_vgg_to_item(self, item):
image_name = item['image_name']
filename = image_name + '.pth'
if filename in self.filenames_train:
path = os.path.join(self.dir_vgg16, 'train', filename)
elif filename in self.filenames_val:
path = os.path.join(self.dir_vgg16, 'val', filename)
visual = torch.load(path)
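        # VGG16 conv map, presumably stored as (C=512, H=14, W=14) -> 196 regions of 512-d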
visual = visual.permute(1, 2, 0).view(14*14, 512)
item['visual'] = visual
return item
def add_cnn_to_item(self, item):
image_name = item['image_name']
if image_name in self.image_names_to_index_train:
index = self.image_names_to_index_train[image_name]
image = torch.tensor(self.image_features_train['att'][index])
elif image_name in self.image_names_to_index_val:
index = self.image_names_to_index_val[image_name]
image = torch.tensor(self.image_features_val['att'][index])
image = image.permute(1, 2, 0).view(196, 2048)
item['visual'] = image
return item
def __getitem__(self, index):
item = {}
item['index'] = index
# Process Question (word token)
question = self.dataset['questions'][index]
if self.load_original_annotation:
item['original_question'] = question
item['question_id'] = question['question_id']
item['question'] = torch.LongTensor(question['question_wids'])
item['lengths'] = torch.LongTensor([len(question['question_wids'])])
item['image_name'] = question['image_name']
        # Process Object, Attribute and Relational features
if self.dir_rcnn:
item = self.add_rcnn_to_item(item)
elif self.dir_cnn:
item = self.add_cnn_to_item(item)
elif self.dir_vgg16:
item = self.add_vgg_to_item(item)
# Process Answer if exists
if 'annotations' in self.dataset:
annotation = self.dataset['annotations'][index]
if self.load_original_annotation:
item['original_annotation'] = annotation
if 'train' in self.split and self.samplingans:
proba = annotation['answers_count']
proba = proba / np.sum(proba)
item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
else:
item['answer_id'] = annotation['answer_id']
item['class_id'] = torch.LongTensor([item['answer_id']])
item['answer'] = annotation['answer']
item['question_type'] = annotation['question_type']
return item
def download(self):
dir_ann = osp.join(self.dir_raw, 'annotations')
os.system('mkdir -p '+dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_questions.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_questions.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_train_annotations.json -P' + dir_ann)
os.system('wget https://computing.ece.vt.edu/~aish/vqacp/vqacp_v2_test_annotations.json -P' + dir_ann)
train_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v2_train_questions.json")))}
val_q = {"questions":json.load(open(osp.join(dir_ann, "vqacp_v2_test_questions.json")))}
train_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v2_train_annotations.json")))}
val_ann = {"annotations":json.load(open(osp.join(dir_ann, "vqacp_v2_test_annotations.json")))}
train_q['info'] = {}
train_q['data_type'] = 'mscoco'
train_q['data_subtype'] = "train2014cp"
train_q['task_type'] = "Open-Ended"
train_q['license'] = {}
val_q['info'] = {}
val_q['data_type'] = 'mscoco'
val_q['data_subtype'] = "val2014cp"
val_q['task_type'] = "Open-Ended"
val_q['license'] = {}
for k in ["info", 'data_type','data_subtype', 'license']:
train_ann[k] = train_q[k]
val_ann[k] = val_q[k]
with open(osp.join(dir_ann, "OpenEnded_mscoco_train2014_questions.json"), 'w') as F:
F.write(json.dumps(train_q))
with open(osp.join(dir_ann, "OpenEnded_mscoco_val2014_questions.json"), 'w') as F:
F.write(json.dumps(val_q))
with open(osp.join(dir_ann, "mscoco_train2014_annotations.json"), 'w') as F:
F.write(json.dumps(train_ann))
with open(osp.join(dir_ann, "mscoco_val2014_annotations.json"), 'w') as F:
F.write(json.dumps(val_ann))
def add_image_names(self, dataset):
for q in dataset['questions']:
q['image_name'] = 'COCO_%s_%012d.jpg'%(q['coco_split'],q['image_id'])
return dataset
| 7,954 | 41.089947 | 111 | py |
introd | introd-main/cfvqa/cfvqa/datasets/vqa2.py | import os
import csv
import copy
import json
import torch
import numpy as np
from os import path as osp
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import tqdm
import h5py
class VQA2(AbstractVQA):
def __init__(self,
dir_data='data/vqa2',
split='train',
batch_size=10,
nb_threads=4,
pin_memory=False,
shuffle=False,
nans=1000,
minwcount=10,
nlp='mcb',
proc_split='train',
samplingans=False,
dir_rcnn='data/coco/extract_rcnn',
adversarial=False,
dir_cnn=None
):
super(VQA2, self).__init__(
dir_data=dir_data,
split=split,
batch_size=batch_size,
nb_threads=nb_threads,
pin_memory=pin_memory,
shuffle=shuffle,
nans=nans,
minwcount=minwcount,
nlp=nlp,
proc_split=proc_split,
samplingans=samplingans,
has_valset=True,
has_testset=True,
has_answers_occurence=True,
do_tokenize_answers=False)
self.dir_rcnn = dir_rcnn
self.dir_cnn = dir_cnn
self.load_image_features()
        # to activate manually in visualization context (notebook)
self.load_original_annotation = False
def add_rcnn_to_item(self, item):
path_rcnn = os.path.join(self.dir_rcnn, '{}.pth'.format(item['image_name']))
item_rcnn = torch.load(path_rcnn)
item['visual'] = item_rcnn['pooled_feat']
item['coord'] = item_rcnn['rois']
item['norm_coord'] = item_rcnn.get('norm_rois', None)
item['nb_regions'] = item['visual'].size(0)
return item
def add_cnn_to_item(self, item):
image_name = item['image_name']
if image_name in self.image_names_to_index_train:
index = self.image_names_to_index_train[image_name]
image = torch.tensor(self.image_features_train['att'][index])
elif image_name in self.image_names_to_index_val:
index = self.image_names_to_index_val[image_name]
image = torch.tensor(self.image_features_val['att'][index])
image = image.permute(1, 2, 0).view(196, 2048)
item['visual'] = image
return item
def load_image_features(self):
if self.dir_cnn:
filename_train = os.path.join(self.dir_cnn, 'trainset.hdf5')
filename_val = os.path.join(self.dir_cnn, 'valset.hdf5')
Logger()(f"Opening file {filename_train}, {filename_val}")
self.image_features_train = h5py.File(filename_train, 'r', swmr=True)
self.image_features_val = h5py.File(filename_val, 'r', swmr=True)
# load txt
            with open(os.path.join(self.dir_cnn, 'trainset.txt'), 'r') as f:
self.image_names_to_index_train = {}
for i, line in enumerate(f):
self.image_names_to_index_train[line.strip()] = i
            with open(os.path.join(self.dir_cnn, 'valset.txt'), 'r') as f:
self.image_names_to_index_val = {}
for i, line in enumerate(f):
self.image_names_to_index_val[line.strip()] = i
def __getitem__(self, index):
item = {}
item['index'] = index
# Process Question (word token)
question = self.dataset['questions'][index]
if self.load_original_annotation:
item['original_question'] = question
item['question_id'] = question['question_id']
item['question'] = torch.tensor(question['question_wids'], dtype=torch.long)
item['lengths'] = torch.tensor([len(question['question_wids'])], dtype=torch.long)
item['image_name'] = question['image_name']
        # Process Object, Attribute and Relational features
if self.dir_rcnn:
item = self.add_rcnn_to_item(item)
elif self.dir_cnn:
item = self.add_cnn_to_item(item)
# Process Answer if exists
if 'annotations' in self.dataset:
annotation = self.dataset['annotations'][index]
if self.load_original_annotation:
item['original_annotation'] = annotation
if 'train' in self.split and self.samplingans:
proba = annotation['answers_count']
proba = proba / np.sum(proba)
item['answer_id'] = int(np.random.choice(annotation['answers_id'], p=proba))
else:
item['answer_id'] = annotation['answer_id']
item['class_id'] = torch.tensor([item['answer_id']], dtype=torch.long)
item['answer'] = annotation['answer']
item['question_type'] = annotation['question_type']
else:
if item['question_id'] in self.is_qid_testdev:
item['is_testdev'] = True
else:
item['is_testdev'] = False
# if Options()['model.network.name'] == 'xmn_net':
# num_feat = 36
# relation_mask = np.zeros((num_feat, num_feat))
# boxes = item['coord']
# for i in range(num_feat):
# for j in range(i+1, num_feat):
# # if there is no overlap between two bounding box
# if boxes[0,i]>boxes[2,j] or boxes[0,j]>boxes[2,i] or boxes[1,i]>boxes[3,j] or boxes[1,j]>boxes[3,i]:
# pass
# else:
# relation_mask[i,j] = relation_mask[j,i] = 1
# relation_mask = torch.from_numpy(relation_mask).byte()
# item['relation_mask'] = relation_mask
return item
def download(self):
dir_zip = osp.join(self.dir_raw, 'zip')
os.system('mkdir -p '+dir_zip)
dir_ann = osp.join(self.dir_raw, 'annotations')
os.system('mkdir -p '+dir_ann)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Train_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Val_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Questions_Test_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Annotations_Train_mscoco.zip -P '+dir_zip)
os.system('wget http://visualqa.org/data/mscoco/vqa/v2_Annotations_Val_mscoco.zip -P '+dir_zip)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Train_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Val_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Questions_Test_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Annotations_Train_mscoco.zip')+' -d '+dir_ann)
os.system('unzip '+osp.join(dir_zip, 'v2_Annotations_Val_mscoco.zip')+' -d '+dir_ann)
os.system('mv '+osp.join(dir_ann, 'v2_mscoco_train2014_annotations.json')+' '
+osp.join(dir_ann, 'mscoco_train2014_annotations.json'))
os.system('mv '+osp.join(dir_ann, 'v2_mscoco_val2014_annotations.json')+' '
+osp.join(dir_ann, 'mscoco_val2014_annotations.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_train2014_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_train2014_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_val2014_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_val2014_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_test2015_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_test2015_questions.json'))
os.system('mv '+osp.join(dir_ann, 'v2_OpenEnded_mscoco_test-dev2015_questions.json')+' '
+osp.join(dir_ann, 'OpenEnded_mscoco_test-dev2015_questions.json'))
| 8,260 | 44.640884 | 122 | py |
introd | introd-main/cfvqa/cfvqa/engines/engine.py | import os
import math
import time
import torch
import datetime
import threading
import numpy as np
from bootstrap.lib import utils
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
class Engine(object):
"""Contains training and evaluation procedures
"""
def __init__(self):
self.hooks = {}
self.epoch = 0
self.dataset = None
self.model = None
self.optimizer = None
self.view = None
self.best_out = {}
# generate_view will be executed at the end of each
# training and evaluation epoch
self.register_hook('train_on_flush', self.generate_view)
self.register_hook('eval_on_flush', self.generate_view)
def generate_view(self):
""" Generate a view.html via an asynchronous call to `self.view.generate()`
"""
if self.view is not None:
threading.Thread(target=self.view.generate).start()
# path_opts = os.path.join(Options()['exp']['dir'], 'options.yaml')
# os.system('python -m bootstrap.views.view --path_opts {}'.format(path_opts))
def load_state_dict(self, state):
"""
"""
self.epoch = state['epoch']
self.best_out = state['best_out']
def state_dict(self):
"""
"""
state = {}
state['epoch'] = self.epoch
state['best_out'] = self.best_out
return state
def hook(self, name):
""" Run all the callback functions that have been registered
for a hook.
Args:
name: the name of the hook
"""
if name in self.hooks:
for func in self.hooks[name]:
func()
def register_hook(self, name, func):
""" Register a callback function to be triggered when the hook
is called.
Args:
name: the name of the hook
func: the callback function (no argument)
Example usage:
.. code-block:: python
def func():
print('hooked!')
engine.register_hook('train_on_start_batch', func)
"""
if name not in self.hooks:
self.hooks[name] = []
self.hooks[name].append(func)
def resume(self, map_location=None):
""" Resume a checkpoint using the `bootstrap.lib.options.Options`
"""
Logger()('Loading {} checkpoint'.format(Options()['exp']['resume']))
self.load(Options()['exp']['dir'],
Options()['exp']['resume'],
self.model, self.optimizer,
map_location=map_location)
# self.epoch += 1
if self.epoch > 0:
self.epoch += 1
def eval(self):
""" Launch evaluation procedures
"""
Logger()('Launching evaluation procedures')
if Options()['dataset']['eval_split']:
            # evaluate with self.epoch-1 so the logged epoch matches the resumed one,
            # or equals -1 when training was not resumed
self.eval_epoch(self.model, self.dataset['eval'], self.epoch-1, logs_json=True)
Logger()('Ending evaluation procedures')
def train(self):
""" Launch training procedures
List of the hooks:
- train_on_start: before the full training procedure
"""
Logger()('Launching training procedures')
self.hook('train_on_start')
while self.epoch < Options()['engine']['nb_epochs']:
self.train_epoch(self.model, self.dataset['train'], self.optimizer, self.epoch)
if Options()['dataset']['eval_split']:
out = self.eval_epoch(self.model, self.dataset['eval'], self.epoch)
if 'saving_criteria' in Options()['engine'] and Options()['engine']['saving_criteria'] is not None:
for saving_criteria in Options()['engine']['saving_criteria']:
if self.is_best(out, saving_criteria):
name = saving_criteria.split(':')[0]
Logger()('Saving best checkpoint for strategy {}'.format(name))
self.save(Options()['exp']['dir'], 'best_{}'.format(name), self.model, self.optimizer)
Logger()('Saving last checkpoint')
self.save(Options()['exp']['dir'], 'last', self.model, self.optimizer)
self.epoch += 1
Logger()('Ending training procedures')
def train_epoch(self, model, dataset, optimizer, epoch, mode='train'):
""" Launch training procedures for one epoch
List of the hooks:
- train_on_start_epoch: before the training procedure for an epoch
        - train_on_start_batch: before the training procedure for a batch
        - train_on_forward: after the forward of the model
        - train_on_backward: after the backward of the loss
- train_on_update: after the optimization step
- train_on_print: after the print to the terminal
- train_on_end_batch: end of the training procedure for a batch
- train_on_end_epoch: before saving the logs in logs.json
- train_on_flush: end of the training procedure for an epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Training model on {}set for epoch {}'.format(dataset.split, epoch))
model.train()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook(f'{mode}_on_start_epoch')
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook(f'{mode}_on_start_batch')
optimizer.zero_grad()
out = model(batch)
self.hook(f'{mode}_on_forward')
if not torch.isnan(out['loss']):
out['loss'].backward()
else:
Logger()('NaN detected')
#torch.cuda.synchronize()
self.hook(f'{mode}_on_backward')
optimizer.step()
#torch.cuda.synchronize()
self.hook(f'{mode}_on_update')
timer['process'] = time.time() - timer['elapsed']
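            # exponential moving average (alpha=0.2) of the batch time, used for the ETA print below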
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value(f'{mode}_batch.epoch', epoch, should_print=False)
Logger().log_value(f'{mode}_batch.batch', i, should_print=False)
Logger().log_value(f'{mode}_batch.timer.process', timer['process'], should_print=False)
Logger().log_value(f'{mode}_batch.timer.load', timer['load'], should_print=False)
for key, value in out.items():
if torch.is_tensor(value):
if value.dim() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if type(value) == list:
continue
if type(value) == dict:
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value(f'{mode}_batch.'+key, value, should_print=False)
if i % Options()['engine']['print_freq'] == 0:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
Logger()("{} loss: {:.5f}".format(' '*len(mode), out['loss'].data.item()))
self.hook(f'{mode}_on_print')
timer['elapsed'] = time.time()
self.hook(f'{mode}_on_end_batch')
if Options()['engine']['debug']:
if i > 2:
break
Logger().log_value(f'{mode}_epoch.epoch', epoch, should_print=True)
for key, value in out_epoch.items():
# Logger().log_value(f'{mode}_epoch.'+key, sum(value)/len(value), should_print=True)
Logger().log_value(f'{mode}_epoch.'+key, np.asarray(value).mean(), should_print=True)
self.hook(f'{mode}_on_end_epoch')
Logger().flush()
self.hook(f'{mode}_on_flush')
def eval_epoch(self, model, dataset, epoch, mode='eval', logs_json=True):
""" Launch evaluation procedures for one epoch
List of the hooks (``mode='eval'`` by default):
- mode_on_start_epoch: before the evaluation procedure for an epoch
        - mode_on_start_batch: before the evaluation procedure for a batch
- mode_on_forward: after the forward of the model
- mode_on_print: after the print to the terminal
- mode_on_end_batch: end of the evaluation procedure for a batch
- mode_on_end_epoch: before saving the logs in logs.json
- mode_on_flush: end of the evaluation procedure for an epoch
Returns:
out(dict): mean of all the scalar outputs of the model, indexed by output name, for this epoch
"""
utils.set_random_seed(Options()['misc']['seed'] + epoch) # to be able to reproduce exps on reload
Logger()('Evaluating model on {}set for epoch {}'.format(dataset.split, epoch))
model.eval()
timer = {
'begin': time.time(),
'elapsed': time.time(),
'process': None,
'load': None,
'run_avg': 0
}
out_epoch = {}
batch_loader = dataset.make_batch_loader()
self.hook('{}_on_start_epoch'.format(mode))
for i, batch in enumerate(batch_loader):
timer['load'] = time.time() - timer['elapsed']
self.hook('{}_on_start_batch'.format(mode))
with torch.no_grad():
out = model(batch)
#torch.cuda.synchronize()
self.hook('{}_on_forward'.format(mode))
timer['process'] = time.time() - timer['elapsed']
if i == 0:
timer['run_avg'] = timer['process']
else:
timer['run_avg'] = timer['run_avg'] * 0.8 + timer['process'] * 0.2
Logger().log_value('{}_batch.batch'.format(mode), i, should_print=False)
Logger().log_value('{}_batch.epoch'.format(mode), epoch, should_print=False)
Logger().log_value('{}_batch.timer.process'.format(mode), timer['process'], should_print=False)
Logger().log_value('{}_batch.timer.load'.format(mode), timer['load'], should_print=False)
for key, value in out.items():
if torch.is_tensor(value):
if value.dim() <= 1:
value = value.item() # get number from a torch scalar
else:
continue
if type(value) == list:
continue
if type(value) == dict:
continue
if key not in out_epoch:
out_epoch[key] = []
out_epoch[key].append(value)
Logger().log_value('{}_batch.{}'.format(mode, key), value, should_print=False)
if i % Options()['engine']['print_freq'] == 0:
Logger()("{}: epoch {} | batch {}/{}".format(mode, epoch, i, len(batch_loader) - 1))
Logger()("{} elapsed: {} | left: {}".format(' '*len(mode),
datetime.timedelta(seconds=math.floor(time.time() - timer['begin'])),
datetime.timedelta(seconds=math.floor(timer['run_avg'] * (len(batch_loader) - 1 - i)))))
Logger()("{} process: {:.5f} | load: {:.5f}".format(' '*len(mode), timer['process'], timer['load']))
self.hook('{}_on_print'.format(mode))
timer['elapsed'] = time.time()
self.hook('{}_on_end_batch'.format(mode))
if Options()['engine']['debug']:
if i > 10:
break
out = {}
for key, value in out_epoch.items():
try:
# out[key] = sum(value)/len(value)
out[key] = np.asarray(value).mean()
except:
import ipdb; ipdb.set_trace()
Logger().log_value('{}_epoch.epoch'.format(mode), epoch, should_print=True)
for key, value in out.items():
Logger().log_value('{}_epoch.{}'.format(mode, key), value, should_print=True)
self.hook('{}_on_end_epoch'.format(mode))
if logs_json:
Logger().flush()
self.hook('{}_on_flush'.format(mode))
return out
def is_best(self, out, saving_criteria):
""" Verify if the last model is the best for a specific saving criteria
Args:
out(dict): mean of all the scalar outputs of model indexed by output name
saving_criteria(str):
Returns:
is_best(bool)
Example usage:
.. code-block:: python
out = {
'loss': 0.2,
'acctop1': 87.02
}
engine.is_best(out, 'loss:min')
"""
if ':min' in saving_criteria:
name = saving_criteria.replace(':min', '')
order = '<'
elif ':max' in saving_criteria:
name = saving_criteria.replace(':max', '')
order = '>'
else:
error_msg = """'--engine.saving_criteria' named '{}' does not specify order,
you need to chose between '{}' or '{}' to specify if the criteria needs to be minimize or maximize""".format(
saving_criteria, saving_criteria+':min', saving_criteria+':max')
raise ValueError(error_msg)
if name not in out:
raise KeyError("'--engine.saving_criteria' named '{}' not in outputs '{}'".format(name, list(out.keys())))
if name not in self.best_out:
self.best_out[name] = out[name]
else:
if eval('{} {} {}'.format(out[name], order, self.best_out[name])):
self.best_out[name] = out[name]
return True
return False
def load(self, dir_logs, name, model, optimizer, map_location=None):
""" Load a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
Logger()('Loading model...')
model_state = torch.load(path_template.format(name, 'model'), map_location=map_location)
model.load_state_dict(model_state, strict=False)
if Options()['dataset']['train_split'] is not None:
if os.path.isfile(path_template.format(name, 'optimizer')):
Logger()('Loading optimizer...')
optimizer_state = torch.load(path_template.format(name, 'optimizer'), map_location=map_location)
optimizer.load_state_dict(optimizer_state)
else:
Logger()('No optimizer checkpoint', log_level=Logger.WARNING)
if os.path.isfile(path_template.format(name, 'engine')):
Logger()('Loading engine...')
engine_state = torch.load(path_template.format(name, 'engine'), map_location=map_location)
self.load_state_dict(engine_state)
else:
Logger()('No engine checkpoint', log_level=Logger.WARNING)
def save(self, dir_logs, name, model, optimizer):
""" Save a checkpoint
Args:
dir_logs: directory of the checkpoint
name: name of the checkpoint
model: model associated to the checkpoint
optimizer: optimizer associated to the checkpoint
"""
path_template = os.path.join(dir_logs, 'ckpt_{}_{}.pth.tar')
Logger()('Saving model...')
model_state = model.state_dict()
torch.save(model_state, path_template.format(name, 'model'))
Logger()('Saving optimizer...')
optimizer_state = optimizer.state_dict()
torch.save(optimizer_state, path_template.format(name, 'optimizer'))
Logger()('Saving engine...')
engine_state = self.state_dict()
torch.save(engine_state, path_template.format(name, 'engine'))
| 17,193 | 38.345538 | 121 | py |
introd | introd-main/cfvqa/cfvqa/optimizers/factory.py | import torch.nn as nn
from bootstrap.lib.options import Options
from bootstrap.optimizers.factory import factory_optimizer
from block.optimizers.lr_scheduler import ReduceLROnPlateau
from block.optimizers.lr_scheduler import BanOptimizer
def factory(model, engine):
opt = Options()['optimizer']
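    # BanOptimizer (block.optimizers) implements the BAN-style schedule:
    # gradual LR warmup over the first epochs, then stepwise decay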
optimizer = BanOptimizer(engine,
name=Options()['optimizer'].get('name', 'Adamax'),
lr=Options()['optimizer']['lr'],
gradual_warmup_steps=Options()['optimizer'].get('gradual_warmup_steps', [0.5, 2.0, 4]),
lr_decay_epochs=Options()['optimizer'].get('lr_decay_epochs', [10, 20, 2]),
lr_decay_rate=Options()['optimizer'].get('lr_decay_rate', .25))
if opt.get('lr_scheduler', None):
optimizer = ReduceLROnPlateau(optimizer, engine,
**opt['lr_scheduler'])
if opt.get('init', None) == 'glorot':
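        # Glorot/Xavier initialization: 1-d parameters (biases) are zeroed,
        # weight matrices get xavier-uniform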
for p in model.network.parameters():
if p.dim()==1:
p.data.fill_(0)
elif p.dim()>=2:
nn.init.xavier_uniform_(p.data)
else:
raise ValueError(p.dim())
return optimizer
| 1,127 | 35.387097 | 95 | py |
introd | introd-main/css/fc.py | from __future__ import print_function
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
        layers = []
        # dims = [in_dim, hidden..., out_dim]; each step is a weight-normed Linear + ReLU
        for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
if __name__ == '__main__':
fc1 = FCNet([10, 20, 10])
print(fc1)
print('============')
fc2 = FCNet([10, 20])
print(fc2)
| 853 | 24.117647 | 76 | py |
introd | introd-main/css/main.py | import argparse
import json
import cPickle as pickle
from collections import defaultdict, Counter
from os.path import dirname, join
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from dataset import Dictionary, VQAFeatureDataset
import base_model
from train import train
import utils
import click
from vqa_debias_loss_functions import *
def parse_args():
parser = argparse.ArgumentParser("Train the BottomUpTopDown model with a de-biasing method")
# Arguments we added
parser.add_argument(
'--cache_features', default=True,
help="Cache image features in RAM. Makes things much faster, "
"especially if the filesystem is slow, but requires at least 48gb of RAM")
parser.add_argument(
        '--dataset', default='cpv2',
        choices=["v2", "cpv2", "cpv1", "cpv2val"],
        help="Which dataset to run on: VQA v2, VQA-CP v2, VQA-CP v1, or the VQA-CP v2 val split"
    )
parser.add_argument(
'-p', "--entropy_penalty", default=0.36, type=float,
help="Entropy regularizer weight for the learned_mixin model")
parser.add_argument(
'--mode', default="updn",
choices=["updn", "q_debias","v_debias","q_v_debias"],
help="Kind of ensemble loss to use")
parser.add_argument(
'--debias', default="learned_mixin",
choices=["learned_mixin_rw2", "learned_mixin_rw", "learned_mixin", "reweight", "bias_product", "none",'focal'],
help="Kind of ensemble loss to use")
parser.add_argument(
        '--topq', type=int, default=1,
        choices=[1, 2, 3],
        help="num of words to be masked in question")
parser.add_argument(
'--keep_qtype', default=True,
help="keep qtype or not")
parser.add_argument(
'--topv', type=int,default=1,
choices=[1,3,5,-1],
help="num of object bbox to be masked in image")
parser.add_argument(
'--top_hint',type=int, default=9,
choices=[9,18,27,36],
help="num of hint")
parser.add_argument(
'--qvp', type=int,default=0,
choices=[0,1,2,3,4,5,6,7,8,9,10],
help="ratio of q_bias and v_bias")
parser.add_argument(
'--eval_each_epoch', default=True,
help="Evaluate every epoch, instead of at the end")
    # Arguments from the original model. We leave these at their defaults, except
    # --epochs: the original used 30, but 15 suffices since the model maxes out
    # its performance well before then.
    # parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--num_hid', type=int, default=1024)
parser.add_argument('--model', type=str, default='baseline0_newatt')
parser.add_argument('--output', type=str, default='logs/exp0')
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--feature', type=str, default='css')
args = parser.parse_args()
return args
def get_bias(train_dset,eval_dset):
# Compute the bias:
# The bias here is just the expected score for each answer/question type
answer_voc_size = train_dset.num_ans_candidates
# question_type -> answer -> total score
question_type_to_probs = defaultdict(Counter)
# question_type -> num_occurances
question_type_to_count = Counter()
for ex in train_dset.entries:
ans = ex["answer"]
q_type = ans["question_type"]
question_type_to_count[q_type] += 1
if ans["labels"] is not None:
for label, score in zip(ans["labels"], ans["scores"]):
question_type_to_probs[q_type][label] += score
question_type_to_prob_array = {}
for q_type, count in question_type_to_count.items():
prob_array = np.zeros(answer_voc_size, np.float32)
for label, total_score in question_type_to_probs[q_type].items():
prob_array[label] += total_score
prob_array /= count
question_type_to_prob_array[q_type] = prob_array
for ds in [train_dset,eval_dset]:
for ex in ds.entries:
q_type = ex["answer"]["question_type"]
ex["bias"] = question_type_to_prob_array[q_type]
def main():
args = parse_args()
dataset=args.dataset
# args.output=os.path.join('logs',args.output)
if not os.path.isdir(args.output):
utils.create_dir(args.output)
else:
        if click.confirm('Exp directory already exists in {}. Erase?'
                         .format(args.output), default=False):
os.system('rm -r ' + args.output)
utils.create_dir(args.output)
else:
os._exit(1)
if dataset=='cpv1':
dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
elif dataset=='cpv2' or dataset=='v2' or dataset=='cpv2val':
dictionary = Dictionary.load_from_file('data/dictionary.pkl')
print("Building train dataset...")
train_dset = VQAFeatureDataset('train', dictionary, dataset=dataset,
cache_image_features=args.cache_features)
print("Building test dataset...")
eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
cache_image_features=args.cache_features)
get_bias(train_dset,eval_dset)
# Build the model using the original constructor
constructor = 'build_%s' % args.model
model = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
if dataset=='cpv1':
model.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
elif dataset=='cpv2' or dataset=='v2' or dataset=='cpv2val':
model.w_emb.init_embedding('data/glove6b_init_300d.npy')
# Add the loss_fn based our arguments
if args.debias == "bias_product":
model.debias_loss_fn = BiasProduct()
elif args.debias == "none":
model.debias_loss_fn = Plain()
elif args.debias == "reweight":
model.debias_loss_fn = ReweightByInvBias()
elif args.debias == "learned_mixin":
model.debias_loss_fn = LearnedMixin(args.entropy_penalty)
elif args.debias == "focal":
model.debias_loss_fn = Focal()
else:
        raise RuntimeError("unknown --debias option: %s" % args.debias)
with open('util/qid2type_%s.json'%args.dataset,'r') as f:
qid2type=json.load(f)
model=model.cuda()
batch_size = args.batch_size
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=0)
eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)
print("Starting training...")
train(model, train_loader, eval_loader, args,qid2type)
if __name__ == '__main__':
main()
| 6,824 | 34.732984 | 119 | py |
introd | introd-main/css/vqa_debias_loss_functions.py | from collections import OrderedDict, defaultdict, Counter
from torch import nn
from torch.nn import functional as F
import numpy as np
import torch
import inspect
def convert_sigmoid_logits_to_binary_logprobs(logits):
"""computes log(sigmoid(logits)), log(1-sigmoid(logits))"""
log_prob = -F.softplus(-logits)
log_one_minus_prob = -logits + log_prob
return log_prob, log_one_minus_prob
def elementwise_logsumexp(a, b):
"""computes log(exp(x) + exp(b))"""
return torch.max(a, b) + torch.log1p(torch.exp(-torch.abs(a - b)))
def renormalize_binary_logits(a, b):
"""Normalize so exp(a) + exp(b) == 1"""
norm = elementwise_logsumexp(a, b)
return a - norm, b - norm
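# Quick numerical check of the three log-space helpers above (illustrative):
def _demo_logspace_helpers():
    logits = torch.tensor([2.0, -1.0])
    log_p, log_1mp = convert_sigmoid_logits_to_binary_logprobs(logits)
    assert torch.allclose(log_p, torch.log(torch.sigmoid(logits)), atol=1e-6)
    a, b = renormalize_binary_logits(log_p + 0.5, log_1mp)  # deliberately unnormalized input
    assert torch.allclose(torch.exp(a) + torch.exp(b), torch.ones(2), atol=1e-6)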
class DebiasLossFn(nn.Module):
"""General API for our loss functions"""
def forward(self, hidden, logits, bias, labels):
"""
:param hidden: [batch, n_hidden] hidden features from the last layer in the model
:param logits: [batch, n_answers_options] sigmoid logits for each answer option
:param bias: [batch, n_answers_options]
bias probabilities for each answer option between 0 and 1
:param labels: [batch, n_answers_options]
scores for each answer option, between 0 and 1
:return: Scalar loss
"""
raise NotImplementedError()
def to_json(self):
"""Get a json representation of this loss function.
We construct this by looking up the __init__ args
"""
cls = self.__class__
init = cls.__init__
if init is object.__init__:
return [] # No init args
init_signature = inspect.getargspec(init)
if init_signature.varargs is not None:
raise NotImplementedError("varags not supported")
if init_signature.keywords is not None:
raise NotImplementedError("keywords not supported")
args = [x for x in init_signature.args if x != "self"]
out = OrderedDict()
out["name"] = cls.__name__
for key in args:
out[key] = getattr(self, key)
return out
class Plain(DebiasLossFn):
def forward(self, hidden, logits, bias, labels):
loss = F.binary_cross_entropy_with_logits(logits, labels)
loss *= labels.size(1)
return loss
class PlainKD(DebiasLossFn):
def forward(self, hidden, logits, bias, labels):
loss = F.binary_cross_entropy_with_logits(logits, labels, reduction='none')
# loss *= labels.size(1)
loss = loss.sum(1)
# return loss
return None, loss
class Focal(DebiasLossFn):
def forward(self, hidden, logits, bias, labels):
# import pdb;pdb.set_trace()
focal_logits=torch.log(F.softmax(logits,dim=1)+1e-5) * ((1-F.softmax(bias,dim=1))*(1-F.softmax(bias,dim=1)))
loss=F.binary_cross_entropy_with_logits(focal_logits,labels)
loss*=labels.size(1)
return loss
class ReweightByInvBias(DebiasLossFn):
def forward(self, hidden, logits, bias, labels):
# Manually compute the binary cross entropy since the old version of torch always aggregates
log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob)
weights = (1 - bias)
loss *= weights # Apply the weights
return loss.sum() / weights.sum()
class BiasProduct(DebiasLossFn):
def __init__(self, smooth=True, smooth_init=-1, constant_smooth=0.0):
"""
:param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
:param smooth_init: How to initialize `a`
:param constant_smooth: Constant to add to the bias to smooth it
"""
super(BiasProduct, self).__init__()
self.constant_smooth = constant_smooth
self.smooth_init = smooth_init
self.smooth = smooth
if smooth:
self.smooth_param = torch.nn.Parameter(
torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
else:
self.smooth_param = None
def forward(self, hidden, logits, bias, labels):
smooth = self.constant_smooth
if self.smooth:
smooth += F.sigmoid(self.smooth_param)
# Convert the bias into log-space, with a factor for both the
# binary outputs for each answer option
bias_lp = torch.log(bias + smooth)
bias_l_inv = torch.log1p(-bias + smooth)
# Convert the the logits into log-space with the same format
log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
# import pdb;pdb.set_trace()
# Add the bias
log_prob += bias_lp
log_one_minus_prob += bias_l_inv
# Re-normalize the factors in logspace
log_prob, log_one_minus_prob = renormalize_binary_logits(log_prob, log_one_minus_prob)
# Compute the binary cross entropy
loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)
return loss
class LearnedMixin(DebiasLossFn):
def __init__(self, w, smooth=True, smooth_init=-1, constant_smooth=0.0):
"""
:param w: Weight of the entropy penalty
:param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
:param smooth_init: How to initialize `a`
:param constant_smooth: Constant to add to the bias to smooth it
"""
super(LearnedMixin, self).__init__()
self.w = w
# self.w=0
self.smooth_init = smooth_init
self.constant_smooth = constant_smooth
self.bias_lin = torch.nn.Linear(1024, 1)
self.smooth = smooth
if self.smooth:
self.smooth_param = torch.nn.Parameter(
torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
else:
self.smooth_param = None
def forward(self, hidden, logits, bias, labels):
factor = self.bias_lin.forward(hidden) # [batch, 1]
factor = F.softplus(factor)
bias = torch.stack([bias, 1 - bias], 2) # [batch, n_answers, 2]
# Smooth
bias += self.constant_smooth
if self.smooth:
soften_factor = F.sigmoid(self.smooth_param)
bias = bias + soften_factor.unsqueeze(1)
bias = torch.log(bias) # Convert to logspace
# Scale by the factor
# [batch, n_answers, 2] * [batch, 1, 1] -> [batch, n_answers, 2]
bias = bias * factor.unsqueeze(1)
log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
log_probs = torch.stack([log_prob, log_one_minus_prob], 2)
# Add the bias in
logits = bias + log_probs
# Renormalize to get log probabilities
log_prob, log_one_minus_prob = renormalize_binary_logits(logits[:, :, 0], logits[:, :, 1])
# Compute loss
loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)
# Re-normalized version of the bias
bias_norm = elementwise_logsumexp(bias[:, :, 0], bias[:, :, 1])
bias_logprob = bias - bias_norm.unsqueeze(2)
# Compute and add the entropy penalty
entropy = -(torch.exp(bias_logprob) * bias_logprob).sum(2).mean()
return loss + self.w * entropy
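# Usage sketch for LearnedMixin (shapes only; the random tensors stand in for
# real hidden states, logits, biases and soft labels):
def _demo_learned_mixin():
    loss_fn = LearnedMixin(w=0.36)
    hidden = torch.randn(4, 1024)  # joint representation fed to bias_lin
    logits = torch.randn(4, 10)    # per-answer sigmoid logits
    bias = torch.rand(4, 10)       # per-answer bias probabilities in (0, 1)
    labels = torch.rand(4, 10)     # soft VQA scores in [0, 1]
    return loss_fn(hidden, logits, bias, labels)  # scalar loss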
class LearnedMixinKD(DebiasLossFn):
def __init__(self, smooth=True, smooth_init=-1, constant_smooth=0.0):
"""
:param w: Weight of the entropy penalty
:param smooth: Add a learned sigmoid(a) factor to the bias to smooth it
:param smooth_init: How to initialize `a`
:param constant_smooth: Constant to add to the bias to smooth it
"""
super(LearnedMixinKD, self).__init__()
self.smooth_init = smooth_init
self.constant_smooth = constant_smooth
self.bias_lin = torch.nn.Linear(1024, 1)
self.smooth = smooth
if self.smooth:
self.smooth_param = torch.nn.Parameter(
torch.from_numpy(np.full((1,), smooth_init, dtype=np.float32)))
else:
self.smooth_param = None
def forward(self, hidden, logits, bias, labels):
factor = self.bias_lin.forward(hidden) # [batch, 1]
factor = F.softplus(factor)
bias = torch.stack([bias, 1 - bias], 2) # [batch, n_answers, 2]
# Smooth
bias += self.constant_smooth
if self.smooth:
soften_factor = F.sigmoid(self.smooth_param)
bias = bias + soften_factor.unsqueeze(1)
bias = torch.log(bias) # Convert to logspace
# Scale by the factor
# [batch, n_answers, 2] * [batch, 1, 1] -> [batch, n_answers, 2]
bias = bias * factor.unsqueeze(1)
log_prob, log_one_minus_prob = convert_sigmoid_logits_to_binary_logprobs(logits)
log_probs = torch.stack([log_prob, log_one_minus_prob], 2)
# Add the bias in
logits = bias + log_probs
# Renormalize to get log probabilities
log_prob, log_one_minus_prob = renormalize_binary_logits(logits[:, :, 0], logits[:, :, 1])
# Compute loss
loss = -(log_prob * labels + (1 - labels) * log_one_minus_prob).sum(1).mean(0)
# Re-normalized version of the bias
bias_norm = elementwise_logsumexp(bias[:, :, 0], bias[:, :, 1])
bias_logprob = bias - bias_norm.unsqueeze(2)
        # convert the fused probabilities back to logits, log(p / (1 - p)),
        # so the student can be distilled on them; clamps guard against log(0)
        prob_all = torch.exp(log_prob)
        p = torch.clamp(1-prob_all, min=1e-12)
        p = torch.clamp(prob_all/p, min=1e-12)
        logits_all = torch.log(p)
        return logits_all, loss
introd | introd-main/css/base_model.py | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import numpy as np
def mask_softmax(x,mask):
mask=mask.unsqueeze(2).float()
x2=torch.exp(x-torch.max(x))
x3=x2*mask
epsilon=1e-5
x3_sum=torch.sum(x3,dim=1,keepdim=True)+epsilon
x4=x3/x3_sum.expand_as(x3)
return x4
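# Sanity sketch for mask_softmax (illustrative): masked positions get zero
# attention weight and each row renormalizes over the unmasked slots only,
# here ~[.5, .5, 0, 0] and ~[1/3, 0, 1/3, 1/3].
def _demo_mask_softmax():
    x = torch.zeros(2, 4, 1)  # uniform logits
    mask = torch.tensor([[1, 1, 0, 0], [1, 0, 1, 1]])
    w = mask_softmax(x, mask)
    assert torch.allclose(w.sum(1), torch.ones(2, 1), atol=1e-4)
    assert float(w[0, 2]) == 0.0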
class BaseModel(nn.Module):
def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
super(BaseModel, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
self.debias_loss_fn = None
# self.bias_scale = torch.nn.Parameter(torch.from_numpy(np.ones((1, ), dtype=np.float32)*1.2))
self.bias_lin = torch.nn.Linear(1024, 1)
def forward(self, v, q, labels, bias,v_mask):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb(w_emb) # [batch, q_dim]
att = self.v_att(v, q_emb)
if v_mask is None:
att = nn.functional.softmax(att, 1)
else:
att= mask_softmax(att,v_mask)
v_emb = (att * v).sum(1) # [batch, v_dim]
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classifier(joint_repr)
if labels is not None:
loss = self.debias_loss_fn(joint_repr, logits, bias, labels)
else:
loss = None
return logits, loss,w_emb
def build_baseline0(dataset, num_hid):
w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
v_att = Attention(dataset.v_dim, q_emb.num_hid, num_hid)
q_net = FCNet([num_hid, num_hid])
v_net = FCNet([dataset.v_dim, num_hid])
classifier = SimpleClassifier(
num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5)
return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier)
def build_baseline0_newatt(dataset, num_hid):
w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
v_att = NewAttention(dataset.v_dim, q_emb.num_hid, num_hid)
q_net = FCNet([q_emb.num_hid, num_hid])
v_net = FCNet([dataset.v_dim, num_hid])
classifier = SimpleClassifier(
num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5)
return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) | 2,765 | 32.325301 | 102 | py |
introd | introd-main/css/base_model_introd.py | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import numpy as np
def mask_softmax(x,mask):
mask=mask.unsqueeze(2).float()
x2=torch.exp(x-torch.max(x))
x3=x2*mask
epsilon=1e-5
x3_sum=torch.sum(x3,dim=1,keepdim=True)+epsilon
x4=x3/x3_sum.expand_as(x3)
return x4
class BaseModel(nn.Module):
def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
super(BaseModel, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
self.debias_loss_fn = None
# self.bias_scale = torch.nn.Parameter(torch.from_numpy(np.ones((1, ), dtype=np.float32)*1.2))
self.bias_lin = torch.nn.Linear(1024, 1)
def forward(self, v, q, labels, bias,v_mask):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb(w_emb) # [batch, q_dim]
att = self.v_att(v, q_emb)
if v_mask is None:
att = nn.functional.softmax(att, 1)
else:
att= mask_softmax(att,v_mask)
v_emb = (att * v).sum(1) # [batch, v_dim]
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classifier(joint_repr)
if labels is not None:
logits_all, loss = self.debias_loss_fn(joint_repr, logits, bias, labels)
else:
logits_all = None
loss = None
return logits, logits_all, loss, w_emb
def build_baseline0(dataset, num_hid):
w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
v_att = Attention(dataset.v_dim, q_emb.num_hid, num_hid)
q_net = FCNet([num_hid, num_hid])
v_net = FCNet([dataset.v_dim, num_hid])
classifier = SimpleClassifier(
num_hid, 2 * num_hid, dataset.num_ans_candidates, 0.5)
return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier)
def build_baseline0_newatt(dataset, num_hid):
w_emb = WordEmbedding(dataset.dictionary.ntoken, 300, 0.0)
q_emb = QuestionEmbedding(300, num_hid, 1, False, 0.0)
v_att = NewAttention(dataset.v_dim, q_emb.num_hid, num_hid)
q_net = FCNet([q_emb.num_hid, num_hid])
v_net = FCNet([dataset.v_dim, num_hid])
classifier = SimpleClassifier(
num_hid, num_hid * 2, dataset.num_ans_candidates, 0.5)
return BaseModel(w_emb, q_emb, v_att, q_net, v_net, classifier) | 2,820 | 32.583333 | 102 | py |
introd | introd-main/css/train_introd.py | import json
import os
import pickle
import time
from os.path import join
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import random
import copy
from torch.nn import functional as F
def compute_score_with_logits(logits, labels):
logits = torch.argmax(logits, 1)
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
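# Sanity sketch for compute_score_with_logits (assumes a CUDA device, since the
# helper above allocates with .cuda()): the argmax prediction earns that
# answer's soft score and every other answer earns zero.
def _demo_compute_score():
    logits = torch.tensor([[0.1, 2.0, -1.0]]).cuda()
    labels = torch.tensor([[0.0, 0.3, 1.0]]).cuda()
    return compute_score_with_logits(logits, labels)  # -> [[0., 0.3, 0.]]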
def train(model_teacher, model, train_loader, eval_loader,args,qid2type):
dataset=args.dataset
num_epochs=args.epochs
mode=args.mode
run_eval=args.eval_each_epoch
output=args.output
optim = torch.optim.Adamax(model.parameters())
logger = utils.Logger(os.path.join(output, 'log.txt'))
total_step = 0
best_eval_score = 0
logsigmoid = torch.nn.LogSigmoid()
KLDivLoss = torch.nn.KLDivLoss(reduction='none')
for epoch in range(num_epochs):
total_loss = 0
train_score = 0
t = time.time()
for i, (v, q, a, b, _, _, _, _) in tqdm(enumerate(train_loader), ncols=100,
desc="Epoch %d" % (epoch + 1), total=len(train_loader)):
total_step += 1
#########################################
v = Variable(v).cuda().requires_grad_()
q = Variable(q).cuda()
# q_mask=Variable(q_mask).cuda()
a = Variable(a).cuda()
b = Variable(b).cuda()
# hintscore = Variable(hintscore).cuda()
# type_mask=Variable(type_mask).float().cuda()
# notype_mask=Variable(notype_mask).float().cuda()
#########################################
pred_nie, pred_te, _, _ = model_teacher(v, q, a, b, None)
pred, _, loss_ce, _ = model(v, q, a, b, None)
            # normalize the soft answer scores into a distribution
            aa = a/torch.clamp(a.sum(1, keepdim=True), min=1e-24)
            # per-sample BCE of the two teacher heads against the normalized targets
            loss_te = -(aa*logsigmoid(pred_te) + (1-aa)*logsigmoid(-pred_te)).sum(1)
            loss_nie = -(aa*logsigmoid(pred_nie) + (1-aa)*logsigmoid(-pred_nie)).sum(1)
            loss_te = torch.clamp(loss_te, min=1e-12)
            loss_nie = torch.clamp(loss_nie, min=1e-12)
            # per-sample blending weight between distillation and plain CE
            # (see the sketch after train() below)
            w = loss_nie/(loss_te+loss_nie)
w = w.clone().detach()
# KL
prob_nie = F.softmax(pred_nie, -1).clone().detach()
loss_kl = - prob_nie*F.log_softmax(pred, -1)
loss_kl = loss_kl.sum(1)
loss = (w*loss_kl + (1-w)*loss_ce).mean()
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
total_loss /= len(train_loader.dataset)
train_score = 100 * train_score / len(train_loader.dataset)
if run_eval:
model.train(False)
results = evaluate(model, eval_loader, qid2type)
results["epoch"] = epoch + 1
results["step"] = total_step
results["train_loss"] = total_loss
results["train_score"] = train_score
model.train(True)
eval_score = results["score"]
bound = results["upper_bound"]
yn = results['score_yesno']
other = results['score_other']
num = results['score_number']
logger.write('epoch %d, time: %.2f' % (epoch + 1, time.time() - t))
logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
if run_eval:
logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
logger.write('\tyn score: %.2f other score: %.2f num score: %.2f' % (100 * yn, 100 * other, 100 * num))
if eval_score > best_eval_score:
model_path = os.path.join(output, 'model.pth')
torch.save(model.state_dict(), model_path)
best_eval_score = eval_score
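# Worked example of the blending weight used in train() above (illustrative
# numbers): a sample whose NIE head fits poorly relative to the TE head puts
# most of its weight on the KL/distillation term, and vice versa.
def _demo_introd_weight():
    loss_te = torch.tensor([0.2, 0.9])
    loss_nie = torch.tensor([0.8, 0.1])
    return loss_nie / (loss_te + loss_nie)  # -> [0.8, 0.1], weight on the KL term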
def evaluate(model, dataloader, qid2type):
score = 0
upper_bound = 0
score_yesno = 0
score_number = 0
score_other = 0
total_yesno = 0
total_number = 0
total_other = 0
for v, q, a, b, qids, _ in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
v = Variable(v, requires_grad=False).cuda()
q = Variable(q, requires_grad=False).cuda()
pred, _, _, _ = model(v, q, None, None, None)
batch_score = compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
score += batch_score.sum()
upper_bound += (a.max(1)[0]).sum()
qids = qids.detach().cpu().int().numpy()
for j in range(len(qids)):
qid = qids[j]
typ = qid2type[str(qid)]
if typ == 'yes/no':
score_yesno += batch_score[j]
total_yesno += 1
elif typ == 'other':
score_other += batch_score[j]
total_other += 1
elif typ == 'number':
score_number += batch_score[j]
total_number += 1
else:
                print('Unexpected question type: %s' % typ)
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
score_yesno /= total_yesno
score_other /= total_other
score_number /= total_number
results = dict(
score=score,
upper_bound=upper_bound,
score_yesno=score_yesno,
score_other=score_other,
score_number=score_number,
)
return results
| 5,773 | 32.569767 | 115 | py |
introd | introd-main/css/main_introd.py | import argparse
import json
import cPickle as pickle
from collections import defaultdict, Counter
from os.path import dirname, join
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
from dataset import Dictionary, VQAFeatureDataset
import base_model_introd as base_model
from train_introd import train
import utils
import click
from vqa_debias_loss_functions import *
def parse_args():
parser = argparse.ArgumentParser("Train the BottomUpTopDown model with a de-biasing method")
# Arguments we added
parser.add_argument(
'--cache_features', default=True,
help="Cache image features in RAM. Makes things much faster, "
"especially if the filesystem is slow, but requires at least 48gb of RAM")
parser.add_argument(
        '--dataset', default='cpv2',
        choices=["v2", "cpv2", "cpv1", "cpv2val"],
        help="Which dataset to run on: VQA v2, VQA-CP v2, VQA-CP v1, or the VQA-CP v2 val split"
    )
parser.add_argument(
'-p', "--entropy_penalty", default=0.36, type=float,
help="Entropy regularizer weight for the learned_mixin model")
parser.add_argument(
'--mode', default="updn",
choices=["updn", "q_debias","v_debias","q_v_debias"],
help="Kind of ensemble loss to use")
parser.add_argument(
'--debias', default="learned_mixin",
choices=["learned_mixin_rw2", "learned_mixin_rw", "learned_mixin", "reweight", "bias_product", "none",'focal'],
help="Kind of ensemble loss to use")
parser.add_argument(
        '--topq', type=int, default=1,
        choices=[1, 2, 3],
        help="num of words to be masked in question")
parser.add_argument(
'--keep_qtype', default=True,
help="keep qtype or not")
parser.add_argument(
'--topv', type=int,default=1,
choices=[1,3,5,-1],
help="num of object bbox to be masked in image")
parser.add_argument(
'--top_hint',type=int, default=9,
choices=[9,18,27,36],
help="num of hint")
parser.add_argument(
'--qvp', type=int,default=0,
choices=[0,1,2,3,4,5,6,7,8,9,10],
help="ratio of q_bias and v_bias")
parser.add_argument(
'--eval_each_epoch', default=True,
help="Evaluate every epoch, instead of at the end")
    # Arguments from the original model. We leave these at their defaults, except
    # --epochs, set to 30 since the model maxes out its performance well before then.
    parser.add_argument('--epochs', type=int, default=30)
parser.add_argument('--num_hid', type=int, default=1024)
parser.add_argument('--model', type=str, default='baseline0_newatt')
parser.add_argument('--source', type=str, default='./logs/vqacp2/css/')
parser.add_argument('--output', type=str, default='logs/exp0')
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--seed', type=int, default=1111, help='random seed')
args = parser.parse_args()
return args
def get_bias(train_dset,eval_dset):
# Compute the bias:
# The bias here is just the expected score for each answer/question type
answer_voc_size = train_dset.num_ans_candidates
# question_type -> answer -> total score
question_type_to_probs = defaultdict(Counter)
# question_type -> num_occurances
question_type_to_count = Counter()
for ex in train_dset.entries:
ans = ex["answer"]
q_type = ans["question_type"]
question_type_to_count[q_type] += 1
if ans["labels"] is not None:
for label, score in zip(ans["labels"], ans["scores"]):
question_type_to_probs[q_type][label] += score
question_type_to_prob_array = {}
for q_type, count in question_type_to_count.items():
prob_array = np.zeros(answer_voc_size, np.float32)
for label, total_score in question_type_to_probs[q_type].items():
prob_array[label] += total_score
prob_array /= count
question_type_to_prob_array[q_type] = prob_array
for ds in [train_dset,eval_dset]:
for ex in ds.entries:
q_type = ex["answer"]["question_type"]
ex["bias"] = question_type_to_prob_array[q_type]
def main():
args = parse_args()
dataset=args.dataset
# args.output=os.path.join('logs',args.output)
if not os.path.isdir(args.output):
utils.create_dir(args.output)
else:
        if click.confirm('Exp directory already exists in {}. Erase?'
                         .format(args.output), default=False):
os.system('rm -r ' + args.output)
utils.create_dir(args.output)
else:
os._exit(1)
if dataset=='cpv1':
dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
elif dataset=='cpv2' or dataset=='v2' or dataset=='cpv2val':
dictionary = Dictionary.load_from_file('data/dictionary.pkl')
print("Building train dataset...")
train_dset = VQAFeatureDataset('train', dictionary, dataset=dataset,
cache_image_features=args.cache_features)
print("Building test dataset...")
eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
cache_image_features=args.cache_features)
get_bias(train_dset,eval_dset)
# Build the model using the original constructor
constructor = 'build_%s' % args.model
model = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
if dataset=='cpv1':
model.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
elif dataset=='cpv2' or dataset=='v2' or dataset=='cpv2val':
model.w_emb.init_embedding('data/glove6b_init_300d.npy')
model_student = getattr(base_model, constructor)(train_dset, args.num_hid).cuda()
if dataset=='cpv1':
model_student.w_emb.init_embedding('data/glove6b_init_300d_v1.npy')
elif dataset=='cpv2' or dataset=='v2' or dataset=='cpv2val':
model_student.w_emb.init_embedding('data/glove6b_init_300d.npy')
    # `model` is the pretrained CSS teacher (loaded from --source and kept in
    # eval mode); `model_student` is trained from scratch in train_introd
    state_dict = torch.load(join(args.source, "model.pth"))
    model.debias_loss_fn = LearnedMixinKD()
    model.load_state_dict(state_dict, strict=False)
    model_student.debias_loss_fn = PlainKD()
    model.train(False)
with open('util/qid2type_%s.json'%args.dataset,'r') as f:
qid2type=json.load(f)
model=model.cuda()
model_student=model_student.cuda()
batch_size = args.batch_size
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
train_loader = DataLoader(train_dset, batch_size, shuffle=True, num_workers=0)
eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)
print("Starting training...")
train(model, model_student, train_loader, eval_loader, args,qid2type)
if __name__ == '__main__':
main()
| 6,902 | 35.718085 | 119 | py |
introd | introd-main/css/utils.py | from __future__ import print_function
import errno
import os
import numpy as np
# from PIL import Image
import torch
import torch.nn as nn
EPS = 1e-7
def assert_eq(real, expected):
    # NOTE: the original equality check below appears intentionally disabled;
    # the active assertion compares `real` to itself and only fails on NaN.
    # assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
    assert real == real, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
# def load_imageid(folder):
# images = load_folder(folder, 'jpg')
# img_ids = set()
# for img in images:
# img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
# img_ids.add(img_id)
# return img_ids
# def pil_loader(path):
# with open(path, 'rb') as f:
# with Image.open(f) as img:
# return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
for key, vals in self.infos.iteritems():
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
| 2,535 | 23.862745 | 79 | py |
introd | introd-main/css/classifier.py | import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class SimpleClassifier(nn.Module):
def __init__(self, in_dim, hid_dim, out_dim, dropout):
super(SimpleClassifier, self).__init__()
layers = [
weight_norm(nn.Linear(in_dim, hid_dim), dim=None),
nn.ReLU(),
nn.Dropout(dropout, inplace=True),
weight_norm(nn.Linear(hid_dim, out_dim), dim=None)
]
self.main = nn.Sequential(*layers)
def forward(self, x):
logits = self.main(x)
return logits
| 565 | 28.789474 | 62 | py |
introd | introd-main/css/dataset.py | from __future__ import print_function
from __future__ import unicode_literals
import os
import json
import cPickle
from collections import Counter
import numpy as np
import utils
import h5py
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from random import choice
class Dictionary(object):
def __init__(self, word2idx=None, idx2word=None):
if word2idx is None:
word2idx = {}
if idx2word is None:
idx2word = []
self.word2idx = word2idx
self.idx2word = idx2word
@property
def ntoken(self):
return len(self.word2idx)
@property
def padding_idx(self):
return len(self.word2idx)
def tokenize(self, sentence, add_word):
sentence = sentence.lower()
sentence = sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s').replace('-',
' ').replace('.','').replace('"', '').replace('n\'t', ' not').replace('$', ' dollar ')
words = sentence.split()
tokens = []
if add_word:
for w in words:
tokens.append(self.add_word(w))
else:
for w in words:
if w in self.word2idx:
tokens.append(self.word2idx[w])
else:
tokens.append(len(self.word2idx))
return tokens
def dump_to_file(self, path):
cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
print('dictionary dumped to %s' % path)
@classmethod
def load_from_file(cls, path):
print('loading dictionary from %s' % path)
word2idx, idx2word = cPickle.load(open(path, 'rb'))
d = cls(word2idx, idx2word)
return d
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
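# Usage sketch for Dictionary (toy sentence, not the repo's vocabulary): once
# the words have been added, tokenize maps the same words to the same ids.
def _demo_dictionary():
    d = Dictionary()
    first = d.tokenize("What's on the plate?", add_word=True)
    second = d.tokenize("what 's on the plate", add_word=False)
    assert first == second
    return first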
def _create_entry(img_idx, question, answer):
answer.pop('image_id')
answer.pop('question_id')
entry = {
'question_id' : question['question_id'],
'image_id' : question['image_id'],
'image_idx' : img_idx,
'question' : question['question'],
'answer' : answer
}
return entry
def _load_dataset(dataroot, name, img_id2val, dataset):
"""Load entries
img_id2val: dict {img_id -> val} val can be used to retrieve image or features
dataroot: root path of dataset
name: 'train', 'val'
"""
if dataset=='cpv2':
answer_path = os.path.join(dataroot, 'cp-cache', '%s_target.pkl' % name)
name = "train" if name == "train" else "test"
question_path = os.path.join(dataroot, 'vqacp_v2_%s_questions.json' % name)
with open(question_path) as f:
questions = json.load(f)
elif dataset=='cpv1':
answer_path = os.path.join(dataroot, 'cp-v1-cache', '%s_target.pkl' % name)
name = "train" if name == "train" else "test"
question_path = os.path.join(dataroot, 'vqacp_v1_%s_questions.json' % name)
with open(question_path) as f:
questions = json.load(f)
elif dataset=='v2':
answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
question_path = os.path.join(dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
with open(question_path) as f:
questions = json.load(f)["questions"]
elif dataset=='cpv2val':
answer_path = os.path.join(dataroot, 'cpval-cache', '%s_target.pkl' % name)
name = "train" if name == "train" else "test"
question_path = os.path.join(dataroot, 'vqacp2val', 'vqacp_v2_%s_questions.json' % name)
with open(question_path) as f:
questions = json.load(f)
with open(answer_path, 'rb') as f:
answers = cPickle.load(f)
questions.sort(key=lambda x: x['question_id'])
answers.sort(key=lambda x: x['question_id'])
utils.assert_eq(len(questions), len(answers))
entries = []
for question, answer in zip(questions, answers):
if answer["labels"] is None:
raise ValueError()
utils.assert_eq(question['question_id'], answer['question_id'])
utils.assert_eq(question['image_id'], answer['image_id'])
img_id = question['image_id']
img_idx = None
if img_id2val:
img_idx = img_id2val[img_id]
entries.append(_create_entry(img_idx, question, answer))
return entries
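# Illustrative shape of one entry built by _load_dataset above (all values are
# made up):
_EXAMPLE_ENTRY = {
    'question_id': 1,
    'image_id': 2,
    'image_idx': None,  # index into the hdf5 feature file when img_id2val is given
    'question': 'what color is the cat',
    'answer': {'question_type': 'what color', 'labels': [7], 'scores': [0.9]},
}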
class VQAFeatureDataset(Dataset):
def __init__(self, name, dictionary, dataroot='data', dataset='cpv2',
use_hdf5=False, cache_image_features=False):
super(VQAFeatureDataset, self).__init__()
self.name=name
if dataset=='cpv2':
with open('data/train_cpv2_hintscore.json', 'r') as f:
self.train_hintscore = json.load(f)
with open('data/test_cpv2_hintscore.json', 'r') as f:
                self.test_hintscore = json.load(f)
with open('util/cpv2_type_mask.json', 'r') as f:
self.type_mask = json.load(f)
with open('util/cpv2_notype_mask.json', 'r') as f:
self.notype_mask = json.load(f)
elif dataset=='cpv1':
with open('data/train_cpv1_hintscore.json', 'r') as f:
self.train_hintscore = json.load(f)
with open('data/test_cpv1_hintscore.json', 'r') as f:
                self.test_hintscore = json.load(f)
with open('util/cpv1_type_mask.json', 'r') as f:
self.type_mask = json.load(f)
with open('util/cpv1_notype_mask.json', 'r') as f:
self.notype_mask = json.load(f)
elif dataset=='v2':
with open('data/train_v2_hintscore.json', 'r') as f:
self.train_hintscore = json.load(f)
with open('data/test_v2_hintscore.json', 'r') as f:
                self.test_hintscore = json.load(f)
with open('util/v2_type_mask.json', 'r') as f:
self.type_mask = json.load(f)
with open('util/v2_notype_mask.json', 'r') as f:
self.notype_mask = json.load(f)
elif dataset=='cpv2val':
with open('data/train_cpv2_hintscore.json', 'r') as f:
self.train_hintscore = json.load(f)
with open('data/train_cpv2_hintscore.json', 'r') as f:
                self.test_hintscore = json.load(f)
with open('util/cpv2_type_mask.json', 'r') as f:
self.type_mask = json.load(f)
with open('util/cpv2_notype_mask.json', 'r') as f:
self.notype_mask = json.load(f)
assert name in ['train', 'val']
if dataset=='cpv2':
ans2label_path = os.path.join(dataroot, 'cp-cache', 'trainval_ans2label.pkl')
label2ans_path = os.path.join(dataroot, 'cp-cache', 'trainval_label2ans.pkl')
elif dataset=='cpv1':
ans2label_path = os.path.join(dataroot, 'cp-v1-cache', 'trainval_ans2label.pkl')
label2ans_path = os.path.join(dataroot, 'cp-v1-cache', 'trainval_label2ans.pkl')
elif dataset=='v2':
ans2label_path = os.path.join(dataroot, 'cache', 'trainval_ans2label.pkl')
label2ans_path = os.path.join(dataroot, 'cache', 'trainval_label2ans.pkl')
elif dataset=='cpv2val':
ans2label_path = os.path.join(dataroot, 'cpval-cache', 'trainval_ans2label.pkl')
label2ans_path = os.path.join(dataroot, 'cpval-cache', 'trainval_label2ans.pkl')
self.ans2label = cPickle.load(open(ans2label_path, 'rb'))
self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
self.num_ans_candidates = len(self.ans2label)
self.dictionary = dictionary
self.use_hdf5 = use_hdf5
if use_hdf5:
h5_path = os.path.join(dataroot, '%s36.hdf5'%name)
self.hf = h5py.File(h5_path, 'r')
self.features = self.hf.get('image_features')
with open("util/%s36_imgid2img.pkl"%name, "rb") as f:
imgid2idx = cPickle.load(f)
else:
imgid2idx = None
self.entries = _load_dataset(dataroot, name, imgid2idx, dataset=dataset)
if cache_image_features:
image_to_fe = {}
for entry in tqdm(self.entries, ncols=100, desc="caching-features"):
img_id = entry["image_id"]
if img_id not in image_to_fe:
if use_hdf5:
fe = np.array(self.features[imgid2idx[img_id]])
else:
fe=torch.load('data/css_features/'+str(img_id)+'.pth')['image_feature']
image_to_fe[img_id]=fe
self.image_to_fe = image_to_fe
if use_hdf5:
self.hf.close()
else:
self.image_to_fe = None
self.tokenize()
self.tensorize()
self.v_dim = 2048
def tokenize(self, max_length=14):
"""Tokenizes the questions.
        This will add q_token to each entry of the dataset.
        -1 represents nil, and should be treated as padding_idx in embedding
"""
for entry in tqdm(self.entries, ncols=100, desc="tokenize"):
tokens = self.dictionary.tokenize(entry['question'], False)
tokens = tokens[:max_length]
if len(tokens) < max_length:
# Note here we pad in front of the sentence
padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
padding_mask=[self.dictionary.padding_idx-1] * (max_length - len(tokens))
tokens_mask = padding_mask + tokens
tokens = padding + tokens
utils.assert_eq(len(tokens), max_length)
entry['q_token'] = tokens
entry['q_token_mask']=tokens_mask
def tensorize(self):
for entry in tqdm(self.entries, ncols=100, desc="tensorize"):
question = torch.from_numpy(np.array(entry['q_token']))
question_mask = torch.from_numpy(np.array(entry['q_token_mask']))
entry['q_token'] = question
entry['q_token_mask']=question_mask
answer = entry['answer']
labels = np.array(answer['labels'])
scores = np.array(answer['scores'], dtype=np.float32)
if len(labels):
labels = torch.from_numpy(labels)
scores = torch.from_numpy(scores)
entry['answer']['labels'] = labels
entry['answer']['scores'] = scores
else:
entry['answer']['labels'] = None
entry['answer']['scores'] = None
def __getitem__(self, index):
entry = self.entries[index]
if self.image_to_fe is not None:
features = self.image_to_fe[entry["image_id"]]
elif self.use_hdf5:
features = np.array(self.features[entry['image_idx']])
features = torch.from_numpy(features).view(36, 2048)
else:
features = torch.load('data/css_features/' + str(entry["image_id"]) + '.pth')['image_feature']
q_id=entry['question_id']
ques = entry['q_token']
ques_mask=entry['q_token_mask']
answer = entry['answer']
labels = answer['labels']
scores = answer['scores']
target = torch.zeros(self.num_ans_candidates)
if labels is not None:
target.scatter_(0, labels, scores)
if self.name=='train':
train_hint=torch.tensor(self.train_hintscore[str(q_id)])
type_mask=torch.tensor(self.type_mask[str(q_id)])
notype_mask=torch.tensor(self.notype_mask[str(q_id)])
if "bias" in entry:
return features, ques, target,entry["bias"],train_hint,type_mask,notype_mask,ques_mask
else:
return features, ques,target, 0,train_hint
else:
            test_hint=torch.tensor(self.test_hintscore[str(q_id)])
if "bias" in entry:
return features, ques, target, entry["bias"],q_id,test_hint
else:
return features, ques, target, 0,q_id,test_hint
def __len__(self):
return len(self.entries)
| 12,287 | 38.009524 | 106 | py |
introd | introd-main/css/eval.py | import argparse
import json
import cPickle
from collections import defaultdict, Counter
from os.path import dirname, join
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import os
# from new_dataset import Dictionary, VQAFeatureDataset
from dataset import Dictionary, VQAFeatureDataset
import base_model
from train import train
import utils
from vqa_debias_loss_functions import *
from tqdm import tqdm
from torch.autograd import Variable
def parse_args():
parser = argparse.ArgumentParser("Train the BottomUpTopDown model with a de-biasing method")
# Arguments we added
parser.add_argument(
'--cache_features', default=True,
help="Cache image features in RAM. Makes things much faster, "
"especially if the filesystem is slow, but requires at least 48gb of RAM")
parser.add_argument(
'--dataset', default='cpv2', help="Run on VQA-2.0 instead of VQA-CP 2.0")
parser.add_argument(
'-p', "--entropy_penalty", default=0.36, type=float,
help="Entropy regularizer weight for the learned_mixin model")
parser.add_argument(
'--debias', default="learned_mixin",
choices=["learned_mixin", "reweight", "bias_product", "none"],
help="Kind of ensemble loss to use")
    # Arguments from the original model; we leave these at their defaults
parser.add_argument('--num_hid', type=int, default=1024)
parser.add_argument('--model', type=str, default='baseline0_newatt')
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--model_state', type=str, default='logs/exp0/model.pth')
args = parser.parse_args()
return args
def compute_score_with_logits(logits, labels):
# logits = torch.max(logits, 1)[1].data # argmax
logits = torch.argmax(logits,1)
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def evaluate(model,dataloader,qid2type):
score = 0
upper_bound = 0
score_yesno = 0
score_number = 0
score_other = 0
total_yesno = 0
total_number = 0
total_other = 0
model.train(False)
# import pdb;pdb.set_trace()
for v, q, a, b,qids,hintscore in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
v = Variable(v, requires_grad=False).cuda()
q = Variable(q, requires_grad=False).cuda()
pred, _ ,_= model(v, q, None, None,None)
batch_score= compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
score += batch_score.sum()
upper_bound += (a.max(1)[0]).sum()
qids = qids.detach().cpu().int().numpy()
for j in range(len(qids)):
qid=qids[j]
typ = qid2type[str(qid)]
if typ == 'yes/no':
score_yesno += batch_score[j]
total_yesno += 1
elif typ == 'other':
score_other += batch_score[j]
total_other += 1
elif typ == 'number':
score_number += batch_score[j]
total_number += 1
else:
                print('Unexpected question type: %s' % typ)
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
score_yesno /= total_yesno
score_other /= total_other
score_number /= total_number
print('\teval overall score: %.2f' % (100 * score))
print('\teval up_bound score: %.2f' % (100 * upper_bound))
print('\teval y/n score: %.2f' % (100 * score_yesno))
print('\teval other score: %.2f' % (100 * score_other))
print('\teval number score: %.2f' % (100 * score_number))
def evaluate_ai(model,dataloader,qid2type,label2ans):
score=0
upper_bound=0
ai_top1=0
ai_top2=0
ai_top3=0
for v, q, a, b, qids, hintscore in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
v = Variable(v, requires_grad=False).cuda().float().requires_grad_()
q = Variable(q, requires_grad=False).cuda()
a=a.cuda()
hintscore=hintscore.cuda().float()
pred, _, _ = model(v, q, None, None, None)
        # gradient of the summed ground-truth logits w.r.t. the visual features,
        # pooled over the feature dimension (a Grad-CAM-style importance score)
        vqa_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0]  # [b , 36, 2048]
        vqa_grad_cam=vqa_grad.sum(2)
        sv_ind=torch.argmax(vqa_grad_cam,1)
        x_ind_top1=torch.topk(vqa_grad_cam,k=1)[1]
        x_ind_top2=torch.topk(vqa_grad_cam,k=2)[1]
        x_ind_top3=torch.topk(vqa_grad_cam,k=3)[1]
        # mean hint score over the top-k most influential objects
        y_score_top1 = hintscore.gather(1,x_ind_top1).sum(1)/1
        y_score_top2 = hintscore.gather(1,x_ind_top2).sum(1)/2
        y_score_top3 = hintscore.gather(1,x_ind_top3).sum(1)/3
batch_score=compute_score_with_logits(pred,a.cuda()).cpu().numpy().sum(1)
score+=batch_score.sum()
upper_bound+=(a.max(1)[0]).sum()
qids=qids.detach().cpu().int().numpy()
for j in range(len(qids)):
if batch_score[j]>0:
ai_top1 += y_score_top1[j]
ai_top2 += y_score_top2[j]
ai_top3 += y_score_top3[j]
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
ai_top1=(ai_top1.item() * 1.0) / len(dataloader.dataset)
ai_top2=(ai_top2.item() * 1.0) / len(dataloader.dataset)
ai_top3=(ai_top3.item() * 1.0) / len(dataloader.dataset)
print('\teval overall score: %.2f' % (100 * score))
print('\teval up_bound score: %.2f' % (100 * upper_bound))
print('\ttop1_ai_score: %.2f' % (100 * ai_top1))
print('\ttop2_ai_score: %.2f' % (100 * ai_top2))
print('\ttop3_ai_score: %.2f' % (100 * ai_top3))
def main():
args = parse_args()
dataset = args.dataset
with open('util/qid2type_%s.json'%args.dataset,'r') as f:
qid2type=json.load(f)
if dataset=='cpv1':
dictionary = Dictionary.load_from_file('data/dictionary_v1.pkl')
elif dataset=='cpv2' or dataset=='v2':
dictionary = Dictionary.load_from_file('data/dictionary.pkl')
print("Building test dataset...")
eval_dset = VQAFeatureDataset('val', dictionary, dataset=dataset,
cache_image_features=args.cache_features)
# Build the model using the original constructor
constructor = 'build_%s' % args.model
model = getattr(base_model, constructor)(eval_dset, args.num_hid).cuda()
if args.debias == "bias_product":
model.debias_loss_fn = BiasProduct()
elif args.debias == "none":
model.debias_loss_fn = Plain()
elif args.debias == "reweight":
model.debias_loss_fn = ReweightByInvBias()
elif args.debias == "learned_mixin":
model.debias_loss_fn = LearnedMixin(args.entropy_penalty)
else:
        raise RuntimeError("unknown --debias option: %s" % args.debias)
model_state = torch.load(args.model_state)
model.load_state_dict(model_state)
model = model.cuda()
batch_size = args.batch_size
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
# The original version uses multiple workers, but that just seems slower on my setup
eval_loader = DataLoader(eval_dset, batch_size, shuffle=False, num_workers=0)
print("Starting eval...")
evaluate(model,eval_loader,qid2type)
if __name__ == '__main__':
main()
| 7,543 | 34.088372 | 113 | py |
introd | introd-main/css/attention.py | import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from fc import FCNet
class Attention(nn.Module):
def __init__(self, v_dim, q_dim, num_hid):
super(Attention, self).__init__()
self.nonlinear = FCNet([v_dim + q_dim, num_hid])
self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)
def forward(self, v, q):
"""
v: [batch, k, vdim]
q: [batch, qdim]
"""
logits = self.logits(v, q)
w = nn.functional.softmax(logits, 1)
return w
def logits(self, v, q):
num_objs = v.size(1)
q = q.unsqueeze(1).repeat(1, num_objs, 1)
vq = torch.cat((v, q), 2)
joint_repr = self.nonlinear(vq)
logits = self.linear(joint_repr)
return logits
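# Shape sketch for Attention (illustrative sizes): one softmax weight per object.
def _demo_attention_shapes():
    att = Attention(v_dim=8, q_dim=6, num_hid=16)
    v = torch.randn(2, 36, 8)
    q = torch.randn(2, 6)
    return att(v, q).shape  # torch.Size([2, 36, 1])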
class NewAttention(nn.Module):
def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
super(NewAttention, self).__init__()
self.v_proj = FCNet([v_dim, num_hid])
self.q_proj = FCNet([q_dim, num_hid])
self.dropout = nn.Dropout(dropout)
self.linear = weight_norm(nn.Linear(q_dim, 1), dim=None)
def forward(self, v, q):
"""
v: [batch, k, vdim]
q: [batch, qdim]
"""
        logits = self.logits(v, q)
        # softmax is intentionally left to the caller (BaseModel applies it,
        # optionally masked), so raw logits are returned here
        # w = nn.functional.softmax(logits, 1)
        # return w
        return logits
def logits(self, v, q):
batch, k, _ = v.size()
v_proj = self.v_proj(v) # [batch, k, qdim]
q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
joint_repr = v_proj * q_proj
joint_repr = self.dropout(joint_repr)
logits = self.linear(joint_repr)
return logits
| 1,686 | 28.086207 | 66 | py |
introd | introd-main/css/train.py | import json
import os
import pickle
import time
from os.path import join
import torch
import torch.nn as nn
import utils
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
import random
import copy
def compute_score_with_logits(logits, labels):
logits = torch.argmax(logits, 1)
one_hots = torch.zeros(*labels.size()).cuda()
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = (one_hots * labels)
return scores
def train(model, train_loader, eval_loader,args,qid2type):
dataset=args.dataset
num_epochs=args.epochs
mode=args.mode
run_eval=args.eval_each_epoch
output=args.output
optim = torch.optim.Adamax(model.parameters())
logger = utils.Logger(os.path.join(output, 'log.txt'))
total_step = 0
best_eval_score = 0
if mode=='q_debias':
topq=args.topq
keep_qtype=args.keep_qtype
elif mode=='v_debias':
topv=args.topv
top_hint=args.top_hint
elif mode=='q_v_debias':
topv=args.topv
top_hint=args.top_hint
topq=args.topq
keep_qtype=args.keep_qtype
qvp=args.qvp
for epoch in range(num_epochs):
total_loss = 0
train_score = 0
t = time.time()
for i, (v, q, a, b, hintscore,type_mask,notype_mask,q_mask) in tqdm(enumerate(train_loader), ncols=100,
desc="Epoch %d" % (epoch + 1), total=len(train_loader)):
total_step += 1
#########################################
v = Variable(v).cuda().requires_grad_()
q = Variable(q).cuda()
q_mask=Variable(q_mask).cuda()
a = Variable(a).cuda()
b = Variable(b).cuda()
hintscore = Variable(hintscore).cuda()
type_mask=Variable(type_mask).float().cuda()
notype_mask=Variable(notype_mask).float().cuda()
#########################################
if mode=='updn':
pred, loss,_ = model(v, q, a, b, None)
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
elif mode=='q_debias':
if keep_qtype==True:
sen_mask=type_mask
else:
sen_mask=notype_mask
## first train
pred, loss,word_emb = model(v, q, a, b, None)
word_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), word_emb, create_graph=True)[0]
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
## second train
word_grad_cam = word_grad.sum(2)
# word_grad_cam_sigmoid = torch.sigmoid(word_grad_cam * 1000)
word_grad_cam_sigmoid = torch.exp(word_grad_cam * sen_mask)
word_grad_cam_sigmoid = word_grad_cam_sigmoid * sen_mask
w_ind = word_grad_cam_sigmoid.sort(1, descending=True)[1][:, :topq]
q2 = copy.deepcopy(q_mask)
m1 = copy.deepcopy(sen_mask) ##[0,0,0...0,1,1,1,1]
m1.scatter_(1, w_ind, 0) ##[0,0,0...0,0,1,1,0]
m2 = 1 - m1 ##[1,1,1...1,1,0,0,1]
                if dataset=='cpv1':
                    m3=m1*18330  # dictionary-specific id used as the replacement token for masked positions
                else:
                    m3 = m1 * 18455 ##[0,0,0...0,0,18455,18455,0]
q2 = q2 * m2.long() + m3.long()
pred, _, _ = model(v, q2, None, b, None)
pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
false_ans.scatter_(1, pred_ind, 0)
a2 = a * false_ans
q3 = copy.deepcopy(q)
if dataset=='cpv1':
q3.scatter_(1, w_ind, 18330)
else:
q3.scatter_(1, w_ind, 18455)
## third train
pred, loss, _ = model(v, q3, a2, b, None)
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
elif mode=='v_debias':
## first train
pred, loss, _ = model(v, q, a, b, None)
visual_grad=torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0]
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
##second train
v_mask = torch.zeros(v.shape[0], 36).cuda()
visual_grad_cam = visual_grad.sum(2)
hint_sort, hint_ind = hintscore.sort(1, descending=True)
v_ind = hint_ind[:, :top_hint]
v_grad = visual_grad_cam.gather(1, v_ind)
if topv==-1:
v_grad_score,v_grad_ind=v_grad.sort(1,descending=True)
v_grad_score=nn.functional.softmax(v_grad_score*10,dim=1)
v_grad_sum=torch.cumsum(v_grad_score,dim=1)
v_grad_mask=(v_grad_sum<=0.65).long()
v_grad_mask[:,0] = 1
v_mask_ind=v_grad_mask*v_ind
for x in range(a.shape[0]):
num=len(torch.nonzero(v_grad_mask[x]))
v_mask[x].scatter_(0,v_mask_ind[x,:num],1)
else:
v_grad_ind = v_grad.sort(1, descending=True)[1][:, :topv]
v_star = v_ind.gather(1, v_grad_ind)
v_mask.scatter_(1, v_star, 1)
pred, _, _ = model(v, q, None, b, v_mask)
pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
false_ans.scatter_(1, pred_ind, 0)
a2 = a * false_ans
v_mask = 1 - v_mask
pred, loss, _ = model(v, q, a2, b, v_mask)
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
elif mode=='q_v_debias':
random_num = random.randint(1, 10)
if keep_qtype == True:
sen_mask = type_mask
else:
sen_mask = notype_mask
if random_num<=qvp:
## first train
pred, loss, word_emb = model(v, q, a, b, None)
word_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), word_emb, create_graph=True)[0]
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
## second train
word_grad_cam = word_grad.sum(2)
# word_grad_cam_sigmoid = torch.sigmoid(word_grad_cam * 1000)
word_grad_cam_sigmoid = torch.exp(word_grad_cam * sen_mask)
word_grad_cam_sigmoid = word_grad_cam_sigmoid * sen_mask
w_ind = word_grad_cam_sigmoid.sort(1, descending=True)[1][:, :topq]
q2 = copy.deepcopy(q_mask)
m1 = copy.deepcopy(sen_mask) ##[0,0,0...0,1,1,1,1]
m1.scatter_(1, w_ind, 0) ##[0,0,0...0,0,1,1,0]
m2 = 1 - m1 ##[1,1,1...1,1,0,0,1]
if dataset=='cpv1':
m3=m1*18330
else:
m3 = m1 * 18455 ##[0,0,0...0,0,18455,18455,0]
q2 = q2 * m2.long() + m3.long()
pred, _, _ = model(v, q2, None, b, None)
pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
false_ans.scatter_(1, pred_ind, 0)
a2 = a * false_ans
q3 = copy.deepcopy(q)
if dataset=='cpv1':
q3.scatter_(1, w_ind, 18330)
else:
q3.scatter_(1, w_ind, 18455)
## third train
pred, loss, _ = model(v, q3, a2, b, None)
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
else:
## first train
pred, loss, _ = model(v, q, a, b, None)
visual_grad = torch.autograd.grad((pred * (a > 0).float()).sum(), v, create_graph=True)[0]
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
batch_score = compute_score_with_logits(pred, a.data).sum()
train_score += batch_score
##second train
v_mask = torch.zeros(v.shape[0], 36).cuda()
visual_grad_cam = visual_grad.sum(2)
hint_sort, hint_ind = hintscore.sort(1, descending=True)
v_ind = hint_ind[:, :top_hint]
v_grad = visual_grad_cam.gather(1, v_ind)
if topv == -1:
v_grad_score, v_grad_ind = v_grad.sort(1, descending=True)
v_grad_score = nn.functional.softmax(v_grad_score * 10, dim=1)
v_grad_sum = torch.cumsum(v_grad_score, dim=1)
v_grad_mask = (v_grad_sum <= 0.65).long()
v_grad_mask[:,0] = 1
v_mask_ind = v_grad_mask * v_ind
for x in range(a.shape[0]):
num = len(torch.nonzero(v_grad_mask[x]))
v_mask[x].scatter_(0, v_mask_ind[x,:num], 1)
else:
v_grad_ind = v_grad.sort(1, descending=True)[1][:, :topv]
v_star = v_ind.gather(1, v_grad_ind)
v_mask.scatter_(1, v_star, 1)
pred, _, _ = model(v, q, None, b, v_mask)
pred_ind = torch.argsort(pred, 1, descending=True)[:, :5]
false_ans = torch.ones(pred.shape[0], pred.shape[1]).cuda()
false_ans.scatter_(1, pred_ind, 0)
a2 = a * false_ans
v_mask = 1 - v_mask
pred, loss, _ = model(v, q, a2, b, v_mask)
if (loss != loss).any():
raise ValueError("NaN loss")
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optim.step()
optim.zero_grad()
total_loss += loss.item() * q.size(0)
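        # debias modes backprop two losses per example (original pass + counterfactual pass), hence the factor of 2 below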
if mode=='updn':
total_loss /= len(train_loader.dataset)
else:
total_loss /= len(train_loader.dataset) * 2
train_score = 100 * train_score / len(train_loader.dataset)
if run_eval:
model.train(False)
results = evaluate(model, eval_loader, qid2type)
results["epoch"] = epoch + 1
results["step"] = total_step
results["train_loss"] = total_loss
results["train_score"] = train_score
model.train(True)
eval_score = results["score"]
bound = results["upper_bound"]
yn = results['score_yesno']
other = results['score_other']
num = results['score_number']
logger.write('epoch %d, time: %.2f' % (epoch + 1, time.time() - t))
logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
if run_eval:
logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
logger.write('\tyn score: %.2f other score: %.2f num score: %.2f' % (100 * yn, 100 * other, 100 * num))
if eval_score > best_eval_score:
model_path = os.path.join(output, 'model_best.pth')
torch.save(model.state_dict(), model_path)
best_eval_score = eval_score
        # always save the latest checkpoint; best_eval_score is only updated above when eval improves
        model_path = os.path.join(output, 'model.pth')
        torch.save(model.state_dict(), model_path)
def evaluate(model, dataloader, qid2type):
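    # computes VQA soft accuracy overall and per question type (yes/no, number, other)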
score = 0
upper_bound = 0
score_yesno = 0
score_number = 0
score_other = 0
total_yesno = 0
total_number = 0
total_other = 0
for v, q, a, b, qids, _ in tqdm(dataloader, ncols=100, total=len(dataloader), desc="eval"):
v = Variable(v, requires_grad=False).cuda()
q = Variable(q, requires_grad=False).cuda()
pred, _,_ = model(v, q, None, None, None)
batch_score = compute_score_with_logits(pred, a.cuda()).cpu().numpy().sum(1)
score += batch_score.sum()
upper_bound += (a.max(1)[0]).sum()
qids = qids.detach().cpu().int().numpy()
for j in range(len(qids)):
qid = qids[j]
typ = qid2type[str(qid)]
if typ == 'yes/no':
score_yesno += batch_score[j]
total_yesno += 1
elif typ == 'other':
score_other += batch_score[j]
total_other += 1
elif typ == 'number':
score_number += batch_score[j]
total_number += 1
else:
                print('Unknown question type:', typ)
score = score / len(dataloader.dataset)
upper_bound = upper_bound / len(dataloader.dataset)
score_yesno /= total_yesno
score_other /= total_other
score_number /= total_number
results = dict(
score=score,
upper_bound=upper_bound,
score_yesno=score_yesno,
score_other=score_other,
score_number=score_number,
)
return results
| 15,958 | 36.817536 | 115 | py |
introd | introd-main/css/language_model.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class WordEmbedding(nn.Module):
"""Word Embedding
The ntoken-th dim is used for padding_idx, which agrees *implicitly*
with the definition in Dictionary.
"""
def __init__(self, ntoken, emb_dim, dropout):
super(WordEmbedding, self).__init__()
self.emb = nn.Embedding(ntoken+1, emb_dim, padding_idx=ntoken)
self.dropout = nn.Dropout(dropout)
self.ntoken = ntoken
self.emb_dim = emb_dim
def init_embedding(self, np_file):
weight_init = torch.from_numpy(np.load(np_file))
assert weight_init.shape == (self.ntoken, self.emb_dim)
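        # only the first ntoken rows are loaded; the extra padding row keeps its default initialization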
self.emb.weight.data[:self.ntoken] = weight_init
def forward(self, x):
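        # x: [batch, seq_len] token ids -> [batch, seq_len, emb_dim]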
emb = self.emb(x)
emb = self.dropout(emb)
return emb
class QuestionEmbedding(nn.Module):
def __init__(self, in_dim, num_hid, nlayers, bidirect, dropout, rnn_type='GRU'):
"""Module for question embedding
"""
super(QuestionEmbedding, self).__init__()
assert rnn_type == 'LSTM' or rnn_type == 'GRU'
rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
self.rnn = rnn_cls(
in_dim, num_hid, nlayers,
bidirectional=bidirect,
dropout=dropout,
batch_first=True)
self.in_dim = in_dim
self.num_hid = num_hid
self.nlayers = nlayers
self.rnn_type = rnn_type
self.ndirections = 1 + int(bidirect)
def init_hidden(self, batch):
# just to get the type of tensor
weight = next(self.parameters()).data
hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)
if self.rnn_type == 'LSTM':
return (Variable(weight.new(*hid_shape).zero_()),
Variable(weight.new(*hid_shape).zero_()))
else:
return Variable(weight.new(*hid_shape).zero_())
def forward(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
if self.ndirections == 1:
return output[:, -1]
forward_ = output[:, -1, :self.num_hid]
backward = output[:, 0, self.num_hid:]
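        # concatenate the forward RNN's last state with the backward RNN's first state -> [batch, 2 * num_hid]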
return torch.cat((forward_, backward), dim=1)
def forward_all(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
return output
| 2,639 | 31.195122 | 84 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/bq_service.py | from datetime import datetime, timedelta, timezone
import os
from functools import lru_cache
from pprint import pprint
from dotenv import load_dotenv
from google.cloud import bigquery
from google.cloud.bigquery import QueryJobConfig, ScalarQueryParameter
from pandas import DataFrame
from app import APP_ENV, seek_confirmation
from app.decorators.number_decorators import fmt_n
load_dotenv()
GOOGLE_APPLICATION_CREDENTIALS = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") # implicit check by google.cloud (and keras)
PROJECT_NAME = os.getenv("BIGQUERY_PROJECT_NAME", default="tweet-collector-py")
DATASET_NAME = os.getenv("BIGQUERY_DATASET_NAME", default="impeachment_development") #> "_test" or "_production"
DESTRUCTIVE_MIGRATIONS = (os.getenv("DESTRUCTIVE_MIGRATIONS", default="false") == "true")
VERBOSE_QUERIES = (os.getenv("VERBOSE_QUERIES", default="false") == "true")
CLEANUP_MODE = (os.getenv("CLEANUP_MODE", default="true") == "true")
DEFAULT_START = "2019-12-02 01:00:00" # @deprectated, the "beginning of time" for the impeachment dataset. todo: allow customization via env var
DEFAULT_END = "2020-03-24 20:00:00" # @deprectated, the "end of time" for the impeachment dataset. todo: allow customization via env var
def generate_timestamp(dt=None):
"""Formats datetime object for storing in BigQuery. Uses current time by default. """
dt = dt or datetime.now()
return dt.strftime("%Y-%m-%d %H:%M:%S")
def generate_temp_table_id():
return datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
def split_into_batches(my_list, batch_size=9000):
"""Splits a list into evenly sized batches""" # h/t: h/t: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
for i in range(0, len(my_list), batch_size):
yield my_list[i : i + batch_size]
class BigQueryService():
def __init__(self, project_name=PROJECT_NAME, dataset_name=DATASET_NAME,
verbose=VERBOSE_QUERIES, destructive=DESTRUCTIVE_MIGRATIONS, cautious=True):
self.project_name = project_name
self.dataset_name = dataset_name
self.dataset_address = f"{self.project_name}.{self.dataset_name}"
self.verbose = (verbose == True)
self.destructive = (destructive == True)
self.cautious = (cautious == True)
self.client = bigquery.Client()
print("-------------------------")
print("BIGQUERY SERVICE...")
print(" DATASET ADDRESS:", self.dataset_address.upper())
print(" DESTRUCTIVE MIGRATIONS:", self.destructive)
print(" VERBOSE QUERIES:", self.verbose)
if self.cautious:
seek_confirmation()
@property
def metadata(self):
return {"dataset_address": self.dataset_address, "destructive": self.destructive, "verbose": self.verbose}
def execute_query(self, sql):
"""Param: sql (str)"""
if self.verbose:
print(sql)
job = self.client.query(sql)
return job.result()
def execute_query_in_batches(self, sql, temp_table_name=None):
"""Param: sql (str)"""
if self.verbose:
print(sql)
if not temp_table_name:
temp_table_id = generate_temp_table_id()
temp_table_name = f"{self.dataset_address}.temp_{temp_table_id}"
job_config = bigquery.QueryJobConfig(
priority=bigquery.QueryPriority.BATCH,
allow_large_results=True,
destination=temp_table_name
)
job = self.client.query(sql, job_config=job_config)
print("BATCH QUERY JOB:", type(job), job.job_id, job.state, job.location)
return job
def query_to_df(self, sql):
"""high-level wrapper to return a DataFrame"""
results = self.execute_query(sql)
records = [dict(row) for row in list(results)]
df = DataFrame(records)
return df
def insert_records_in_batches(self, table, records):
"""
Params:
table (table ID string, Table, or TableReference)
records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
#errors = self.client.insert_rows(table, rows_to_insert)
#> ... google.api_core.exceptions.BadRequest: 400 POST https://bigquery.googleapis.com/bigquery/v2/projects/.../tables/daily_bot_probabilities/insertAll:
#> ... too many rows present in the request, limit: 10000 row count: 36092.
#> ... see: https://cloud.google.com/bigquery/quotas#streaming_inserts
errors = []
batches = list(split_into_batches(rows_to_insert, batch_size=5000))
for batch in batches:
errors += self.client.insert_rows(table, batch)
return errors
def delete_temp_tables_older_than(self, days=3):
"""Deletes all tables that:
have "temp_" in their name (product of the batch jobs), and were
created at least X days ago (safely avoid deleting tables being used by in-progress batch jobs)
"""
cutoff_date = datetime.now(tz=timezone.utc) - timedelta(days=days)
print("CUTOFF DATE:", cutoff_date)
tables = list(self.client.list_tables(self.dataset_name)) # API call
tables_to_delete = [t for t in tables if "temp_" in t.table_id and t.created < cutoff_date]
print("TABLES TO DELETE:")
pprint([t.table_id for t in tables_to_delete])
seek_confirmation()
print("DELETING...")
for old_temp_table in tables_to_delete:
print(" ", old_temp_table.table_id)
self.client.delete_table(old_temp_table)
#def get_table(self, table_name):
# return self.client.get_table(f"{self.dataset_address}.{table_name}") # API call. cache it here once.
#
# COLLECTING TWEETS V2
#
def migrate_topics_table(self):
print("MIGRATING TOPICS TABLE...")
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.topics`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.topics` (
topic STRING NOT NULL,
created_at TIMESTAMP,
);
"""
return list(self.execute_query(sql))
def migrate_tweets_table(self):
print("MIGRATING TWEETS TABLE...")
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.tweets`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.tweets` (
status_id STRING,
status_text STRING,
truncated BOOLEAN,
retweeted_status_id STRING,
retweeted_user_id STRING,
retweeted_user_screen_name STRING,
reply_status_id STRING,
reply_user_id STRING,
is_quote BOOLEAN,
geo STRING,
created_at TIMESTAMP,
user_id STRING,
user_name STRING,
user_screen_name STRING,
user_description STRING,
user_location STRING,
user_verified BOOLEAN,
user_created_at TIMESTAMP
);
"""
return list(self.execute_query(sql))
@property
@lru_cache(maxsize=None)
def topics_table(self):
return self.client.get_table(f"{self.dataset_address}.topics") # an API call (caches results for subsequent inserts)
@property
@lru_cache(maxsize=None)
def tweets_table(self):
return self.client.get_table(f"{self.dataset_address}.tweets") # an API call (caches results for subsequent inserts)
def fetch_topics(self):
"""Returns a list of topic strings"""
sql = f"""
SELECT topic, created_at
FROM `{self.dataset_address}.topics`
ORDER BY created_at;
"""
return self.execute_query(sql)
def fetch_topic_names(self):
return [row.topic for row in self.fetch_topics()]
def append_topics(self, topics):
"""
Inserts topics unless they already exist.
Param: topics (list of dict)
"""
rows = self.fetch_topics()
existing_topics = [row.topic for row in rows]
new_topics = [topic for topic in topics if topic not in existing_topics]
if new_topics:
rows_to_insert = [[new_topic, generate_timestamp()] for new_topic in new_topics]
errors = self.client.insert_rows(self.topics_table, rows_to_insert)
return errors
else:
print("NO NEW TOPICS...")
return []
def append_tweets(self, tweets):
"""Param: tweets (list of dict)"""
rows_to_insert = [list(d.values()) for d in tweets]
errors = self.client.insert_rows(self.tweets_table, rows_to_insert)
return errors
#
# COLLECTING USER FRIENDS
#
def migrate_populate_users(self):
"""
Resulting table has a row for each user id / screen name combo
(multiple rows per user id if they changed their screen name)
"""
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.users`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.users` as (
SELECT DISTINCT
user_id
,user_screen_name as screen_name
FROM `{self.dataset_address}.tweets`
WHERE user_id IS NOT NULL AND user_screen_name IS NOT NULL
ORDER BY 1
);
"""
results = self.execute_query(sql)
return list(results)
def migrate_user_friends(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_friends`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends` (
user_id STRING,
screen_name STRING,
friend_count INT64,
friend_names ARRAY<STRING>,
start_at TIMESTAMP,
end_at TIMESTAMP
);
"""
results = self.execute_query(sql)
return list(results)
def fetch_remaining_users(self, min_id=None, max_id=None, limit=None):
"""Returns a list of table rows"""
sql = f"""
SELECT
u.user_id
,u.screen_name
FROM `{self.dataset_address}.users` u
LEFT JOIN `{self.dataset_address}.user_friends` f ON u.user_id = f.user_id
WHERE f.user_id IS NULL
"""
if min_id and max_id:
sql += f" AND CAST(u.user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
sql += f"ORDER BY u.user_id "
if limit:
sql += f"LIMIT {int(limit)};"
results = self.execute_query(sql)
return list(results)
@property
@lru_cache(maxsize=None)
def user_friends_table(self):
return self.client.get_table(f"{self.dataset_address}.user_friends") # an API call (caches results for subsequent inserts)
def insert_user_friends(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
#rows_to_insert = [list(d.values()) for d in records if any(d["friend_names"])] # doesn't store failed attempts. can try those again later
#if any(rows_to_insert):
errors = self.client.insert_rows(self.user_friends_table, rows_to_insert)
return errors
def user_friend_collection_progress(self):
sql = f"""
SELECT
count(distinct user_id) as user_count
,round(avg(runtime_seconds), 2) as avg_duration
,round(sum(has_friends) / count(distinct user_id), 2) as pct_friendly
,round(avg(CASE WHEN has_friends = 1 THEN runtime_seconds END), 2) as avg_duration_friendly
,round(avg(CASE WHEN has_friends = 1 THEN friend_count END), 2) as avg_friends_friendly
FROM (
SELECT
user_id
,friend_count
,if(friend_count > 0, 1, 0) as has_friends
,start_at
,end_at
,DATETIME_DIFF(CAST(end_at as DATETIME), cast(start_at as DATETIME), SECOND) as runtime_seconds
            FROM `{self.dataset_address}.user_friends`
) subq
"""
return self.execute_query(sql)
#
# FRIEND GRAPHS
#
def fetch_user_friends(self, min_id=None, max_id=None, limit=None):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
FROM `{self.dataset_address}.user_friends`
"""
if min_id and max_id:
sql += f" WHERE CAST(user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
sql += f"ORDER BY user_id "
if limit:
sql += f"LIMIT {int(limit)};"
#return list(self.execute_query(sql))
return self.execute_query(sql) # return the generator so we can avoid storing the results in memory
def fetch_user_friends_in_batches(self, limit=None, min_friends=None):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names
FROM `{self.dataset_address}.user_friends`
"""
if min_friends:
sql += f" WHERE ARRAY_LENGTH(friend_names) >= {int(min_friends)} "
if limit:
sql += f" LIMIT {int(limit)}; "
return self.execute_query_in_batches(sql)
def partition_user_friends(self, n=10):
"""Params n (int) the number of partitions, each will be of equal size"""
sql = f"""
SELECT
partition_id
,count(DISTINCT user_id) as user_count
,min(user_id) as min_id
,max(user_id) as max_id
FROM (
SELECT
NTILE({int(n)}) OVER (ORDER BY CAST(user_id as int64)) as partition_id
,CAST(user_id as int64) as user_id
FROM (SELECT DISTINCT user_id FROM `{self.dataset_address}.user_friends`)
) user_partitions
GROUP BY partition_id
"""
results = self.execute_query(sql)
return list(results)
def fetch_random_users(self, limit=1000, topic="impeach", start_at=DEFAULT_START, end_at=DEFAULT_END):
"""
Fetches a random slice of users talking about a given topic during a given timeframe.
Params:
topic (str) the topic they were tweeting about:
to be balanced, choose 'impeach', '#IGHearing', '#SenateHearing', etc.
to be left-leaning, choose '#ImpeachAndConvict', '#ImpeachAndRemove', etc.
to be right-leaning, choose '#ShamTrial', '#AquittedForever', '#MAGA', etc.
limit (int) the max number of users to fetch
start_at (str) a date string for the earliest tweet
end_at (str) a date string for the latest tweet
"""
sql = f"""
SELECT DISTINCT user_id, user_screen_name, user_created_at
FROM `{self.dataset_address}.tweets`
WHERE upper(status_text) LIKE '%{topic.upper()}%' AND (created_at BETWEEN '{start_at}' AND '{end_at}')
ORDER BY rand()
LIMIT {int(limit)};
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS
#
def fetch_retweet_counts_in_batches(self, topic=None, start_at=None, end_at=None):
"""
        For each retweeter, counts the number of times they retweeted each other user.
        Optionally filtered to a given topic.
        Optionally filtered to a given timeframe.
Params:
topic (str) the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
start_at (str) a date string for the earliest tweet
end_at (str) a date string for the latest tweet
"""
sql = f"""
SELECT
user_id
,user_screen_name
,retweet_user_screen_name
,count(distinct status_id) as retweet_count
FROM `{self.dataset_address}.retweets`
WHERE user_screen_name <> retweet_user_screen_name -- excludes people retweeting themselves
"""
if topic:
sql+=f"""
AND upper(status_text) LIKE '%{topic.upper()}%'
"""
if start_at and end_at:
sql+=f"""
AND (created_at BETWEEN '{start_at}' AND '{end_at}')
"""
sql += """
GROUP BY 1,2,3
"""
return self.execute_query_in_batches(sql)
def fetch_specific_user_friends(self, screen_names):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
FROM `{self.dataset_address}.user_friends`
WHERE screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
"""
return self.execute_query(sql)
def fetch_specific_retweet_counts(self, screen_names):
"""FYI this fetches multiple rows per screen_name, for each screen_name that user retweeted"""
sql = f"""
SELECT user_id, user_screen_name, retweet_user_screen_name, retweet_count
FROM `{self.dataset_address}.retweet_counts`
WHERE user_screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
-- AND user_screen_name <> retweet_user_screen_name -- exclude users who have retweeted themselves
ORDER BY 2,3
"""
return self.execute_query(sql)
def fetch_retweet_weeks(self, start_at=None, end_at=None):
"""
Params:
start_at (str) like "2019-12-15 00:00:00"
end_at (str) like "2020-03-21 23:59:59"
"""
sql = f"""
SELECT
CASE
WHEN EXTRACT(week from created_at) = 0 THEN EXTRACT(year from created_at) - 1 -- treat first week of new year as the previous year
ELSE EXTRACT(year from created_at)
END year
,CASE
WHEN EXTRACT(week from created_at) = 0 THEN 52 -- treat first week of new year as the previous week
ELSE EXTRACT(week from created_at)
END week
,count(DISTINCT EXTRACT(day from created_at)) as day_count
,min(created_at) as min_created
,max(created_at) as max_created
,count(DISTINCT status_id) as retweet_count
,count(DISTINCT user_id) as user_count
FROM `{self.dataset_address}.retweets`
"""
if start_at and end_at:
sql += f"""
WHERE created_at BETWEEN '{start_at}' AND '{end_at}'
"""
sql += """
GROUP BY 1,2
ORDER BY 1,2
"""
return self.execute_query(sql)
#
# LOCAL ANALYSIS (PG PIPELINE)
#
def fetch_tweets_in_batches(self, limit=None, start_at=None, end_at=None):
sql = f"""
SELECT
status_id
,status_text
,truncated
,NULL as retweeted_status_id -- restore for version 2
,NULL as retweeted_user_id -- restore for version 2
,NULL as retweeted_user_screen_name -- restore for version 2
,reply_status_id
,reply_user_id
,is_quote
,geo
,created_at
,user_id
,user_name
,user_screen_name
,user_description
,user_location
,user_verified
,user_created_at
FROM `{self.dataset_address}.tweets`
"""
if start_at and end_at:
sql+=f"""
WHERE (created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
"""
if limit:
sql += f" LIMIT {int(limit)}; "
return self.execute_query_in_batches(sql)
def fetch_user_details_in_batches(self, limit=None):
sql = f"""
SELECT
user_id
,screen_name
,name
,description
,location
,verified
,created_at
,screen_name_count
,name_count
,description_count
,location_count
,verified_count
,created_at_count
,screen_names
,names
,descriptions
,locations
,verifieds
,created_ats
,friend_count
,status_count
,retweet_count
-- these topics are specific to the impeachment dataset, so will need to generalize if/when working with another topic (leave for future concern)
,impeach_and_convict
,senate_hearing
,ig_hearing
,facts_matter
,sham_trial
,maga
,acquitted_forever
FROM `{self.dataset_address}.user_details`
"""
if limit:
sql += f"LIMIT {int(limit)};"
return self.execute_query_in_batches(sql)
def fetch_retweeter_details_in_batches(self, limit=None):
sql = f"""
SELECT
user_id
,verified
,created_at
,screen_name_count
,name_count
,retweet_count
,ig_report
,ig_hearing
,senate_hearing
,not_above_the_law
,impeach_and_convict
,impeach_and_remove
,facts_matter
,sham_trial
,maga
,acquitted_forever
,country_over_party
FROM `{self.dataset_address}.retweeter_details`
"""
if limit:
sql += f"LIMIT {int(limit)};"
return self.execute_query_in_batches(sql)
def fetch_retweeters_by_topic_exclusive(self, topic):
"""
Get the retweeters talking about topic x and those not, so we can perform a two-sample KS-test on them.
"""
topic = topic.upper() # do uppercase conversion once here instead of many times inside sql below
sql = f"""
-- TOPIC: '{topic}'
SELECT
rt.user_id
,rt.user_created_at
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{topic}') then rt.status_id end) as count
FROM {self.dataset_address}.retweets rt
GROUP BY 1,2
"""
return self.execute_query(sql)
def fetch_retweeters_by_topics_exclusive(self, x_topic, y_topic):
"""
Get the retweeters talking about topic x and not y (and vice versa).
For each user, determines how many times they were talking about topic x and y.
Only returns users who were talking about one or the other, so we can perform a two-sample KS-test on them.
"""
x_topic = x_topic.upper() # do uppercase conversion once here instead of many times inside sql below
y_topic = y_topic.upper() # do uppercase conversion once here instead of many times inside sql below
sql = f"""
-- TOPICS: '{x_topic}' | '{y_topic}'
SELECT
rt.user_id
,rt.user_created_at
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}') then rt.status_id end) as x_count
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}') then rt.status_id end) as y_count
FROM {self.dataset_address}.retweets rt
WHERE REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}')
OR REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}')
GROUP BY 1,2
HAVING (x_count > 0 and y_count = 0) OR (x_count = 0 and y_count > 0) -- mutually exclusive populations
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - USER ID LOOKUPS
#
def fetch_idless_screen_names(self):
sql = f"""
SELECT DISTINCT rt.retweet_user_screen_name as screen_name
FROM {self.dataset_address}.retweets rt
LEFT JOIN {self.dataset_address}.tweets t on t.user_screen_name = rt.retweet_user_screen_name
WHERE t.user_id IS NULL
"""
return self.execute_query(sql)
def migrate_user_id_lookups_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_lookups`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.user_id_lookups` (
lookup_at TIMESTAMP,
counter INT64,
screen_name STRING,
user_id STRING,
message STRING
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def user_id_lookups_table(self):
return self.client.get_table(f"{self.dataset_address}.user_id_lookups") # an API call (caches results for subsequent inserts)
def upload_user_id_lookups(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_lookups_table, rows_to_insert)
return errors
def fetch_max_user_id_postlookup(self):
sql = f"""
SELECT max(user_id) as max_user_id -- 999999827600650240
FROM (
SELECT DISTINCT user_id FROM {self.dataset_address}.tweets -- 3,600,545
UNION ALL
SELECT DISTINCT user_id FROM {self.dataset_address}.user_id_lookups WHERE user_id IS NOT NULL -- 14,969
) all_user_ids -- 3,615,409
"""
results = list(self.execute_query(sql))
return int(results[0]["max_user_id"])
def fetch_idless_screen_names_postlookup(self):
sql = f"""
SELECT distinct upper(screen_name) as screen_name
FROM {self.dataset_address}.user_id_lookups
WHERE user_id is NULL
ORDER BY screen_name
"""
return self.execute_query(sql)
def migrate_user_id_assignments_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_assignments`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.user_id_assignments` (
screen_name STRING,
user_id STRING,
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def user_id_assignments_table(self):
return self.client.get_table(f"{self.dataset_address}.user_id_assignments") # an API call (caches results for subsequent inserts)
def upload_user_id_assignments(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_assignments_table, rows_to_insert)
return errors
def migrate_populate_user_screen_names_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_screen_names`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_screen_names` as (
SELECT DISTINCT user_id, upper(screen_name) as screen_name
FROM (
SELECT DISTINCT user_id, user_screen_name as screen_name FROM `{self.dataset_address}.tweets` -- 3,636,492
UNION ALL
SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_lookups` WHERE user_id IS NOT NULL -- 14,969
UNION ALL
SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_assignments` -- 2,224
) all_user_screen_names -- 3,615,409
ORDER BY user_id, screen_name
);
"""
return self.execute_query(sql)
def migrate_populate_user_details_table_v2(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_details_v2`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_details_v2` as (
SELECT
user_id
,count(DISTINCT UPPER(screen_name)) as screen_name_count
,ARRAY_AGG(DISTINCT UPPER(screen_name) IGNORE NULLS) as screen_names
-- ,ANY_VALUE(screen_name) as screen_name
FROM `{self.dataset_address}.user_screen_names`
GROUP BY 1
ORDER BY 2 desc
-- LIMIT 100
);
"""
return self.execute_query(sql)
def migrate_populate_retweets_table_v2(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.retweets_v2`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.retweets_v2` as (
SELECT
cast(rt.user_id as int64) as user_id
,UPPER(rt.user_screen_name) as user_screen_name
,rt.user_created_at
,cast(sn.user_id as int64) as retweeted_user_id
,UPPER(rt.retweet_user_screen_name) as retweeted_user_screen_name
,rt.status_id
,rt.status_text
,rt.created_at
FROM `{self.dataset_address}.retweets` rt
JOIN `{self.dataset_address}.user_screen_names` sn
ON UPPER(rt.retweet_user_screen_name) = UPPER(sn.screen_name)
WHERE rt.user_screen_name <> rt.retweet_user_screen_name -- excludes people retweeting themselves
);
"""
return self.execute_query(sql)
def fetch_retweet_edges_in_batches_v2(self, topic=None, start_at=None, end_at=None):
"""
        For each retweeter, counts the number of times they retweeted each other user.
        Optionally filtered to a given topic.
        Optionally filtered to a given timeframe.
Params:
topic (str) : the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
start_at (str) : a date string for the earliest tweet
end_at (str) : a date string for the latest tweet
"""
sql = f"""
SELECT
rt.user_id
,rt.retweeted_user_id
,count(distinct rt.status_id) as retweet_count
FROM `{self.dataset_address}.retweets_v2` rt
WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
"""
if topic:
sql+=f"""
AND upper(rt.status_text) LIKE '%{topic.upper()}%'
"""
if start_at and end_at:
sql+=f"""
AND (rt.created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
"""
sql += """
GROUP BY 1,2
"""
return self.execute_query_in_batches(sql)
def migrate_daily_bot_probabilities_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.daily_bot_probabilities`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.daily_bot_probabilities` (
start_date STRING,
user_id INT64,
bot_probability FLOAT64,
);
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - BOT CLASSIFICATIONS
#
@property
@lru_cache(maxsize=None)
def daily_bot_probabilities_table(self):
return self.client.get_table(f"{self.dataset_address}.daily_bot_probabilities") # an API call (caches results for subsequent inserts)
def upload_daily_bot_probabilities(self, records):
return self.insert_records_in_batches(self.daily_bot_probabilities_table, records)
def sql_fetch_bot_ids(self, bot_min=0.8):
sql = f"""
SELECT DISTINCT bp.user_id
FROM `{self.dataset_address}.daily_bot_probabilities` bp
WHERE bp.bot_probability >= {float(bot_min)}
"""
return sql
def fetch_bot_ids(self, bot_min=0.8):
"""Returns any user who has ever had a bot score above the given threshold."""
return self.execute_query(self.sql_fetch_bot_ids(bot_min))
def fetch_bot_retweet_edges_in_batches(self, bot_min=0.8):
"""
For each bot (user with any bot score greater than the specified threshold),
and each user they retweeted, includes the number of times the bot retweeted them.
Params:
bot_min (float) consider users with any score above this threshold as bots
"""
sql = f"""
SELECT
rt.user_id
,rt.retweeted_user_id
,count(distinct rt.status_id) as retweet_count
FROM `{self.dataset_address}.retweets_v2` rt
JOIN (
{self.sql_fetch_bot_ids(bot_min)}
) bp ON bp.user_id = rt.user_id
WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
GROUP BY 1,2
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
#
# RETWEET GRAPHS V2 - BOT COMMUNITIES
#
#@property
#@lru_cache(maxsize=None) # don't cache, or cache one for each value of n_communities
def n_bot_communities_table(self, n_communities):
return self.client.get_table(f"{self.dataset_address}.{n_communities}_bot_communities") # an API call (caches results for subsequent inserts)
def destructively_migrate_n_bot_communities_table(self, n_communities):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.{n_communities}_bot_communities`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.{n_communities}_bot_communities` (
user_id INT64,
community_id INT64,
);
"""
return self.execute_query(sql)
def overwrite_n_bot_communities_table(self, n_communities, records):
self.destructively_migrate_n_bot_communities_table(n_communities)
table = self.n_bot_communities_table(n_communities)
return self.insert_records_in_batches(table, records)
def download_n_bot_community_tweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,t.user_id
,t.user_name
,t.user_screen_name
,t.user_description
,t.user_location
,t.user_verified
,t.user_created_at
,t.status_id
,t.status_text
,t.retweet_status_id
,t.reply_user_id
,t.is_quote as status_is_quote
,t.geo as status_geo
,t.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.tweets` t on CAST(t.user_id as int64) = bc.user_id
-- WHERE t.retweet_status_id IS NULL
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def download_n_bot_community_retweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,ud.user_id
,ud.screen_name_count as user_screen_name_count
,ARRAY_TO_STRING(ud.screen_names, ' | ') as user_screen_names
,rt.user_created_at
,rt.retweeted_user_id
,rt.retweeted_user_screen_name
,rt.status_id
,rt.status_text
,rt.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.user_details_v2` ud on CAST(ud.user_id as int64) = bc.user_id
JOIN `{self.dataset_address}.retweets_v2` rt on rt.user_id = bc.user_id
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def destructively_migrate_token_frequencies_table(self, table_address, records):
print("DESTRUCTIVELY MIGRATING TABLE:", table_address)
sql = f"""
DROP TABLE IF EXISTS `{table_address}`;
CREATE TABLE IF NOT EXISTS `{table_address}` (
token STRING,
rank INT64,
count INT64,
pct FLOAT64,
doc_count INT64,
doc_pct FLOAT64
);
"""
self.execute_query(sql)
table = self.client.get_table(table_address) # API call
print("INSERTING", len(records), "RECORDS...")
return self.insert_records_in_batches(table, records)
def fetch_bot_community_profiles(self, n_communities=2):
sql = f"""
SELECT community_id, bot_id as user_id, user_descriptions
FROM `{self.dataset_address}.{int(n_communities)}_community_profiles`
"""
return self.execute_query(sql)
def upload_bot_community_profile_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tokens"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def upload_bot_community_profile_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tags"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def fetch_bot_community_statuses(self, n_communities, community_id=None, limit=None):
sql = f"""
SELECT community_id, user_id, status_id, status_text
FROM `{self.dataset_address}.{int(n_communities)}_community_labeled_tweets`
"""
if community_id:
sql += f" WHERE community_id = {int(community_id)}"
if limit:
sql += f" LIMIT {int(limit)}"
return self.execute_query(sql)
def upload_bot_community_status_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tokens"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
def upload_bot_community_status_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tags"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
#
# BOT FOLLOWER GRAPHS
#
def destructively_migrate_user_friends_flat(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.user_friends_flat`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends_flat` as (
SELECT user_id, upper(screen_name) as screen_name, upper(friend_name) as friend_name
FROM `{self.dataset_address}.user_friends`
CROSS JOIN UNNEST(friend_names) AS friend_name
);
""" # 1,976,670,168 rows WAT
return self.execute_query(sql)
def destructively_migrate_bots_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bots_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bots_above_{bot_min_str}` as (
SELECT
bp.user_id as bot_id
,sn.screen_name as bot_screen_name
,count(distinct start_date) as day_count
,avg(bot_probability) as avg_daily_score
FROM `{self.dataset_address}.daily_bot_probabilities` bp
JOIN `{self.dataset_address}.user_screen_names` sn ON CAST(sn.user_id as int64) = bp.user_id
WHERE bp.bot_probability >= {float(bot_min)}
GROUP BY 1,2
ORDER BY 3 desc
);
"""
return self.execute_query(sql)
def destructively_migrate_bot_followers_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}` as (
SELECT
b.bot_id
,b.bot_screen_name
,uff.user_id as follower_id
,uff.screen_name as follower_screen_name
FROM `{self.dataset_address}.user_friends_flat` uff
JOIN `{self.dataset_address}.bots_above_{bot_min_str}` b ON upper(b.bot_screen_name) = upper(uff.friend_name)
);
""" # 29,861,268 rows WAT
return self.execute_query(sql)
def fetch_bot_followers_in_batches(self, bot_min=0.8):
"""
Returns a row for each bot for each user who follows them.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT DISTINCT bot_id, follower_id
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
"""
return self.execute_query_in_batches(sql)
def fetch_bot_follower_lists(self, bot_min=0.8):
"""
Returns a row for each bot, with a list of aggregated follower ids.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT bot_id, ARRAY_AGG(distinct follower_id) as follower_ids
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
GROUP BY 1
""" # takes 90 seconds for ~25K rows
return self.execute_query(sql)
#
# NLP (BASILICA)
#
@property
@lru_cache(maxsize=None)
def basilica_embeddings_table(self):
return self.client.get_table(f"{self.dataset_address}.basilica_embeddings") # an API call (caches results for subsequent inserts)
def upload_basilica_embeddings(self, records):
return self.insert_records_in_batches(self.basilica_embeddings_table, records)
def fetch_basilica_embedless_partitioned_statuses(self, min_val=0.0, max_val=1.0, limit=None, in_batches=False):
"""Params min_val and max_val reference partition decimal values from 0.0 to 1.0"""
sql = f"""
SELECT ps.status_id, ps.status_text
FROM `{self.dataset_address}.partitioned_statuses` ps
LEFT JOIN `{self.dataset_address}.basilica_embeddings` emb ON ps.status_id = emb.status_id
WHERE emb.status_id IS NULL
AND ps.partition_val BETWEEN {float(min_val)} AND {float(max_val)}
"""
if limit:
sql += f" LIMIT {int(limit)};"
if in_batches:
print("FETCHING STATUSES IN BATCHES...")
return self.execute_query_in_batches(sql)
else:
print("FETCHING STATUSES...")
return self.execute_query(sql)
#
# NLP (CUSTOM)
#
def fetch_labeled_tweets_in_batches(self, limit=None):
sql = f"""
SELECT
status_id
,status_text
,community_id
--,community_score
FROM `{self.dataset_address}.2_community_labeled_tweets`
"""
if limit:
sql += f" LIMIT {int(limit)}"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
def fetch_unlabeled_statuses_in_batches(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
LEFT JOIN `{self.dataset_address}.2_community_labeled_tweets` l ON l.status_id = s.status_id
WHERE l.status_id IS NULL
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
def destructively_migrate_2_community_predictions_table(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.2_community_predictions`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.2_community_predictions` (
status_id INT64,
predicted_community_id INT64
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def community_predictions_table(self):
return self.client.get_table(f"{self.dataset_address}.2_community_predictions") # an API call (caches results for subsequent inserts)
def upload_predictions_in_batches(self, records):
return self.insert_records_in_batches(self.community_predictions_table, records)
def fetch_predictions(self, limit=None):
sql = f"""
SELECT status_id, predicted_community_id
FROM `{self.dataset_address}.2_community_predictions`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
#
# NLP V2
#
def nlp_v2_fetch_statuses(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def nlp_v2_destructively_migrate_predictions_table(self, model_name):
if model_name.lower() == "bert":
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_bert`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_bert` (
status_id INT64,
logit_0 FLOAT64,
logit_1 FLOAT64,
prediction FLOAT64
);
"""
else:
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}` (
status_id INT64,
prediction STRING -- todo: convert this D/R label back to 0/1 "score"
);
"""
return self.execute_query(sql)
def nlp_v2_get_predictions_table(self, model_name):
return self.client.get_table(f"{self.dataset_address}.nlp_v2_predictions_{model_name}") # API call.
#
# DAILY ACTIVE FRIEND GRAPHS V4
#
def fetch_daily_statuses(self, date, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
, t.status_text
, t.created_at
, t.user_id
, UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
--,bu.community_id
-- ,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
--LIMIT 10
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
,t.status_text
,t.created_at
,t.user_id
,UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
JOIN (
SELECT
cast(user_id as INT64) as user_id, count(distinct status_id) as tweet_count
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from created_at) = '{date}'
GROUP BY 1
-- LIMIT 10
) r ON r.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses_for_model_training(self, date, tweet_min=None, limit=None):
sql = f"""
WITH daily_tweets AS (
SELECT
cast(t.user_id as int64) as user_id
,UPPER(t.user_screen_name) as screen_name
,cast(t.status_id as int64) as status_id
,t.status_text
,t.created_at
FROM `{self.dataset_address}.tweets` t
WHERE extract(date from t.created_at) = '{date}'
)
SELECT DISTINCT
t.status_id ,t.status_text ,t.created_at
,t.user_id ,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
,st.status_count as status_text_occurrence
FROM daily_tweets t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
JOIN (
SELECT
CAST(user_id as INT64) as user_id
,count(distinct status_id) as tweet_count
FROM daily_tweets t
GROUP BY 1
) r ON r.user_id = cast(t.user_id as int64)
LEFT JOIN (
SELECT
t.status_text
,count(distinct t.status_id) as status_count
FROM daily_tweets t
GROUP BY 1
) st ON st.status_text = t.status_text
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_user_friends(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT dau.user_id, dau.rate, uf.screen_name ,uf.friend_count, uf.friend_names
FROM (
SELECT cast(user_id as INT64) as user_id, count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from t.created_at) = '{date}'
GROUP BY 1
) dau
JOIN `{self.dataset_address}.active_user_friends` uf ON uf.user_id = dau.user_id
"""
if tweet_min:
sql += f" WHERE dau.rate >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends(self, date, tweet_min=2, limit=None):
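        # "dau" = users active on the given date; the join keeps only friend edges whose target is itself an active user that day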
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,ARRAY_AGG(DISTINCT uff.friend_name) as friend_names
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends_for_csv(self, date, tweet_min=2, limit=None):
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_statuses_with_opinion_scores(self, date, limit=None):
sql = f"""
WITH daily_tweets as (
SELECT user_id ,screen_name ,status_id ,status_text ,created_at ,score_lr ,score_nb
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
)
SELECT
t.user_id
,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.status_count as rate
,t.status_id
,t.status_text
,st.status_count as status_text_occurrences
,t.created_at
,t.score_lr
,t.score_nb
FROM daily_tweets t
JOIN (
SELECT user_id, count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) r ON r.user_id = t.user_id
LEFT JOIN (
SELECT status_text ,count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) st ON st.status_text = t.status_text
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# DAILY ACTIVE FRIEND GRAPHS V5
#
def fetch_daily_nodes_with_active_edges(self, date, limit=None):
sql = f"""
WITH dau AS (
SELECT
user_id
,screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
GROUP BY 1,2
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3,4,5
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# ACTIVE FRIEND GRAPHS V6
#
#def migrate_populate_nodes_with_active_edges_v6(self, limit=None):
# sql = f"""
# WITH au AS (
# SELECT
# cast(user_id as int64) as user_id
# ,upper(user_screen_name) as screen_name
# ,count(distinct status_id) as rate
# FROM `{self.dataset_address}.tweets` t
# WHERE created_at BETWEEN '2019-12-20 00:00:00' AND '2020-02-15 23:59:59' -- inclusive (primary collection period)
# GROUP BY 1,2
# )
#
# SELECT
# au.user_id
# ,au.screen_name
# ,au.rate
# ,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
# ,cast(bu.community_id as int64) as community_id
# ,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
# ,count(DISTINCT uff.friend_name) as friend_count
# FROM au
# JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = au.user_id
# LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = au.user_id
# WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM au)
# GROUP BY 1,2,3,4,5
# """
# if limit:
# sql += f" LIMIT {int(limit)};"
# return self.execute_query(sql)
def fetch_nodes_with_active_edges_v6(self, limit=None):
sql = f"""
SELECT user_id, screen_name, rate, bot, community_id, friend_names, friend_count
        FROM `{self.dataset_address}.nodes_with_active_edges_v6`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_sn_nodes_with_active_edges_v7(self, limit=None):
sql = f"""
SELECT user_id, screen_name, status_count as rate, is_bot as bot, community_id, friend_names, friend_count
        FROM `{self.dataset_address}.nodes_with_active_edges_v7_sn`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# BOT ANALYSIS
#
def fetch_statuses_with_tags(self, limit=None):
sql = f"""
SELECT user_id, is_bot, status_id, status_text
        FROM `{self.dataset_address}.statuses_with_tags`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_user_details_vq(self, limit=None):
sql = f"""
SELECT
user_id ,creation_date ,screen_name_count, screen_names
,status_count, rt_count
,is_bot ,bot_community
,mean_opinion ,opinion_community
,q_status_count ,q_status_pct
        FROM `{self.dataset_address}.user_details_vq`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_tweet_details_v6(self, limit=None):
sql = f"""
SELECT
status_id
,status_created_at
,is_rt ,rt_user_screen_name
,user_id
,screen_names ,screen_name_count
,created_on ,created_jan17 ,created_inaug
,is_bot ,is_q
,opinion_community ,mean_opinion
FROM `{self.dataset_address}.tweet_details_v6_slim`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# API - V0
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_details_api_v0(self, screen_name="politico"):
# TODO: super-charge this with cool stuff, like mention counts, average opinion score, etc.
# TODO: create some temporary tables, to make the query faster
sql = f"""
SELECT
user_id
,user_created_at
,tweet_count
,screen_name_count
,screen_names
,user_names
,user_descriptions
FROM `{self.dataset_address}.user_details_v3`
WHERE UPPER(@screen_name) in UNNEST(SPLIT(screen_names, '|'))
LIMIT 1
"""
job_config = bigquery.QueryJobConfig(query_parameters=[bigquery.ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_user_tweets_api_v0(self, screen_name="politico"):
# TODO: create some temporary tables maybe, to make the query faster
sql = f"""
SELECT
t.status_id
,t.status_text
,t.created_at
,p.predicted_community_id as opinion_score
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_community_predictions` p ON p.status_id = cast(t.status_id as int64)
WHERE upper(t.user_screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_retweeted_api_v0(self, metric=None, limit=None):
"""
Params:
metric : whether to calculate top users based on "retweet_count" or "retweeter_count"
limit : the number of top users to return for each community (max 1,000)
"""
        metric = metric or "retweet_count"
        if metric not in ("retweet_count", "retweeter_count"):
            raise ValueError(f"Invalid metric: {metric}")  # whitelist the column name; identifiers can't be bound as query parameters
        limit = limit or 25
sql = f"""
(
SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_0_users_most_retweeted`
            ORDER BY {metric} DESC
LIMIT @limit
)
UNION ALL
(
SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_1_users_most_retweeted`
            ORDER BY {metric} DESC
LIMIT @limit
)
"""
        job_config = QueryJobConfig(query_parameters=[
            ScalarQueryParameter("limit", "INT64", int(limit)),  # metric is validated above and interpolated directly
        ])
return self.client.query(sql, job_config=job_config)
def fetch_statuses_most_retweeted_api_v0(self, metric=None, limit=None):
"""
Params:
metric : whether to calculate top statuses based on "retweet_count" or "retweeter_count"
limit : the number of top statuses to return for each community (max 1,000)
"""
        metric = metric or "retweet_count"
        if metric not in ("retweet_count", "retweeter_count"):
            raise ValueError(f"Invalid metric: {metric}")  # whitelist the column name; identifiers can't be bound as query parameters
        limit = limit or 25
sql = f"""
(
SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_0_statuses_most_retweeted`
            ORDER BY {metric} DESC
LIMIT @limit
)
UNION ALL
(
SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_1_statuses_most_retweeted`
            ORDER BY {metric} DESC
LIMIT @limit
)
"""
        job_config = QueryJobConfig(query_parameters=[
            ScalarQueryParameter("limit", "INT64", int(limit)),  # metric is validated above and interpolated directly
        ])
return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tags to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
#
# API - V1
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_tweets_api_v1(self, screen_name="politico"):
sql = f"""
SELECT
status_id
,status_text
,created_at
,score_lr
,score_nb
,score_bert
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE upper(screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_followed_api_v1(self, limit=None):
limit = limit or 500 # max 1000 based on the size of the precomputed table
sql = f"""
SELECT
screen_name --, user_id, user_created_at
,status_count
,follower_count
,avg_score_lr
,avg_score_nb
,avg_score_bert
,user_category as category
FROM `{self.dataset_address}.nlp_v2_predictions_by_user_most_followed`
ORDER BY follower_count DESC
LIMIT @limit
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
if __name__ == "__main__":
service = BigQueryService()
print(f" CLEANUP MODE: {CLEANUP_MODE}")
if CLEANUP_MODE:
service.delete_temp_tables_older_than(days=3)
seek_confirmation()
print("--------------------")
print("FETCHED TOPICS:")
print([row.topic for row in service.fetch_topics()])
sql = f"SELECT count(distinct status_id) as tweet_count FROM `{service.dataset_address}.tweets`"
results = service.execute_query(sql)
print("--------------------")
tweet_count = list(results)[0].tweet_count
print(f"FETCHED {fmt_n(tweet_count)} TWEETS")
print("--------------------")
sql = f"SELECT count(distinct user_id) as user_count FROM `{service.dataset_address}.tweets`"
results = service.execute_query(sql)
user_count = list(results)[0].user_count
print(f"FETCHED {fmt_n(user_count)} USERS")
results = service.user_friend_collection_progress()
row = list(results)[0]
collected_count = row.user_count
pct = collected_count / user_count
#print("--------------------")
#print("USERS COLLECTED:", collected_count)
#print(" PCT COLLECTED:", f"{(pct * 100):.1f}%")
#print(" AVG DURATION:", row.avg_duration)
if collected_count > 0:
print("--------------------")
print(f"USERS WITH FRIENDS: {row.pct_friendly * 100}%")
print(" AVG FRIENDS:", round(row.avg_friends_friendly))
#print(" AVG DURATION:", row.avg_duration_friendly)
| 72,536 | 38.040366 | 161 | py |
tweet-analysis-2020 | tweet-analysis-2020-main/app/toxicity/model_manager.py |
#
# adapted from: https://github.com/unitaryai/detoxify/blob/master/detoxify/detoxify.py
#
# using the pre-trained toxicity models provided via Detoxify checkpoints, but...
# 1) let's try different / lighter torch requirement approaches (to enable installation on heroku) - see requirements.txt file
# 2) let's also try to return the raw scores (to save processing time)
#
# references:
# https://github.com/unitaryai/detoxify/blob/master/detoxify/detoxify.py
# https://pytorch.org/docs/stable/hub.html
# https://pytorch.org/docs/stable/hub.html#torch.hub.load_state_dict_from_url
# https://pytorch.org/docs/stable/generated/torch.no_grad.html
#
import os
from pprint import pprint
from functools import lru_cache
from dotenv import load_dotenv
import torch
import transformers
from pandas import DataFrame
load_dotenv()
CHECKPOINT_NAME = os.getenv("CHECKPOINT_NAME", default="original") # "original" or "unbiased" (see README)
CHECKPOINT_URLS = {
"original": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_original-c1212f89.ckpt",
"unbiased": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_bias-4e693588.ckpt",
#"multilingual": "https://github.com/unitaryai/detoxify/releases/download/v0.1-alpha/toxic_multilingual-bbddc277.ckpt",
#"original-small": "https://github.com/unitaryai/detoxify/releases/download/v0.1.2/original-albert-0e1d6498.ckpt",
#"unbiased-small": "https://github.com/unitaryai/detoxify/releases/download/v0.1.2/unbiased-albert-c8519128.ckpt"
}
class ModelManager:
def __init__(self, checkpoint_name=None):
self.checkpoint_name = checkpoint_name or CHECKPOINT_NAME
self.checkpoint_url = CHECKPOINT_URLS[self.checkpoint_name]
self.model_state = None
self.state_dict = None
self.config = None
self.tokenizer_name = None
self.model_name = None
self.model_type = None
self.num_classes = None
self.class_names = None
def load_model_state(self):
"""Loads pre-trained model from saved checkpoint metadata."""
if not self.model_state:
print("---------------------------")
print("LOADING MODEL STATE...")
# see: https://pytorch.org/docs/stable/hub.html#torch.hub.load_state_dict_from_url
self.model_state = torch.hub.load_state_dict_from_url(self.checkpoint_url, map_location="cpu")
self.state_dict = self.model_state["state_dict"]
self.config = self.model_state["config"]
self.tokenizer_name = self.config["arch"]["args"]["tokenizer_name"] #> BertTokenizer
self.model_name = self.config["arch"]["args"]["model_name"] #> BertForSequenceClassification
self.model_type = self.config["arch"]["args"]["model_type"] #> bert-base-uncased
self.num_classes = self.config["arch"]["args"]["num_classes"] #> 6
self.class_names = self.config["dataset"]["args"]["classes"] #> ['toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult', 'identity_hate']
print("---------------------------")
print("MODEL TYPE:", self.model_type)
print("MODEL NAME:", self.model_name)
print("TOKENIZER NAME:", self.tokenizer_name)
print(f"CLASS NAMES ({self.num_classes}):", self.class_names)
@property
@lru_cache(maxsize=None)
def model(self):
        if not self.model_state:
            self.load_model_state()
# see: https://huggingface.co/transformers/main_classes/model.html#transformers.PreTrainedModel.from_pretrained
return getattr(transformers, self.model_name).from_pretrained(
pretrained_model_name_or_path=None,
config=self.model_type,
num_labels=self.num_classes,
state_dict=self.state_dict,
_fast_init=False
)
@property
@lru_cache(maxsize=None)
def tokenizer(self):
        if not self.model_state:
            self.load_model_state()
return getattr(transformers, self.tokenizer_name).from_pretrained(self.model_type)
@torch.no_grad()
def predict_scores(self, texts):
"""Returns the raw scores, without formatting (for those desiring a faster experience)."""
self.model.eval()
inputs = self.tokenizer(texts, return_tensors="pt", truncation=True, padding=True).to(self.model.device)
out = self.model(**inputs)[0]
scores = torch.sigmoid(out).cpu().detach().numpy()
return scores
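    # For reference: predict_scores on a list of N texts returns an array of
    # shape (N, num_classes). With the "original" checkpoint there are 6
    # classes ('toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult',
    # 'identity_hate'), and each entry is an independent sigmoid score in [0, 1].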
def predict_records(self, texts):
"""Optional, if you want the scores returned as a list of dict, with the texts in there as well."""
records = []
for i, score_row in enumerate(self.predict_scores(texts)):
record = {}
record["text"] = texts[i]
for class_index, class_name in enumerate(self.class_names):
record[class_name] = float(score_row[class_index])
records.append(record)
return records
def predict_df(self, texts):
"""Optional, if you want the scores returned as a dataframe."""
return DataFrame(self.predict_records(texts))
if __name__ == '__main__':
texts = [
"RT @realDonaldTrump: Crazy Nancy Pelosi should spend more time in her decaying city and less time on the Impeachment Hoax! https://t.co/eno…",
"RT @SpeakerPelosi: The House cannot choose our impeachment managers until we know what sort of trial the Senate will conduct. President Tr…",
]
mgr = ModelManager()
mgr.load_model_state()
print("------------")
print("MODEL:", type(mgr.model))
print("TOKENIZER:", type(mgr.tokenizer))
scores = mgr.predict_scores(texts)
print("------------")
print("SCORES:", type(scores), scores.shape)
print(scores[0])
records = mgr.predict_records(texts)
print("------------")
print("RECORDS:", type(records), len(records))
print(records[0])
| 6,115 | 39.773333 | 155 | py |
FBNETGEN | FBNETGEN-main/main.py | from pathlib import Path
import argparse
import yaml
import torch
from model import FBNETGEN, GNNPredictor, SeqenceModel, BrainNetCNN
from train import BasicTrain, BiLevelTrain, SeqTrain, GNNTrain, BrainCNNTrain
from datetime import datetime
from dataloader import init_dataloader
def main(args):
with open(args.config_filename) as f:
config = yaml.load(f, Loader=yaml.Loader)
dataloaders, node_size, node_feature_size, timeseries_size = \
init_dataloader(config['data'])
config['train']["seq_len"] = timeseries_size
config['train']["node_size"] = node_size
if config['model']['type'] == 'seq':
model = SeqenceModel(config['model'], node_size, timeseries_size)
use_train = SeqTrain
elif config['model']['type'] == 'gnn':
model = GNNPredictor(node_feature_size, node_size)
use_train = GNNTrain
elif config['model']['type'] == 'fbnetgen':
model = FBNETGEN(config['model'], node_size,
node_feature_size, timeseries_size)
use_train = BasicTrain
    elif config['model']['type'] == 'brainnetcnn':
        model = BrainNetCNN(node_size)
        use_train = BrainCNNTrain
    else:
        raise ValueError(f"Unknown model type: {config['model']['type']}")
if config['train']['method'] == 'bilevel' and \
config['model']['type'] == 'fbnetgen':
parameters = {
'lr': config['train']['lr'],
'weight_decay': config['train']['weight_decay'],
'params': [
{'params': model.extract.parameters()},
{'params': model.emb2graph.parameters()}
]
}
optimizer1 = torch.optim.Adam(**parameters)
optimizer2 = torch.optim.Adam(model.predictor.parameters(),
lr=config['train']['lr'],
weight_decay=config['train']['weight_decay'])
opts = (optimizer1, optimizer2)
use_train = BiLevelTrain
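        # BiLevelTrain (see train.py) alternates these two optimizers on a
        # 10-epoch cycle: the first 5 epochs of each cycle step optimizer1
        # (extractor + graph generator), the remaining 5 step optimizer2
        # (predictor).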
else:
optimizer = torch.optim.Adam(
model.parameters(), lr=config['train']['lr'],
weight_decay=config['train']['weight_decay'])
opts = (optimizer,)
loss_name = 'loss'
if config['train']["group_loss"]:
loss_name = f"{loss_name}_group_loss"
if config['train']["sparsity_loss"]:
loss_name = f"{loss_name}_sparsity_loss"
now = datetime.now()
date_time = now.strftime("%m-%d-%H-%M-%S")
extractor_type = config['model']['extractor_type'] if 'extractor_type' in config['model'] else "none"
embedding_size = config['model']['embedding_size'] if 'embedding_size' in config['model'] else "none"
window_size = config['model']['window_size'] if 'window_size' in config['model'] else "none"
save_folder_name = Path(config['train']['log_folder'])/Path(
date_time +
f"_{config['data']['dataset']}_{config['model']['type']}_{config['train']['method']}"
+ f"_{extractor_type}_{loss_name}_{embedding_size}_{window_size}")
train_process = use_train(
config['train'], model, opts, dataloaders, save_folder_name)
train_process.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', default='setting/pnc.yaml', type=str,
help='Configuration filename for training the model.')
parser.add_argument('--repeat_time', default=5, type=int)
args = parser.parse_args()
for i in range(args.repeat_time):
main(args)
| 3,687 | 35.514851 | 109 | py |
FBNETGEN | FBNETGEN-main/dataloader.py |
import numpy as np
import torch
import torch.utils.data as utils
from sklearn import preprocessing
import pandas as pd
from scipy.io import loadmat
class StandardScaler:
"""
    Standardize the input (z-score with a fixed mean and std)
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
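# Minimal sanity check for StandardScaler (hypothetical numbers, not part of
# the pipeline):
#     scaler = StandardScaler(mean=0.5, std=2.0)
#     scaler.transform(np.array([0.5, 2.5]))          # -> array([0., 1.])
#     scaler.inverse_transform(np.array([0., 1.]))    # -> array([0.5, 2.5])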
def infer_dataloader(dataset_config):
label_df = pd.read_csv(dataset_config["label"])
if dataset_config["dataset"] == "PNC":
fc_data = np.load(dataset_config["time_seires"], allow_pickle=True).item()
fc_timeseires = fc_data['data'].transpose((0, 2, 1))
fc_id = fc_data['id']
id2gender = dict(zip(label_df['SUBJID'], label_df['sex']))
final_fc, final_label = [], []
for fc, l in zip(fc_timeseires, fc_id):
if l in id2gender:
final_fc.append(fc)
final_label.append(id2gender[l])
final_fc = np.array(final_fc)
elif dataset_config["dataset"] == 'ABCD':
fc_data = np.load(dataset_config["time_seires"], allow_pickle=True)
_, node_size, timeseries = final_fc.shape
encoder = preprocessing.LabelEncoder()
encoder.fit(label_df["sex"])
labels = encoder.transform(final_label)
final_fc = torch.from_numpy(final_fc).float()
return final_fc, labels, node_size, timeseries
def init_dataloader(dataset_config):
if dataset_config["dataset"] == 'ABIDE':
data = np.load(dataset_config["time_seires"], allow_pickle=True).item()
final_fc = data["timeseires"]
final_pearson = data["corr"]
labels = data["label"]
elif dataset_config["dataset"] == "HIV" or dataset_config["dataset"] == "BP":
data = loadmat(dataset_config["node_feature"])
labels = data['label']
labels = labels.reshape(labels.shape[0])
labels[labels==-1] = 0
view = dataset_config["view"]
final_pearson = data[view]
final_pearson = np.array(final_pearson).transpose(2, 0, 1)
final_fc = np.ones((final_pearson.shape[0],1,1))
elif dataset_config["dataset"] == 'PPMI' or dataset_config["dataset"] == 'PPMI_balanced':
m = loadmat(dataset_config["node_feature"])
labels = m['label'] if dataset_config["dataset"] != 'PPMI_balanced' else m['label_new']
labels = labels.reshape(labels.shape[0])
data = m['X'] if dataset_config["dataset"] == 'PPMI' else m['X_new']
final_pearson = np.zeros((data.shape[0], 84, 84))
modal_index = 0
for (index, sample) in enumerate(data):
            # Use the first of the three PPMI views (modal_index = 0)
final_pearson[index, :, :] = sample[0][:, :, modal_index]
final_fc = np.ones((final_pearson.shape[0],1,1))
else:
fc_data = np.load(dataset_config["time_seires"], allow_pickle=True)
pearson_data = np.load(dataset_config["node_feature"], allow_pickle=True)
label_df = pd.read_csv(dataset_config["label"])
if dataset_config["dataset"] == 'ABCD':
with open(dataset_config["node_id"], 'r') as f:
lines = f.readlines()
pearson_id = [line[:-1] for line in lines]
with open(dataset_config["seires_id"], 'r') as f:
lines = f.readlines()
fc_id = [line[:-1] for line in lines]
id2pearson = dict(zip(pearson_id, pearson_data))
id2gender = dict(zip(label_df['id'], label_df['sex']))
final_fc, final_label, final_pearson = [], [], []
for fc, l in zip(fc_data, fc_id):
if l in id2gender and l in id2pearson:
if np.any(np.isnan(id2pearson[l])) == False:
final_fc.append(fc)
final_label.append(id2gender[l])
final_pearson.append(id2pearson[l])
final_pearson = np.array(final_pearson)
final_fc = np.array(final_fc)
elif dataset_config["dataset"] == "PNC":
pearson_data, fc_data = pearson_data.item(), fc_data.item()
pearson_id = pearson_data['id']
pearson_data = pearson_data['data']
id2pearson = dict(zip(pearson_id, pearson_data))
fc_id = fc_data['id']
fc_data = fc_data['data']
id2gender = dict(zip(label_df['SUBJID'], label_df['sex']))
final_fc, final_label, final_pearson = [], [], []
for fc, l in zip(fc_data, fc_id):
if l in id2gender and l in id2pearson:
final_fc.append(fc)
final_label.append(id2gender[l])
final_pearson.append(id2pearson[l])
final_pearson = np.array(final_pearson)
final_fc = np.array(final_fc).transpose(0, 2, 1)
_, _, timeseries = final_fc.shape
_, node_size, node_feature_size = final_pearson.shape
scaler = StandardScaler(mean=np.mean(
final_fc), std=np.std(final_fc))
final_fc = scaler.transform(final_fc)
if dataset_config["dataset"] == 'PNC' or dataset_config["dataset"] == 'ABCD':
encoder = preprocessing.LabelEncoder()
encoder.fit(label_df["sex"])
labels = encoder.transform(final_label)
final_fc, final_pearson, labels = [torch.from_numpy(
data).float() for data in (final_fc, final_pearson, labels)]
length = final_fc.shape[0]
train_length = int(length*dataset_config["train_set"])
val_length = int(length*dataset_config["val_set"])
dataset = utils.TensorDataset(
final_fc,
final_pearson,
labels
)
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
dataset, [train_length, val_length, length-train_length-val_length])
train_dataloader = utils.DataLoader(
train_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)
val_dataloader = utils.DataLoader(
val_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)
test_dataloader = utils.DataLoader(
test_dataset, batch_size=dataset_config["batch_size"], shuffle=True, drop_last=False)
return (train_dataloader, val_dataloader, test_dataloader), node_size, node_feature_size, timeseries
| 6,468 | 30.556098 | 104 | py |
FBNETGEN | FBNETGEN-main/train.py | import torch
from util import Logger, accuracy, TotalMeter
import numpy as np
from pathlib import Path
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_fscore_support
from util.prepossess import mixup_criterion, mixup_data
from util.loss import mixup_cluster_loss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BasicTrain:
def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
self.logger = Logger()
self.model = model.to(device)
self.train_dataloader, self.val_dataloader, self.test_dataloader = dataloaders
self.epochs = train_config['epochs']
self.optimizers = optimizers
self.loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')
self.group_loss = train_config['group_loss']
self.sparsity_loss = train_config['sparsity_loss']
self.sparsity_loss_weight = train_config['sparsity_loss_weight']
self.save_path = log_folder
self.save_learnable_graph = True
self.init_meters()
def init_meters(self):
self.train_loss, self.val_loss, self.test_loss, self.train_accuracy,\
self.val_accuracy, self.test_accuracy, self.edges_num = [
TotalMeter() for _ in range(7)]
self.loss1, self.loss2, self.loss3 = [TotalMeter() for _ in range(3)]
def reset_meters(self):
for meter in [self.train_accuracy, self.val_accuracy, self.test_accuracy,
self.train_loss, self.val_loss, self.test_loss, self.edges_num,
self.loss1, self.loss2, self.loss3]:
meter.reset()
def train_per_epoch(self, optimizer):
self.model.train()
for data_in, pearson, label in self.train_dataloader:
label = label.long()
data_in, pearson, label = data_in.to(
device), pearson.to(device), label.to(device)
inputs, nodes, targets_a, targets_b, lam = mixup_data(
data_in, pearson, label, 1, device)
output, learnable_matrix, edge_variance = self.model(inputs, nodes)
loss = 2 * mixup_criterion(
self.loss_fn, output, targets_a, targets_b, lam)
if self.group_loss:
loss += mixup_cluster_loss(learnable_matrix,
targets_a, targets_b, lam)
if self.sparsity_loss:
sparsity_loss = self.sparsity_loss_weight * \
torch.norm(learnable_matrix, p=1)
loss += sparsity_loss
self.train_loss.update_with_weight(loss.item(), label.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
top1 = accuracy(output, label)[0]
self.train_accuracy.update_with_weight(top1, label.shape[0])
self.edges_num.update_with_weight(edge_variance, label.shape[0])
def test_per_epoch(self, dataloader, loss_meter, acc_meter):
labels = []
result = []
self.model.eval()
for data_in, pearson, label in dataloader:
label = label.long()
data_in, pearson, label = data_in.to(
device), pearson.to(device), label.to(device)
output, _, _ = self.model(data_in, pearson)
loss = self.loss_fn(output, label)
loss_meter.update_with_weight(
loss.item(), label.shape[0])
top1 = accuracy(output, label)[0]
acc_meter.update_with_weight(top1, label.shape[0])
result += F.softmax(output, dim=1)[:, 1].tolist()
labels += label.tolist()
auc = roc_auc_score(labels, result)
result = np.array(result)
result[result > 0.5] = 1
result[result <= 0.5] = 0
metric = precision_recall_fscore_support(
labels, result, average='micro')
return [auc] + list(metric)
def generate_save_learnable_matrix(self):
learable_matrixs = []
labels = []
for data_in, nodes, label in self.test_dataloader:
label = label.long()
data_in, nodes, label = data_in.to(
device), nodes.to(device), label.to(device)
_, learable_matrix, _ = self.model(data_in, nodes)
learable_matrixs.append(learable_matrix.cpu().detach().numpy())
labels += label.tolist()
self.save_path.mkdir(exist_ok=True, parents=True)
np.save(self.save_path/"learnable_matrix.npy", {'matrix': np.vstack(
learable_matrixs), "label": np.array(labels)}, allow_pickle=True)
def save_result(self, results):
self.save_path.mkdir(exist_ok=True, parents=True)
np.save(self.save_path/"training_process.npy",
results, allow_pickle=True)
torch.save(self.model.state_dict(), self.save_path/"model.pt")
def train(self):
training_process = []
for epoch in range(self.epochs):
self.reset_meters()
self.train_per_epoch(self.optimizers[0])
val_result = self.test_per_epoch(self.val_dataloader,
self.val_loss, self.val_accuracy)
test_result = self.test_per_epoch(self.test_dataloader,
self.test_loss, self.test_accuracy)
self.logger.info(" | ".join([
f'Epoch[{epoch}/{self.epochs}]',
f'Train Loss:{self.train_loss.avg: .3f}',
f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
f'Edges:{self.edges_num.avg: .3f}',
f'Test Loss:{self.test_loss.avg: .3f}',
f'Test Accuracy:{self.test_accuracy.avg: .3f}%',
f'Val AUC:{val_result[0]:.2f}',
f'Test AUC:{test_result[0]:.2f}'
]))
training_process.append([self.train_accuracy.avg, self.train_loss.avg,
self.val_loss.avg, self.test_loss.avg]
+ val_result + test_result)
if self.save_learnable_graph:
self.generate_save_learnable_matrix()
self.save_result(training_process)
class BiLevelTrain(BasicTrain):
def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
super().__init__(train_config, model, optimizers, dataloaders, log_folder)
def train(self):
training_process = []
matrix_epoch = 5
for epoch in range(self.epochs):
self.reset_meters()
if epoch % 10 < matrix_epoch:
self.train_per_epoch(self.optimizers[0])
else:
self.train_per_epoch(self.optimizers[1])
val_result = self.test_per_epoch(self.val_dataloader,
self.val_loss, self.val_accuracy)
test_result = self.test_per_epoch(self.test_dataloader,
self.test_loss, self.test_accuracy)
self.logger.info(" | ".join([
f'Epoch[{epoch}/{self.epochs}]',
f'Train Loss:{self.train_loss.avg: .3f}',
f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
f'Edges:{self.edges_num.avg: .3f}',
f'Test Loss:{self.test_loss.avg: .3f}',
f'Test Accuracy:{self.test_accuracy.avg: .3f}%',
f'Val AUC:{val_result[0]:.2f}',
f'Test AUC:{test_result[0]:.2f}'
]))
training_process.append([self.train_accuracy.avg, self.train_loss.avg,
self.val_loss.avg, self.test_loss.avg]
+ val_result + test_result)
if self.save_learnable_graph:
self.generate_save_learnable_matrix()
self.save_result(training_process)
class GNNTrain(BasicTrain):
def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
super().__init__(train_config, model, optimizers, dataloaders, log_folder)
self.pure_gnn_graph = train_config['pure_gnn_graph']
self.save_learnable_graph = False
def train_per_epoch(self, optimizer):
self.model.train()
for _, pearson, label in self.train_dataloader:
label = label.long()
pearson, label = pearson.to(device), label.to(device)
bz, module_num, _ = pearson.shape
if self.pure_gnn_graph == "uniform":
graph = torch.ones(
(bz, module_num, module_num)).float().to(device)
elif self.pure_gnn_graph == "pearson":
graph = torch.abs(pearson)
graph, nodes, targets_a, targets_b, lam = mixup_data(
graph, pearson, label, 1, device)
output = self.model(graph, nodes)
loss = mixup_criterion(
self.loss_fn, output, targets_a, targets_b, lam)
self.train_loss.update_with_weight(loss.item(), label.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
top1 = accuracy(output, label)[0]
self.train_accuracy.update_with_weight(top1, label.shape[0])
def test_per_epoch(self, dataloader, loss_meter, acc_meter):
labels = []
result = []
self.model.eval()
for _, pearson, label in dataloader:
label = label.long()
pearson, label = pearson.to(device), label.to(device)
bz, module_num, _ = pearson.shape
if self.pure_gnn_graph == "uniform":
graph = torch.ones(
(bz, module_num, module_num)).float().to(device)
elif self.pure_gnn_graph == "pearson":
graph = torch.abs(pearson)
output = self.model(graph, pearson)
loss = self.loss_fn(output, label)
loss_meter.update_with_weight(
loss.item(), label.shape[0])
top1 = accuracy(output, label)[0]
acc_meter.update_with_weight(top1, label.shape[0])
result += F.softmax(output, dim=1)[:, 1].tolist()
labels += label.tolist()
auc = roc_auc_score(labels, result)
result = np.array(result)
result[result > 0.5] = 1
result[result <= 0.5] = 0
metric = precision_recall_fscore_support(
labels, result, average='micro')
return [auc] + list(metric)
class SeqTrain(BasicTrain):
def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
super().__init__(train_config, model, optimizers, dataloaders, log_folder)
self.save_learnable_graph = False
def train_per_epoch(self, optimizer):
self.model.train()
for seq_group, _, label in self.train_dataloader:
label = label.long()
seq_group, label = seq_group.to(device), label.to(device)
seq_group, _, targets_a, targets_b, lam = mixup_data(
seq_group, seq_group, label, 1, device)
output = self.model(seq_group)
loss = mixup_criterion(
self.loss_fn, output, targets_a, targets_b, lam)
self.train_loss.update_with_weight(loss.item(), label.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
top1 = accuracy(output, label)[0]
self.train_accuracy.update_with_weight(top1, label.shape[0])
def test_per_epoch(self, dataloader, loss_meter, acc_meter):
labels = []
result = []
self.model.eval()
for seq_group, _, label in dataloader:
label = label.long()
seq_group, label = seq_group.to(device), label.to(device)
output = self.model(seq_group)
loss = self.loss_fn(output, label)
loss_meter.update_with_weight(
loss.item(), label.shape[0])
top1 = accuracy(output, label)[0]
acc_meter.update_with_weight(top1, label.shape[0])
result += F.softmax(output, dim=1)[:, 1].tolist()
labels += label.tolist()
auc = roc_auc_score(labels, result)
result = np.array(result)
result[result > 0.5] = 1
result[result <= 0.5] = 0
metric = precision_recall_fscore_support(
labels, result, average='micro')
return [auc] + list(metric)
class BrainCNNTrain(BasicTrain):
def __init__(self, train_config, model, optimizers, dataloaders, log_folder) -> None:
super().__init__(train_config, model, optimizers, dataloaders, log_folder)
self.save_learnable_graph = False
def train_per_epoch(self, optimizer):
self.model.train()
for _, pearson, label in self.train_dataloader:
label = label.long()
pearson, label = pearson.to(device), label.to(device)
_, nodes, targets_a, targets_b, lam = mixup_data(
pearson, pearson, label, 1, device)
output = self.model(nodes)
loss = mixup_criterion(
self.loss_fn, output, targets_a, targets_b, lam)
self.train_loss.update_with_weight(loss.item(), label.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
top1 = accuracy(output, label)[0]
self.train_accuracy.update_with_weight(top1, label.shape[0])
def test_per_epoch(self, dataloader, loss_meter, acc_meter):
labels = []
result = []
self.model.eval()
for _, pearson, label in dataloader:
label = label.long()
pearson, label = pearson.to(device), label.to(device)
output = self.model(pearson)
loss = self.loss_fn(output, label)
loss_meter.update_with_weight(
loss.item(), label.shape[0])
top1 = accuracy(output, label)[0]
acc_meter.update_with_weight(top1, label.shape[0])
result += F.softmax(output, dim=1)[:, 1].tolist()
labels += label.tolist()
auc = roc_auc_score(labels, result)
result = np.array(result)
result[result > 0.5] = 1
result[result <= 0.5] = 0
metric = precision_recall_fscore_support(
labels, result, average='micro')
return [auc] + list(metric)
class FCNetTrain(BasicTrain):
def __init__(self, train_config, model, optimizers, dataloaders, log_folder):
super().__init__(train_config, model, optimizers, dataloaders, log_folder)
self.generated_graph = []
def train_per_epoch(self, optimizer):
self.model.train()
for seq_group, label in self.train_dataloader:
label = label.long()
seq_group, label = seq_group.to(device), label.to(device)
output = self.model(seq_group)
loss = self.loss_fn(output, label)
self.train_loss.update_with_weight(loss.item(), label.shape[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_per_epoch(self, dataloader, loss_meter, acc_meter, save_graph=False):
self.model.eval()
self.generated_graph = []
for seq_group, label in dataloader:
label = label.long()
seq_group, label = seq_group.to(device), label.to(device)
output = self.model(seq_group)
loss = self.loss_fn(output, label)
loss_meter.update_with_weight(
loss.item(), label.shape[0])
return None
def train(self):
training_process = []
for epoch in range(self.epochs):
self.reset_meters()
self.train_per_epoch(self.optimizers[0])
self.test_per_epoch(self.val_dataloader,
self.val_loss, self.val_accuracy)
self.test_per_epoch(self.test_dataloader,
self.test_loss, self.test_accuracy, save_graph=True)
self.logger.info(" | ".join([
f'Epoch[{epoch}/{self.epochs}]',
f'Train Loss:{self.train_loss.avg: .3f}',
f'Train Accuracy:{self.train_accuracy.avg: .3f}%',
f'Edges:{self.edges_num.avg: .3f}',
f'Test Loss:{self.test_loss.avg: .3f}',
f'Test Accuracy:{self.test_accuracy.avg: .3f}%'
]))
training_process.append([self.train_accuracy.avg, self.train_loss.avg,
self.val_loss.avg, self.test_loss.avg])
self.save_result(training_process)
| 16,952 | 34.690526 | 98 | py |
FBNETGEN | FBNETGEN-main/util/prepossess.py | import torch
import numpy as np
import random
def mixup_data(x, nodes, y, alpha=1.0, device='cuda'):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
index = torch.randperm(batch_size).to(device)
mixed_nodes = lam * nodes + (1 - lam) * nodes[index, :]
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, mixed_nodes, y_a, y_b, lam
def mixup_data_by_class(x, nodes, y, alpha=1.0, device='cuda'):
'''Returns mixed inputs, pairs of targets, and lambda'''
mix_xs, mix_nodes, mix_ys = [], [], []
for t_y in y.unique():
idx = y == t_y
t_mixed_x, t_mixed_nodes, _, _, _ = mixup_data(
x[idx], nodes[idx], y[idx], alpha=alpha, device=device)
mix_xs.append(t_mixed_x)
mix_nodes.append(t_mixed_nodes)
mix_ys.append(y[idx])
return torch.cat(mix_xs, dim=0), torch.cat(mix_nodes, dim=0), torch.cat(mix_ys, dim=0)
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
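# Worked example: with lam = 0.7 the mixed batch behaves as 70% of the
# original samples and 30% of a shuffled copy, so the criterion evaluates to
#     0.7 * criterion(pred, y_a) + 0.3 * criterion(pred, y_b)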
def cal_step_connect(connectity, step):
multi_step = connectity
for _ in range(step):
multi_step = np.dot(multi_step, connectity)
multi_step[multi_step > 0] = 1
return multi_step
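# Small example: for A = [[0, 1], [1, 0]] (a 2-cycle), cal_step_connect(A, 1)
# computes A @ A = [[1, 0], [0, 1]] and binarizes it, i.e. it marks node
# pairs reachable by walks of length exactly step + 1.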
def obtain_partition(dataloader, fc_threshold, step=2):
pearsons = []
for data_in, pearson, label in dataloader:
pearsons.append(pearson)
fc_data = torch.mean(torch.cat(pearsons), dim=0)
fc_data[fc_data > fc_threshold] = 1
fc_data[fc_data <= fc_threshold] = 0
_, n = fc_data.shape
final_partition = torch.zeros((n, (n-1)*n//2))
connection = cal_step_connect(fc_data, step)
temp = 0
for i in range(connection.shape[0]):
temp += i
for j in range(i):
if connection[i, j] > 0:
final_partition[i, temp-i+j] = 1
final_partition[j, temp-i+j] = 1
# a = random.randint(0, n-1)
# b = random.randint(0, n-1)
# final_partition[a, temp-i+j] = 1
# final_partition[b, temp-i+j] = 1
connect_num = torch.sum(final_partition > 0)/n
print(f'Final Partition {connect_num}')
return final_partition.cuda().float(), connect_num
| 2,388 | 27.783133 | 90 | py |
FBNETGEN | FBNETGEN-main/util/loss.py | import torch
def inner_loss(label, matrixs):
loss = 0
if torch.sum(label == 0) > 1:
loss += torch.mean(torch.var(matrixs[label == 0], dim=0))
if torch.sum(label == 1) > 1:
loss += torch.mean(torch.var(matrixs[label == 1], dim=0))
return loss
def intra_loss(label, matrixs):
a, b = None, None
if torch.sum(label == 0) > 0:
a = torch.mean(matrixs[label == 0], dim=0)
if torch.sum(label == 1) > 0:
b = torch.mean(matrixs[label == 1], dim=0)
if a is not None and b is not None:
return 1 - torch.mean(torch.pow(a-b, 2))
else:
return 0
def mixup_cluster_loss(matrixs, y_a, y_b, lam, intra_weight=2):
y_1 = lam * y_a.float() + (1 - lam) * y_b.float()
y_0 = 1 - y_1
bz, roi_num, _ = matrixs.shape
matrixs = matrixs.reshape((bz, -1))
sum_1 = torch.sum(y_1)
sum_0 = torch.sum(y_0)
loss = 0.0
if sum_0 > 0:
center_0 = torch.matmul(y_0, matrixs)/sum_0
diff_0 = torch.norm(matrixs-center_0, p=1, dim=1)
loss += torch.matmul(y_0, diff_0)/(sum_0*roi_num*roi_num)
if sum_1 > 0:
center_1 = torch.matmul(y_1, matrixs)/sum_1
diff_1 = torch.norm(matrixs-center_1, p=1, dim=1)
loss += torch.matmul(y_1, diff_1)/(sum_1*roi_num*roi_num)
if sum_0 > 0 and sum_1 > 0:
loss += intra_weight * \
(1 - torch.norm(center_0-center_1, p=1)/(roi_num*roi_num))
return loss
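# Intuition for mixup_cluster_loss: y_1 / y_0 are soft (mixup-weighted) class
# memberships. The first two terms pull each learned matrix toward its class
# center (L1 compactness), while the final term pushes the two class centers
# apart, weighted by intra_weight.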
| 1,451 | 24.928571 | 70 | py |
FBNETGEN | FBNETGEN-main/util/meter.py | from typing import List
import torch
def accuracy(output: torch.Tensor, target: torch.Tensor, top_k=(1,)) -> List[float]:
max_k = max(top_k)
batch_size = target.size(0)
_, predict = output.topk(max_k, 1, True, True)
predict = predict.t()
correct = predict.eq(target.view(1, -1).expand_as(predict))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
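# Example (hypothetical tensors, binary task, top-1):
#     >>> output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#     >>> target = torch.tensor([1, 1])
#     >>> accuracy(output, target)
#     [50.0]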
class AverageMeter:
def __init__(self, length: int, name: str = None):
assert length > 0
self.name = name
self.count = 0
self.sum = 0.0
self.current: int = -1
self.history: List[float] = [None] * length
@property
def val(self) -> float:
return self.history[self.current]
@property
def avg(self) -> float:
return self.sum / self.count
def update(self, val: float):
self.current = (self.current + 1) % len(self.history)
self.sum += val
old = self.history[self.current]
if old is None:
self.count += 1
else:
self.sum -= old
self.history[self.current] = val
class TotalMeter:
def __init__(self):
self.sum = 0.0
self.count = 0
def update(self, val: float):
self.sum += val
self.count += 1
def update_with_weight(self, val: float, count: int):
self.sum += val*count
self.count += count
def reset(self):
self.sum = 0
self.count = 0
@property
def avg(self):
if self.count == 0:
return -1
return self.sum / self.count
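# Usage sketch (hypothetical values):
#     meter = TotalMeter()
#     meter.update_with_weight(0.5, 32)   # e.g. batch loss 0.5 over 32 samples
#     meter.update_with_weight(0.3, 16)
#     meter.avg                           # -> (0.5*32 + 0.3*16) / 48 ~= 0.433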
| 1,699 | 22.943662 | 84 | py |
FBNETGEN | FBNETGEN-main/util/FCNet/infer.py | import torch
import argparse
import yaml
from model import SeqenceModel, FCNet
from dataloader import infer_dataloader
from pathlib import Path
import numpy as np
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
def main(args):
with open(args.config_filename) as f:
config = yaml.load(f, Loader=yaml.Loader)
    # The data and the lower-triangle index pairs are needed regardless of
    # which model variant is being evaluated.
    dataset, labels, node_size, timeseries_size = \
        infer_dataloader(config['data'])
    xs, ys = torch.tril_indices(node_size, node_size, offset=-1)
config['train']["seq_len"] = timeseries_size
config['train']["node_size"] = node_size
if config['model']['type'] == 'seq':
model = SeqenceModel(config['model'], node_size, timeseries_size)
elif config['model']['type'] == 'FCNet':
model = FCNet(node_size, timeseries_size)
model.load_state_dict(torch.load(Path(args.model_path)/'model.pt'))
model.cuda()
model.eval()
features = []
interval = 1000
for d in dataset:
outputs = []
for index in range(0, xs.shape[0], interval):
data = []
for x, y in zip(xs[index: index+interval], ys[index: index+interval]):
data.append(d[[x,y],:])
data = torch.stack(data, dim=0).cuda()
output = model(data)
outputs.append(output[:, 1].detach().cpu().numpy())
outputs = np.concatenate(outputs)
features.append(outputs)
features = np.array(features)
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
linearmodel = ElasticNet(alpha=1.0, l1_ratio=0.2, fit_intercept=False).fit(X_train, y_train)
select_feature = linearmodel.coef_!=0
print('Used feature number: ', np.sum(select_feature))
X_train = X_train[:, select_feature]
X_test = X_test[:, select_feature]
svm = SVC(gamma='auto', probability=True).fit(X_train, y_train)
print("acc", svm.score(X_test, y_test))
prob_result = svm.predict_proba(X_test)
auc = roc_auc_score(y_test, prob_result[:, 1])
print("auc", auc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default='result/02-07-15-29-00_PNC_FCNet_normal_none_loss_none_none', type=str,
help='The path of the folder containing the model.')
parser.add_argument('--config_filename', default='setting/pnc.yaml', type=str,
help='Configuration filename for training the model.')
args = parser.parse_args()
main(args)
| 2,775 | 22.726496 | 119 | py |
FBNETGEN | FBNETGEN-main/model/GSL.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from model.cell import DCGRUCell
import numpy as np
from .model import GNNPredictor, ConvKRegion, Embed2GraphByLinear, GruKRegion, Embed2GraphByProduct
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def cosine_similarity_torch(x1, x2=None, eps=1e-8):
x2 = x1 if x2 is None else x2
w1 = x1.norm(p=2, dim=1, keepdim=True)
w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
return torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps)
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(device)
return -torch.autograd.Variable(torch.log(-torch.log(U + eps) + eps))
def gumbel_softmax_sample(logits, temperature, eps=1e-10):
sample = sample_gumbel(logits.size(), eps=eps)
y = logits + sample
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(logits, temperature, hard=False, eps=1e-10):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y_soft = gumbel_softmax_sample(logits, temperature=temperature, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
y_hard = torch.zeros(*shape).to(device)
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
y = torch.autograd.Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
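# Usage sketch (assuming a CUDA device, since sample_gumbel allocates there):
#     logits = torch.randn(4, 360, 360).to(device)
#     adj = gumbel_softmax(logits, temperature=0.5, hard=True)
# With hard=True the forward pass yields a one-hot sample along the last dim,
# while gradients flow through the soft sample (straight-through estimator).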
class GCNPredictor(nn.Module):
def __init__(self, node_input_dim, roi_num=360):
super().__init__()
inner_dim = roi_num
self.roi_num = roi_num
self.project1 = nn.Sequential(
nn.Linear(node_input_dim, inner_dim),
nn.BatchNorm1d(roi_num),
nn.Dropout(p=0.4),
nn.LeakyReLU(negative_slope=0.33)
)
self.project2 = nn.Sequential(
nn.Linear(inner_dim, inner_dim),
nn.BatchNorm1d(roi_num),
nn.Dropout(p=0.4),
nn.LeakyReLU(negative_slope=0.33)
)
self.project3 = nn.Sequential(
nn.Linear(inner_dim, inner_dim),
nn.BatchNorm1d(roi_num),
nn.Dropout(p=0.4),
nn.LeakyReLU(negative_slope=0.33)
)
self.fcn = nn.Sequential(
nn.Linear(inner_dim, 32),
nn.LeakyReLU(negative_slope=0.33),
nn.Linear(32, 2)
)
def normalize(self, m):
left = torch.sum(m, dim=2, keepdim=True)
right = torch.sum(m, dim=1, keepdim=True)
normalize = 1.0/torch.sqrt(torch.bmm(left, right))
normalize[torch.isinf(normalize)] = 0
return torch.mul(m, normalize)
def forward(self, m, node_feature):
m = self.normalize(m)
x = self.project1(node_feature)
        x = torch.bmm(m, x)
        x = self.project2(x)
        x = torch.bmm(m, x)
        x = self.project3(x)
x = torch.sum(x, dim=1)
return self.fcn(x)
class Seq2SeqAttrs:
def __init__(self, num_nodes=360):
self.max_diffusion_step = 2
self.cl_decay_steps = 1000
self.filter_type = 'laplacian'
self.num_nodes = num_nodes
self.num_rnn_layers = 3
self.rnn_units = 1
self.hidden_state_size = self.num_nodes * self.rnn_units
class EncoderModel(nn.Module, Seq2SeqAttrs):
def __init__(self, seq_len, input_dim=1, num_nodes=360):
nn.Module.__init__(self)
Seq2SeqAttrs.__init__(self, num_nodes=num_nodes)
self.input_dim = input_dim
self.seq_len = seq_len # for the encoder
self.dcgru_layers = nn.ModuleList(
[DCGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes,
filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])
def forward(self, inputs, adj, hidden_state=None):
"""
Encoder forward pass.
:param inputs: shape (batch_size, self.num_nodes * self.input_dim)
:param hidden_state: (num_layers, batch_size, self.hidden_state_size)
optional, zeros if not provided
:return: output: # shape (batch_size, self.hidden_state_size)
hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
(lower indices mean lower layers)
"""
batch_size, _ = inputs.size()
if hidden_state is None:
hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size),
device=device)
hidden_states = []
output = inputs
for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
next_hidden_state = dcgru_layer(
output, hidden_state[layer_num], adj)
hidden_states.append(next_hidden_state)
output = next_hidden_state
# runs in O(num_layers) so not too slow
return output, torch.stack(hidden_states)
class DecoderModel(nn.Module, Seq2SeqAttrs):
def __init__(self, horizn=32, num_nodes=360):
# super().__init__(is_training, adj_mx, **model_kwargs)
nn.Module.__init__(self)
Seq2SeqAttrs.__init__(self, num_nodes=num_nodes)
self.output_dim = 1
self.horizn = horizn # for the decoder
self.projection_layer = nn.Linear(self.rnn_units, self.output_dim)
self.dcgru_layers = nn.ModuleList(
[DCGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes,
filter_type=self.filter_type) for _ in range(self.num_rnn_layers)])
def forward(self, inputs, adj, hidden_state=None):
"""
:param inputs: shape (batch_size, self.num_nodes * self.output_dim)
:param hidden_state: (num_layers, batch_size, self.hidden_state_size)
optional, zeros if not provided
:return: output: # shape (batch_size, self.num_nodes * self.output_dim)
hidden_state # shape (num_layers, batch_size, self.hidden_state_size)
(lower indices mean lower layers)
"""
hidden_states = []
output = inputs
for layer_num, dcgru_layer in enumerate(self.dcgru_layers):
next_hidden_state = dcgru_layer(
output, hidden_state[layer_num], adj)
hidden_states.append(next_hidden_state)
output = next_hidden_state
projected = self.projection_layer(output.view(-1, self.rnn_units))
output = projected.view(-1, self.num_nodes * self.output_dim)
return output, torch.stack(hidden_states)
class TSConstruction(nn.Module, Seq2SeqAttrs):
def __init__(self, feature_dim=8, seq_len=64, node_num=360, discrete=True):
super().__init__()
Seq2SeqAttrs.__init__(self, num_nodes=node_num)
self.seq_len = seq_len
self.horizn_len = seq_len
self.encoder_model = EncoderModel(seq_len, num_nodes=self.num_nodes)
self.decoder_model = DecoderModel(seq_len, num_nodes=self.num_nodes)
self.discrete = discrete
self.extactor = GruKRegion(out_size=feature_dim)
# self.graph_generator = Embed2GraphByLinear(
# input_dim=feature_dim, roi_num=self.num_nodes)
self.graph_generator = Embed2GraphByProduct(
input_dim=feature_dim, roi_num=self.num_nodes)
def encoder(self, inputs, adj):
"""
Encoder forward pass
:param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
:return: encoder_hidden_state: ( batch_size, self.hidden_state_size)
"""
encoder_hidden_state = None
for t in range(self.encoder_model.seq_len):
last_hidden_state, encoder_hidden_state = self.encoder_model(
inputs[t], adj, encoder_hidden_state)
return encoder_hidden_state
def decoder(self, encoder_hidden_state, adj):
"""
Decoder forward pass
:param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param adj: adjacency matrix used by the diffusion GRU cells
:return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
"""
batch_size = encoder_hidden_state.size(1)
go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
device=device)
decoder_hidden_state = encoder_hidden_state
decoder_input = go_symbol
outputs = []
for t in range(self.decoder_model.horizn):
decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, adj,
decoder_hidden_state)
decoder_input = decoder_output
outputs.append(decoder_output)
outputs = torch.stack(outputs)
return outputs
def calculate_random_walk_matrix(self, adj_mx):
# tf.Print(adj_mx, [adj_mx], message="This is adj: ")
adj_mx = adj_mx + torch.eye(int(adj_mx.shape[1])).to(device)
d = torch.sum(adj_mx, 2)
d_inv = 1. / d
d_inv = torch.where(torch.isinf(d_inv), torch.zeros(
d_inv.shape).to(device), d_inv)
d_mat_inv = torch.diag_embed(d_inv)
random_walk_mx = torch.bmm(d_mat_inv, adj_mx)
return random_walk_mx
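    # calculate_random_walk_matrix returns D^{-1}(A + I), a row-stochastic
    # transition matrix. For a single 2-node graph with A = [[0, 1], [1, 0]]:
    # A + I = [[1, 1], [1, 1]], the row sums are 2, so the result is
    # [[0.5, 0.5], [0.5, 0.5]].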
def forward(self, full_seq, reconstruct_seq, node_feas, temperature):
"""
:param inputs: shape (batch_size, num_sensor, seq_len)
:param batches_seen: batches seen till now
:return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
"""
extracted_feature = self.extactor(full_seq)
# if torch.any(torch.isnan(extracted_feature)):
# print('has nan1')
# extracted_feature = F.softmax(extracted_feature, dim=-1)
# if torch.any(torch.isnan(extracted_feature)):
# print('has nan2')
adj = self.graph_generator(extracted_feature)
if self.discrete:
adj = gumbel_softmax(
adj[:, :, :, 0], temperature=temperature, hard=True)
else:
adj = adj[:, :, :, 0]
# mask = torch.eye(self.num_nodes, self.num_nodes).to(device).byte()
mask = torch.eye(self.num_nodes, self.num_nodes).bool().to(device)
adj = torch.where(mask, torch.zeros(
mask.shape).to(device), adj)
        # NOTE: the random-walk normalization is computed but immediately
        # overwritten below, so the raw sampled adjacency is what the
        # encoder/decoder actually receive.
        random_walk_matrix = self.calculate_random_walk_matrix(adj)
        random_walk_matrix = adj
reconstruct_seq = reconstruct_seq.permute(2,0,1)
encoder_hidden_state = self.encoder(reconstruct_seq, random_walk_matrix)
outputs = self.decoder(encoder_hidden_state, random_walk_matrix)
outputs = outputs.permute(1,2,0)
return outputs
class BrainGSLModel(nn.Module, Seq2SeqAttrs):
def __init__(self, feature_dim=8, seq_len=64, node_num=360, discrete=True):
super().__init__()
Seq2SeqAttrs.__init__(self, num_nodes=node_num)
self.seq_len = seq_len
self.horizn_len = seq_len
self.encoder_model = EncoderModel(seq_len, num_nodes=self.num_nodes)
self.decoder_model = DecoderModel(seq_len, num_nodes=self.num_nodes)
self.discrete = discrete
self.extactor = GruKRegion(out_size=feature_dim)
# self.graph_generator = Embed2GraphByLinear(
# input_dim=feature_dim, roi_num=self.num_nodes)
self.graph_generator = Embed2GraphByProduct(
input_dim=feature_dim, roi_num=self.num_nodes)
self.predictor = GCNPredictor(
node_input_dim=self.num_nodes, roi_num=self.num_nodes)
def encoder(self, inputs, adj):
"""
Encoder forward pass
:param inputs: shape (seq_len, batch_size, num_sensor * input_dim)
:return: encoder_hidden_state: ( batch_size, self.hidden_state_size)
"""
encoder_hidden_state = None
for t in range(self.encoder_model.seq_len):
last_hidden_state, encoder_hidden_state = self.encoder_model(
inputs[t], adj, encoder_hidden_state)
return encoder_hidden_state
def decoder(self, encoder_hidden_state, adj):
"""
Decoder forward pass
:param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size)
        :param adj: adjacency matrix used by the diffusion GRU cells
:return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
"""
batch_size = encoder_hidden_state.size(1)
go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim),
device=device)
decoder_hidden_state = encoder_hidden_state
decoder_input = go_symbol
outputs = []
for t in range(self.decoder_model.horizn):
decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, adj,
decoder_hidden_state)
decoder_input = decoder_output
outputs.append(decoder_output)
outputs = torch.stack(outputs)
return outputs
def calculate_random_walk_matrix(self, adj_mx):
# tf.Print(adj_mx, [adj_mx], message="This is adj: ")
adj_mx = adj_mx + torch.eye(int(adj_mx.shape[1])).to(device)
d = torch.sum(adj_mx, 2)
d_inv = 1. / d
d_inv = torch.where(torch.isinf(d_inv), torch.zeros(
d_inv.shape).to(device), d_inv)
d_mat_inv = torch.diag_embed(d_inv)
random_walk_mx = torch.bmm(d_mat_inv, adj_mx)
return random_walk_mx
def forward(self, full_seq, reconstruct_seq, node_feas, temperature):
"""
:param inputs: shape (batch_size, num_sensor, seq_len)
:param batches_seen: batches seen till now
:return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim)
"""
extracted_feature = self.extactor(full_seq)
# if torch.any(torch.isnan(extracted_feature)):
# print('has nan1')
# extracted_feature = F.softmax(extracted_feature, dim=-1)
# if torch.any(torch.isnan(extracted_feature)):
# print('has nan2')
adj = self.graph_generator(extracted_feature)
if self.discrete:
adj = gumbel_softmax(
adj[:, :, :, 0], temperature=temperature, hard=True)
else:
adj = adj[:, :, :, 0]
# mask = torch.eye(self.num_nodes, self.num_nodes).to(device).byte()
mask = torch.eye(self.num_nodes, self.num_nodes).bool().to(device)
adj = torch.where(mask, torch.zeros(
mask.shape).to(device), adj)
        # NOTE: as in TSConstruction, the random-walk normalization is
        # computed but immediately overwritten, so the raw sampled adjacency
        # is what the encoder/decoder actually receive.
        random_walk_matrix = self.calculate_random_walk_matrix(adj)
        random_walk_matrix = adj
reconstruct_seq = reconstruct_seq.permute(2,0,1)
encoder_hidden_state = self.encoder(reconstruct_seq, random_walk_matrix)
outputs = self.decoder(encoder_hidden_state, random_walk_matrix)
outputs = outputs.permute(1,2,0)
adj = torch.where(mask, torch.ones(
mask.shape).to(device), adj)
prediction = self.predictor(adj, node_feas)
return outputs, prediction, adj
| 16,048 | 35.894253 | 119 | py |
FBNETGEN | FBNETGEN-main/model/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, MaxPool1d, Linear, GRU
import math
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).cuda()
return -torch.autograd.Variable(torch.log(-torch.log(U + eps) + eps))
def gumbel_softmax_sample(logits, temperature, eps=1e-10):
sample = sample_gumbel(logits.size(), eps=eps)
y = logits + sample
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(logits, temperature, hard=False, eps=1e-10):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y_soft = gumbel_softmax_sample(logits, temperature=temperature, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
y_hard = torch.zeros(*shape).cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
y = torch.autograd.Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
class GruKRegion(nn.Module):
def __init__(self, kernel_size=8, layers=4, out_size=8, dropout=0.5):
super().__init__()
self.gru = GRU(kernel_size, kernel_size, layers,
bidirectional=True, batch_first=True)
self.kernel_size = kernel_size
self.linear = nn.Sequential(
nn.Dropout(dropout),
Linear(kernel_size*2, kernel_size),
nn.LeakyReLU(negative_slope=0.2),
Linear(kernel_size, out_size)
)
def forward(self, raw):
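        # Shape walkthrough: raw is (b, k, d); it is chunked into windows of
        # length kernel_size -> (b*k, d/kernel_size, kernel_size), encoded by
        # the bidirectional GRU, and the final step's output
        # (b*k, 2*kernel_size) is projected to (b, k, out_size). Assumes d is
        # a multiple of kernel_size.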
b, k, d = raw.shape
x = raw.view((b*k, -1, self.kernel_size))
x, h = self.gru(x)
x = x[:, -1, :]
x = x.view((b, k, -1))
x = self.linear(x)
return x
class ConvKRegion(nn.Module):
def __init__(self, k=1, out_size=8, kernel_size=8, pool_size=16, time_series=512):
super().__init__()
self.conv1 = Conv1d(in_channels=k, out_channels=32,
kernel_size=kernel_size, stride=2)
output_dim_1 = (time_series-kernel_size)//2+1
self.conv2 = Conv1d(in_channels=32, out_channels=32,
kernel_size=8)
output_dim_2 = output_dim_1 - 8 + 1
self.conv3 = Conv1d(in_channels=32, out_channels=16,
kernel_size=8)
output_dim_3 = output_dim_2 - 8 + 1
self.max_pool1 = MaxPool1d(pool_size)
output_dim_4 = output_dim_3 // pool_size * 16
self.in0 = nn.InstanceNorm1d(time_series)
self.in1 = nn.BatchNorm1d(32)
self.in2 = nn.BatchNorm1d(32)
self.in3 = nn.BatchNorm1d(16)
self.linear = nn.Sequential(
Linear(output_dim_4, 32),
nn.LeakyReLU(negative_slope=0.2),
Linear(32, out_size)
)
def forward(self, x):
b, k, d = x.shape
x = torch.transpose(x, 1, 2)
x = self.in0(x)
x = torch.transpose(x, 1, 2)
x = x.contiguous()
x = x.view((b*k, 1, d))
x = self.conv1(x)
x = self.in1(x)
x = self.conv2(x)
x = self.in2(x)
x = self.conv3(x)
x = self.in3(x)
x = self.max_pool1(x)
x = x.view((b, k, -1))
x = self.linear(x)
return x
class SeqenceModel(nn.Module):
def __init__(self, model_config, roi_num=360, time_series=512):
super().__init__()
if model_config['extractor_type'] == 'cnn':
self.extract = ConvKRegion(
out_size=model_config['embedding_size'], kernel_size=model_config['window_size'],
time_series=time_series, pool_size=4, )
elif model_config['extractor_type'] == 'gru':
self.extract = GruKRegion(
out_size=model_config['embedding_size'], kernel_size=model_config['window_size'],
layers=model_config['num_gru_layers'], dropout=model_config['dropout'])
self.linear = nn.Sequential(
Linear(model_config['embedding_size']*roi_num, 256),
nn.Dropout(model_config['dropout']),
nn.ReLU(),
Linear(256, 32),
nn.Dropout(model_config['dropout']),
nn.ReLU(),
Linear(32, 2)
)
def forward(self, x):
x = self.extract(x)
x = x.flatten(start_dim=1)
x = self.linear(x)
return x
class Embed2GraphByProduct(nn.Module):
def __init__(self, input_dim, roi_num=264):
super().__init__()
def forward(self, x):
m = torch.einsum('ijk,ipk->ijp', x, x)
m = torch.unsqueeze(m, -1)
return m
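    # The einsum 'ijk,ipk->ijp' is a batched Gram matrix of node embeddings:
    # m[b, i, j] = <x[b, i, :], x[b, j, :]>, i.e. pairwise inner products
    # that serve directly as the learned connectivity graph.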
class Embed2GraphByLinear(nn.Module):
def __init__(self, input_dim, roi_num=360):
super().__init__()
self.fc_out = nn.Linear(input_dim * 2, input_dim)
self.fc_cat = nn.Linear(input_dim, 1)
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
off_diag = np.ones([roi_num, roi_num])
rel_rec = np.array(encode_onehot(
np.where(off_diag)[0]), dtype=np.float32)
rel_send = np.array(encode_onehot(
np.where(off_diag)[1]), dtype=np.float32)
self.rel_rec = torch.FloatTensor(rel_rec).cuda()
self.rel_send = torch.FloatTensor(rel_send).cuda()
def forward(self, x):
batch_sz, region_num, _ = x.shape
receivers = torch.matmul(self.rel_rec, x)
senders = torch.matmul(self.rel_send, x)
x = torch.cat([senders, receivers], dim=2)
x = torch.relu(self.fc_out(x))
x = self.fc_cat(x)
x = torch.relu(x)
m = torch.reshape(
x, (batch_sz, region_num, region_num, -1))
return m
class GNNPredictor(nn.Module):
def __init__(self, node_input_dim, roi_num=360):
super().__init__()
inner_dim = roi_num
self.roi_num = roi_num
self.gcn = nn.Sequential(
nn.Linear(node_input_dim, inner_dim),
nn.LeakyReLU(negative_slope=0.2),
Linear(inner_dim, inner_dim)
)
self.bn1 = torch.nn.BatchNorm1d(inner_dim)
self.gcn1 = nn.Sequential(
nn.Linear(inner_dim, inner_dim),
nn.LeakyReLU(negative_slope=0.2),
)
self.bn2 = torch.nn.BatchNorm1d(inner_dim)
self.gcn2 = nn.Sequential(
nn.Linear(inner_dim, 64),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(64, 8),
nn.LeakyReLU(negative_slope=0.2),
)
self.bn3 = torch.nn.BatchNorm1d(inner_dim)
self.fcn = nn.Sequential(
nn.Linear(8*roi_num, 256),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(256, 32),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(32, 2)
)
def forward(self, m, node_feature):
bz = m.shape[0]
x = torch.einsum('ijk,ijp->ijp', m, node_feature)
x = self.gcn(x)
x = x.reshape((bz*self.roi_num, -1))
x = self.bn1(x)
x = x.reshape((bz, self.roi_num, -1))
x = torch.einsum('ijk,ijp->ijp', m, x)
x = self.gcn1(x)
x = x.reshape((bz*self.roi_num, -1))
x = self.bn2(x)
x = x.reshape((bz, self.roi_num, -1))
x = torch.einsum('ijk,ijp->ijp', m, x)
x = self.gcn2(x)
x = self.bn3(x)
x = x.view(bz,-1)
return self.fcn(x)
class FBNETGEN(nn.Module):
def __init__(self, model_config, roi_num=360, node_feature_dim=360, time_series=512):
super().__init__()
self.graph_generation = model_config['graph_generation']
if model_config['extractor_type'] == 'cnn':
self.extract = ConvKRegion(
out_size=model_config['embedding_size'], kernel_size=model_config['window_size'],
time_series=time_series)
elif model_config['extractor_type'] == 'gru':
self.extract = GruKRegion(
out_size=model_config['embedding_size'], kernel_size=model_config['window_size'],
layers=model_config['num_gru_layers'])
if self.graph_generation == "linear":
self.emb2graph = Embed2GraphByLinear(
model_config['embedding_size'], roi_num=roi_num)
elif self.graph_generation == "product":
self.emb2graph = Embed2GraphByProduct(
model_config['embedding_size'], roi_num=roi_num)
self.predictor = GNNPredictor(node_feature_dim, roi_num=roi_num)
def forward(self, t, nodes):
x = self.extract(t)
x = F.softmax(x, dim=-1)
m = self.emb2graph(x)
m = m[:, :, :, 0]
bz, _, _ = m.shape
edge_variance = torch.mean(torch.var(m.reshape((bz, -1)), dim=1))
return self.predictor(m, nodes), m, edge_variance
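# Illustrative usage sketch: shows the config keys FBNETGEN reads and the tensor
# shapes it expects. The concrete values below are assumptions, not taken from the
# original training scripts.
def _demo_fbnetgen():
    example_config = {
        'extractor_type': 'gru',        # or 'cnn'
        'embedding_size': 8,
        'window_size': 8,
        'num_gru_layers': 4,
        'dropout': 0.5,
        'graph_generation': 'product',  # 'linear' requires CUDA (see Embed2GraphByLinear)
    }
    model = FBNETGEN(example_config, roi_num=360, node_feature_dim=360, time_series=512)
    t = torch.randn(2, 360, 512)        # (batch, ROIs, time points)
    nodes = torch.randn(2, 360, 360)    # node features, e.g. a connectivity matrix
    logits, m, edge_variance = model(t, nodes)
    # logits: (2, 2) class scores, m: (2, 360, 360) learned graph
    return logits, m, edge_variance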
class E2EBlock(torch.nn.Module):
'''E2Eblock.'''
def __init__(self, in_planes, planes, roi_num, bias=True):
super().__init__()
self.d = roi_num
self.cnn1 = torch.nn.Conv2d(in_planes, planes, (1, self.d), bias=bias)
self.cnn2 = torch.nn.Conv2d(in_planes, planes, (self.d, 1), bias=bias)
def forward(self, x):
a = self.cnn1(x)
b = self.cnn2(x)
return torch.cat([a]*self.d, 3)+torch.cat([b]*self.d, 2)
class BrainNetCNN(torch.nn.Module):
def __init__(self, roi_num):
super().__init__()
self.in_planes = 1
self.d = roi_num
self.e2econv1 = E2EBlock(1, 32, roi_num, bias=True)
self.e2econv2 = E2EBlock(32, 64, roi_num, bias=True)
self.E2N = torch.nn.Conv2d(64, 1, (1, self.d))
self.N2G = torch.nn.Conv2d(1, 256, (self.d, 1))
self.dense1 = torch.nn.Linear(256, 128)
self.dense2 = torch.nn.Linear(128, 30)
self.dense3 = torch.nn.Linear(30, 2)
def forward(self, x):
x = x.unsqueeze(dim=1)
out = F.leaky_relu(self.e2econv1(x), negative_slope=0.33)
out = F.leaky_relu(self.e2econv2(out), negative_slope=0.33)
out = F.leaky_relu(self.E2N(out), negative_slope=0.33)
out = F.dropout(F.leaky_relu(
self.N2G(out), negative_slope=0.33), p=0.5)
out = out.view(out.size(0), -1)
out = F.dropout(F.leaky_relu(
self.dense1(out), negative_slope=0.33), p=0.5)
out = F.dropout(F.leaky_relu(
self.dense2(out), negative_slope=0.33), p=0.5)
out = F.leaky_relu(self.dense3(out), negative_slope=0.33)
return out
class FCNet(nn.Module):
def __init__(self, node_size, seq_len, kernel_size=3):
super().__init__()
self.ind1, self.ind2 = torch.triu_indices(node_size, node_size, offset=1)
seq_len -= kernel_size//2*2
channel1 = 32
self.block1 = nn.Sequential(
Conv1d(in_channels=1, out_channels=channel1,
kernel_size=kernel_size),
nn.BatchNorm1d(channel1),
nn.LeakyReLU(),
nn.MaxPool1d(kernel_size=2, stride=2)
)
seq_len //= 2
seq_len -= kernel_size//2*2
channel2 = 64
self.block2 = nn.Sequential(
Conv1d(in_channels=channel1, out_channels=channel2,
kernel_size=kernel_size),
nn.BatchNorm1d(channel2),
nn.LeakyReLU(),
nn.MaxPool1d(kernel_size=2, stride=2)
)
seq_len //= 2
seq_len -= kernel_size//2*2
channel3 = 96
self.block3 = nn.Sequential(
Conv1d(in_channels=channel2, out_channels=channel3,
kernel_size=kernel_size),
nn.BatchNorm1d(channel3),
nn.LeakyReLU()
)
channel4 = 64
self.block4 = nn.Sequential(
Conv1d(in_channels=channel3, out_channels=channel4,
kernel_size=kernel_size),
Conv1d(in_channels=channel4, out_channels=channel4,
kernel_size=kernel_size),
nn.MaxPool1d(kernel_size=2, stride=2)
)
seq_len -= kernel_size//2*2
seq_len -= kernel_size//2*2
seq_len //= 2
self.fc = nn.Linear(in_features=seq_len*channel4, out_features=32)
self.diff_mode = nn.Sequential(
nn.Linear(in_features=32*2, out_features=32),
nn.Linear(in_features=32, out_features=32),
nn.Linear(in_features=32, out_features=2)
)
def forward(self, x):
bz, _, time_series = x.shape
x = x.reshape((bz*2, 1, time_series))
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = x.reshape((bz, 2, -1))
x = self.fc(x)
x = x.reshape((bz, -1))
diff = self.diff_mode(x)
return diff
| 13,552 | 29.050998 | 97 | py |
FBNETGEN | FBNETGEN-main/model/cell.py | import numpy as np
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LayerParams:
def __init__(self, rnn_network: torch.nn.Module, layer_type: str):
self._rnn_network = rnn_network
self._params_dict = {}
self._biases_dict = {}
self._type = layer_type
def get_weights(self, shape):
if shape not in self._params_dict:
nn_param = torch.nn.Parameter(torch.empty(*shape, device=device))
torch.nn.init.xavier_normal_(nn_param)
self._params_dict[shape] = nn_param
self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)),
nn_param)
return self._params_dict[shape]
def get_biases(self, length, bias_start=0.0):
if length not in self._biases_dict:
biases = torch.nn.Parameter(torch.empty(length, device=device))
torch.nn.init.constant_(biases, bias_start)
self._biases_dict[length] = biases
self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)),
biases)
return self._biases_dict[length]
class DCGRUCell(torch.nn.Module):
def __init__(self, num_units, max_diffusion_step, num_nodes, nonlinearity='tanh',
filter_type="laplacian", use_gc_for_ru=True):
"""
:param num_units:
:param adj_mx:
:param max_diffusion_step:
:param num_nodes:
:param nonlinearity:
:param filter_type: "laplacian", "random_walk", "dual_random_walk".
:param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
"""
super().__init__()
self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
# support other nonlinearities up here?
self._num_nodes = num_nodes
self._num_units = num_units
self._max_diffusion_step = max_diffusion_step
self._supports = []
self._use_gc_for_ru = use_gc_for_ru
self._fc_params = LayerParams(self, 'fc')
self._gconv_params = LayerParams(self, 'gconv')
@staticmethod
def _build_sparse_matrix(L):
L = L.tocoo()
indices = np.column_stack((L.row, L.col))
# this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L)
indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))]
L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=device)
return L
def forward(self, inputs, hx, adj):
"""Gated recurrent unit (GRU) with Graph Convolution.
:param inputs: (B, num_nodes * input_dim)
:param hx: (B, num_nodes * rnn_units)
:return
- Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`.
"""
# adj_mx = self._calculate_random_walk_matrix(adj)
adj_mx = adj.permute(0, 2, 1)
output_size = 2 * self._num_units
if self._use_gc_for_ru:
fn = self._gconv
else:
fn = self._fc
value = torch.sigmoid(
fn(inputs, adj_mx, hx, output_size, bias_start=1.0))
value = torch.reshape(value, (-1, self._num_nodes, output_size))
r, u = torch.split(
tensor=value, split_size_or_sections=self._num_units, dim=-1)
r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
c = self._gconv(inputs, adj_mx, r * hx, self._num_units)
if self._activation is not None:
c = self._activation(c)
new_state = u * hx + (1.0 - u) * c
return new_state
@staticmethod
def _concat(x, x_):
x_ = x_.unsqueeze(0)
return torch.cat([x, x_], dim=0)
def _fc(self, inputs, state, output_size, bias_start=0.0):
batch_size = inputs.shape[0]
inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1))
state = torch.reshape(state, (batch_size * self._num_nodes, -1))
inputs_and_state = torch.cat([inputs, state], dim=-1)
input_size = inputs_and_state.shape[-1]
weights = self._fc_params.get_weights((input_size, output_size))
value = torch.sigmoid(torch.matmul(inputs_and_state, weights))
biases = self._fc_params.get_biases(output_size, bias_start)
value += biases
return value
def _gconv(self, inputs, adj_mx, state, output_size, bias_start=0.0):
# Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
batch_size = inputs.shape[0]
inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1))
state = torch.reshape(state, (batch_size, self._num_nodes, -1))
inputs_and_state = torch.cat([inputs, state], dim=2)
input_size = inputs_and_state.size(2)
# batch_size, num_nodes, input_dim
x = inputs_and_state
x0 = x
# x0 = torch.reshape(
# x0, shape=[self._num_nodes, input_size * batch_size])
x = torch.unsqueeze(x0, 0)
if self._max_diffusion_step == 0:
pass
else:
x1 = torch.bmm(adj_mx, x0)
x = self._concat(x, x1)
for k in range(2, self._max_diffusion_step + 1):
x2 = 2 * torch.bmm(adj_mx, x1) - x0
x = self._concat(x, x2)
x1, x0 = x2, x1
num_matrices = self._max_diffusion_step + 1 # Adds for x itself.
# order, batch_size, num_nodes, input_dim
x = x.permute(1, 2, 3, 0) # (batch_size, num_nodes, input_size, order)
x = torch.reshape(
x, shape=[batch_size * self._num_nodes, input_size * num_matrices])
weights = self._gconv_params.get_weights(
(input_size * num_matrices, output_size))
# (batch_size * self._num_nodes, output_size)
x = torch.matmul(x, weights)
biases = self._gconv_params.get_biases(output_size, bias_start)
x += biases
# Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
return torch.reshape(x, [batch_size, self._num_nodes * output_size])
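if __name__ == '__main__':
    # Minimal smoke test, assuming the shapes documented in DCGRUCell.forward():
    # inputs (B, num_nodes * input_dim), hx (B, num_nodes * num_units), plus a batch
    # of dense adjacency matrices (B, num_nodes, num_nodes). Values are illustrative.
    B, num_nodes, input_dim, num_units = 2, 5, 3, 16
    cell = DCGRUCell(num_units=num_units, max_diffusion_step=2, num_nodes=num_nodes)
    inputs = torch.randn(B, num_nodes * input_dim, device=device)
    hx = torch.zeros(B, num_nodes * num_units, device=device)
    adj = torch.rand(B, num_nodes, num_nodes, device=device)
    new_state = cell(inputs, hx, adj)
    assert new_state.shape == (B, num_nodes * num_units)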
| 6,299 | 38.873418 | 105 | py |
dynet | dynet-master/examples/variational-autoencoder/basic-image-recon/vae.py | from __future__ import print_function
from utils import load_mnist, make_grid, pre_pillow_float_img_process, save_image
import numpy as np
import argparse
import dynet as dy
import os
if not os.path.exists('results'):
os.makedirs('results')
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--dynet-gpu', action='store_true', default=False,
help='enables DyNet CUDA training')
parser.add_argument('--dynet-gpus', type=int, default=1, metavar='N',
help='number of gpu devices to use')
parser.add_argument('--dynet-seed', type=int, default=None, metavar='N',
help='random seed (default: random inside DyNet)')
parser.add_argument('--dynet-mem', type=int, default=None, metavar='N',
help='allocating memory (default: default of DyNet 512MB)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
train_data = load_mnist('training', './data')
batch_size = args.batch_size
test_data = load_mnist('testing', './data')
def generate_batch_loader(data, batch_size):
i = 0
n = len(data)
while i + batch_size <= n:
yield np.asarray(data[i:i+batch_size])
i += batch_size
# if i < n:
# pass # last short batch ignored
# # yield data[i:]
class DynetLinear:
def __init__(self, dim_in, dim_out, dyParameterCollection):
assert(isinstance(dyParameterCollection, dy.ParameterCollection))
self.dim_in = dim_in
self.dim_out = dim_out
self.pW = dyParameterCollection.add_parameters((dim_out, dim_in))
self.pb = dyParameterCollection.add_parameters((dim_out))
def __call__(self, x):
assert(isinstance(x, dy.Expression))
        self.W = dy.parameter(self.pW)  # add the parameters to the computation graph as expressions
self.b = dy.parameter(self.pb)
self.x = x
return self.W * self.x + self.b
pc = dy.ParameterCollection()
class VAE:
def __init__(self, dyParameterCollection):
assert (isinstance(dyParameterCollection, dy.ParameterCollection))
self.fc1 = DynetLinear(784, 400, dyParameterCollection)
self.fc21 = DynetLinear(400, 20, dyParameterCollection)
self.fc22 = DynetLinear(400, 20, dyParameterCollection)
self.fc3 = DynetLinear(20, 400, dyParameterCollection)
self.fc4 = DynetLinear(400, 784, dyParameterCollection)
self.relu = dy.rectify
self.sigmoid = dy.logistic
self.training = False
def encode(self, x):
h1 = self.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = dy.exp(logvar * 0.5)
eps = dy.random_normal(dim=std.dim()[0], mean=0.0, stddev=1.0)
return dy.cmult(eps, std) + mu
else:
return mu
def decode(self, z):
h3 = self.relu(self.fc3(z))
return self.sigmoid(self.fc4(h3))
def forward(self, x):
assert(isinstance(x, dy.Expression))
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
model = VAE(pc)
optimizer = dy.AdamTrainer(pc, alpha=1e-3) # alpha: initial learning rate
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = dy.binary_log_loss(recon_x, x) # equiv to torch.nn.functional.binary_cross_entropy(?,?, size_average=False)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * dy.sum_elems(1 + logvar - dy.pow(mu, dy.scalarInput(2)) - dy.exp(logvar))
return BCE + KLD
def train(epoch):
model.training = True
train_loss = 0
train_loader = generate_batch_loader(train_data, batch_size=batch_size)
for batch_idx, data in enumerate(train_loader):
        # Dynamic construction of the computation graph
dy.renew_cg()
x = dy.inputTensor(data.reshape(-1, 784).T)
recon_x, mu, logvar = model.forward(x)
loss = loss_function(recon_x, x, mu, logvar)
# Forward
loss_value = loss.value()
train_loss += loss_value
# Backward
loss.backward()
optimizer.update()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_data),
100. * batch_idx / (len(train_data) / batch_size),
loss_value / len(data)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(train_data)))
def test(epoch):
model.training = False
test_loss = 0
test_loader = generate_batch_loader(test_data, batch_size=batch_size)
for i, data in enumerate(test_loader):
        # Dynamic construction of the computation graph
dy.renew_cg()
x = dy.inputTensor(data.reshape(-1, 784).T)
recon_x, mu, logvar = model.forward(x)
loss = loss_function(recon_x, x, mu, logvar)
# Forward
loss_value = loss.value()
test_loss += loss_value
if i == 0:
n = min(data.shape[0], 8)
comparison = np.concatenate([data[:n],
recon_x.npvalue().T.reshape(batch_size, 1, 28, 28)[:n]])
save_image(comparison,
'results/reconstruction_' + str(epoch) + '.png', nrow=n)
test_loss /= len(test_data)
print('====> Test set loss: {:.4f}'.format(test_loss))
import time
tictocs = []
for epoch in range(1, args.epochs + 1):
tic = time.time()
train(epoch)
test(epoch)
sample = dy.inputTensor(np.random.randn(20, 64))
sample = model.decode(sample)
save_image(sample.npvalue().T.reshape(64, 1, 28, 28),
'results/sample_' + str(epoch) + '.png')
toc = time.time()
tictocs.append(toc - tic)
print('############\n\n')
print('Total Time Cost:', np.sum(tictocs))
print('Epoch Time Cost', np.average(tictocs), '+-', np.std(tictocs) / np.sqrt(len(tictocs)))
print('\n\n############')
| 6,690 | 31.639024 | 118 | py |
dynet | dynet-master/examples/mnist/basic-mnist-benchmarks/mnist_pytorch.py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=10000, metavar='N',
help='input batch size for testing (default: 10000)')
parser.add_argument('--epochs', type=int, default=20, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
# parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
# help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.fc1 = nn.Linear(7*7*64, 1024)
self.fc2 = nn.Linear(1024, 10, bias=False)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 7*7*64)
x = F.relu(self.fc1(x))
x = F.dropout(x, 0.4)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Net()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr) # , momentum=args.momentum)
def train(epoch):
model.train()
epoch_start = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
epoch_end = time.time()
print("{} s per epoch".format(epoch_end-epoch_start))
def test():
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).item()  # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
| 4,645 | 38.372881 | 95 | py |
dynet | dynet-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# DyNet documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 16:13:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
sys.path.insert(0, os.path.abspath('.'))
import doc_util
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../../examples/tutorials'))
tutorials_folder = 'tutorials_notebooks'
if os.path.islink(tutorials_folder):
os.remove(tutorials_folder)
os.symlink('../../examples/jupyter-tutorials', tutorials_folder)
# Create copy of _dynet.pyx for documentation purposes
doc_util.create_doc_copy(in_file = '../../python/_dynet.pyx',out_file = 'dynet.py')
# Run doxygen if on Readthedocs :
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
subprocess.call('cd ../doxygen; doxygen', shell=True)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'breathe',
'nbsphinx',
'sphinx.ext.autodoc',
'm2r',
'sphinxcontrib.napoleon' # Yay Napoleon! Go France!
]
breathe_projects = {"dynet": "../doxygen/xml/"}
breathe_default_project = "dynet"
# Don't execute notebooks because it requires installing DyNet
nbsphinx_execute = 'never'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DyNet'
copyright = u'2016, Clab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/dynet_logo_white.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DyNetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DyNet.tex', u'DyNet Documentation',
u'Clab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dynet', u'DyNet Documentation',
[u'Clab'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DyNet', u'DyNet Documentation',
u'Clab', 'DyNet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,095 | 30.583333 | 83 | py |
pose_refinement | pose_refinement-master/src/training/loaders.py | import numpy as np
from torch.utils.data import DataLoader, SequentialSampler
from itertools import chain
import torch
from databases.datasets import pose_grid_from_index, Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset
class ConcatSampler(torch.utils.data.Sampler):
""" Concatenates two samplers. """
def __init__(self, sampler1, sampler2):
self.sampler1 = sampler1
self.sampler2 = sampler2
def __iter__(self):
return chain(iter(self.sampler1), iter(self.sampler2))
def __len__(self):
return len(self.sampler1) + len(self.sampler2)
class UnchunkedGenerator:
"""
Loader that can be used with VideoPose3d model to load all frames of a video at once.
Useful for testing/prediction.
"""
def __init__(self, dataset, pad, augment):
self.seqs = sorted(np.unique(dataset.index.seq))
self.dataset = dataset
self.pad = pad
self.augment = augment
def __iter__(self):
for seq in self.seqs:
inds = np.where(self.dataset.index.seq == seq)[0]
batch = self.dataset.get_samples(inds, False)
batch_2d = np.expand_dims(np.pad(batch['pose2d'], ((self.pad, self.pad), (0, 0)), 'edge'), axis=0)
batch_3d = np.expand_dims(batch['pose3d'], axis=0)
batch_valid = np.expand_dims(batch['valid_pose'], axis=0)
if self.augment:
flipped_batch = self.dataset.get_samples(inds, True)
flipped_batch_2d = np.expand_dims(np.pad(flipped_batch['pose2d'],
((self.pad, self.pad), (0, 0)), 'edge'), axis=0)
flipped_batch_3d = np.expand_dims(flipped_batch['pose3d'], axis=0)
batch_2d = np.concatenate((batch_2d, flipped_batch_2d), axis=0)
batch_3d = np.concatenate((batch_3d, flipped_batch_3d), axis=0)
batch_valid = np.concatenate((batch_valid, batch_valid), axis=0)
# yield {'pose2d': batch_2d, 'pose3d':batch_3d}
yield batch_2d, batch_valid
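# Illustrative usage sketch: `dataset` is assumed to be one of the pose datasets from
# databases.datasets (it must expose `index.seq` and `get_samples`) and `model` a
# temporal VideoPose3D-style network that reports its receptive field.
def _demo_unchunked(dataset, model):
    pad = (model.receptive_field() - 1) // 2
    generator = UnchunkedGenerator(dataset, pad=pad, augment=False)
    for batch_2d, batch_valid in generator:
        # batch_2d holds one whole padded sequence: (1, nFrames + 2*pad, featureDim)
        pred = model(torch.from_numpy(batch_2d).cuda())
        yield pred.detach().cpu().numpy()[0][batch_valid[0]]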
class ChunkedGenerator:
"""
Generator to be used with temporal model, during training.
"""
def __init__(self, dataset, batch_size, pad, augment, shuffle=True):
"""
pad: 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
it is usually (receptive_field-1)/2
augment: turn on random horizontal flipping for training
shuffle: randomly shuffle the dataset before each epoch
"""
assert isinstance(dataset, (Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset)), "Only works with Mpi datasets"
self.dataset = dataset
self.batch_size = batch_size
self.pad = pad
self.shuffle = shuffle
self.augment = augment
N = len(dataset.index)
frame_start = np.arange(N)-pose_grid_from_index(dataset.index.seq)[1] # index of the start of the frame
frame_end = np.arange(N)-pose_grid_from_index(dataset.index.seq[::-1])[1]
frame_end = N-frame_end[::-1]-1 # index of the end of the frame (last frame)
self.frame_start = frame_start
self.frame_end = frame_end
assert np.all(frame_start<=frame_end)
assert np.all(dataset.index.seq[frame_start] == dataset.index.seq[frame_end])
assert np.all(dataset.index.seq[frame_start] == dataset.index.seq)
def __len__(self):
return len(self.dataset)//self.batch_size
def __iter__(self):
N = len(self.dataset)
num_batch = N//self.batch_size
indices = np.arange(N)
if self.shuffle:
np.random.shuffle(indices)
SUB_BATCH = 4
assert self.batch_size % SUB_BATCH == 0, "SUB_BATCH must divide batch_size"
class LoadingDataset:
def __len__(iself):
return num_batch*SUB_BATCH
def __getitem__(iself, ind):
sub_batch_size = self.batch_size//SUB_BATCH
batch_inds = indices[ind*sub_batch_size: (ind+1)*sub_batch_size] # (nBatch,)
batch_frame_start = self.frame_start[batch_inds][:, np.newaxis]
batch_frame_end = self.frame_end[batch_inds][:, np.newaxis]
if self.augment:
flip = np.random.random(sub_batch_size) < 0.5
else:
flip = np.zeros(sub_batch_size, dtype='bool')
flip = np.tile(flip[:, np.newaxis], (1, 2*self.pad+1))
# expand batch_inds such that it includes lower&upper bound indices for every element
chunk_inds = batch_inds[:, np.newaxis] + np.arange(-self.pad, self.pad+1)[np.newaxis, :]
chunk_inds = np.clip(chunk_inds, batch_frame_start, batch_frame_end)
assert np.all(chunk_inds>=batch_frame_start)
assert np.all(chunk_inds<=batch_frame_end)
chunk = self.dataset.get_samples(chunk_inds.ravel(), flip.ravel())
chunk_pose2d = chunk['pose2d'].reshape(chunk_inds.shape+chunk['pose2d'].shape[1:])
chunk_pose3d = chunk['pose3d'].reshape(chunk_inds.shape+chunk['pose3d'].shape[1:])
chunk_valid = chunk['valid_pose'].reshape(chunk_inds.shape+chunk['valid_pose'].shape[1:])
# for non temporal values select the middle item:
chunk_pose3d = chunk_pose3d[:, self.pad]
chunk_valid = chunk_valid[:, self.pad]
chunk_pose3d = np.expand_dims(chunk_pose3d, 1)
return chunk_pose2d, chunk_pose3d, chunk_valid
wrapper_dataset = LoadingDataset()
loader = DataLoader(wrapper_dataset, sampler=SequentialSampler(wrapper_dataset),
batch_size=SUB_BATCH, num_workers=4)
for chunk_pose2d, chunk_pose3d, chunk_valid in loader:
chunk_pose2d = chunk_pose2d.reshape((-1,)+chunk_pose2d.shape[2:])
chunk_pose3d = chunk_pose3d.reshape((-1,)+chunk_pose3d.shape[2:])
chunk_valid = chunk_valid.reshape(-1)
yield {'temporal_pose2d': chunk_pose2d, 'pose3d': chunk_pose3d, 'valid_pose': chunk_valid}
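# Illustrative usage sketch: `train_set` is assumed to be an Mpi3dTrainDataset or
# PersonStackedMucoTempDataset instance; the receptive field comes from the temporal model.
def _demo_chunked(train_set, receptive_field, batch_size=256):
    pad = (receptive_field - 1) // 2
    generator = ChunkedGenerator(train_set, batch_size=batch_size, pad=pad,
                                 augment=True, shuffle=True)
    for batch in generator:
        # batch['temporal_pose2d']: (batch_size, 2*pad+1, featureDim)
        # batch['pose3d']:          (batch_size, 1, poseDim)
        # batch['valid_pose']:      (batch_size,)
        yield batch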
| 6,259 | 41.297297 | 136 | py |
pose_refinement | pose_refinement-master/src/training/callbacks.py | import math
import numpy as np
import torch
from training.loaders import UnchunkedGenerator
from training.torch_tools import eval_results
from util.pose import remove_root, mrpe, optimal_scaling, r_mpjpe
class BaseCallback(object):
def on_itergroup_end(self, iter_cnt, epoch_loss):
pass
def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
pass
def _sample_value(dictionary):
""" Selects a value from a dictionary, it is always the same element. """
return list(dictionary.values())[0]
class BaseMPJPECalculator(BaseCallback):
"""
Base class for calculating and displaying MPJPE stats, grouped by something (sequence most of the time).
"""
PCK_THRESHOLD = 150
def __init__(self, data_3d_mm, joint_set, post_process3d=None, csv=None, prefix='val'):
"""
:param data_3d_mm: dict, group_name-> ndarray(n.Poses, nJoints, 3). The ground truth poses in mm.
"""
self.csv = csv
self.prefix = prefix
self.pctiles = [5, 10, 50, 90, 95, 99]
if self.csv is not None:
with open(csv, 'w') as f:
f.write('epoch,type,name,avg')
f.write(''.join([',pct' + str(x) for x in self.pctiles]))
f.write('\n')
self.data_3d_mm = data_3d_mm
self.is_absolute = _sample_value(self.data_3d_mm).shape[1] == joint_set.NUM_JOINTS
self.num_joints = joint_set.NUM_JOINTS if self.is_absolute else joint_set.NUM_JOINTS - 1
self.joint_set = joint_set
self.post_process3d = post_process3d
self.sequences = sorted(list(data_3d_mm.keys()))
def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles = self.eval(model, verbose=True)
if self.csv is not None:
joint_names = self.joint_set.NAMES.copy()
if not self.is_absolute:
joint_names = np.delete(joint_names, self.joint_set.index_of('hip')) # remove root
with open(self.csv, 'a') as f:
for seq in self.sequences:
f.write('%d,%s,%s,%f' % (epoch, 'sequence', seq, sequence_mpjpes[seq]))
for i in range(len(self.pctiles)):
f.write(',%f' % sequence_pctiles[seq][i])
f.write('\n')
for joint_id in range(self.num_joints):
f.write('%d,%s,%s,%f' % (epoch, 'joint', joint_names[joint_id], joint_means[joint_id]))
for i in range(len(self.pctiles)):
f.write(',%f' % joint_pctiles[i, joint_id])
f.write('\n')
def eval(self, model=None, calculate_scale_free=False, verbose=False):
"""
        :param model: the evaluator can use this model if self.model is not provided
        :param calculate_scale_free: if True, also calculates N-MRPE and N-MPJPE
        :return: (sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles)
"""
losses, preds = self.pred_and_calc_loss(model)
losses = np.concatenate([losses[seq] for seq in self.sequences])
self.val_loss = np.nanmean(losses)
self.losses_to_log = {self.prefix + '_loss': self.val_loss}
self.losses = losses
self.preds = preds
# Assuming hip is the last component
if self.is_absolute:
self.losses_to_log[self.prefix + '_abs_loss'] = np.nanmean(losses[:, -3:])
self.losses_to_log[self.prefix + '_rel_loss'] = np.nanmean(losses[:, :-3])
else:
self.losses_to_log[self.prefix + '_rel_loss'] = self.val_loss
assert self.pctiles[-1] == 99, "Currently the last percentile is hardcoded to be 99 for printing"
sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles = \
eval_results(preds, self.data_3d_mm, self.joint_set, pctiles=self.pctiles, verbose=verbose)
self.losses_to_log[self.prefix + '_mrpe'] = np.mean([mrpe(preds[s], self.data_3d_mm[s], self.joint_set)
for s in preds])
# Calculate relative error
if self.is_absolute:
rel_pred = {}
rel_gt = {}
for seq in preds:
rel_pred[seq] = remove_root(preds[seq], self.joint_set.index_of('hip'))
rel_gt[seq] = remove_root(self.data_3d_mm[seq], self.joint_set.index_of('hip'))
rel_mean_error, _, _, _, _ = eval_results(rel_pred, rel_gt, self.joint_set, verbose=False)
rel_mean_error = np.mean(np.asarray(list(rel_mean_error.values()), dtype=np.float32))
if verbose:
print("Root relative error (MPJPE): %.2f" % rel_mean_error)
self.rel_mean_error = rel_mean_error
self.losses_to_log[self.prefix + '_rel_error'] = rel_mean_error
self.mean_sequence_mpjpe = np.mean(np.asarray(list(sequence_mpjpes.values()), dtype=np.float32))
self.mean_sequence_pck = np.mean(np.asarray(list(sequence_pcks.values()), dtype=np.float32))
self.losses_to_log[self.prefix + '_err'] = self.mean_sequence_mpjpe
self.losses_to_log[self.prefix + '_pck'] = self.mean_sequence_pck
if calculate_scale_free:
scaled_preds = {}
for seq in preds:
# predict a single scale for the full video
pred_points = preds[seq].reshape(1, -1, 3)
gt_points = self.data_3d_mm[seq].reshape(1, -1, 3)
s = optimal_scaling(pred_points, gt_points)
scaled_preds[seq] = preds[seq] * s
n_mrpe = np.mean([mrpe(scaled_preds[s], self.data_3d_mm[s], self.joint_set) for s in scaled_preds])
n_rmpjpe = np.mean([r_mpjpe(scaled_preds[s], self.data_3d_mm[s], self.joint_set) for s in scaled_preds])
if verbose:
print('N-MRPE: %.1f' % n_mrpe)
print('N-MPJPE: %.1f' % n_rmpjpe)
self.losses_to_log[self.prefix + '_n_mrpe'] = n_mrpe
self.losses_to_log[self.prefix + '_n_rel_err'] = n_rmpjpe
return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
def results_and_gt(self):
"""
Returns the gt and result matrices as list of (seq, pred, gt) tuples
"""
keys = sorted(list(self.data_3d_mm.keys()))
return [(seq, self.preds[seq], self.data_3d_mm[seq]) for seq in keys]
def pred_and_calc_loss(self, model):
"""
Subclasses must implement this method. It calculates the loss
and the predictions of the current model.
:param model: model received in the on_epoch_end callback
:return: (loss, pred) pair, each is a dictionary from sequence name to loss or prediction
"""
raise NotImplementedError()
class TemporalTestEvaluator(BaseMPJPECalculator):
""" Can be used with MPII-3DHP dataset to create"""
def __init__(self, model, dataset, loss, augment, post_process3d=None, prefix='test'):
self.model = model
self.dataset = dataset
self.augment = augment
pad = (model.receptive_field() - 1) // 2
self.generator = UnchunkedGenerator(dataset, pad, self.augment)
self.seqs = sorted(np.unique(dataset.index.seq))
data_3d_mm = {}
self.preprocessed3d = {}
for seq in self.seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
self.preprocessed3d[seq] = batch['pose3d'][batch['valid_pose']]
data_3d_mm[seq] = dataset.poses3d[inds][batch['valid_pose']]
if loss == 'l1' or loss == 'l1_nan':
self.loss = lambda p, t: np.abs(p - t)
elif loss == 'l2':
self.loss = lambda p, t: np.square(p - t)
super().__init__(data_3d_mm, dataset.pose3d_jointset, post_process3d=post_process3d, csv=None, prefix=prefix)
def pred_and_calc_loss(self, model):
"""
        Calculates the per-sequence losses and predictions of the current model.
        :param model: model received in the on_epoch_end callback (unused here, self.model is used instead)
:return: (loss, pred) pair, each is a dictionary from sequence name to loss or prediction
"""
preds = {}
self.raw_preds = {}
losses = {}
with torch.no_grad():
for i, (pose2d, valid) in enumerate(self.generator):
seq = self.seqs[i]
pred3d = self.model(torch.from_numpy(pose2d).cuda()).detach().cpu().numpy()
self.raw_preds[seq] = pred3d.copy()
valid = valid[0]
losses[seq] = self.loss(pred3d[0][valid], self.preprocessed3d[seq])
pred_real_pose = self.post_process3d(pred3d[0], seq) # unnormalized output
if self.augment:
pred_real_pose_aug = self.post_process3d(pred3d[1], seq)
pred_real_pose_aug[:, :, 0] *= -1
pred_real_pose_aug = self.dataset.pose3d_jointset.flip(pred_real_pose_aug)
pred_real_pose = (pred_real_pose + pred_real_pose_aug) / 2
preds[seq] = pred_real_pose[valid]
return losses, preds
class TemporalMupotsEvaluator(TemporalTestEvaluator):
""" Can be used with PersonStackedMupots dataset for a temporal model. """
def __init__(self, model, dataset, loss, augment, post_process3d=None, prefix='test'):
super().__init__(model, dataset, loss, augment, post_process3d=post_process3d, prefix=prefix)
self.data_3d_mm = TemporalMupotsEvaluator._group_by_seq(self.data_3d_mm)
self.sequences = sorted(self.data_3d_mm.keys())
@staticmethod
def _group_by_seq(data):
per_person_keys = sorted(data.keys())
result = {}
for seq in range(1, 21):
keys = sorted([k for k in per_person_keys if k.startswith('%d/' % seq)])
assert len(keys) > 0, per_person_keys
result[seq] = np.concatenate([data[k] for k in keys])
return result
def pred_and_calc_loss(self, model):
losses, preds = super().pred_and_calc_loss(model)
losses = TemporalMupotsEvaluator._group_by_seq(losses)
preds = TemporalMupotsEvaluator._group_by_seq(preds)
return losses, preds
class ModelCopyTemporalEvaluator(TemporalTestEvaluator):
"""
    Same as TemporalTestEvaluator but uses a separate model instance for evaluation;
    before each evaluation the training model's weights are copied into the 'eval' model.
"""
def pred_and_calc_loss(self, train_model):
""" train_model is coming from the training loop """
self.model.load_state_dict(train_model.state_dict())
self.model.eval()
return super().pred_and_calc_loss(None)
def preds_from_logger(dataset, logger):
"""
Arranges results from LogAllMillimeterCallback according to index in dataset
"""
# Special handling for multipose inputs
if dataset.poses3d.ndim == 4:
pose_shape = list(logger.data_3d_mm.values())[0].shape
result = np.zeros((dataset.poses3d.shape[:2]) + pose_shape[1:])
result[:] = np.nan
seqs = np.unique(dataset.index.seq)
for seq in seqs:
inds = dataset.index.seq == seq
mask = np.zeros(result.shape[:2], dtype='bool')
assert np.all(~mask)
mask[inds] = dataset.good_poses[inds] # composing masks
result[mask] = logger.preds[seq]
return result
elif dataset.poses3d.ndim == 3:
pose_shape = list(logger.data_3d_mm.values())[0].shape
result = np.zeros((len(dataset.index),) + pose_shape[1:])
seqs = np.unique(dataset.index.seq)
for seq in seqs:
inds = dataset.index.seq == seq
mask = np.zeros(len(result), dtype='bool')
mask[inds] = dataset.good_poses[inds] # composing masks
result[mask] = logger.preds[seq]
return result
else:
raise Exception("unexpected shape")
class ModelSaver(BaseCallback):
"""
    Saves the model at the end of every epoch.
    A '%d' in the path is replaced by the epoch number.
"""
def __init__(self, path):
self.path = path
def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
path = self.path
if '%d' in path:
path = path % epoch
torch.save(model.state_dict(), path)
class BestModelSaver(BaseCallback):
"""
Saves the best model according to a given metric.
Useful together with early stopping.
"""
def __init__(self, path, evaluator, metric, lower_better=True):
assert lower_better, "lower_better=False not implemented yet"
self.path = path
self.evaluator = evaluator
self.metric = metric
self.best_value = math.inf
def on_epoch_end(self, model, epoch, epoch_loss, optimizer, epoch_vals):
path = self.path
if '%d' in path:
path = path % epoch
if self.evaluator.losses_to_log[self.metric] < self.best_value:
self.best_value = self.evaluator.losses_to_log[self.metric]
torch.save(model.state_dict(), path)
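# Illustrative wiring of the saver callbacks; the paths and the metric name below
# are assumptions (the metric must be a key of evaluator.losses_to_log).
def _demo_savers(evaluator):
    return [
        ModelSaver('checkpoints/model_epoch_%d.pkl'),  # one snapshot per epoch
        BestModelSaver('checkpoints/best.pkl', evaluator,
                       metric='test_rel_error', lower_better=True),
    ]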
| 13,217 | 38.57485 | 117 | py |
pose_refinement | pose_refinement-master/src/training/torch_tools.py | import numpy as np
from torch.utils.data import DataLoader, TensorDataset
from itertools import zip_longest, chain
import torch
from util.misc import assert_shape
from inspect import signature
import time
from torch import optim
from util.pose import mrpe
def exp_decay(params):
def f(epoch):
return params.learning_rate * (0.96 ** (epoch * 0.243))
return f
def dataset2numpy(dataset, fields):
"""
Converts a PyTorch Dataset to a numpy array.
Parameters:
fields: list of fields to return from the full dataset.
"""
loader = DataLoader(dataset, batch_size=len(dataset) // 8, num_workers=8)
parts = []
for l in loader:
parts.append(l)
return [np.concatenate([p[f].numpy() for p in parts], axis=0) for f in fields]
def torch_predict(model, input, batch_size=None, device='cuda'):
"""
    :param model: PyTorch model (nn.Module)
    :param input: a numpy array, a torch.Tensor or a PyTorch DataLoader
    :param batch_size: if input was an array/tensor, this is the batch size used for evaluation
    :return: the model's predictions as a numpy array (a list of arrays for multi-output models)
"""
model.eval()
if isinstance(input, np.ndarray):
data_loader = DataLoader(TensorDataset(torch.from_numpy(input).to(device)), batch_size)
needs_move = False
elif isinstance(input, torch.Tensor):
data_loader = DataLoader(TensorDataset(input.to(device)), batch_size)
needs_move = False
else:
data_loader = input
needs_move = True
result = []
with torch.no_grad():
for batch in data_loader:
if needs_move:
if isinstance(batch, (list, tuple, map)):
batch = map(lambda x: x.to(device), batch)
elif isinstance(batch, dict):
batch = {k: v.to(device) for k, v in batch.items()}
else:
batch = batch.to(device)
if isinstance(batch, (list, tuple, map)):
pred = model(*batch)
elif isinstance(batch, dict):
pred = model(**batch)
else:
pred = model(batch)
if isinstance(pred, (list, tuple, map)):
result.append([x.cpu().numpy() for x in pred])
else:
result.append(pred.cpu().numpy())
del pred
if isinstance(result[0], list):
out = []
for i in range(len(result[0])):
out.append(np.concatenate([x[i] for x in result]))
result = out
else:
result = np.concatenate(result)
return result
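# Illustrative usage example with a toy model; the shapes are assumptions.
def _demo_torch_predict():
    model = torch.nn.Linear(10, 3)
    inputs = np.random.randn(100, 10).astype(np.float32)
    preds = torch_predict(model, inputs, batch_size=32, device='cpu')
    assert preds.shape == (100, 3)
    return preds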
def torch_eval(model, loader, loss_fn, input_name, target_name, device='cuda'):
"""
Evaluates a PyTorch model.
:param model: PyTorch Model(nn.Module)
:param loader: a PyTorch DataLoader producing input batches
    :param loss_fn: a function or a dictionary of functions; the metrics to evaluate. Each function
        should return a single scalar torch tensor and can take 3 parameters, the third being optional:
        the first is the model's prediction, the second is the target variable, and the third is the
        full batch used in the eval iteration.
        The returned losses are expected to be averaged over the batch.
    :param input_name: name of input fields passed to the model (a single name or a list of names)
    :return: a dictionary of averaged metrics, or a single scalar if loss_fn was a plain function
"""
assert isinstance(loader, DataLoader)
model.eval()
loss_was_func = False
if not isinstance(loss_fn, dict):
loss_fn = {'loss': loss_fn}
loss_was_func = True
if not isinstance(input_name, (list, tuple)):
input_name = [input_name]
metrics = {}
num_args = {}
for name, func in loss_fn.items():
metrics[name] = 0
num_args[name] = len(signature(func).parameters)
total_cnt = 0
with torch.no_grad():
for batch in loader:
# batch = list(map(lambda x: x.to(device), batch))
batch = {k: v.to(device) for k, v in batch.items()}
pred = model(*[batch[x] for x in input_name])
for name, loss_func in loss_fn.items():
if num_args[name] == 2:
loss = loss_func(pred, batch[target_name])
else:
loss = loss_func(pred, batch[target_name], batch)
metrics[name] += loss.item() * len(batch[input_name[0]])
total_cnt += len(batch[input_name[0]])
for name in loss_fn.keys():
metrics[name] /= total_cnt
if loss_was_func:
return metrics['loss']
else:
return metrics
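# Illustrative usage sketch: the loader is assumed to yield dict batches with
# 'pose2d'/'pose3d'/'valid_pose' fields, mirroring the datasets in this repo.
def _demo_torch_eval(model, loader):
    metrics = torch_eval(
        model, loader,
        loss_fn={
            'l1': lambda pred, target: torch.mean(torch.abs(pred - target)),
            # a third (optional) parameter receives the whole batch dict:
            'masked_l1': lambda pred, target, batch: torch.mean(
                torch.abs(pred - target) * batch['valid_pose'][:, None]),
        },
        input_name='pose2d', target_name='pose3d', device='cpu')
    return metrics  # e.g. {'l1': ..., 'masked_l1': ...}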
def get_optimizer(parameters, config):
if config['optimiser'] == "adam":
return optim.Adam(parameters, lr=config['learning_rate'], amsgrad=config['adam_amsgrad'])
elif config['optimiser'] == "rmsprop":
return optim.RMSprop(parameters, lr=config['learning_rate'])
elif config['optimiser'] == "sgd":
return optim.SGD(parameters, lr=config['learning_rate'], momentum=config['sgd_momentum'])
elif config['optimiser'] == "radam":
        # NOTE: assumes torch.optim.RAdam (available from PyTorch 1.10). The original code
        # likely relied on a separate RAdam implementation that is not imported in this file.
        return optim.RAdam(parameters, lr=config['learning_rate'])
else:
raise Exception('Unimplemented optimiser: ' + config['optimiser'])
def _get_scheduler(optimizer, config):
""" Decodes a scheduler config. Returns none if no schedulers were specified """
if config is None or config['type'] == 'none':
return None
# scheduler = None
# assert not _config['weight_decay'] or not _config['lr_div_10'], "weight decay and stepwise lr can't be turned on at the same time"
if config['type'] == 'martinez_weight_decay':
return optim.lr_scheduler.LambdaLR(optimizer, lambda x: (0.96 ** (x * 0.243)))
elif config['type'] == 'multiplicative':
return optim.lr_scheduler.StepLR(optimizer, step_size=config['step_size'], gamma=config['multiplier'])
elif config['type'] == 'lr_div_10_wd': # exponential decay + division by ten at certain epochs
def lr_fn(x):
scale = config['lr_div_10_scale']
base = (0.96 ** (x * 0.243))
if x >= 80:
factor = scale * scale
elif x >= 40:
factor = scale
else:
factor = 1
return factor * base
return optim.lr_scheduler.LambdaLR(optimizer, lr_fn)
else:
raise NotImplementedError("Unknown scheduler type: ", config['type'])
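# Illustrative scheduler configs accepted above (values are assumptions):
#   {'type': 'none'}
#   {'type': 'martinez_weight_decay'}
#   {'type': 'multiplicative', 'step_size': 30, 'multiplier': 0.1}
#   {'type': 'lr_div_10_wd', 'lr_div_10_scale': 0.1}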
def torch_train(train_loader, model, update_fn, _config, callbacks=[]):
"""
Trains a model.
:param train_loader: training data is loaded from here, PyTorch DataLoader
:param model: PyTorch model to train
    :param update_fn: called on every iteration with (model, batch); must return (loss, dict_of_logged_values)
:param _config: Sacred config object
:param callbacks: optional callbacks for training
:return:
"""
optimizer = get_optimizer(model.parameters(), _config)
scheduler = _get_scheduler(optimizer, _config['lr_scheduler'])
if not isinstance(callbacks, list):
callbacks = [callbacks]
epoch_len = _config['num_epochs']
iter_cnt = 0
for epoch in range(epoch_len): # loop over the dataset multiple times
model.train()
epoch_loss = 0
epoch_val = {}
epoch_start = time.time()
iter_start = time.time()
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# zero the parameter gradients
optimizer.zero_grad()
batch_start = time.time()
loss, vals = update_fn(model, data)
loss.backward()
optimizer.step()
batch_time = time.time() - batch_start
# print statistics
running_loss += loss.item()
epoch_loss += loss.item()
for k, v in vals.items():
epoch_val[k] = epoch_val.get(k, 0) + v
del loss # free up memory
if (i + 1) % 50 == 0: # print every 50 mini-batches
iter_time = (time.time() - iter_start) / 50
print('\r[%d, %5d] loss: %.3f b=%4dms i=%dms' % (epoch + 1, i + 1, running_loss / 50,
int(batch_time * 1000), int(iter_time * 1000)), end='')
for c in callbacks:
c.on_itergroup_end(iter_cnt, running_loss / 50)
running_loss = 0.0
iter_start = time.time()
iter_cnt += 1
if _config.get('SHORT_EPOCH', False):
if i > 600:
break
print("Iterations done:", i)
if scheduler is not None:
scheduler.step()
epoch_time = time.time() - epoch_start
epoch_loss = epoch_loss / len(train_loader)
epoch_val = {k: v / len(train_loader) for k, v in epoch_val.items()}
print()
print("Epoch %3d: loss: %4.3f %4.1fs" % (epoch + 1, epoch_loss, epoch_time))
# evaluate
model.eval()
for c in callbacks:
c.on_epoch_end(model, epoch, epoch_loss, optimizer, epoch_val)
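# Illustrative minimal wiring for torch_train; the config keys mirror those read by
# get_optimizer/_get_scheduler/torch_train above, the values and field names are assumptions.
def _demo_torch_train(model, train_loader):
    config = {
        'optimiser': 'adam',
        'learning_rate': 1e-3,
        'adam_amsgrad': False,
        'lr_scheduler': {'type': 'none'},
        'num_epochs': 2,
    }

    def update_fn(model, batch):
        # update_fn must return (loss, dict of scalars to accumulate per epoch)
        pred = model(batch['pose2d'].cuda())
        loss = torch.nn.functional.l1_loss(pred, batch['pose3d'].cuda())
        return loss, {'l1': loss.item()}

    torch_train(train_loader, model, update_fn, config, callbacks=[])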
def set_requires_grad(module, requires_grad):
""" Helper function to set requires_grad on all parameters of the model. """
for param in module.parameters():
param.requires_grad = requires_grad
def eval_results(pred3d, gt3d, joint_set, verbose=True, pck_threshold=150, pctiles=[99]):
"""
Evaluates the results by printing various statistics. Also returns those results.
Poses can be represented either in hipless 16 joints or 17 joints with hip format.
Order is MuPo-TS order in all cases.
Parameters:
pred3d: dictionary of predictions in mm, seqname -> (nSample, [16|17], 3)
gt3d: dictionary of ground truth in mm, seqname -> (nSample, [16|17], 3)
joint_set; JointSet instance describing the order of joints
verbose: if True, a table of the results is printed
pctiles: list of percentiles of the errors to calculate
Returns:
sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
"""
has_hip = list(pred3d.values())[0].shape[1] == joint_set.NUM_JOINTS # whether it contains the hip or not
sequence_mpjpes = {}
sequence_pcks = {}
sequence_pctiles = {}
all_errs = []
for k in sorted(pred3d.keys()):
pred = pred3d[k]
gt = gt3d[k]
assert pred.shape == gt.shape, "Pred shape:%s, gt shape:%s" % (pred.shape, gt.shape)
assert (not has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS - 1, 3)) or \
(has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS, 3)), \
"Unexpected shape:" + str(pred.shape)
errs = np.linalg.norm(pred - gt, axis=2, ord=2) # (nSample, nJoints)
sequence_pctiles[k] = np.nanpercentile(errs, pctiles)
sequence_pcks[k] = np.nanmean((errs < pck_threshold).astype(np.float64))
sequence_mpjpes[k] = np.nanmean(errs)
# Adjusting results for missing hip
if not has_hip:
N = float(joint_set.NUM_JOINTS)
sequence_pcks[k] = sequence_pcks[k] * ((N - 1) / N) + 1. / N
sequence_mpjpes[k] = sequence_mpjpes[k] * ((N - 1) / N)
all_errs.append(errs)
all_errs = np.concatenate(all_errs) # errors per joint, (nPoses, nJoints)
joint_mpjpes = np.nanmean(all_errs, axis=0)
joint_pctiles = np.nanpercentile(all_errs, pctiles, axis=0)
num_joints = joint_set.NUM_JOINTS if has_hip else joint_set.NUM_JOINTS - 1
assert_shape(all_errs, (None, num_joints))
assert_shape(joint_mpjpes, (num_joints,))
assert_shape(joint_pctiles, (len(pctiles), num_joints))
if verbose:
joint_names = joint_set.NAMES.copy()
if not has_hip:
joint_names = np.delete(joint_names, joint_set.index_of('hip')) # remove root
# Index of the percentile that will be printed. If 99 is calculated it is selected,
# otherwise the last one
pctile_ind = len(pctiles) - 1
if 99 in pctiles:
pctile_ind = pctiles.index(99)
print("----- Per sequence and joint errors in millimeter on the validation set ----- ")
print("%s %6s %5s %6s \t %22s %6s %6s" % ('Sequence', 'Avg', 'PCK', str(pctiles[pctile_ind]) + '%', '',
'Avg', str(pctiles[pctile_ind]) + '%'))
for seq, joint_id in zip_longest(sorted(pred3d.keys()), range(num_joints)):
if seq is not None:
seq_str = "%-8s: %6.2f mm %4.1f%% %6.2f mm\t " \
% (str(seq), sequence_mpjpes[seq], sequence_pcks[seq] * 100, sequence_pctiles[seq][pctile_ind])
else:
seq_str = " " * 49
if joint_id is not None:
print('%s%15s (#%2d): %6.2f mm %6.2f mm ' % (seq_str, joint_names[joint_id], joint_id,
joint_mpjpes[joint_id], joint_pctiles[pctile_ind, joint_id]))
else:
print(seq_str)
mean_sequence_err = np.mean(np.asarray(list(sequence_mpjpes.values()), dtype=np.float32))
print("\nMean sequence error (Absolute MPJPE) is %6.2f mm" % mean_sequence_err)
print("---------------------------------------------------------------- ")
print("MRPE: %.1f" % np.mean([mrpe(pred3d[k], gt3d[k], joint_set) for k in gt3d.keys()]))
return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_mpjpes, joint_pctiles
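# Illustrative usage example with synthetic poses in millimeters; MuPoTSJoints is
# assumed to be importable from databases.joint_sets as elsewhere in this repo.
def _demo_eval_results():
    from databases.joint_sets import MuPoTSJoints
    joint_set = MuPoTSJoints()
    gt = {'TS1': np.random.rand(50, joint_set.NUM_JOINTS, 3) * 1000}
    pred = {'TS1': gt['TS1'] + np.random.randn(50, joint_set.NUM_JOINTS, 3) * 30}
    return eval_results(pred, gt, joint_set, verbose=True, pctiles=[50, 99])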
| 13,551 | 35.926431 | 136 | py |
pose_refinement | pose_refinement-master/src/training/preprocess.py | import numpy as np
import torch
from databases.datasets import PoseDataset
from databases.joint_sets import Common14Joints, CocoExJoints, MuPoTSJoints
from util.misc import assert_shape, load
from util.pose import remove_root, remove_root_keepscore, combine_pose_and_trans
def preprocess_2d(data, fx, cx, fy, cy, joint_set, root_name):
"""
2D data preprocessing, performing the following:
1. Keeps only COMMON14 joints
2. Normalizes coordinates by multiplying with the inverse of the calibration matrix
    3. Converts coordinates into a root-relative form
    4. Invisible joints are replaced by a single value
    5. Converts the data to float
:param data: (nPoses, 25, 3[x, y, scores]) - OpenPose detected coordinates
:param fx: ndarray(nPoses) or float, horizontal focal length
:param cx: ndarray(nPoses) or float, horizontal principal point
:param fy: ndarray(nPoses) or float, vertical focal length
    :param cy: ndarray(nPoses) or float, vertical principal point
:param joint_set: the JointSet object describing the order of joints
:param root_name: name of the root joint, must be a COMMON14 joint
    :return: ndarray(nPoses, 42); the first 39 numbers are the 13 non-root joints as (x, y, score) triplets, the last three are the root
"""
    # return data  # short-circuit for profiling; the rest of this function takes ~60 ms
assert_shape(data, ("*", None, joint_set.NUM_JOINTS, 3))
assert not isinstance(fx, np.ndarray) or len(fx) == len(data)
assert not isinstance(fy, np.ndarray) or len(fy) == len(data)
# negligible
if isinstance(fx, np.ndarray):
N = len(data)
shape = [1] * (data.ndim - 1)
shape[0] = N
fx = fx.reshape(shape)
fy = fy.reshape(shape)
cx = cx.reshape(shape)
cy = cy.reshape(shape)
data = data[..., joint_set.TO_COMMON14, :]
# This is 100ms
data[..., :, 0] -= cx
data[..., :, 1] -= cy
data[..., :, 0] /= fx
data[..., :, 1] /= fy
root_ind = np.where(Common14Joints.NAMES == root_name)[0][0]
root2d = data[..., root_ind, :].copy() # negligible
# 70ms
data = remove_root_keepscore(data, root_ind) # (nPoses, 13, 3), modifies data
# negligible
bad_frames = data[..., 2] < 0.1
    # Replace joints having low scores with the constant -1700/focal_length;
    # using a value independent of cx/cy prevents leaking the principal point.
# this is 140ms
if isinstance(fx, np.ndarray):
fx = np.tile(fx, (1,) + data.shape[1:-1])
fy = np.tile(fy, (1,) + data.shape[1:-1])
data[bad_frames, 0] = -1700 / fx[bad_frames]
data[bad_frames, 1] = -1700 / fy[bad_frames]
else:
data[bad_frames, 0] = -1700 / fx
data[bad_frames, 1] = -1700 / fy
# stack root next to the pose
data = data.reshape(data.shape[:-2] + (-1,)) # (nPoses, 13*3)
# negligible/70ms
data = np.concatenate([data, root2d], axis=-1) # (nPoses, 14*3)
return data
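# A minimal sketch of preprocess_2d on synthetic data (values illustrative, not
# from the repo); it assumes CocoEx-style detections with 19 joints, which is
# what the hrnet pipeline produces:
#
#   detections = np.random.rand(100, 19, 3).astype('float32')  # (nPoses, 19, [x, y, score])
#   flat = preprocess_2d(detections, fx=1500.0, cx=1024.0, fy=1500.0, cy=768.0,
#                        joint_set=CocoExJoints(), root_name='hip')
#   # flat.shape == (100, 42): 13 root-relative joints as (x, y, score) plus the root triplet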
def preprocess_3d(data, add_root, log_root_z, joint_set, root_name):
"""
3D preprocessing:
1. Removes the root joint
    2. If add_root is True, appends the absolute root joint at the end of the pose;
       if log_root_z is also True, the logarithm of the root's z coordinate is stored.
3. Flattens the data.
:param data: ndarray(nFrames, [nPoses], nJoints, 3[x, y, z]) 3D coordinates in MuPoTS order
:param add_root: True if the absolute coordinates of the hip should be included in the output
    :param log_root_z: if True, the log of the root's z coordinate is used
    :param joint_set: the JointSet object describing the order of joints
    :param root_name: name of the root joint, must be a MuPoTS joint
:return: ndarray(nPoses, 3*nJoints|3*(nJoints-1)), 3*nJoints if add_root is true otherwise 3*(nJoints-1)
"""
assert_shape(data, ("*", joint_set.NUM_JOINTS, 3))
root_ind = joint_set.index_of(root_name)
root3d = data[..., root_ind, :].copy()
if log_root_z:
root3d[..., 2] = np.log(root3d[..., 2])
data = remove_root(data, root_ind) # (nFrames, [nPoses], nJoints-1, 3)
data = data.reshape(data.shape[:-2] + (-1,)) # (nFrames, [nPoses], (nJoints-1)*3)
if add_root:
data = np.concatenate([data, root3d], axis=-1) # (nFrames, [nPoses], nJoints*3)
return data.astype('float32')
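# Illustrative sketch (synthetic values): a single MuPoTS-format pose with every
# joint at z = 2 m is split into a hip-relative part plus the absolute root with
# a log-scaled depth:
#
#   pose = np.zeros((1, MuPoTSJoints.NUM_JOINTS, 3), dtype='float32')
#   pose[..., 2] = 2000.0  # z = 2000 mm for all joints
#   flat = preprocess_3d(pose, add_root=True, log_root_z=True,
#                        joint_set=MuPoTSJoints(), root_name='hip')
#   # flat.shape == (1, 17 * 3); the last three values are (0, 0, log(2000))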
class RemoveIndex(object):
"""
    Deletes the 'index' field from the data item, useful for cleaning up before batching.
"""
def __call__(self, sample):
sample.pop('index', None)
return sample
@staticmethod
def from_state(state, dataset):
return RemoveIndex()
class ToTensor(object):
""" Converts ndarrays in sample to pytorch tensors. Expects dicts as inputs. """
def __call__(self, sample):
return {k: torch.from_numpy(v) if isinstance(v, np.ndarray) else torch.tensor(v) for k, v in sample.items()}
class Identity(object):
"""
Does nothing.
"""
def __init__(self, dataset=None):
self.mean = 0
self.std = 1
@staticmethod
def from_file(path):
return Identity()
@staticmethod
def from_state(path):
return Identity()
def state_dict(self):
return {}
def __call__(self, sample):
return sample
class BaseNormalizer(object):
"""
Baseclass for preprocessors that normalize a field.
Subclasses must set the field_name field by themselves, outside the constructor.
They must also have the constructor to accept a single 'None' argument, that does
not preload the parameters.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
@classmethod
def from_file(cls, path):
state = load(path)
return cls.from_state(state)
@classmethod
def from_state(cls, state):
"""
        State is a dict (loaded from a pkl file) that contains the mean and std.
"""
instance = cls(None)
instance.mean = state['mean']
instance.std = state['std']
return instance
def state_dict(self):
return {'mean': self.mean, 'std': self.std, 'field_name': self.field_name}
def __call__(self, sample):
sample[self.field_name] = (sample[self.field_name] - self.mean) / self.std
return sample
class MeanNormalize2D(BaseNormalizer):
"""
    Normalizes the input 2D pose with mean and std.
"""
def __init__(self, dataset):
"""
Parameters:
        dataset: either a numpy array containing the 2D poses or a dataset object exposing a poses2d attribute
"""
self.field_name = 'pose2d'
if dataset is None:
# mean and std must be set manually later
return
if not isinstance(dataset, np.ndarray):
dataset = dataset.poses2d
assert isinstance(dataset, np.ndarray), "Expected dataset to be either a PanopticSinglePersonDataset or a numpy array, got:" + str(
type(dataset))
# data = dataset.reshape((len(dataset), -1))
# data = dataset.reshape((-1, dataset.shape[-1]))
data = dataset
super().__init__(np.nanmean(data, axis=0), np.nanstd(data, axis=0))
class MeanNormalize3D(BaseNormalizer):
"""
Normalizes the input 3D pose with mean and std.
"""
def __init__(self, dataset):
"""
Parameters:
        dataset: either a numpy array containing the 3D poses or a PoseDataset
"""
self.field_name = 'pose3d'
if dataset is None:
# mean and std must be set manually later
return
if isinstance(dataset, PoseDataset):
dataset = dataset.poses3d
assert isinstance(dataset, np.ndarray), "Expected dataset to be either a PanopticSinglePersonDataset or a numpy array"
# data = dataset.reshape((len(dataset), -1))
data = dataset
super().__init__(np.nanmean(data, axis=0), np.nanstd(data, axis=0))
class SplitToRelativeAbsAndMeanNormalize3D(object):
"""
    Splits the 3D poses into a relative + absolute representation, then normalizes them.
    It uses the same preprocessing mechanics as the Depthpose paper.
"""
def __init__(self, dataset, normalizer=None, cache=False, log_root_z=True):
"""
:param dataset: The full dataset, required if no normalizer is provided or ``cache`` is True.
:param normalizer: the Normalizer object to be applied on the preprocessed data. If None,
the normalizer parameters are calculated from the dataset.
        :param cache: If True, preprocessed values are saved and not recalculated every time during training.
                      Potentially speeds up training.
"""
if cache or normalizer is None:
assert dataset is not None, "dataset must be defined if cache==true or no normalizer provided"
self.cache = cache
self.log_root_z = log_root_z
if dataset is not None:
self.joint_set = dataset.pose3d_jointset
preprocessed3d = preprocess_3d(dataset.poses3d, True, log_root_z, self.joint_set, 'hip')
if normalizer is None:
normalizer = MeanNormalize3D(preprocessed3d)
if cache:
self.preprocessed3d = (preprocessed3d - normalizer.mean) / normalizer.std
assert isinstance(normalizer, MeanNormalize3D), \
"Unexpected normalizer type: " + str(type(normalizer))
self.normalizer = normalizer
@classmethod
def from_file(cls, path, dataset):
state = load(path)
return cls.from_state(state, dataset)
@classmethod
def from_state(cls, state, dataset):
"""
        State is a dict (as produced by ``state_dict``) that contains the mean and std.
"""
instance = cls(dataset, MeanNormalize3D.from_state(state), cache=False)
if dataset is None:
set_name = state['joint_set']
if "<class '" in set_name: # fixing incorrectly formatted type name
set_name = set_name[set_name.rindex('.') + 1:-2]
instance.joint_set = globals()[set_name]()
return instance
def state_dict(self):
state = self.normalizer.state_dict()
state['joint_set'] = type(self.joint_set).__name__
return state
def __call__(self, sample):
# Note: this algorithm makes iterating over all examples 9s slower, seems acceptable
# pose3d = sample['pose3d'] # shape is (, nJoints*3)
# preprocessed = preprocess_3d(pose3d.reshape((self.num_joints, 3)), True, PanopticJoints(), 'hip')
if self.cache:
preprocessed = self.preprocessed3d[sample['index']]
sample['pose3d'] = preprocessed
else:
pose3d = sample['pose3d'] # shape is ([nPoses],nJoints, 3)
preprocessed = preprocess_3d(pose3d, True, self.log_root_z, self.joint_set, 'hip')
sample['pose3d'] = preprocessed
sample = self.normalizer(sample)
return sample
class DepthposeNormalize2D(object):
"""
Normalizes the 2D pose using the technique in Depthpose.
"""
def __init__(self, dataset, normalizer=None, cache=False):
"""
:param dataset: The full dataset, required if no normalizer is provided or ``cache`` is True.
:param normalizer: the Normalizer object to be applied on the preprocessed data. If None,
the normalizer parameters are calculated from the dataset.
:param cache: If True, preprocessed values are saved and not calculated every time during training.
                      Potentially speeds up training.
"""
if cache or normalizer is None:
assert dataset is not None, "dataset must be defined if cache==true or no normalizer provided"
self.cache = cache
if dataset is not None:
preprocessed2d = preprocess_2d(dataset.poses2d.copy(), dataset.fx, dataset.cx, dataset.fy, dataset.cy,
dataset.pose2d_jointset, 'hip')
if normalizer is None:
normalizer = MeanNormalize2D(preprocessed2d)
if cache:
self.preprocessed2d = (preprocessed2d - normalizer.mean) / normalizer.std
self.normalizer = normalizer
self.dataset = dataset
assert isinstance(self.normalizer, MeanNormalize2D), \
"Unexpected normalizer type: " + str(type(normalizer))
@classmethod
def from_file(cls, path, dataset):
state = load(path)
return cls.from_state(state, dataset)
@classmethod
def from_state(cls, state, dataset):
instance = cls(dataset, MeanNormalize2D.from_state(state), cache=False)
return instance
def state_dict(self):
return self.normalizer.state_dict()
def __call__(self, sample):
if self.cache:
sample['pose2d'] = self.preprocessed2d[sample['index']]
else:
pose2d = sample['pose2d'] # shape is ([nPoses],nJoints, 3)
single_item = sample['pose2d'].ndim == 2
if single_item:
pose2d = np.expand_dims(pose2d, axis=0)
ind = sample['index']
preprocessed = preprocess_2d(pose2d.copy(), self.dataset.fx[ind], sample['cx'],
self.dataset.fy[ind], self.dataset.cy[ind],
self.dataset.pose2d_jointset, 'hip')
if single_item:
preprocessed = preprocessed[0]
sample['pose2d'] = preprocessed
sample = self.normalizer(sample)
return sample
class SaveableCompose(object):
def __init__(self, transforms):
self.transforms = transforms
@staticmethod
def from_file(path, dataset, locals):
state = load(path)
return SaveableCompose.from_state(state, dataset, locals)
@staticmethod
def from_state(state, dataset, locals):
"""
        State is the list of per-transform dicts produced by ``state_dict``.
"""
transforms = []
for d in state:
if d['name'] == 'function':
t = globals()[d['state']['name']]
elif d['name'] == 'FuncAndNormalizeWrapper':
func = eval(d['state']['func_def'], globals(), locals)
t = FuncAndNormalize.from_state(func, d['state'], dataset)
else:
t = globals()[d['name']].from_state(d['state'], dataset)
transforms.append(t)
return SaveableCompose(transforms)
def state_dict(self):
state = []
for t in self.transforms:
name = type(t).__name__
if name == 'function':
s = {'name': t.__name__}
else:
s = t.state_dict() if hasattr(t, 'state_dict') else None
state.append({'name': name, 'state': s})
return state
def __call__(self, sample):
for t in self.transforms:
sample = t(sample)
return sample
def log_keep_hrnet_c14(data):
return keep_hrnet_c14(np.log(data))
def keep_hrnet_c14(data):
"""
Keeps only COMMON-14 joints from hrnet.
    data - ndarray(..., 19); along the last dimension each slice corresponds to a joint, in CocoEx joint order.
"""
assert_shape(data, ('*', None, CocoExJoints.NUM_JOINTS))
data = data[..., CocoExJoints.TO_COMMON14]
return data
def zero_and_log_hrnet_c14(data):
data = keep_hrnet_c14(np.log(data))
data[np.isnan(data)] = 2
return data
def zero_and_hrnet_c14(data):
data = keep_hrnet_c14(data)
data[np.isnan(data)] = 0
return data
def decode_trfrm(transform_name, locals=None):
"""
Converts a description of a transformation name into an actual transformation.
Parameters:
transform_name: Either the name of a Preprocess class, or as string in form 'FN(<field>, <func>)'.
In the second case a FuncAndNormalize class is created.
locals: dict that contains the defined functions in the current scope. Useful for calling this function
from outside preprocess.py where there are additional functions.
"""
names = dict(globals())
if locals is not None:
names.update(locals)
return names[transform_name]
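# For example, decode_trfrm('SplitToRelativeAbsAndMeanNormalize3D') returns the
# class defined above, which the caller then instantiates with a dataset
# (train_data is an illustrative name):
#
#   trfrm = decode_trfrm('SplitToRelativeAbsAndMeanNormalize3D')(train_data, cache=False)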
def get_postprocessor(config, test_set, normalizer3d):
if config['preprocess_3d'] == 'SplitToRelativeAbsAndMeanNormalize3D':
def f(x, seq):
scale = 1 if isinstance(test_set.pose3d_jointset, MuPoTSJoints) else 1000
return scale * combine_pose_and_trans(x, normalizer3d.std, normalizer3d.mean, test_set.pose3d_jointset, "hip")
return f
else:
raise NotImplementedError('No unconverter for 3D preprocessing: ' + config['preprocess_3d'])
| 16,694 | 33.853862 | 139 | py |
pose_refinement | pose_refinement-master/src/util/pose.py | import numpy as np
from databases.joint_sets import CocoExJoints
from util.misc import assert_shape
def harmonic_mean(a, b, eps=1e-6):
return 2 / (1 / (a + eps) + 1 / (b + eps))
def _combine(data, target, a, b):
"""
    Modifies data in place by writing the average of the joints at indices a and b into position target.
"""
data[:, target, :2] = (data[:, a, :2] + data[:, b, :2]) / 2
data[:, target, 2] = harmonic_mean(data[:, a, 2], data[:, b, 2])
def extend_hrnet_raw(raw):
"""
Adds the hip and neck to a Coco skeleton by averaging left/right hips and shoulders.
The score will be the harmonic mean of the two.
"""
assert_shape(raw, (None, 17, 3))
js = CocoExJoints()
result = np.zeros((len(raw), 19, 3), dtype='float32')
result[:, :17, :] = raw
_combine(result, js.index_of('hip'), js.index_of('left_hip'), js.index_of('right_hip'))
_combine(result, js.index_of('neck'), js.index_of('left_shoulder'), js.index_of('right_shoulder'))
return result
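# The harmonic mean is dominated by the smaller of the two scores, so a single
# low-confidence hip or shoulder detection pulls the synthesized joint's score
# down much more strongly than an arithmetic mean would.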
def insert_zero_joint(data, ind):
""" Adds back a root with zeros in a hip-relative pose.
:param ind: the root will be inserted here
"""
assert data.ndim >= 2
shape = list(data.shape)
shape[-2] += 1
result = np.zeros(shape, dtype=data.dtype)
result[..., :ind, :] = data[..., :ind, :]
result[..., ind + 1:, :] = data[..., ind:, :]
return result
def remove_root(data, root_ind):
"""
Removes a joint from a dataset by moving it to the origin and removing it from the array.
:param data: (..., nJoints, 2|3) array
:param root_ind: index of the joint to be removed
:return: (..., nJoints-1, 2|3) array
"""
assert data.ndim >= 2 and data.shape[-1] in (2, 3)
roots = data[..., [root_ind], :] # (..., 1, [2|3])
data = data - roots
data = np.delete(data, root_ind, axis=-2)
return data
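# A quick numeric sketch: removing joint 0 from a one-pose, two-joint 2D array.
#
#   p = np.array([[[1., 1.], [4., 5.]]])  # (1 pose, 2 joints, xy)
#   remove_root(p, 0)                     # -> array([[[3., 4.]]])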
def remove_root_keepscore(data, root_ind):
"""
    Removes a joint from a 2D dataset by moving it to the origin and removing it from the array.
The difference to remove_root is that the third column stores the confidence score and it is
not changed.
:param data: (nPoses, nJoints, 3[x,y,score]) array
:param root_ind: index of the joint to be removed
:return: (nPoses, nJoints-1, 3[x,y,score]) array
"""
assert data.ndim >= 3 and data.shape[-1] == 3, data.shape
roots = data[..., [root_ind], :2] # ndarray(...,1,2)
# roots = roots.reshape((len(roots), 1, 2))
data[..., :2] = data[..., :2] - roots
data = np.delete(data, root_ind, axis=-2)
return data
def combine_pose_and_trans(data3d, std3d, mean3d, joint_set, root_name, log_root_z=True):
"""
3D result postprocess: unnormalizes data3d and reconstructs the absolute pose from relative + absolute split.
Parameters:
data3d: output of the PyTorch model, ndarray(nPoses, 3*nJoints), in the format created by preprocess3d
std3d: normalization standard deviations
mean3d: normalization means
        joint_set: JointSet instance describing the order of joints
        root_name: name of the root joint
        log_root_z: if True, the z coordinate of the root is stored as its logarithm and is exponentiated back here
Returns:
ndarray(nPoses, nJoints, 3)
"""
assert_shape(data3d, (None, joint_set.NUM_JOINTS * 3))
data3d = data3d * std3d + mean3d
root = data3d[:, -3:]
rel_pose = data3d[:, :-3].reshape((len(data3d), joint_set.NUM_JOINTS - 1, 3))
if log_root_z:
root[:, 2] = np.exp(root[:, 2])
rel_pose += root[:, np.newaxis, :]
result = np.zeros((len(data3d), joint_set.NUM_JOINTS, 3), dtype='float32')
root_ind = joint_set.index_of(root_name)
result[:, :root_ind, :] = rel_pose[:, :root_ind, :]
result[:, root_ind, :] = root
result[:, root_ind + 1:, :] = rel_pose[:, root_ind:, :]
return result
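# combine_pose_and_trans inverts the relative/absolute split performed by
# training.preprocess.preprocess_3d followed by mean/std normalization, so for a
# pose array p and normalizer statistics mean3d/std3d (illustrative names):
#
#   flat = (preprocess_3d(p, True, True, joint_set, 'hip') - mean3d) / std3d
#   combine_pose_and_trans(flat, std3d, mean3d, joint_set, 'hip')  # == p, up to rounding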
def pose_interp(poses, good_frames):
"""
Interpolates invisible poses.
:param poses: (nPoses, nJoints, 3), the joint coordinates
:param good_frames: (nPoses), true if the pose is detected on the frame, false otherwise
    :return: (nPoses, nJoints, 3), the interpolated poses
"""
assert len(poses) == len(good_frames)
assert poses.ndim == 3
poses = poses.copy()
frame_inds = np.arange(len(poses))
for i in range(poses.shape[1]):
for j in range(poses.shape[2]):
# interpolate poses[:,i,j]
poses[~good_frames, i, j] = np.interp(
frame_inds[~good_frames], frame_inds[good_frames], poses[good_frames, i, j])
return poses
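# Illustrative sketch: a coordinate visible on frames 0 and 2 but missing on
# frame 1 is filled in linearly, e.g. the values (0., ?, 4.) become (0., 2., 4.).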
HEIGHT_BONES = [['left_ankle', 'left_knee'], ['left_hip', 'left_knee'], ['hip', 'spine'], ['spine', 'neck']]
def _calc_limb_length(poses, joint_set, bones):
"""
calculates the length of a limb that contains multiple bones.
:param bones: list of (joint1, joint2) pairs, where joint1 and joint2 determines the bone.
:return: For each pose, the sum of the lengths of the bones in `bones`
"""
assert_shape(poses, ('*', joint_set.NUM_JOINTS, 3))
bone_inds = [[joint_set.index_of(j) for j in b] for b in bones]
height = np.zeros(poses.shape[:-2], dtype='float32')
for bone in bone_inds:
bones = poses[..., bone[0], :] - poses[..., bone[1], :] # (shapePose, 3)
bones = np.linalg.norm(bones, axis=-1) # (shapePose)
height += bones
return height
def pck(pred, gt, thresh):
""" Percentage of keypoints less than thresh mm away from the GT. """
return np.mean(np.linalg.norm(pred - gt, axis=-1) < thresh)
AUC_THRESHOLDS = np.arange(0, 151, 5)
def auc(pred, gt, thresholds=AUC_THRESHOLDS):
""" Calculates AUC of PCK. The default thresholds are the ones used by the MuPoTS evaluation script"""
    errors = np.linalg.norm(pred - gt, axis=-1)
    return np.mean([np.mean(errors < t) for t in thresholds])
def mpjpe(pred, gt):
assert_shape(pred, ('*', None, 3))
assert pred.shape == gt.shape
return np.mean(np.linalg.norm(gt - pred, axis=-1))
def r_mpjpe(pred, gt, joint_set):
pred = remove_root(pred, joint_set.index_of('hip'))
gt = remove_root(gt, joint_set.index_of('hip'))
return mpjpe(pred, gt)
def mrpe(pred, gt, joint_set):
""" Mean Roo Position Error. """
assert_shape(pred, ('*', None, 3))
assert pred.shape == gt.shape
hip_ind = joint_set.index_of('hip')
assert gt[..., hip_ind, :].shape[-1] == 3
return np.nanmean(np.linalg.norm(gt[..., hip_ind, :] - pred[..., hip_ind, :], axis=-1))
def optimal_scaling(pred, gt):
"""
Calculates optimal scaling factor for a given set of points. Optimal scaling is the scalar s,
with which the pred points scaled become the closest to gt points, in L2 sense.
:param pred: array(nFrames, nPoints, 3)
:param gt: array(nFrames, nPoints, 3)
    :return: array(nFrames), the optimal scale factor for each frame
"""
assert pred.shape == gt.shape
assert_shape(pred, ('*', None, 3))
# Optimal scale transform
    dot_pose_pose = np.sum(pred * pred, axis=(-1, -2))  # (nFrames,)
    dot_pose_gt = np.sum(pred * gt, axis=(-1, -2))
    return dot_pose_gt / dot_pose_pose  # (nFrames,), the optimal scaling factor s
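# The closed form above follows from minimizing ||s * pred - gt||^2 over the
# scalar s: setting the derivative to zero yields s = <pred, gt> / <pred, pred>,
# where <., .> sums over all joints and coordinates of a frame.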
def rn_mpjpe(pred, gt, root_ind):
"""
    N-MPJPE, where the optimal scaling factor is calculated on the root-relative pose.
    This should be a good comparison to height-based scaling.
Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py
"""
assert pred.shape == gt.shape
assert_shape(pred, ('*', None, 3))
s_opt = optimal_scaling(remove_root(pred, root_ind), remove_root(gt, root_ind))
return mpjpe(pred * s_opt[..., np.newaxis, np.newaxis], gt)
def n_mpjpe(pred, gt):
"""
Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py
"""
assert pred.shape == gt.shape
assert_shape(pred, ('*', None, 3))
s_opt = optimal_scaling(pred, gt)
return mpjpe(pred * s_opt[..., np.newaxis, np.newaxis], gt)
| 7,952 | 30.939759 | 113 | py |
pose_refinement | pose_refinement-master/src/scripts/maskrcnn_bboxes.py | """ Generates Mask-RCNN bounding boxes. """
import argparse
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.config import get_cfg
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
import torch
import os
from torch.utils.data import DataLoader, Dataset
from util.misc import save
def get_kpdetection_conf():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
return cfg
def get_model(cfg):
model = build_model(cfg)
model.eval()
checkpointer = DetectionCheckpointer(model)
checkpointer.load(cfg.MODEL.WEIGHTS)
return model
class ImgDirDataset(Dataset):
def __init__(self, folder, transform):
self.folder = folder
self.files = sorted(os.listdir(folder))
self.transform = transform
def __len__(self):
return len(self.files)
def __getitem__(self, ind):
img = cv2.imread(os.path.join(self.folder, self.files[ind]))
height, width = img.shape[:2]
image = self.transform.get_transform(img).apply_image(img)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).contiguous()
return {"image": image, "height": height, "width": width, "name": self.files[ind]}
def predict_dataset(model, dataset, out_folder, batch_size=16):
loader = DataLoader(dataset, batch_size, collate_fn=lambda x: x, num_workers=3)
with torch.no_grad():
for batch in loader:
predictions = model(batch)
for i in range(len(batch)):
boxes = predictions[i]['instances'].pred_boxes.tensor.cpu().numpy()
scores = predictions[i]['instances'].scores.cpu().numpy()[:, np.newaxis]
output = np.concatenate([boxes, scores], axis=1)
assert output.shape[1] == 5
save(os.path.join(out_folder, "%s.pkl" % batch[i]['name']), output)
def predict_imgs(input_path, output_path):
cfg = get_kpdetection_conf()
model = get_model(cfg)
transform_gen = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
assert cfg.INPUT.FORMAT == 'BGR'
dataset = ImgDirDataset(input_path, transform_gen)
predict_dataset(model, dataset, output_path, batch_size=8)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_path', help="the path to the input frames")
parser.add_argument('output_path', help="bboxes will be generated here")
args = parser.parse_args()
predict_imgs(args.input_path, args.output_path)
| 3,049 | 29.19802 | 101 | py |
pose_refinement | pose_refinement-master/src/scripts/hrnet_predict.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../hrnet/lib')
from scripts import hrnet_dataset
# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Marton Veges
# ------------------------------------------------------------------------------
import argparse
import time
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import numpy as np
from config import cfg
from config import update_config
from core.function import AverageMeter
from utils.utils import create_logger
from core.inference import get_final_preds
from utils.transforms import flip_back
import models
from util.misc import load, ensuredir
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
parser.add_argument('path', help="the path to the video frames and bboxes", type=str)
# general
parser.add_argument('--cfg',
help='experiment configure file name',
required=False,
default='../hrnet/experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml',
type=str)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
return parser.parse_args()
def predict_dataset(config, dataset, model):
batch_time = AverageMeter()
# switch to evaluate mode
model.eval()
num_samples = len(dataset)
all_preds = np.zeros((num_samples, config.MODEL.NUM_JOINTS, 3), dtype=np.float32)
all_boxes = np.zeros((num_samples, 6))
image_names = []
orig_boxes = []
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
shuffle=False,
pin_memory=True, num_workers=1
)
idx = 0
with torch.no_grad():
end = time.time()
for i, (input, meta) in enumerate(data_loader):
# compute output
outputs = model(input)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
if config.TEST.FLIP_TEST:
                # This part is ugly because PyTorch does not support negative-step indexing:
# input_flipped = model(input[:, :, :, ::-1])
input_flipped = np.flip(input.cpu().numpy(), 3).copy()
input_flipped = torch.from_numpy(input_flipped).cuda()
outputs_flipped = model(input_flipped)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(output_flipped.cpu().numpy(),
dataset.flip_pairs)
output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
# feature is not aligned, shift flipped heatmap for higher accuracy
if config.TEST.SHIFT_HEATMAP:
output_flipped[:, :, :, 1:] = \
output_flipped.clone()[:, :, :, 0:-1]
output = (output + output_flipped) * 0.5
num_images = input.size(0)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
preds, maxvals = get_final_preds(config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
all_boxes[idx:idx + num_images, 4] = np.prod(s * 200, 1)
all_boxes[idx:idx + num_images, 5] = score
names = meta['image']
image_names.extend(names)
orig_boxes.extend(meta['origbox'])
idx += num_images
if i % config.PRINT_FREQ == 0:
msg = 'Test: [{0}/{1}]\t' \
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
i, len(data_loader), batch_time=batch_time)
print(msg)
return all_preds, all_boxes, image_names, orig_boxes
def predict_imgs(model, img_folder, bbox_folder, output_file, normalize, detection_thresh):
detections = {}
for file in sorted(os.listdir(bbox_folder)):
dets = load(os.path.join(bbox_folder, file))
assert dets.shape[1] == 5
img_name = file[:-4] # remove extension
detections[img_name] = dets
valid_dataset = hrnet_dataset.ImgFolderDataset(cfg, img_folder, detections,
normalize, detection_thresh)
start = time.time()
preds, boxes, image_names, orig_boxes = predict_dataset(cfg, valid_dataset, model)
end = time.time()
print("Time in prediction: " + str(end - start))
ensuredir(os.path.dirname(output_file))
valid_dataset.rescore_and_save_result(output_file, preds, boxes, image_names, orig_boxes)
def predict(cfg_path, img_dir, bbox_dir, out_file, param_overrides=[]):
# update_config needs some hardcoded params, fake them here
class args:
cfg = cfg_path
opts = param_overrides
modelDir = ''
logDir = ''
dataDir = ''
update_config(cfg, args)
cfg.defrost()
cfg.TEST.MODEL_FILE = '../hrnet/pose_hrnet_w32_256x192.pth'
cfg.TEST.USE_GT_BBOX = False
cfg.TEST.BATCH_SIZE_PER_GPU = 64
cfg.GPUS = (0,)
cfg.freeze()
logger, final_output_dir, tb_log_dir = create_logger(cfg, cfg_path, 'valid')
logger.info(pprint.pformat(args))
logger.info(cfg)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)
model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
# Data loading code
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
normalize = transforms.Compose([transforms.ToTensor(), normalize])
detection_thresh = 0.8
img_dir = os.path.join(img_dir, '*') # Dataset requires a glob format
predict_imgs(model, img_dir, bbox_dir, out_file, normalize, detection_thresh)
if __name__ == '__main__':
args = parse_args()
img_dir = os.path.join(args.path, 'frames')
bbox_dir = os.path.join(args.path, 'bboxes')
out_file = os.path.join(args.path, 'keypoints.json')
    predict(args.cfg, img_dir, bbox_dir, out_file, param_overrides=args.opts)
| 7,588 | 33.03139 | 95 | py |
pose_refinement | pose_refinement-master/src/scripts/hrnet_dataset.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Marton Veges
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import logging
import os
from util.misc import ensuredir
import json_tricks as json
import numpy as np
import copy
import cv2
import glob
from torch.utils.data import Dataset
from utils.transforms import get_affine_transform
from nms.nms import oks_nms
from nms.nms import soft_oks_nms
logger = logging.getLogger(__name__)
class BaseDataset(Dataset):
"""
    A Dataset loading bounding boxes for COCO joints evaluation.
"keypoints": {
0: "nose",
1: "left_eye",
2: "right_eye",
3: "left_ear",
4: "right_ear",
5: "left_shoulder",
6: "right_shoulder",
7: "left_elbow",
8: "right_elbow",
9: "left_wrist",
10: "right_wrist",
11: "left_hip",
12: "right_hip",
13: "left_knee",
14: "right_knee",
15: "left_ankle",
16: "right_ankle"
},
"skeleton": [
[16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13], [6,7],[6,8],
[7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]
"""
def __init__(self, cfg):
# Unpack NMS threshold parameters
        self.image_thre = cfg.TEST.IMAGE_THRE  # bounding boxes whose detection score is below this value are thrown away, not predicted on
self.soft_nms = cfg.TEST.SOFT_NMS
self.oks_thre = cfg.TEST.OKS_THRE
self.in_vis_thre = cfg.TEST.IN_VIS_THRE
# Unpack image size parameters
self.image_width = cfg.MODEL.IMAGE_SIZE[0]
self.image_height = cfg.MODEL.IMAGE_SIZE[1]
self.image_size = np.array(cfg.MODEL.IMAGE_SIZE)
self.aspect_ratio = self.image_width * 1.0 / self.image_height
self.pixel_std = 200
self.color_rgb = cfg.DATASET.COLOR_RGB
self.num_joints = 17
self.flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],
[9, 10], [11, 12], [13, 14], [15, 16]]
    def _lurb2cs(self, box):  # converts a (left, upper, right, bottom) box to center/scale; TODO: verify
x, y = box[:2]
w = box[2] - box[0]
h = box[3] - box[1]
return self._xywh2cs(x, y, w, h)
def _box2cs(self, box):
x, y, w, h = box[:4]
return self._xywh2cs(x, y, w, h)
def _xywh2cs(self, x, y, w, h):
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > self.aspect_ratio * h:
h = w * 1.0 / self.aspect_ratio
elif w < self.aspect_ratio * h:
w = h * self.aspect_ratio
scale = np.array([w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std], dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
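    # For example, with a 192x256 network input (aspect_ratio = 0.75) a tall
    # 100x400 detection is widened to 300x400 before scaling, so the crop always
    # matches the network's input aspect ratio; the extra 1.25 factor adds
    # context padding around the person.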
def rescore_and_save_result(self, output_file, preds, all_boxes, img_path, orig_boxes):
assert output_file.endswith('.json') or output_file.endswith('.npy'), "Only json and numpy output is supported"
ensuredir(os.path.dirname(output_file))
# person x (keypoints)
_kpts = []
for idx, kpt in enumerate(preds):
_kpts.append({
'keypoints': kpt,
'center': all_boxes[idx][0:2],
'scale': all_boxes[idx][2:4],
'area': all_boxes[idx][4],
'score': all_boxes[idx][5],
'image': img_path[idx],
'origbox': orig_boxes[idx]
})
# image x person x (keypoints)
kpts = defaultdict(list)
for kpt in _kpts:
kpts[kpt['image']].append(kpt)
# rescoring and oks nms
num_joints = self.num_joints
in_vis_thre = self.in_vis_thre
oks_thre = self.oks_thre
oks_nmsed_kpts = []
nmsed_kpts_by_frame = defaultdict(list)
for img in kpts.keys():
img_kpts = kpts[img]
for n_p in img_kpts:
box_score = n_p['score']
kpt_score = 0
valid_num = 0
for n_jt in range(0, num_joints):
t_s = n_p['keypoints'][n_jt][2]
if t_s > in_vis_thre:
kpt_score = kpt_score + t_s
valid_num = valid_num + 1
if valid_num != 0:
kpt_score = kpt_score / valid_num
# rescoring
n_p['score'] = kpt_score * box_score
if self.soft_nms:
keep = soft_oks_nms([img_kpts[i] for i in range(len(img_kpts))], oks_thre)
else:
keep = oks_nms([img_kpts[i] for i in range(len(img_kpts))], oks_thre)
if len(keep) == 0:
selected_kpts = img_kpts
else:
selected_kpts = [img_kpts[_keep] for _keep in keep]
oks_nmsed_kpts.append(selected_kpts)
nmsed_kpts_by_frame[img] = selected_kpts
self._write_keypoint_results(nmsed_kpts_by_frame, output_file)
def _write_keypoint_results(self, keypoints, output_file):
# TODO turn list into numpy arrays
if output_file.endswith('.json'):
# Convert numpy arrays to Python lists
for img_name, poses in keypoints.items():
for pose in poses:
pose['center'] = pose['center'].tolist()
pose['scale'] = pose['scale'].tolist()
pose['keypoints'] = pose['keypoints'].ravel().tolist()
pose['origbox'] = pose['origbox'].tolist()
with open(output_file, 'w') as f:
json.dump(keypoints, f, sort_keys=True, indent=4)
elif output_file.endswith('npy'):
frame_ind = keypoints.keys()
assert all([f.startswith('videocap#') for f in frame_ind])
frame_ind = sorted(frame_ind, key=lambda x: int(x[len('videocap#'):]))
kps = []
for f in frame_ind:
assert len(keypoints[f]) == 1, 'Only images with a single pose are supported in numpy save mode, found: ' + str(
keypoints[f])
kps.append(keypoints[f][0]['keypoints'])
kps = np.stack(kps, axis=0)
print("shape:" + str(kps.shape))
np.save(output_file, kps)
else:
raise NotImplementedError('Unknown file ending: ' + output_file)
class ImgFolderDataset(BaseDataset):
""" Can be used with a folder of images"""
def __init__(self, cfg, img_path, dets, transform, det_threshold):
"""
:param cfg: config object
:param img_path: path to folder, must be glob (e.g. *.jpg)
:param dets: detections img->boxes
:param transform: transformations to apply on images
"""
super(ImgFolderDataset, self).__init__(cfg)
self.img_paths = sorted(glob.glob(img_path))
self.basedir = os.path.dirname(img_path)
self.dets = dets
self.transform = transform
self.image_thre = det_threshold
# check there is a file for all detections
# img_names = set([os.path.basename(x) for x in self.img_paths])
# for img in dets:
# assert img in img_names, "Could not find " + img
self.db = self._prepare_db()
self.last_idx_read = None
self.last_img_read = None
self.last_img = None
def _prepare_db(self):
"""
Prepares the detections from the self.dets field. Optionally filters out detected bounding boxes if their
score is low.
"""
kpt_db = []
filtered_boxes_num = 0
total_boxes_num = 0
for img_name in sorted(self.dets.keys()):
boxes = self.dets[img_name]
total_boxes_num += len(boxes)
for box in boxes:
score = box[4]
if score < self.image_thre:
continue
filtered_boxes_num = filtered_boxes_num + 1
center, scale = self._lurb2cs(box[:4])
kpt_db.append({
'image': img_name,
'center': center,
'scale': scale,
'score': score,
'origbox': box[:4]
})
logger.info('=> Total boxes: {}'.format(total_boxes_num))
logger.info('=> Total boxes after filter low score@{}: {}'.format(self.image_thre, filtered_boxes_num))
return kpt_db
def _get_img(self, img_name):
# Read from cache
if self.last_img_read == img_name:
return self.last_img
# assert self.last_img_read is None or self.last_frame_read == idx - 1, "Can only read sequentially %d -> %d" % \
# (self.last_frame_read, idx)
img = cv2.imread(os.path.join(self.basedir, img_name))
assert img is not None, "could not find " + img_name
if self.color_rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.last_img_read = img_name
self.last_img = img
return img
def __len__(self):
return len(self.db)
def __getitem__(self, idx):
        assert self.last_idx_read is None or self.last_idx_read == idx - 1, "idx jump: %d -> %d" % (self.last_idx_read, idx)
db_rec = copy.deepcopy(self.db[idx])
self.last_idx_read = idx
image_file = db_rec['image']
frame = self._get_img(image_file)
if frame is None:
logger.error('=> fail to read {}'.format(image_file))
raise ValueError('Fail to read {}'.format(image_file))
c = db_rec['center']
s = db_rec['scale']
score = db_rec['score'] if 'score' in db_rec else 1
r = 0
trans = get_affine_transform(c, s, r, self.image_size)
input = cv2.warpAffine(frame, trans, (int(self.image_size[0]), int(self.image_size[1])), flags=cv2.INTER_LINEAR)
if self.transform:
input = self.transform(input)
meta = {
'image': image_file,
'origbox': db_rec['origbox'],
'center': c,
'scale': s,
'rotation': r,
'score': score
}
return input, meta
| 10,625 | 32.415094 | 128 | py |
pose_refinement | pose_refinement-master/src/scripts/eval.py | #!/usr/bin/python3
"""
Evaluates a (non end-to-end) model on MuPo-TS
"""
import argparse
import os
import numpy as np
import torch
from util.misc import load
from databases import mupots_3d
from databases.datasets import PersonStackedMuPoTsDataset
from databases.joint_sets import MuPoTSJoints, CocoExJoints
from model.pose_refinement import optimize_poses, StackedArrayAllMupotsEvaluator
from model.videopose import TemporalModel
from training.callbacks import TemporalMupotsEvaluator
from training.preprocess import get_postprocessor, SaveableCompose, MeanNormalize3D
LOG_PATH = '../models'
def unstack_mupots_poses(dataset, predictions):
""" Converts output of the logger to dict of list of ndarrays. """
COCO_TO_MUPOTS = []
for i in range(MuPoTSJoints.NUM_JOINTS):
try:
COCO_TO_MUPOTS.append(CocoExJoints().index_of(MuPoTSJoints.NAMES[i]))
        except Exception:  # joint has no CocoEx counterpart
COCO_TO_MUPOTS.append(-1)
COCO_TO_MUPOTS = np.array(COCO_TO_MUPOTS)
assert np.all(COCO_TO_MUPOTS[1:14] >= 0)
pred_2d = {}
pred_3d = {}
for seq in range(1, 21):
gt = mupots_3d.load_gt_annotations(seq)
gt_len = len(gt['annot2'])
pred_2d[seq] = []
pred_3d[seq] = []
seq_inds = (dataset.index.seq_num == seq)
for i in range(gt_len):
frame_inds = (dataset.index.frame == i)
valid = dataset.good_poses & seq_inds & frame_inds
pred_2d[seq].append(dataset.poses2d[valid, :, :2][:, COCO_TO_MUPOTS])
pred_3d[seq].append(predictions[seq][frame_inds[dataset.good_poses & seq_inds]])
return pred_2d, pred_3d
def load_model(model_folder):
config = load(os.path.join(LOG_PATH, model_folder, 'config.json'))
path = os.path.join(LOG_PATH, model_folder, 'model_params.pkl')
# Input/output size calculation is hacky
weights = torch.load(path)
num_in_features = weights['expand_conv.weight'].shape[1]
m = TemporalModel(num_in_features, MuPoTSJoints.NUM_JOINTS, config['model']['filter_widths'],
dropout=config['model']['dropout'], channels=config['model']['channels'])
m.cuda()
m.load_state_dict(weights)
m.eval()
return config, m
def get_dataset(config):
data = PersonStackedMuPoTsDataset(config['pose2d_type'], config.get('pose3d_scaling', 'normal'), pose_validity='all')
return data
def main(model_name, pose_refine):
config, m = load_model(model_name)
test_set = get_dataset(config)
params_path = os.path.join(LOG_PATH, str(model_name), 'preprocess_params.pkl')
transform = SaveableCompose.from_file(params_path, test_set, globals())
test_set.transform = transform
assert isinstance(transform.transforms[1].normalizer, MeanNormalize3D)
normalizer3d = transform.transforms[1].normalizer
post_process_func = get_postprocessor(config, test_set, normalizer3d)
logger = TemporalMupotsEvaluator(m, test_set, config['model']['loss'], True, post_process3d=post_process_func)
logger.eval(calculate_scale_free=not pose_refine, verbose=not pose_refine)
if pose_refine:
refine_config = load('../models/pose_refine_config.json')
pred = np.concatenate([logger.preds[i] for i in range(1,21)])
pred = optimize_poses(pred, test_set, refine_config)
l = StackedArrayAllMupotsEvaluator(pred, test_set, True)
l.eval(calculate_scale_free=True, verbose=True)
pred_by_seq = {}
for seq in range(1, 21):
inds = test_set.index.seq_num == seq
pred_by_seq[seq] = pred[inds]
pred_2d, pred_3d = unstack_mupots_poses(test_set, pred_by_seq)
else:
pred_2d, pred_3d = unstack_mupots_poses(test_set, logger.preds)
print("\nR-PCK R-AUC A-PCK A-AUC")
for relative in [True, False]:
pcks, aucs = mupots_3d.eval_poses(False, relative, 'annot3' if config['pose3d_scaling'] == 'normal' else 'univ_annot3',
pred_2d, pred_3d, keep_matching=True)
pck = np.mean(list(pcks.values()))
auc = np.mean(list(aucs.values()))
print(" %4.1f %4.1f " % (pck, auc), end='')
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('model_name', help="Name of the model (either 'normal' or 'universal')")
parser.add_argument('-r', '--pose-refine', action='store_true', help='Apply pose-refinement after TPN')
args = parser.parse_args()
main(args.model_name, args.pose_refine)
| 4,512 | 34.81746 | 127 | py |
pose_refinement | pose_refinement-master/src/scripts/train.py | import argparse
import os
from databases.datasets import Mpi3dTestDataset, Mpi3dTrainDataset, PersonStackedMucoTempDataset, ConcatPoseDataset
from model.videopose import TemporalModel, TemporalModelOptimized1f
from training.callbacks import preds_from_logger, ModelCopyTemporalEvaluator
from training.loaders import ChunkedGenerator
from training.preprocess import *
from training.torch_tools import torch_train
from util.misc import save, ensuredir
def calc_loss(model, batch, config):
if config['model']['loss'] == 'l1_nan':
pose2d = batch['temporal_pose2d']
gt_3d = batch['pose3d']
if config['ignore_invisible']:
pose2d = pose2d[batch['valid_pose']]
gt_3d = gt_3d[batch['valid_pose']]
if isinstance(pose2d, torch.Tensor):
inds = torch.all(torch.all(~torch.isnan(pose2d), dim=(-1)), dim=-1)
pose2d = pose2d[inds]
gt_3d = gt_3d[inds]
pose2d = pose2d.to('cuda')
gt_3d = gt_3d.to('cuda')
else:
inds = np.all(~np.isnan(pose2d), axis=(-1, -2))
pose2d = pose2d[inds]
gt_3d = gt_3d[inds]
pose2d = torch.from_numpy(pose2d).to('cuda')
gt_3d = torch.from_numpy(gt_3d).to('cuda')
elif config['model']['loss'] == 'l1':
pose2d = batch['temporal_pose2d']
gt_3d = batch['pose3d']
if config['ignore_invisible']:
pose2d = pose2d[batch['valid_pose']]
gt_3d = gt_3d[batch['valid_pose']]
pose2d = pose2d.to('cuda')
gt_3d = gt_3d.to('cuda')
# forward pass
pred_3d = model(pose2d)
if config['model']['loss'] == 'l1':
loss_3d = torch.nn.functional.l1_loss(pred_3d, gt_3d)
elif config['model']['loss'] == 'l1_nan':
loss_3d = torch.nn.functional.l1_loss(pred_3d, gt_3d)
else:
raise Exception('Unknown pose loss: ' + str(config['model']['loss']))
return loss_3d, {'loss_3d': loss_3d.item()}
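# Note: once the NaN rows are filtered out above, the 'l1_nan' branch reduces to
# a plain L1 loss, which is why both branches end in the same functional call.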
def run_experiment(output_path, _config):
    ensuredir(output_path)
    save(os.path.join(output_path, 'config.json'), _config)
if _config['train_data'] == 'mpii_train':
print("Training data is mpii-train")
train_data = Mpi3dTrainDataset(_config['pose2d_type'], _config['pose3d_scaling'],
_config['cap_25fps'], _config['stride'])
elif _config['train_data'] == 'mpii+muco':
print("Training data is mpii-train and muco_temp concatenated")
mpi_data = Mpi3dTrainDataset(_config['pose2d_type'], _config['pose3d_scaling'],
_config['cap_25fps'], _config['stride'])
muco_data = PersonStackedMucoTempDataset(_config['pose2d_type'], _config['pose3d_scaling'])
train_data = ConcatPoseDataset(mpi_data, muco_data)
    elif _config['train_data'].startswith('muco_temp'):
        train_data = PersonStackedMucoTempDataset(_config['pose2d_type'], _config['pose3d_scaling'])
    else:
        raise Exception('Unknown train_data: ' + str(_config['train_data']))
test_data = Mpi3dTestDataset(_config['pose2d_type'], _config['pose3d_scaling'], eval_frames_only=True)
if _config['simple_aug']:
train_data.augment(False)
# Load the preprocessing steps
train_data.transform = None
transforms_train = [decode_trfrm(_config['preprocess_2d'], globals())(train_data, cache=False),
decode_trfrm(_config['preprocess_3d'], globals())(train_data, cache=False)]
normalizer2d = transforms_train[0].normalizer
normalizer3d = transforms_train[1].normalizer
transforms_test = [decode_trfrm(_config['preprocess_2d'], globals())(test_data, normalizer2d),
decode_trfrm(_config['preprocess_3d'], globals())(test_data, normalizer3d)]
transforms_train.append(RemoveIndex())
transforms_test.append(RemoveIndex())
train_data.transform = SaveableCompose(transforms_train)
test_data.transform = SaveableCompose(transforms_test)
# save normalisation params
save(output_path+'/preprocess_params.pkl', train_data.transform.state_dict())
print("Length of training data:", len(train_data))
print("Length of test data:", len(test_data))
model = TemporalModelOptimized1f(train_data[[0]]['pose2d'].shape[-1],
MuPoTSJoints.NUM_JOINTS, _config['model']['filter_widths'],
dropout=_config['model']['dropout'], channels=_config['model']['channels'],
layernorm=_config['model']['layernorm'])
test_model = TemporalModel(train_data[[0]]['pose2d'].shape[-1],
MuPoTSJoints.NUM_JOINTS, _config['model']['filter_widths'],
dropout=_config['model']['dropout'], channels=_config['model']['channels'],
layernorm=_config['model']['layernorm'])
model.cuda()
test_model.cuda()
save(output_path+'/model_summary.txt', str(model))
pad = (model.receptive_field() - 1) // 2
train_loader = ChunkedGenerator(train_data, _config['batch_size'], pad, _config['train_time_flip'], shuffle=True)
tester = ModelCopyTemporalEvaluator(test_model, test_data, _config['model']['loss'], _config['test_time_flip'],
post_process3d=get_postprocessor(_config, test_data, normalizer3d), prefix='test')
torch_train(train_loader, model, lambda m, b: calc_loss(m, b, _config), _config, callbacks=[tester])
torch.save(model.state_dict(), os.path.join(output_path, 'model_params.pkl'))
save(output_path+'/test_results.pkl', {'index': test_data.index, 'pred': preds_from_logger(test_data, tester),
'pose3d': test_data.poses3d})
def main(output_path):
params = {
'num_epochs': 80,
'preprocess_2d': 'DepthposeNormalize2D',
'preprocess_3d': 'SplitToRelativeAbsAndMeanNormalize3D',
# training
'optimiser': 'adam',
'adam_amsgrad': True,
'learning_rate': 1e-3,
'sgd_momentum': 0,
'batch_size': 1024,
'train_time_flip': True,
'test_time_flip': True,
'lr_scheduler': {
'type': 'multiplicative',
'multiplier': 0.95,
'step_size': 1,
},
# dataset
'ignore_invisible': True,
'train_data': 'mpii+muco',
'pose2d_type': 'hrnet',
'pose3d_scaling': 'normal',
'megadepth_type': 'megadepth_at_hrnet',
'cap_25fps': True,
'stride': 2,
'simple_aug': True, # augments data by duplicating each frame
'model': {
'loss': 'l1',
'channels': 1024,
'dropout': 0.25,
'filter_widths': [3, 3, 3, 3],
'layernorm': False,
},
}
run_experiment(output_path, params)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', default='../output', help='folder to save the model to')
args = parser.parse_args()
main(args.output)
| 7,059 | 38.222222 | 122 | py |
pose_refinement | pose_refinement-master/src/databases/datasets.py | import os
import h5py
import numpy as np
from torch.utils.data import Dataset
from databases import mupots_3d, mpii_3dhp, muco_temp
from databases.joint_sets import CocoExJoints, OpenPoseJoints, MuPoTSJoints
class PoseDataset(Dataset):
""" Subclasses should have the attributes poses2d/3d, pred_cdepths, pose[2|3]d_jointset defined."""
def filter_dataset(self, inds):
"""
Filters the dataset by ``inds``.
:param inds: anything that can be used for numpy masking
"""
if hasattr(self, 'coord_depths'):
self.coord_depths = self.coord_depths[inds]
if hasattr(self, 'width'):
self.width = self.width[inds]
self.pred_cdepths = self.pred_cdepths[inds]
self.poses2d = self.poses2d[inds]
self.poses3d = self.poses3d[inds]
        self.gt_depth_2d = None  # would be self.gt_depth_2d[inds]; gt_depth_2d is not implemented for compressed
        # videos, and it is ignored for CoordDepthDataset anyway
self.index = self.index[inds]
self.fx = self.fx[inds]
self.fy = self.fy[inds]
self.cx = self.cx[inds]
self.cy = self.cy[inds]
class AugmentMixin:
def augment(self, scale_by_dist, scales=None):
"""
Augments the data in a pose dataset. It simulates moving the poses
closer and further away from the camera. The method takes the dataset D, applies a transformation T,
and concatenates the transformed data to the original data.
:param scale_by_dist: If true, during augmentation it scales values with l2 distance from camera,
otherwise with z coordinate (depth).
:param scales: if defined, values in this array used for scaling instead of random values
"""
assert isinstance(self.pose3d_jointset, MuPoTSJoints), "only implemented for MupoTS joints"
orig_size = len(self.poses2d)
root_ind = MuPoTSJoints().index_of('hip')
# Calculating minimum scale to avoid joints behind camera
if scales is None:
limb_vec = self.poses3d[:, :, 2] - self.poses3d[:, [root_ind], 2]
min_scale = np.nanmax(-limb_vec / self.poses3d[:, [root_ind], 2], axis=1)
scales = np.random.normal(1, 0.25, orig_size)
scales[scales < 0.6] = 1
scales = np.maximum(scales, min_scale + 1e-5)
scales[scales > 1.5] = 1
scales = scales.reshape((-1, 1))
else:
assert scales.ndim == 2, "scales is expected to be a column vector"
self.scales = scales.copy()
# Duplicate all the training data, the first half is the original unchanged,
# the second half is augmented
for field in ['poses2d', 'poses3d', 'pred_cdepths', 'fx', 'fy', 'cx', 'cy', 'width', 'valid_2d_pred']:
if hasattr(self, field):
data = self.__getattribute__(field)
self.__setattr__(field, np.concatenate([data, data.copy()]))
if hasattr(self, 'index'):
self.index = np.concatenate([self.index, self.index.copy()])
# Calculate the new 3D coordinates of the poses
orig_roots = np.expand_dims(self.poses3d[orig_size:, root_ind, :].copy(), 1) # (nPoses, 1, 3)
new_roots = orig_roots * np.expand_dims(scales, 1)
self.poses3d[orig_size:, :, :] = self.poses3d[orig_size:, :, :] - orig_roots + new_roots
pose2d_root_ind = self.pose2d_jointset.index_of('hip')
self.poses2d[orig_size:, :, :2] = (self.poses2d[orig_size:, :, :2]
- self.poses2d[orig_size:, [pose2d_root_ind], :2]) / scales[:, :, None] \
+ self.poses2d[orig_size:, [pose2d_root_ind], :2]
assert np.all((self.poses3d[:, :, 2] >= 0) | np.isnan(self.poses3d[:, :, 2])), "Joint behind camera"
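        # Under a pinhole camera, moving a pose from depth z to s*z shrinks its
        # projection by roughly 1/s around the root's image position, which is
        # what the 2D update above implements; in 3D the pose is translated
        # along the camera ray through the root, and min_scale guarantees that
        # no joint ends up behind the camera.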
class TemporalAugmentMixin(AugmentMixin):
def augment(self, scale_by_dist, scales=None):
orig_len = len(self.poses2d)
if scales is None:
            # creating scales such that all poses within a sequence share the same scale
root_ind = self.pose3d_jointset.index_of('hip')
limb_vec = self.poses3d[:, :, 2] - self.poses3d[:, [root_ind], 2]
min_scales = np.nanmax(-limb_vec / self.poses3d[:, [root_ind], 2], axis=1)
scales = np.ones(len(self.poses2d), dtype='float32')
seqs = sorted(np.unique(self.index.seq))
for seq in seqs:
inds = self.index.seq == seq
# print(np.sum(inds), seq, self.index.seq)
min_scale = np.max(min_scales[inds])
scale = np.random.normal(1, 0.2)
scale = max(scale, 0.6)
scale = max(scale, min_scale + 1e-5)
scale = min(scale, 1.5)
scales[inds] = scale
scales = scales[:, np.newaxis]
super().augment(scale_by_dist, scales)
self.index = np.rec.array(self.index)
for i in range(orig_len, 2 * orig_len):
self.index.seq[i] = self.index.seq[i] + 'A'
class FlippableDataset(PoseDataset):
def __len__(self):
return len(self.poses2d)
def get_samples(self, ind, flip):
"""
:param ind: indices of the elements to extract
        :param flip: bool or ndarray of bools; if True, the corresponding elements are flipped
"""
sample = self.prepare_sample(ind)
if isinstance(flip, np.ndarray) or flip:
if not isinstance(flip, np.ndarray):
flip = np.full(len(ind), flip, dtype='bool')
pose2d = sample['pose2d'].copy()
pose2d[flip, ..., 0] = np.expand_dims(sample['width'][flip], 1) - pose2d[flip, ..., 0]
pose2d[flip] = self.pose2d_jointset.flip(pose2d[flip])
sample['pose2d'] = pose2d
pose3d = sample['pose3d'].copy()
pose3d[flip, ..., 0] *= -1
pose3d[flip] = self.pose3d_jointset.flip(pose3d[flip])
sample['pose3d'] = pose3d
cx = sample['cx'].copy()
cx[flip] = sample['width'][flip] - cx[flip]
sample['cx'] = cx
if self.transform:
sample = self.transform(sample)
return sample
def __getitem__(self, ind):
return self.get_samples(ind, False)
class ConcatPoseDataset(FlippableDataset, TemporalAugmentMixin):
def __init__(self, data1, data2):
self.data1 = data1
self.data2 = data2
fields = ['poses2d', 'poses3d', 'fx', 'fy', 'cx', 'cy', 'valid_2d_pred']
for field in fields:
field1 = data1.__getattribute__(field)
field2 = data2.__getattribute__(field)
self.__setattr__(field, np.concatenate([field1, field2]))
seqs = np.concatenate([data1.index.seq, data2.index.seq])
self.index = np.recarray(len(seqs), [('seq', seqs.dtype)])
self.index.seq = seqs
assert type(data1.pose2d_jointset) == type(data2.pose2d_jointset)
assert type(data1.pose3d_jointset) == type(data2.pose3d_jointset)
self.pose2d_jointset = data1.pose2d_jointset
self.pose3d_jointset = data1.pose3d_jointset
self.transform = None
def prepare_sample(self, ind):
if isinstance(ind, (list, tuple, np.ndarray)):
width = np.full(len(ind), 2048, dtype='int32')
else:
width = 2048
sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
return sample
def _column_stack(data):
""" Columnwise stacks an ndarray"""
return data.reshape((-1,) + data.shape[2:], order='F').copy()
class PersonStackedMuPoTsDataset(FlippableDataset):
def __init__(self, pose2d_type, pose3d_scaling, pose_validity='detected_only', hip_threshold=-1):
"""
Loads MuPoTS dataset but only those images where at least one person was detected. Each person on a frame
is loaded separately.
:param pose_validity: one of 'all', 'detected_only', 'valid_only'; specifies which poses are marked valid
all - all of them; valid_only - those that are valid according to the GT annotations
            detected_only - those that were successfully detected by the 2D algorithm and also valid
:param hip_threshold: only those poses are loaded, where the score of the hip is larger than this value
"""
assert pose_validity in ['all', 'detected_only', 'valid_only']
assert pose3d_scaling in ['univ', 'normal']
self.pose2d_jointset = PersonStackedMuPoTsDataset.get_jointset(pose2d_type)
self.pose3d_jointset = MuPoTSJoints()
self.pose3d_scaling = pose3d_scaling
pred2d_root_ind = self.pose2d_jointset.index_of('hip')
poses2d = []
poses3d = []
joint3d_visible = []
all_good_poses = []
valid_annotations = []
width = []
index = []
for seq in range(1, 21):
img_width, img_height = mupots_3d.image_size(seq)
gt = mupots_3d.load_gt_annotations(seq)
pred2d = mupots_3d.load_2d_predictions(seq, pose2d_type)
pose2d = pred2d['pose']
pose3d = gt['annot3' if pose3d_scaling == 'normal' else 'univ_annot3']
visibility = ~gt['occlusions']
if pose_validity == 'all':
good_poses = np.full(pose3d.shape[:2], True, dtype='bool')
elif pose_validity == 'valid_only':
good_poses = gt['isValidFrame'].squeeze()
elif pose_validity == 'detected_only':
good_poses = gt['isValidFrame'].squeeze()
good_poses = np.logical_and(good_poses, pred2d['valid_pose'])
good_poses = np.logical_and(good_poses, pose2d[:, :, pred2d_root_ind, 2] > hip_threshold)
else:
raise NotImplementedError("Unknown pose_validity value:" + pose_validity)
orig_frame = np.tile(np.arange(len(good_poses)).reshape(-1, 1), (1, good_poses.shape[1]))
orig_pose = np.tile(np.arange(good_poses.shape[1]).reshape(1, -1), (good_poses.shape[0], 1))
assert pose2d.shape[:2] == good_poses.shape # (nFrames, nPeople)
assert pose3d.shape[:2] == good_poses.shape
assert orig_frame.shape == good_poses.shape
assert orig_pose.shape == good_poses.shape
assert pose2d.shape[2:] == (self.pose2d_jointset.NUM_JOINTS, 3)
assert pose3d.shape[2:] == (17, 3)
assert visibility.shape[2] == 17
assert good_poses.ndim == 2
orig_frame = _column_stack(orig_frame)
orig_pose = _column_stack(orig_pose)
index.extend([('%d/%d' % (seq, orig_pose[i]), seq, orig_frame[i], orig_pose[i]) for i in range(len(orig_frame))])
poses2d.append(_column_stack(pose2d))
poses3d.append(_column_stack(pose3d))
joint3d_visible.append(_column_stack(visibility))
all_good_poses.append(_column_stack(good_poses))
valid_annotations.append(_column_stack(gt['isValidFrame']))
width.extend([img_width] * len(orig_frame))
self.poses2d = np.concatenate(poses2d).astype('float32')
self.poses3d = np.concatenate(poses3d).astype('float32')
self.joint3d_visible = np.concatenate(joint3d_visible)
self.good_poses = np.concatenate(all_good_poses)
self.valid_annotations = np.concatenate(valid_annotations)
self.width = np.array(width)
self.index = np.rec.array(index, dtype=[('seq', 'U5'), ('seq_num', 'int32'), ('frame', 'int32'), ('pose', 'int32')])
assert self.valid_annotations.shape == self.good_poses.shape
assert len(self.valid_annotations) == len(self.poses2d)
# Load calibration matrices
N = len(self.poses2d)
self.fx = np.zeros(N, dtype='float32')
self.fy = np.zeros(N, dtype='float32')
self.cx = np.zeros(N, dtype='float32')
self.cy = np.zeros(N, dtype='float32')
mupots_calibs = mupots_3d.get_calibration_matrices()
for seq in range(1, 21):
inds = (self.index.seq_num == seq)
self.fx[inds] = mupots_calibs[seq][0, 0]
self.fy[inds] = mupots_calibs[seq][1, 1]
self.cx[inds] = mupots_calibs[seq][0, 2]
self.cy[inds] = mupots_calibs[seq][1, 2]
assert np.all(self.fx > 0), "Some fields were not filled"
assert np.all(self.fy > 0), "Some fields were not filled"
assert np.all(np.abs(self.cx) > 0), "Some fields were not filled"
assert np.all(np.abs(self.cy) > 0), "Some fields were not filled"
self.transform = None
@staticmethod
def get_jointset(pose2d_type):
if pose2d_type == 'openpose':
return OpenPoseJoints()
elif pose2d_type == 'hrnet':
return CocoExJoints()
else:
raise Exception("Unknown 2D pose type: " + pose2d_type)
def prepare_sample(self, ind):
sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
'index': ind, 'valid_pose': self.good_poses[ind], 'cx': self.cx[ind], 'width': self.width[ind]}
return sample
class Mpi3dTestDataset(FlippableDataset):
def __init__(self, pose2d_type, pose3d_scaling, eval_frames_only=False):
assert pose2d_type == 'hrnet', "Only hrnet 2d is implemented"
assert pose3d_scaling in ['normal', 'univ'], \
"Unexpected pose3d scaling type: " + str(pose3d_scaling)
self.transform = None
self.eval_frames_only = eval_frames_only
pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
poses2d = []
poses3d = []
valid_2d_pred = [] # True if HR-net found a pose
valid_frame = [] # True if MPI-INF-3DHP marked the frame as valid
fx = []
fy = []
cx = []
cy = []
width = []
index = []
for seq in range(1, 7):
gt = h5py.File(os.path.join(mpii_3dhp.MPII_3DHP_PATH,
'mpi_inf_3dhp_test_set', 'TS%d' % seq, 'annot_data.mat'), 'r')
poses3d.append(gt[pose3d_key][:, 0])
valid_frame.append(gt['valid_frame'][()] == 1)
num_frames = len(poses3d[-1]) # The annotations are shorter than the number of images
tmp = mpii_3dhp.test_poses_hrnet(seq)
poses2d.append(tmp['poses'])
valid_2d_pred.append(tmp['is_valid'])
assert len(poses3d[-1]) == len(poses2d[-1]), "Gt and predicted frames are not aligned, seq:" + str(seq)
index.extend([(seq, i) for i in range(num_frames)])
calibration_mx = mpii_3dhp.get_test_calib(seq)
fx.extend([calibration_mx[0, 0]] * num_frames)
fy.extend([calibration_mx[1, 1]] * num_frames)
cx.extend([calibration_mx[0, 2]] * num_frames)
cy.extend([calibration_mx[1, 2]] * num_frames)
width.extend([2048 if seq < 5 else 1920] * num_frames)
self.pose2d_jointset = CocoExJoints()
self.pose3d_jointset = MuPoTSJoints()
self.poses2d = np.concatenate(poses2d)
self.poses3d = np.concatenate(poses3d)
self.valid_2d_pred = np.concatenate(valid_2d_pred)
valid_frame = np.concatenate(valid_frame)
assert valid_frame.shape[1] == 1, valid_frame.shape
valid_frame = valid_frame[:, 0]
self.index = np.rec.array(index, dtype=[('seq', 'int32'), ('frame', 'int32')])
self.fx = np.array(fx, dtype='float32')
self.fy = np.array(fy, dtype='float32')
self.cx = np.array(cx, dtype='float32')
self.cy = np.array(cy, dtype='float32')
self.width = np.array(width, dtype='int32')
assert len(self.poses2d) == len(self.index), len(self.index)
        # keep only those frames where a pose was detected
good_poses = self.valid_2d_pred.copy()
if eval_frames_only:
good_poses = good_poses & valid_frame
self.good_poses = good_poses
assert len(self.poses2d) == len(self.poses3d)
assert len(self.poses2d) == len(self.index), len(self.index)
assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
assert len(self.poses2d) == len(self.fx), len(self.fx)
assert len(self.poses2d) == len(self.fy), len(self.fy)
assert len(self.poses2d) == len(self.cx), len(self.cx)
assert len(self.poses2d) == len(self.cy), len(self.cy)
assert len(self.poses2d) == len(self.width), len(self.width)
assert len(self.poses2d) == len(self.good_poses), len(self.good_poses)
def prepare_sample(self, ind):
sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
'index': ind, 'valid_pose': self.good_poses[ind], 'cx': self.cx[ind], 'width': self.width[ind]}
return sample
class Mpi3dTrainDataset(FlippableDataset, TemporalAugmentMixin):
def __init__(self, pose2d_type, pose3d_scaling, cap_at_25fps, stride=1):
assert pose2d_type == 'hrnet', "Only hrnet 2d is implemented"
assert pose3d_scaling in ['normal', 'univ'], \
"Unexpected pose3d scaling type: " + str(pose3d_scaling)
self.transform = None
pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
poses2d = []
poses3d = []
valid_2d_pred = [] # True if HR-net found a pose
fx = []
fy = []
cx = []
cy = []
index = []
sequences = []
calibs = mpii_3dhp.get_calibration_matrices()
for sub in range(1, 9):
for seq in range(1, 3):
gt = mpii_3dhp.train_ground_truth(sub, seq)
for cam in range(11):
                    # In S3/Seq2 cam2 there are some frames between 9400-9900 where the pose is
                    # behind the camera or nearly in the camera plane. This breaks training.
                    # For simplicity, ignore the whole sequence; ignoring only frames 9400-9900
                    # would also work.
if seq == 2 and sub == 3 and cam == 2:
continue
# Find indices that are selected for the dataset
inds = np.arange(len(gt[pose3d_key][cam]))
if cap_at_25fps and mpii_3dhp.get_train_fps(sub, seq) == 50:
inds = inds[::2]
inds = inds[::stride]
num_frames = len(inds)
poses3d.append(gt[pose3d_key][cam][inds])
tmp = mpii_3dhp.train_poses_hrnet(sub, seq, cam)
poses2d.append(tmp['poses'][inds])
valid_2d_pred.append(tmp['is_valid'][inds])
                    assert len(poses3d[-1]) == len(poses2d[-1]), "Gt and predicted frames are not aligned, seq:" + str(seq)
seq_name = 'S%d/Seq%d/%d' % (sub, seq, cam)
sequences.append(seq_name)
index.extend([(seq_name, sub, seq, cam, i) for i in inds])
calibration_mx = calibs[(sub, seq, cam)]
fx.extend([calibration_mx[0, 0]] * num_frames)
fy.extend([calibration_mx[1, 1]] * num_frames)
cx.extend([calibration_mx[0, 2]] * num_frames)
cy.extend([calibration_mx[1, 2]] * num_frames)
self.pose2d_jointset = CocoExJoints()
self.pose3d_jointset = MuPoTSJoints()
self.poses2d = np.concatenate(poses2d)
self.poses3d = np.concatenate(poses3d)
self.valid_2d_pred = np.concatenate(valid_2d_pred)
self.index = np.rec.array(index, dtype=[('seq', 'U12'), ('sub', 'int32'), ('subseq', 'int32'),
('cam', 'int32'), ('frame', 'int32')])
self.fx = np.array(fx, dtype='float32')
self.fy = np.array(fy, dtype='float32')
self.cx = np.array(cx, dtype='float32')
self.cy = np.array(cy, dtype='float32')
self.sequences = sorted(sequences)
assert len(self.poses2d) == len(self.index), len(self.index)
assert len(self.poses2d) == len(self.poses3d)
assert len(self.poses2d) == len(self.index), len(self.index)
assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
assert len(self.poses2d) == len(self.fx), len(self.fx)
assert len(self.poses2d) == len(self.fy), len(self.fy)
assert len(self.poses2d) == len(self.cx), len(self.cx)
assert len(self.poses2d) == len(self.cy), len(self.cy)
def filter_dataset(self, inds):
super().filter_dataset(inds)
self.sequences = sorted(np.unique(self.index.seq))
def prepare_sample(self, ind):
if isinstance(ind, (list, tuple, np.ndarray)):
width = np.full(len(ind), 2048, dtype='int32')
else:
width = 2048
sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
return sample
class PersonStackedMucoTempDataset(FlippableDataset, TemporalAugmentMixin):
""" This dataset contains Muco-Temp poses, poses on the same frame are separated. """
def __init__(self, pose2d_type, pose3d_scaling):
assert pose2d_type == 'hrnet', "only hrnet is implemented"
assert pose3d_scaling in ['univ', 'normal']
self.transform = None
self.pose2d_jointset = PersonStackedMuPoTsDataset.get_jointset(pose2d_type)
self.pose3d_jointset = MuPoTSJoints()
pose3d_key = 'annot3' if pose3d_scaling == 'normal' else 'univ_annot3'
poses2d = []
poses3d = []
valid_2d_pred = [] # True if HR-net found a pose
fx = []
fy = []
cx = []
cy = []
index = []
calibs = mpii_3dhp.get_calibration_matrices()
meta_data = muco_temp.get_metadata()
for cam in range(11):
gt = muco_temp.load_gt(cam)
for vid in range(7):
orig_shape = gt[vid][pose3d_key].shape # (nFrames, nPoses, nJoints, 3)
poses3d.append(_column_stack(gt[vid][pose3d_key]))
kp = muco_temp.load_hrnet(cam, vid)
poses2d.append(_column_stack(kp['poses']))
valid_2d_pred.append(_column_stack(kp['is_valid']))
assert len(poses3d[-1]) == len(poses2d[-1]), \
"Gt and predicted frames are not aligned, cam:" + str(cam)
orig_frame = np.tile(np.arange(orig_shape[0]).reshape(-1, 1), (1, orig_shape[1]))
orig_pose = np.tile(np.arange(orig_shape[1]).reshape(1, -1), (orig_shape[0], 1))
orig_frame = _column_stack(orig_frame) # (nFrames*nPoses,)
orig_pose = _column_stack(orig_pose)
index.extend([('%d/%d/%d' % (cam, vid, orig_pose[i]), cam, vid, orig_frame[i], orig_pose[i])
for i in range(len(orig_frame))])
for pose_ind in range(orig_shape[1]):
sub, seq, _ = meta_data[cam][vid][pose_ind]
calibration_mx = calibs[(sub, seq, cam)]
fx.extend([calibration_mx[0, 0]] * orig_shape[0])
fy.extend([calibration_mx[1, 1]] * orig_shape[0])
cx.extend([calibration_mx[0, 2]] * orig_shape[0])
cy.extend([calibration_mx[1, 2]] * orig_shape[0])
self.poses2d = np.concatenate(poses2d)
self.poses3d = np.concatenate(poses3d)
self.valid_2d_pred = np.concatenate(valid_2d_pred)
self.index = np.rec.array(index, dtype=[('seq', 'U12'), ('cam', 'int32'), ('vid', 'int32'),
('frame', 'int32'), ('pose', 'int32')])
self.fx = np.array(fx, dtype='float32')
self.fy = np.array(fy, dtype='float32')
self.cx = np.array(cx, dtype='float32')
self.cy = np.array(cy, dtype='float32')
assert len(self.poses2d) == len(self.index), len(self.index)
assert len(self.poses2d) == len(self.poses3d)
assert len(self.poses2d) == len(self.index), len(self.index)
assert len(self.poses2d) == len(self.valid_2d_pred), len(self.valid_2d_pred)
assert len(self.poses2d) == len(self.fx), len(self.fx)
assert len(self.poses2d) == len(self.fy), len(self.fy)
assert len(self.poses2d) == len(self.cx), len(self.cx)
assert len(self.poses2d) == len(self.cy), len(self.cy)
def prepare_sample(self, ind):
if isinstance(ind, (list, tuple, np.ndarray)):
width = np.full(len(ind), 2048, dtype='int32')
else:
width = 2048
sample = {'pose2d': self.poses2d[ind], 'pose3d': self.poses3d[ind],
'index': ind, 'valid_pose': self.valid_2d_pred[ind], 'cx': self.cx[ind], 'width': width}
return sample
def pose_grid_from_index(keys):
"""
    Given an array of frame ids, returns for each record the id of its frame and the index of the pose within that frame.
These can be used to reshape arrays containing stacked poses.
Parameters:
keys: ids of frames. It is expected that poses in the same frame are next to each other (in other words,
if keys[i]==keys[j], then for all i<=k<=j keys[k]==keys[i])
"""
different = keys[1:] != keys[:-1]
different = np.concatenate([[True], different]) # True if the current record is on a new frame compared to the previous record
# frame_start will hold the index of the first pose of the current frame
frame_start = -np.ones(len(different), dtype='int64')
frame_start[different] = np.arange(len(different))[different] # if different True, then contains the index, otherwise a -1
# frame_start[i]==-1 if it is on the same frame as the previous pose, so max copies the prev value
frame_start = np.maximum.accumulate(frame_start)
pose_ind = np.arange(len(different)) - frame_start
return keys, pose_ind
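# A short illustrative sketch (not part of the original file): equal frame
# ids must be contiguous, and the second return value enumerates the poses
# within each frame.
#
# >>> keys = np.array([7, 7, 7, 9, 9, 12])
# >>> frame_ind, pose_ind = pose_grid_from_index(keys)
# >>> pose_ind
# array([0, 1, 2, 0, 1, 0])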
def reshape_posearray(frame_ind, pose_ind, array):
"""
Reshapes an array that is aligned with a stacked pose array into one
    that is aligned to a by-frame grouped array. Unused places in the output are NaN-ed out for
    floating point types; for other dtypes they remain zero.
    NOTE: uses a hardcoded number of poses, as in this project the maximum number of people on a frame is 6.
"""
assert np.max(pose_ind) < 6, "In this code the maximum number of poses per frame is hardcoded to 6"
num_frames = np.max(frame_ind) + 1
shape = (num_frames, 6) + array.shape[1:]
result = np.zeros(shape, dtype=array.dtype)
if array.dtype == 'float32' or array.dtype == 'float64':
result = result * np.nan
elif isinstance(array, np.recarray):
result = np.rec.array(result)
result[frame_ind, pose_ind] = array
return result
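# Continuing the sketch above (illustrative, not part of the original file):
# regroup a stacked per-person array into a (num_frames, 6, ...) grid, with
# unused slots NaN for floating point arrays.
#
# >>> vals = np.arange(6, dtype='float32')
# >>> grid = reshape_posearray(frame_ind, pose_ind, vals)   # shape (13, 6)
# >>> grid[7, :3]
# array([0., 1., 2.], dtype=float32)
# >>> np.isnan(grid[7, 3])
# True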
| 27,399 | 42.149606 | 132 | py |
pose_refinement | pose_refinement-master/src/model/pose_refinement.py | import numpy as np
import torch
from scipy import ndimage
from databases.joint_sets import MuPoTSJoints
from training.callbacks import BaseMPJPECalculator
from training.torch_tools import get_optimizer
from util.misc import assert_shape
from util.pose import remove_root, insert_zero_joint
def pose_error(pred, init):
return torch.sum((pred - init) ** 2)
def euc_err(pred, gt):
""" Calculates the euclidean distance between each joint (not squared). """
if isinstance(pred, np.ndarray):
return np.linalg.norm(pred - gt, axis=-1)
else:
return torch.norm(pred - gt, dim=-1)
def zero_velocity_loss(pred, step=1):
return torch.sum((pred[step:] - pred[:-step]) ** 2)
def step_zero_velocity_loss(pred, step=1):
return torch.sum((pred[step:] - pred[:-step]) ** 2, dim=(1, 2))
def const_velocity_loss(pred, step=1):
velocity = pred[step:] - pred[:-step]
return torch.sum((velocity[step:] - velocity[:-step]) ** 2)
def step_const_velocity_loss(pred, step):
velocity = pred[step:] - pred[:-step]
return torch.sum((velocity[step:] - velocity[:-step]) ** 2, dim=(1, 2))
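# Illustrative sketch (not part of the original file): the zero-velocity loss
# penalises any motion, while the constant-velocity loss only penalises
# acceleration, so a uniformly moving pose incurs no const-velocity cost.
#
# >>> t = torch.arange(5, dtype=torch.float32).view(5, 1, 1)
# >>> pose = t.expand(5, 17, 3)        # 5 frames, 17 joints, constant velocity
# >>> zero_velocity_loss(pose).item()  # 4*17*3 unit steps
# 204.0
# >>> const_velocity_loss(pose).item() # no acceleration
# 0.0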
def gmloss(err, a):
""" Geman-McClure cost function"""
square = err * err
return square / (square + a)
def capped_l2(err, a):
""" calculates min(err*2, a) """
if isinstance(err, np.ndarray):
return np.minimum(err * err, a)
else:
err2 = err * err
# err2 = err**2
return torch.where(err2 < a, err2, a)
def capped_l2_euc_err(pred, gt, a):
""" calculates min(err*2, a) """
if isinstance(pred, np.ndarray):
err = np.sum((pred - gt) ** 2, axis=-1)
return np.minimum(err * err, a)
else:
diff = pred - gt
err = torch.sum(diff * diff, dim=-1)
return torch.where(err < a, err, a)
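# Illustrative sketch (not part of the original file): both robust costs act
# like a squared error for small residuals but saturate for outliers, so a
# few bad detections cannot dominate the refinement objective.
#
# >>> err = np.array([0.1, 10.0])
# >>> capped_l2(err, 1.0)   # outlier capped at a
# array([0.01, 1.  ])
# >>> gmloss(err, 1.0)      # smooth saturation towards 1
# array([0.00990099, 0.99009901])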
def abs_to_hiprel(poses, joint_set):
""" Converts an absolute pose into [hi]+relative_pose. """
assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))
root = poses[:, [joint_set.index_of('hip')]].copy()
rel = remove_root(poses, joint_set.index_of('hip'))
return np.concatenate([root, rel], axis=-2)
def add_back_hip(poses, joint_set):
""" Inverse of abs_to_hiprel """
assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))
root = poses[:, [0]].copy()
hip_ind = joint_set.index_of('hip')
result = insert_zero_joint(poses[:, 1:], hip_ind)
result += root
return result
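# Illustrative round-trip sketch (not part of the original file; assumes
# remove_root subtracts the root joint, as implied by the docstrings above):
#
# >>> js = MuPoTSJoints()
# >>> poses = np.random.randn(4, js.NUM_JOINTS, 3).astype('float32')
# >>> rel = abs_to_hiprel(poses, js)           # [hip] + hip-relative joints
# >>> np.allclose(add_back_hip(rel, js), poses)
# True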
class StackedArrayAllMupotsEvaluator(BaseMPJPECalculator):
"""
An evaluator that expects a stacked numpy array as prediction results.
Uses all poses, no masking out invisible poses.
"""
def __init__(self, pred, dataset, ignore_invalid, post_process3d=None, prefix='test'):
self.prediction = pred
self.dataset = dataset
self.ignore_invalid = ignore_invalid
data_3d_mm = {}
for seq in range(1, 21):
inds = self.dataset.index.seq_num == seq
if self.ignore_invalid:
inds = inds & self.dataset.valid_annotations
data_3d_mm[seq] = dataset.poses3d[inds]
super().__init__(data_3d_mm, dataset.pose3d_jointset, post_process3d=post_process3d, csv=None, prefix=prefix)
def pred_and_calc_loss(self, model):
assert model is None, "StackedArrayAllMupotsEvaluator does not handle model evaluation"
preds = {}
losses = {}
for seq in range(1, 21):
inds = self.dataset.index.seq_num == seq
if self.ignore_invalid:
inds = inds & self.dataset.valid_annotations
preds[seq] = self.prediction[inds]
losses[seq] = np.zeros_like(preds[seq])
return losses, preds
def optimize_poses(pred3d, data, _config, **kwargs):
"""
    Runs the pose-refinement optimisation on the given predictions.
    Parameters:
        pred3d: poses predicted by VideoPose, aligned with `data`
        data: dataset describing the sequences the predictions belong to
        _config: dictionary of additional parameters
"""
_config = dict(_config)
_config.update(kwargs)
joint_set = MuPoTSJoints()
seqs = np.unique(data.index.seq)
smoothed_pred = np.zeros_like(pred3d)
losses = []
for seq in seqs:
inds = data.index.seq == seq
poses_init = abs_to_hiprel(pred3d[inds].copy(), joint_set).astype('float32') / 1000
# interpolate invisible poses, if required
poses_pred = poses_init.copy()
kp_score = np.mean(data.poses2d[inds, :, 2], axis=-1)
if _config['smooth_visibility']:
kp_score = ndimage.median_filter(kp_score, 9)
kp_score = torch.from_numpy(kp_score).cuda()
poses_init = torch.from_numpy(poses_init).cuda()
poses_pred = torch.from_numpy(poses_pred).cuda()
scale = torch.ones((len(kp_score), 1, 1))
poses_init.requires_grad = False
poses_pred.requires_grad = True
kp_score.requires_grad = False
scale.requires_grad = False
optimizer = get_optimizer([poses_pred], _config)
for i in range(_config['num_iter']):
# smoothing formulation
if _config['pose_loss'] == 'gm':
pose_loss = torch.sum(kp_score.view(-1, 1, 1) * gmloss(poses_pred - poses_init, _config['gm_alpha']))
elif _config['pose_loss'] == 'capped_l2':
pose_loss = torch.sum(kp_score.view(-1, 1, 1) * capped_l2(poses_pred - poses_init,
torch.tensor(_config['l2_cap']).float().cuda()))
elif _config['pose_loss'] == 'capped_l2_euc_err':
pose_loss = torch.sum(kp_score.view(-1, 1) * capped_l2_euc_err(poses_pred, poses_init,
torch.tensor(_config['l2_cap']).float().cuda()))
else:
                raise NotImplementedError('Unknown pose_loss: ' + _config['pose_loss'])
velocity_loss_hip = torch.sum(globals()[_config['smoothness_loss_hip']](poses_pred[:, [0], :], 1))
step = _config['smoothness_loss_hip_largestep']
vel_loss = globals()[_config['smoothness_loss_hip']](poses_pred[:, [0], :], step)
velocity_loss_hip_large = torch.sum((1 - kp_score[-len(vel_loss):]) * vel_loss)
velocity_loss_rel = torch.sum(globals()[_config['smoothness_loss_rel']](poses_pred[:, 1:, :], 1))
vel_loss = globals()[_config['smoothness_loss_rel']](poses_pred[:, 1:, :], step)
velocity_loss_rel_large = torch.sum((1 - kp_score[-len(vel_loss):]) * vel_loss)
total_loss = pose_loss + _config['smoothness_weight_hip'] * velocity_loss_hip \
+ _config['smoothness_weight_hip_large'] * velocity_loss_hip_large \
+ _config['smoothness_weight_rel'] * velocity_loss_rel \
+ _config['smoothness_weight_rel_large'] * velocity_loss_rel_large
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
poses_init = poses_init.detach().cpu().numpy() * 1000
poses_pred = poses_pred.detach().cpu().numpy() * 1000
poses_init = add_back_hip(poses_init, joint_set)
poses_pred = add_back_hip(poses_pred, joint_set)
smoothed_pred[inds] = poses_pred
losses.append(total_loss.item())
if _config.get('print_loss', False):
print('Avg loss:', np.mean(losses))
return smoothed_pred
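# Illustrative usage sketch (not part of the original file). The keys below
# mirror the ones read in optimize_poses above; the optimizer-related keys
# consumed by get_optimizer are assumptions and depend on its implementation.
#
# config = {'num_iter': 500, 'smooth_visibility': True,
#           'pose_loss': 'capped_l2_euc_err', 'l2_cap': 0.01, 'gm_alpha': 0.1,
#           'smoothness_loss_hip': 'step_zero_velocity_loss',
#           'smoothness_loss_rel': 'step_zero_velocity_loss',
#           'smoothness_loss_hip_largestep': 20,
#           'smoothness_weight_hip': 1.0, 'smoothness_weight_hip_large': 1.0,
#           'smoothness_weight_rel': 1.0, 'smoothness_weight_rel_large': 1.0,
#           'optimizer': 'adam', 'learning_rate': 1e-3}  # optimizer keys: assumption
# refined = optimize_poses(pred3d, dataset, config)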
| 7,511 | 34.267606 | 127 | py |
pose_refinement | pose_refinement-master/src/model/videopose.py | # Based on https://github.com/facebookresearch/VideoPose3D
#
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
class TemporalModelBase(nn.Module):
"""
Do not instantiate this class.
"""
def __init__(self, in_features, num_joints_out,
filter_widths, causal, dropout, channels, layernorm):
super().__init__()
# Validate input
for fw in filter_widths:
assert fw % 2 != 0, 'Only odd filter widths are supported'
self.in_features = in_features
self.num_joints_out = num_joints_out
self.filter_widths = filter_widths
self.layernorm = layernorm
self.channels = channels
self.drop = nn.Dropout(dropout)
self.relu = nn.ReLU(inplace=True)
self.pad = [filter_widths[0] // 2] # list of padding sizes
self.shrink = nn.Conv1d(channels, num_joints_out * 3, 1)
def set_bn_momentum(self, momentum):
# if not self.layernorm:
self.expand_bn.momentum = momentum
for bn in self.layers_bn:
bn.momentum = momentum
def receptive_field(self):
"""
Return the total receptive field of this model as # of frames.
"""
frames = 0
for f in self.pad:
frames += f
return 1 + 2 * frames
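    # Illustrative sketch (not part of the original file): with
    # filter_widths=[3, 3, 3] the paddings are [1, 3, 9], so the receptive
    # field is 1 + 2*(1 + 3 + 9) = 27 frames.
    #
    # >>> TemporalModel(34, 17, [3, 3, 3]).receptive_field()
    # 27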
def create_norm_layer(self, frame_num):
""" frame_num is the spatial dimension """
if self.layernorm:
# return nn.LayerNorm([self.channels, frame_num], elementwise_affine=False)
return nn.InstanceNorm1d(self.channels, momentum=0.1, affine=True)
else:
return nn.BatchNorm1d(self.channels, momentum=0.1)
def total_causal_shift(self):
"""
Return the asymmetric offset for sequence padding.
The returned value is typically 0 if causal convolutions are disabled,
otherwise it is half the receptive field.
"""
frames = self.causal_shift[0]
next_dilation = self.filter_widths[0]
for i in range(1, len(self.filter_widths)):
frames += self.causal_shift[i] * next_dilation
next_dilation *= self.filter_widths[i]
return frames
def forward(self, x):
assert len(x.shape) == 3, x.shape
assert x.shape[-1] == self.in_features
# sz = x.shape
# x = x.view(x.shape[0], x.shape[1], -1) # (nBatch,nFrames,nJoints*2) - unroll a single pose
x = x.permute(0, 2, 1) # (nBatch, nFeatures, nFrames)
x = self._forward_blocks(x)
x = x.permute(0, 2, 1) # (nBatch, nFrames, nFeatures)
# x = x.view(sz[0], -1, self.num_joints_out, 3)
return x
class TemporalModel(TemporalModelBase):
"""
Reference 3D pose estimation model with temporal convolutions.
This implementation can be used for all use-cases.
"""
def __init__(self, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024, dense=False, layernorm=False):
"""
Initialize this model.
Arguments:
        in_features -- total number of input features per frame (e.g. number of joints * 2 for 2D input)
        num_joints_out -- number of output joints (can be different from the input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
"""
super().__init__(in_features, num_joints_out, filter_widths, causal, dropout, channels, layernorm)
self.expand_conv = nn.Conv1d(in_features, channels, filter_widths[0], bias=False)
        conv_num_frames = 1  # spatial dimension of the conv layer output; only correct for [3, 3, 3, ...]-shaped filter_widths
for f in filter_widths:
conv_num_frames *= f
conv_num_frames = conv_num_frames - (filter_widths[0]-1)
self.expand_bn = self.create_norm_layer(conv_num_frames)
layers_conv = []
layers_bn = []
self.causal_shift = [(filter_widths[0]) // 2 if causal else 0] # nonzero only for causal model
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
conv_num_frames = conv_num_frames - (filter_widths[i] - 1) * next_dilation
layers_conv.append(nn.Conv1d(channels, channels,
filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
dilation=next_dilation if not dense else 1,
bias=False))
layers_bn.append(self.create_norm_layer(conv_num_frames))
layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
layers_bn.append(self.create_norm_layer(conv_num_frames))
next_dilation *= filter_widths[i]
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn)
def _forward_blocks(self, x):
x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
for i in range(len(self.pad) - 1):
pad = self.pad[i + 1]
shift = self.causal_shift[i + 1]
res = x[:, :, pad + shift: x.shape[2] - pad + shift]
x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
x = self.shrink(x)
return x
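# Illustrative sketch (not part of the original file; sizes are arbitrary):
# the reference model maps (nBatch, nFrames, in_features) to
# (nBatch, nFrames - receptive_field + 1, num_joints_out * 3).
#
# >>> import torch
# >>> m = TemporalModel(34, 17, [3, 3, 3])   # receptive field = 27
# >>> m.eval()(torch.randn(2, 81, 34)).shape
# torch.Size([2, 55, 51])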
class TemporalModelOptimized1f(TemporalModelBase):
"""
3D pose estimation model optimized for single-frame batching, i.e.
where batches have input length = receptive field, and output length = 1.
This scenario is only used for training when stride == 1.
This implementation replaces dilated convolutions with strided convolutions
to avoid generating unused intermediate results. The weights are interchangeable
with the reference implementation.
"""
def __init__(self, in_features, num_joints_out,
filter_widths, causal=False, dropout=0.25, channels=1024, layernorm=False):
"""
Initialize this model.
Arguments:
        in_features -- total number of input features per frame (e.g. number of joints * 2 for 2D input)
        num_joints_out -- number of output joints (can be different from the input)
filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
dropout -- dropout probability
channels -- number of convolution channels
"""
super().__init__(in_features, num_joints_out, filter_widths, causal, dropout, channels, layernorm)
self.expand_conv = nn.Conv1d(in_features, channels, filter_widths[0], stride=filter_widths[0], bias=False)
        conv_num_frames = 1  # spatial dimension of the conv layer output; only correct for [3, 3, 3, ...]-shaped filter_widths
for f in filter_widths[1:]:
conv_num_frames *= f
self.expand_bn = self.create_norm_layer(conv_num_frames)
layers_conv = []
layers_bn = []
self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
next_dilation = filter_widths[0]
for i in range(1, len(filter_widths)):
self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
conv_num_frames = conv_num_frames // filter_widths[i]
layers_conv.append(nn.Conv1d(channels, channels, filter_widths[i], stride=filter_widths[i], bias=False))
layers_bn.append(self.create_norm_layer(conv_num_frames))
layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
layers_bn.append(self.create_norm_layer(conv_num_frames))
next_dilation *= filter_widths[i]
self.layers_conv = nn.ModuleList(layers_conv)
self.layers_bn = nn.ModuleList(layers_bn)
def _forward_blocks(self, x):
x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
for i in range(len(self.pad) - 1):
res = x[:, :, self.causal_shift[i + 1] + self.filter_widths[i + 1] // 2:: self.filter_widths[i + 1]]
x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
x = self.shrink(x)
return x
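# Illustrative sketch (not part of the original file): the strided variant is
# used for training and its weights are then loaded into the reference model
# for inference, since the two state dicts have identical shapes.
#
# >>> train_model = TemporalModelOptimized1f(34, 17, [3, 3, 3])
# >>> eval_model = TemporalModel(34, 17, [3, 3, 3])
# >>> _ = eval_model.load_state_dict(train_model.state_dict())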
| 9,265 | 40.927602 | 116 | py |
UltraNest | UltraNest-master/docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ultranest documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
import sphinx_rtd_theme
import ultranest
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosectionlabel',
'nbsphinx',
'sphinx_rtd_theme',
'sphinx.ext.napoleon',
'edit_on_github',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'UltraNest'
copyright = u"2014-2022, Johannes Buchner"
author = u"Johannes Buchner"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ultranest.__version__
# The full version, including alpha/beta/rc tags.
release = ultranest.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store',
'_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
autosectionlabel_prefix_document = True
# avoid time-out when running the doc
nbsphinx_timeout = 4 * 60 * 60
nbsphinx_execute_arguments = [
"--InlineBackend.figure_formats={'svg', 'pdf'}",
"--InlineBackend.rc=figure.dpi=96",
]
autodoc_member_order = 'bysource'
autoclass_content = 'both'
edit_on_github_project = 'JohannesBuchner/UltraNest'
edit_on_github_branch = 'master'
#edit_on_github_url
edit_on_github_src = 'docs/' # optional. default: ''
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_baseurl = 'https://johannesbuchner.github.io/UltraNest/'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'style_external_links': True,
# 'vcs_pageview_mode': 'edit',
'style_nav_header_background': '#2980B9',
#'only_logo': False,
}
html_logo = "static/logo.svg"
html_show_sourcelink = False
html_favicon = "static/icon.ico"
html_show_sphinx = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ultranestdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ultranest.tex',
u'UltraNest Documentation',
u'Johannes Buchner', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ultranest',
u'UltraNest Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ultranest',
u'UltraNest Documentation',
author,
'ultranest',
'One line description of project.',
'Miscellaneous'),
]
| 5,899 | 27.780488 | 77 | py |