text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
from typing import Any, Callable, Iterator, List, Optional, Tuple, Union, cast
from d3rlpy.metrics.scorer import AlgoProtocol, _make_batches
from d3rlpy.dataset import Episode
from rl4rs.policy.policy_model import policy_model
WINDOW_SIZE = 1024
# modify from https://github.com/takuseno/d3rlpy/blob/master/d3rlpy/metrics/scorer.py
def soft_opc_scorer(
    return_threshold: float,
) -> Callable[[policy_model, List[Episode]], float]:
    r"""Build a Soft Off-Policy Classification (SoftOPC) scorer.

    The returned callable follows the scikit-learn scorer convention and
    measures the gap between the mean action-value over *successful*
    episodes and the mean action-value over *all* episodes.  For an optimal
    Q-function, success episodes should score higher than the rest.  An
    episode counts as successful when its return is at least
    ``return_threshold``.

    .. math::
        \mathbb{E}_{s, a \sim D_{success}} [Q(s, a)]
        - \mathbb{E}_{s, a \sim D} [Q(s, a)]

    References:
        * `Irpan et al., Off-Policy Evaluation via Off-Policy Classification.
          <https://arxiv.org/abs/1906.01624>`_

    Args:
        return_threshold: return above which an episode is deemed a success.

    Returns:
        scorer function with signature ``(algo, episodes) -> float``.
    """

    def scorer(algo: policy_model, episodes: List[Episode]) -> float:
        success_chunks: List[np.ndarray] = []
        all_chunks: List[np.ndarray] = []
        for episode in episodes:
            succeeded = episode.compute_return() >= return_threshold
            for batch in _make_batches(episode, WINDOW_SIZE, algo.policy.n_frames):
                q_values = cast(
                    np.ndarray, algo.predict_q(batch.observations, batch.actions)
                ).reshape(-1)
                all_chunks.append(q_values)
                if succeeded:
                    success_chunks.append(q_values)
        # np.mean of an empty array yields NaN, matching list-based behavior.
        all_flat = np.concatenate(all_chunks) if all_chunks else np.array([])
        success_flat = (
            np.concatenate(success_chunks) if success_chunks else np.array([])
        )
        return float(np.mean(success_flat) - np.mean(all_flat))

    return scorer
def dynamics_reward_prediction_mean_error_scorer(
    dynamics: policy_model, episodes: List[Episode]
) -> float:
    r"""Returns the *mean signed error* of reward prediction.

    This metric suggests how well the dynamics model generalizes to test
    sets; values far from zero indicate a systematic reward bias.

    NOTE: the original docstring (inherited from d3rlpy's scorer) claimed
    "MSE ... in negative scale", but the computation below is the plain
    mean of signed errors — not squared and not negated:

    .. math::
        \mathbb{E}_{s_t, a_t, r_{t+1} \sim D} [r_{t+1} - r']

    where :math:`r' \sim T(s_t, a_t)`.

    Args:
        dynamics: dynamics model.
        episodes: list of episodes.

    Returns:
        mean signed reward-prediction error (closer to zero is better;
        positive and negative per-step errors can cancel out).
    """
    total_errors = []
    for episode in episodes:
        for batch in _make_batches(episode, WINDOW_SIZE, dynamics.policy.n_frames):
            pred = dynamics.predict_q(batch.observations, batch.actions)
            rewards = batch.next_rewards
            # pred[1] is assumed to be the predicted-reward component of the
            # dynamics output -- TODO(review): confirm against predict_q.
            errors = (rewards - pred[1]).reshape(-1)
            total_errors += errors.tolist()
    # closer to zero is better
    return float(np.mean(total_errors))
def dynamics_reward_prediction_abs_mean_error_scorer(
    dynamics: policy_model, episodes: List[Episode]
) -> float:
    r"""Returns the *mean absolute error* of reward prediction.

    This metric suggests how well the dynamics model generalizes to test
    sets; a large value means the model predicts rewards poorly on unseen
    episodes.

    NOTE: the original docstring (inherited from d3rlpy's scorer) claimed
    "MSE ... in negative scale", but the computation below is the mean of
    absolute errors — not squared and not negated:

    .. math::
        \mathbb{E}_{s_t, a_t, r_{t+1} \sim D} [|r_{t+1} - r'|]

    where :math:`r' \sim T(s_t, a_t)`.

    Args:
        dynamics: dynamics model.
        episodes: list of episodes.

    Returns:
        mean absolute reward-prediction error (smaller is better).
    """
    total_errors = []
    for episode in episodes:
        for batch in _make_batches(episode, WINDOW_SIZE, dynamics.policy.n_frames):
            pred = dynamics.predict_q(batch.observations, batch.actions)
            rewards = batch.next_rewards
            # pred[1] is assumed to be the predicted-reward component of the
            # dynamics output -- TODO(review): confirm against predict_q.
            errors = np.abs(rewards - pred[1]).reshape(-1)
            total_errors += errors.tolist()
    # smaller is better
    return float(np.mean(total_errors))
def discrete_action_match_scorer(
    algo: policy_model, episodes: List[Episode]
) -> float:
    r"""Returns the fraction of identical actions between algorithm and dataset.

    This metric shows how far the greedy policy deviates from the actions
    recorded in the given episodes (discrete action spaces).  When the
    episodes are near-optimal, a larger fraction is better.

    .. math::
        \frac{1}{N} \sum^N \parallel
            \{a_t = \text{argmax}_a Q_\theta (s_t, a)\}

    Args:
        algo: algorithm.
        episodes: list of episodes.

    Returns:
        percentage of identical actions.
    """
    matches: List[bool] = []
    for episode in episodes:
        for batch in _make_batches(episode, WINDOW_SIZE, algo.policy.n_frames):
            greedy_actions = algo.predict_with_mask(batch.observations)
            matches.extend((batch.actions.reshape(-1) == greedy_actions).tolist())
    return float(np.mean(matches))
|
{"hexsha": "7593fa1314c392d3f7661163cd03fe7f2984ca9e", "size": 5551, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl4rs/utils/d3rlpy_scorer.py", "max_stars_repo_name": "fuxiAIlab/RL4RS", "max_stars_repo_head_hexsha": "e26ee5d068eaffd0f04779067614e34e313b1200", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2022-02-17T01:51:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T23:34:01.000Z", "max_issues_repo_path": "rl4rs/utils/d3rlpy_scorer.py", "max_issues_repo_name": "fuxiAIlab/RL4RS", "max_issues_repo_head_hexsha": "e26ee5d068eaffd0f04779067614e34e313b1200", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-15T11:38:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T08:12:46.000Z", "max_forks_repo_path": "rl4rs/utils/d3rlpy_scorer.py", "max_forks_repo_name": "fuxiAIlab/RL4RS", "max_forks_repo_head_hexsha": "e26ee5d068eaffd0f04779067614e34e313b1200", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2022-02-26T18:14:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T21:29:10.000Z", "avg_line_length": 32.8461538462, "max_line_length": 85, "alphanum_fraction": 0.6562781481, "include": true, "reason": "import numpy", "num_tokens": 1281}
|
#pragma once
#include <boost/program_options.hpp>
#include <iostream>
using namespace boost::program_options;
using namespace std;
namespace utils {
    /**
     * Prepare command line arguments processing.
     * @return Description object listing the supported command line options.
     */
    options_description prepareCommandLineOptions ();
    /**
     * Parse command line.
     * @param argumentsCount Number of command line arguments:
     * first parameter in entry point function (main).
     * @param arguments Arguments with parameters:
     * second parameter in entry point function (main).
     * @param description Command line parameters description.
     * @return Map of parsed option values keyed by option name.
     */
    variables_map parseCommandLine (int argumentsCount, char* arguments[],
            options_description& description);
    /**
     * Display help menu.
     * @param out Stream to write help to.
     * @param description Parameters description to describe.
     * @param app Application name for usage text.
     */
    void displayUsage (ostream& out, options_description& description,
            char app[]);
    /**
     * Get parameters' values from variables map.
     * @param variablesMap Variables map to read parameters from it.
     * @param login Reference to write login to it.
     * @param password Reference to write password to it.
     * @param server_name Reference to write server name to it.
     * @return Returns `true' in the case of success reading,
     * returns `false' otherwise.
     */
    bool getParameters (variables_map& variablesMap, string& login,
            string& password, string& server_name);
}
|
{"hexsha": "60f3b6507db65b1ee9a5b20fc9841252ffbd175a", "size": 1589, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "utils/command_line.hpp", "max_stars_repo_name": "char-lie/pop3_client", "max_stars_repo_head_hexsha": "fceb4266443177358e2f0bdf2d3ebc78ab000601", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/command_line.hpp", "max_issues_repo_name": "char-lie/pop3_client", "max_issues_repo_head_hexsha": "fceb4266443177358e2f0bdf2d3ebc78ab000601", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/command_line.hpp", "max_forks_repo_name": "char-lie/pop3_client", "max_forks_repo_head_hexsha": "fceb4266443177358e2f0bdf2d3ebc78ab000601", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9534883721, "max_line_length": 74, "alphanum_fraction": 0.663310258, "num_tokens": 306}
|
import os
import yaml
import time
import shutil
import torch
import random
import argparse
import numpy as np
import matplotlib.pyplot as plt
from torch.utils import data
from tqdm import tqdm
from torchvision.utils import save_image, make_grid
from tifffile import imsave
from functools import reduce
from ptsemseg.models import get_model
from ptsemseg.loss import get_loss_function
from ptsemseg.loader import get_loader
from ptsemseg.utils import get_logger
from ptsemseg.metrics import runningScore, averageMeter
from ptsemseg.augmentations import get_composed_augmentations
from ptsemseg.schedulers import get_scheduler
from ptsemseg.optimizers import get_optimizer
from torch.utils.tensorboard import SummaryWriter
def train(cfg, writer, logger):
    """Run the iteration-based training loop for a segmentation model.

    Seeds RNGs, builds train/val dataloaders, model, optimizer, scheduler and
    loss from *cfg*, optionally resumes from a checkpoint, then trains until
    cfg["training"]["train_iters"] iterations.  At print intervals it logs the
    loss and dumps target/probability-map visualizations; at val intervals it
    evaluates on the validation split and checkpoints the model with the best
    mean IoU into the writer's log directory.

    Args:
        cfg: parsed YAML configuration dict (keys: "data", "model", "training").
        writer: tensorboard SummaryWriter; its logdir also receives image dumps.
        logger: logging.Logger for textual progress.
    """
    # Setup seeds
    torch.manual_seed(cfg.get("seed", 1337))
    torch.cuda.manual_seed(cfg.get("seed", 1337))
    np.random.seed(cfg.get("seed", 1337))
    random.seed(cfg.get("seed", 1337))

    # Setup device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # # Setup Augmentations
    # augmentations = cfg["training"].get("augmentations", None)
    # data_aug = get_composed_augmentations(augmentations)

    # Setup Dataloader
    data_loader = get_loader(cfg["data"]["dataset"])
    data_path = cfg["data"]["path"]

    t_loader = data_loader(
        data_path,
        split=cfg["data"]["train_split"],
        img_size=(cfg["data"]["img_rows"], cfg["data"]["img_cols"]),
    )

    v_loader = data_loader(
        data_path,
        split=cfg["data"]["val_split"],
        img_size=(cfg["data"]["img_rows"], cfg["data"]["img_cols"]),
    )

    n_classes = t_loader.n_classes
    trainloader = data.DataLoader(
        t_loader,
        batch_size=cfg["training"]["batch_size"],
        num_workers=cfg["training"]["n_workers"],
        shuffle=True,
        drop_last=True,
    )

    valloader = data.DataLoader(
        v_loader,
        batch_size=cfg["training"]["batch_size"],
        num_workers=cfg["training"]["n_workers"],
        shuffle=True,
        drop_last=True,
    )

    # Setup Metrics
    running_metrics_val = runningScore(n_classes)

    # Setup Model
    model_orig = get_model(cfg["model"], n_classes).to(device)
    if cfg["training"]["pretrain"] == True:
        # Load a pretrained model
        model_orig.load_pretrained_model(
            model_path="pretrained/pspnet101_cityscapes.caffemodel"
        )
        logger.info("Loaded pretrained model.")
    else:
        # No pretrained model
        logger.info("No pretraining.")
    model = torch.nn.DataParallel(model_orig, device_ids=range(torch.cuda.device_count()))

    # Setup optimizer, lr_scheduler and loss function
    optimizer_cls = get_optimizer(cfg)
    # Every optimizer kwarg except the optimizer's name is forwarded verbatim.
    optimizer_params = {k: v for k, v in cfg["training"]["optimizer"].items() if k != "name"}

    optimizer = optimizer_cls(model.parameters(), **optimizer_params)
    logger.info("Using optimizer {}".format(optimizer))

    scheduler = get_scheduler(optimizer, cfg["training"]["lr_schedule"])

    loss_fn = get_loss_function(cfg)
    logger.info("Using loss {}".format(loss_fn))

    # Optionally resume model/optimizer/scheduler state and iteration counter.
    start_iter = 0
    if cfg["training"]["resume"] is not None:
        if os.path.isfile(cfg["training"]["resume"]):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"])
            )
            checkpoint = torch.load(cfg["training"]["resume"])
            model.load_state_dict(checkpoint["model_state"])
            optimizer.load_state_dict(checkpoint["optimizer_state"])
            scheduler.load_state_dict(checkpoint["scheduler_state"])
            start_iter = checkpoint["epoch"]
            logger.info(
                "Loaded checkpoint '{}' (iter {})".format(
                    cfg["training"]["resume"], checkpoint["epoch"]
                )
            )
        else:
            logger.info("No checkpoint found at '{}'".format(cfg["training"]["resume"]))

    val_loss_meter = averageMeter()
    time_meter = averageMeter()

    ### Visualize model training
    # helper function to show an image
    # (used in the `plot_classes_preds` function below)
    def matplotlib_imshow(data, is_image):
        # NOTE(review): the /4 + 0.5 unnormalization assumes a specific input
        # normalization in the loader — confirm against the dataset transform.
        if is_image: #for images
            data = data / 4 + 0.5 # unnormalize
            npimg = data.numpy()
            plt.imshow(npimg, cmap="gray")
        else: # for labels
            nplbl = data.numpy()
            plt.imshow(t_loader.decode_segmap(nplbl))

    def plot_classes_preds(data, batch_size, iter, is_image=True):
        # One row of subplots, one column per sample in the batch.
        fig = plt.figure(figsize=(12, 48))
        for idx in np.arange(batch_size):
            ax = fig.add_subplot(1, batch_size, idx+1, xticks=[], yticks=[])
            matplotlib_imshow(data[idx], is_image)
            ax.set_title("Iteration Number "+str(iter))
        return fig

    best_iou = -100.0
    #best_val_loss = -100.0
    i = start_iter
    flag = True

    #Check if params trainable
    print('CHECK PARAMETER TRAINING:')
    for name, param in model.named_parameters():
        if param.requires_grad == False:
            print(name, param.data)

    while i <= cfg["training"]["train_iters"] and flag:
        for (images_orig, labels_orig, weights_orig, nuc_weights_orig) in trainloader:
            i += 1
            start_ts = time.time()
            # NOTE(review): scheduler.step() before optimizer.step() is the
            # pre-1.1 PyTorch ordering; confirm intended with this codebase.
            scheduler.step()
            model.train() #convert model into training mode
            images = images_orig.to(device)
            labels = labels_orig.to(device)
            weights = weights_orig.to(device)
            nuc_weights = nuc_weights_orig.to(device)

            optimizer.zero_grad()
            outputs = model(images)

            # Transform output to calculate meaningful loss
            out = outputs[0]
            # Resize output of network to same size as labels
            target_size = (labels.size()[1],labels.size()[2])
            out = torch.nn.functional.interpolate(out,size=target_size,mode='bicubic')

            # Multiply weights by loss output
            loss = loss_fn(input=out, target=labels)
            loss = torch.mul(loss,weights) # add contour weights
            loss = torch.mul(loss,nuc_weights) # add nuclei weights
            loss = loss.mean() # average over all pixels to obtain scaler for loss

            loss.backward() # computes gradients over network
            optimizer.step() #updates parameters

            time_meter.update(time.time() - start_ts)

            if (i + 1) % cfg["training"]["print_interval"] == 0: # frequency with which visualize training update
                fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                print_str = fmt_str.format(
                    i + 1,
                    cfg["training"]["train_iters"],
                    loss.item(),
                    time_meter.avg / cfg["training"]["batch_size"],
                )

                #Show mini-batches during training
                # #Visualize only DAPI
                # writer.add_figure('Inputs',
                #             plot_classes_preds(images_orig.squeeze(), cfg["training"]["batch_size"], i, True),
                #             global_step=i)
                # writer.add_figure('Targets',
                #             plot_classes_preds(labels_orig, cfg["training"]["batch_size"], i, False),
                #             global_step=i)

                #Take max across classes (of probability maps) and assign class label to visualize semantic map
                #1)
                out_orig = torch.nn.functional.softmax(outputs[0],dim=1).max(1).indices.cpu()
                #out_orig = out_orig.cpu().detach()
                #2)
                #out_orig = torch.argmax(outputs[0],dim=1)
                #3)
                #out_orig = outputs[0].data.max(1)[1].cpu()

                # #Visualize predictions
                # writer.add_figure('Predictions',
                #             plot_classes_preds(out_orig, cfg["training"]["batch_size"], i, False),
                #             global_step=i)

                #Save probability map
                prob_maps_folder = os.path.join(writer.file_writer.get_logdir(),"probability_maps")
                os.makedirs(prob_maps_folder,exist_ok=True)

                #Downsample original images to target size for visualization
                images = torch.nn.functional.interpolate(images,size=target_size,mode='bicubic')
                # Per-class probability channels: 0=background, 1=contours, 2=nuclei.
                out = torch.nn.functional.softmax(out,dim=1)
                contours = (out[:,1,:,:]).unsqueeze(dim=1)
                nuclei = (out[:,2,:,:]).unsqueeze(dim=1)
                background = (out[:,0,:,:]).unsqueeze(dim=1)
                #imageTensor = torch.cat((images, contours, nuclei, background),dim=0)
                # Save images side by side: nrow is how many images per row
                #save_image(make_grid(imageTensor, nrow=2), os.path.join(prob_maps_folder,"Prob_maps_%d.tif" % i))

                # Targets visualization below
                nplbl = labels_orig.numpy()
                targets = [] #each element is RGB target label in batch
                for bs in np.arange(cfg["training"]["batch_size"]):
                    target_bs = t_loader.decode_segmap(nplbl[bs])
                    target_bs = 255*target_bs
                    target_bs = target_bs.astype('uint8')
                    target_bs = torch.from_numpy(target_bs)
                    target_bs = target_bs.unsqueeze(dim=0)
                    targets.append(target_bs) #uint8 labels, shape (N,N,3)
                target = reduce(lambda x,y: torch.cat((x,y), dim = 0), targets)
                target = target.permute(0,3,1,2) # size=(Batch, Channels, N, N)
                target = target.type(torch.FloatTensor)
                save_image(make_grid(target, nrow=cfg["training"]["batch_size"]), os.path.join(prob_maps_folder,"Target_labels_%d.tif" % i))

                # Weights visualization below:
                #wgts = weights_orig.type(torch.FloatTensor)
                #save_image(make_grid(wgts, nrow=2), os.path.join(prob_maps_folder,"Weights_%d.tif" % i))

                # Probability maps visualization below
                t1 = []
                t2 = []
                t3 = []
                t4 = []
                # Normalize individual images in batch
                for bs in np.arange(cfg["training"]["batch_size"]):
                    t1.append( (images[bs][0] - images[bs][0].min()) / (images[bs][0].max() - images[bs][0].min()) )
                    t2.append( contours[bs] )
                    t3.append( nuclei[bs] )
                    t4.append( background[bs] )
                t1 = [torch.unsqueeze(elem,dim=0) for elem in t1] #expand dim=0 for images in batch
                # Convert normalized batch to Tensor
                tensor1 = torch.cat((t1),dim=0)
                tensor2 = torch.cat((t2),dim=0)
                tensor3 = torch.cat((t3),dim=0)
                tensor4 = torch.cat((t4),dim=0)
                tTensor = torch.cat((tensor1, tensor2, tensor3, tensor4),dim=0)
                tTensor = tTensor.unsqueeze(dim=1)
                save_image(make_grid(tTensor, nrow=cfg["training"]["batch_size"]), os.path.join(prob_maps_folder,"Prob_maps_%d.tif" % i), normalize=False)

                logger.info(print_str)
                writer.add_scalar("loss/train_loss", loss.item(), i + 1) # adds value to history (title, loss, iter index)
                time_meter.reset()

            if (i + 1) % cfg["training"]["val_interval"] == 0 or (i + 1) == cfg["training"][
                "train_iters"
            ]: # evaluate model on validation set at these intervals
                model.eval() # evaluate mode for model
                with torch.no_grad():
                    for i_val, (images_val, labels_val, weights_val, nuc_weights_val) in tqdm(enumerate(valloader)):
                        images_val = images_val.to(device)
                        labels_val = labels_val.to(device)
                        weights_val = weights_val.to(device)
                        nuc_weights_val = nuc_weights_val.to(device)

                        outputs_val = model(images_val)
                        # Resize output of network to same size as labels
                        target_val_size = (labels_val.size()[1],labels_val.size()[2])
                        outputs_val = torch.nn.functional.interpolate(outputs_val,size=target_val_size,mode='bicubic')
                        # Multiply weights by loss output
                        val_loss = loss_fn(input=outputs_val, target=labels_val)
                        val_loss = torch.mul(val_loss,weights_val)
                        val_loss = torch.mul(val_loss,nuc_weights_val)
                        val_loss = val_loss.mean() # average over all pixels to obtain scaler for loss

                        outputs_val = torch.nn.functional.softmax(outputs_val,dim=1)

                        #Save probability map
                        val_prob_maps_folder = os.path.join(writer.file_writer.get_logdir(),"val_probability_maps")
                        os.makedirs(val_prob_maps_folder,exist_ok=True)

                        #Downsample original images to target size for visualization
                        images_val = torch.nn.functional.interpolate(images_val,size=target_val_size,mode='bicubic')
                        contours_val = (outputs_val[:,1,:,:]).unsqueeze(dim=1)
                        nuclei_val = (outputs_val[:,2,:,:]).unsqueeze(dim=1)
                        background_val = (outputs_val[:,0,:,:]).unsqueeze(dim=1)

                        # Targets visualization below
                        nplbl_val = labels_val.cpu().numpy()
                        targets_val = [] #each element is RGB target label in batch
                        for bs in np.arange(cfg["training"]["batch_size"]):
                            target_bs = v_loader.decode_segmap(nplbl_val[bs])
                            target_bs = 255*target_bs
                            target_bs = target_bs.astype('uint8')
                            target_bs = torch.from_numpy(target_bs)
                            target_bs = target_bs.unsqueeze(dim=0)
                            targets_val.append(target_bs) #uint8 labels, shape (N,N,3)
                        target_val = reduce(lambda x,y: torch.cat((x,y), dim = 0), targets_val)
                        target_val = target_val.permute(0,3,1,2) # size=(Batch, Channels, N, N)
                        target_val = target_val.type(torch.FloatTensor)
                        save_image(make_grid(target_val, nrow=cfg["training"]["batch_size"]), os.path.join(val_prob_maps_folder,"Target_labels_%d_val_%d.tif" % (i,i_val)))

                        # Weights visualization below:
                        #wgts_val = weights_val.type(torch.FloatTensor)
                        #save_image(make_grid(wgts_val, nrow=2), os.path.join(val_prob_maps_folder,"Weights_val_%d.tif" % i_val))

                        # Probability maps visualization below
                        t1_val = []
                        t2_val = []
                        t3_val = []
                        t4_val = []
                        # Normalize individual images in batch
                        for bs in np.arange(cfg["training"]["batch_size"]):
                            t1_val.append( (images_val[bs][0] - images_val[bs][0].min()) / (images_val[bs][0].max() - images_val[bs][0].min()) )
                            t2_val.append( contours_val[bs] )
                            t3_val.append( nuclei_val[bs] )
                            t4_val.append( background_val[bs] )
                        t1_val = [torch.unsqueeze(elem,dim=0) for elem in t1_val] #expand dim=0 for images_val in batch
                        # Convert normalized batch to Tensor
                        tensor1_val = torch.cat((t1_val),dim=0)
                        tensor2_val = torch.cat((t2_val),dim=0)
                        tensor3_val = torch.cat((t3_val),dim=0)
                        tensor4_val = torch.cat((t4_val),dim=0)
                        tTensor_val = torch.cat((tensor1_val, tensor2_val, tensor3_val, tensor4_val),dim=0)
                        tTensor_val = tTensor_val.unsqueeze(dim=1)
                        save_image(make_grid(tTensor_val, nrow=cfg["training"]["batch_size"]), os.path.join(val_prob_maps_folder,"Prob_maps_%d_val_%d.tif" % (i,i_val)), normalize=False)

                        pred = outputs_val.data.max(1)[1].cpu().numpy()
                        gt = labels_val.data.cpu().numpy()

                        running_metrics_val.update(gt, pred)
                        val_loss_meter.update(val_loss.item())

                writer.add_scalar("loss/val_loss", val_loss_meter.avg, i + 1)
                logger.info("Iter %d Loss: %.4f" % (i + 1, val_loss_meter.avg))

                ### Save best validation loss model
                # if val_loss_meter.avg >= best_val_loss:
                #     best_val_loss = val_loss_meter.avg
                #     state = {
                #         "epoch": i + 1,
                #         "model_state": model.state_dict(),
                #         "optimizer_state": optimizer.state_dict(),
                #         "scheduler_state": scheduler.state_dict(),
                #         "best_val_loss": best_val_loss,
                #     }
                #     save_path = os.path.join(
                #         writer.file_writer.get_logdir(),
                #         "{}_{}_best_model.pkl".format(cfg["model"]["arch"], cfg["data"]["dataset"]),
                #     )
                #     torch.save(state, save_path)
                ###

                score, class_iou = running_metrics_val.get_scores() # best model chosen via IoU
                for k, v in score.items():
                    print(k, v)
                    logger.info("{}: {}".format(k, v))
                    writer.add_scalar("val_metrics/{}".format(k), v, i + 1)

                for k, v in class_iou.items():
                    logger.info("{}: {}".format(k, v))
                    writer.add_scalar("val_metrics/cls_{}".format(k), v, i + 1)

                val_loss_meter.reset()
                running_metrics_val.reset()

                ### Save best mean IoU model
                if score["Mean IoU : \t"] >= best_iou:
                    best_iou = score["Mean IoU : \t"]
                    state = {
                        "epoch": i + 1,
                        "model_state": model.state_dict(),
                        "optimizer_state": optimizer.state_dict(),
                        "scheduler_state": scheduler.state_dict(),
                        "best_iou": best_iou,
                    }
                    save_path = os.path.join(
                        writer.file_writer.get_logdir(),
                        "{}_{}_best_model.pkl".format(cfg["model"]["arch"], cfg["data"]["dataset"]),
                    )
                    torch.save(state, save_path)
                ###

            if (i + 1) == cfg["training"]["train_iters"]:
                flag = False
                break
if __name__ == "__main__":
    # Parse the config path, create a uniquely-numbered run directory under
    # runs/<config-name>/, snapshot the config there, and kick off training.
    parser = argparse.ArgumentParser(description="config")
    parser.add_argument(
        "--config",
        nargs="?",
        type=str,
        default="configs/psp_segmenter.yml",
        help="Configuration file to use",
    )

    args = parser.parse_args()

    with open(args.config) as fp:
        # FIX: yaml.load() without an explicit Loader is unsafe on untrusted
        # input and raises TypeError on PyYAML >= 6. safe_load parses plain
        # config data without instantiating arbitrary Python objects.
        cfg = yaml.safe_load(fp)

    run_id = random.randint(1, 100000)
    logdir = os.path.join("runs", os.path.basename(args.config)[:-4], str(run_id))
    writer = SummaryWriter(log_dir=logdir)

    print("RUNDIR: {}".format(logdir))
    print("IMAGE SHAPE: {}".format(cfg["data"]["img_rows"]))
    print("BATCH SIZE: {}".format(cfg["training"]["batch_size"]))
    print("LEARNING RATE: {}".format(cfg["training"]["optimizer"]["lr"]))
    shutil.copy(args.config, logdir)

    logger = get_logger(logdir)
    logger.info("Here we go!")

    train(cfg, writer, logger)
|
{"hexsha": "c1dfeae9590b533517225c56a514a8ec5a87ac6a", "size": 20322, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/UnMICST-P/train.py", "max_stars_repo_name": "Yu-AnChen/UnMICST-info", "max_stars_repo_head_hexsha": "9bcc8a408f3c0c8fab2f58778152ae47ee10ad59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/UnMICST-P/train.py", "max_issues_repo_name": "Yu-AnChen/UnMICST-info", "max_issues_repo_head_hexsha": "9bcc8a408f3c0c8fab2f58778152ae47ee10ad59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/UnMICST-P/train.py", "max_forks_repo_name": "Yu-AnChen/UnMICST-info", "max_forks_repo_head_hexsha": "9bcc8a408f3c0c8fab2f58778152ae47ee10ad59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3304904051, "max_line_length": 185, "alphanum_fraction": 0.5447298494, "include": true, "reason": "import numpy", "num_tokens": 4287}
|
# Convert the binary CIFAR-10 batches into HDF5 datasets ("train.hdf5",
# "test.hdf5"), shuffling within each batch and subtracting the training-set
# mean image from both splits.
using HDF5

datasets = [("train", ["data_batch_$i.bin" for i in 1:5]),
            ("test", ["test_batch.bin"])]

const width = 32
const height = 32
const channels = 3
const batch_size = 10000

# Mean image accumulated as a running mean over the training batches only.
mean_model = zeros(Float32, width, height, channels, 1)

for (key, sources) in datasets
  h5open("$key.hdf5", "w") do h5
    dset_data = d_create(h5, "data", datatype(Float32),
        dataspace(width, height, channels, batch_size * length(sources)))
    dset_label = d_create(h5, "label", datatype(Float32),
        dataspace(1, batch_size * length(sources)))
    for n = 1:length(sources)
      open("cifar-10-batches-bin/$(sources[n])") do f
        println("Processing $(sources[n])...")
        # Each record is 1 label byte followed by width*height*channels pixels.
        mat = readbytes(f, (1 + width*height*channels) * batch_size)
        mat = reshape(mat, 1+width*height*channels, batch_size)
        # random shuffle within batch
        rp = randperm(batch_size)
        label = convert(Array{Float32},mat[1, rp])
        # If I divide by 256 as in the MNIST example, then
        # training on the giving DNN gives me random
        # performance: objective function not changing,
        # and test performance is always 10%...
        # The same results could be observed when
        # running Caffe, as our HDF5 dataset is
        # compatible with Caffe.
        img = convert(Array{Float32},mat[2:end, rp])
        img = reshape(img, width, height, channels, batch_size)
        if key == "train"
          # only accumulate mean from the training data.
          # BUG FIX: the previous mean covers (n-1)*batch_size samples, so it
          # must be re-weighted by that count before folding in the new batch
          # sum. The original code multiplied it by batch_size only, which
          # under-weights the history for every batch from n = 3 onward.
          global mean_model
          mean_model = ((n-1)*batch_size*mean_model + sum(img, 4)) / (n*batch_size)
        end
        index = (n-1)*batch_size+1:n*batch_size
        dset_data[:,:,:,index] = img
        dset_label[:,index] = label
      end
    end

    # but apply mean subtraction for both training and testing data
    println("Subtracting the mean...")
    for n = 1:length(sources)
      index = (n-1)*batch_size+1:n*batch_size
      dset_data[:,:,:,index] = dset_data[:,:,:,index] .- mean_model
    end
  end
end
|
{"hexsha": "fa6171de36b914b2e46a9efd8b492a5c422f89ab", "size": 2067, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/cifar10/convert.jl", "max_stars_repo_name": "baajur/Mocha.jl", "max_stars_repo_head_hexsha": "5e15b882d7dd615b0c5159bb6fde2cc040b2d8ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1397, "max_stars_repo_stars_event_min_datetime": "2015-01-04T02:35:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T19:26:14.000Z", "max_issues_repo_path": "examples/cifar10/convert.jl", "max_issues_repo_name": "baajur/Mocha.jl", "max_issues_repo_head_hexsha": "5e15b882d7dd615b0c5159bb6fde2cc040b2d8ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 197, "max_issues_repo_issues_event_min_datetime": "2015-01-09T20:15:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T17:46:39.000Z", "max_forks_repo_path": "examples/cifar10/convert.jl", "max_forks_repo_name": "baajur/Mocha.jl", "max_forks_repo_head_hexsha": "5e15b882d7dd615b0c5159bb6fde2cc040b2d8ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 357, "max_forks_repo_forks_event_min_datetime": "2015-01-05T00:40:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T15:01:20.000Z", "avg_line_length": 32.8095238095, "max_line_length": 77, "alphanum_fraction": 0.6178035801, "num_tokens": 565}
|
\section{Nutrition}
It is my genuine belief that nutrition is of the utmost importance. From personal experience, I have felt what bad nutrition can do to the way you think. Since what we eat is directly correlated with the compounds our bodies are able to produce, from amino acids to hormones, it becomes clear that good nutrition can be key to proper development.
Moreover, given the importance of good habits, Nutrition is one of the most important aspects to know early in life and apply everywhere.
This will certainly be a hard chapter to write but is certainly a distinguishing factor since barely anyone pays close attention to their nutrition. So far, only very high level athletes and a very small percentage of vegans actually do the research and testing.
Therefore, this will be a chapter that will be written over the years.
A few general guidelines and their explanation:
\begin{itemize}
\item Don't `overeat'. Eat until you are almost full and then have a fruit. The signal of fullness takes a bit to get to the brain... if we eat too fast we may be eating much more than we need.
\item From the pre-historic man %INSERT SOME DATEEEEES and start a chapter from here..
we know that we don't have to eat meat every day. Humankind has survived very well eating almost anything.
\item
\end{itemize}
Nutrition has an impact not only in yourself but in the environment around you.
Today, diets such as vegetarianism or veganism seem to be good for the environment and seem to have a moral background as well.
This section's objective is to give a proper guide on the purpose of every nutrient; on what one should eat and how often, in terms of macro- and micro-nutrients and in terms of actual foods; and finally to demystify the rumours related to the morality of the omnivore diet versus the vegan diet and their impacts on the environment.
\subsection{Categorize: Nutrients vs Vitamins vs what else?}
What are the main categories?
\subsection{Nutrients}
\subsection{Vitamins}
\subsection{Amino Acids}
\subsection{Macros: active vs non-active}
If someone wants the energy to perform 2 hours of exercise every day and proper muscular maintenance, it is natural that they need a slightly different diet from someone who doesn't exercise at all. This section will present the main differences, most evident in protein intake.
|
{"hexsha": "ad7b9dd6794ecb95dc6d2d6320c3c4bab2be9add", "size": 2354, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/Nutrition.tex", "max_stars_repo_name": "jmoraispk/TheDocument", "max_stars_repo_head_hexsha": "ef14eaaec34cb09a0945ff4647e87ff77eac6890", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/Nutrition.tex", "max_issues_repo_name": "jmoraispk/TheDocument", "max_issues_repo_head_hexsha": "ef14eaaec34cb09a0945ff4647e87ff77eac6890", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/Nutrition.tex", "max_forks_repo_name": "jmoraispk/TheDocument", "max_forks_repo_head_hexsha": "ef14eaaec34cb09a0945ff4647e87ff77eac6890", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.6216216216, "max_line_length": 342, "alphanum_fraction": 0.7892948173, "num_tokens": 501}
|
import logging
import os
import pickle
import tempfile
import shutil
import operator
import pandas as pd
import numpy as np
def loadData(currency, interval):
    """Load OHLCV candle data for *currency* at *interval* from the data dir.

    Reads ``data/<currency>e<interval>.csv`` relative to this module's
    grandparent directory, merging the date and time columns into a single
    datetime index and casting all price/volume columns to float.
    """
    logging.info('Data: loading {0} at {1}...'.format(currency, interval))
    base_dir = os.path.realpath(os.path.dirname(__file__))
    csv_path = r'{0}/../../data/{1}e{2}.csv'.format(base_dir, currency, interval)
    df = pd.read_csv(
        csv_path,
        names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
        parse_dates=[[0, 1]],
        index_col=0,
    ).astype(float)
    logging.info('Data: {0} rows loaded'.format(len(df)))
    return df
def loadQ(currency, interval):
    """Load the pickled Q table for a currency/interval pair.

    Returns an empty dict (and logs an error) when the model file is
    missing or truncated, so callers can always start from a valid table.
    """
    logging.info('Q: loading...')
    path = '{0}/models/{1}_{2}.q'.format(
        os.path.realpath(os.path.dirname(__file__)), currency, interval)
    try:
        with open(path, 'rb') as handle:
            q = pickle.load(handle)
    except (IOError, EOFError) as e:
        # missing/corrupt model is expected on first run: fall back to empty
        logging.error('Could not load Q for {0}'.format(currency))
        q = {}
    logging.info('Q: loaded {0}'.format(len(q)))
    return q
def saveQ(currency, interval, q):
    """Atomically persist the Q table for a currency/interval pair.

    Pickles into a temporary file in the same directory and renames it
    over the target, so a crash mid-write never leaves a truncated model.
    """
    logging.info('Q: saving...')
    filename = '{0}/models/{1}_{2}.q'.format(
        os.path.realpath(os.path.dirname(__file__)), currency, interval)
    with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(filename), delete=False) as tmp:
        pickle.dump(q, tmp)
        tmp_path = tmp.name
    # rename is atomic on POSIX when source and target share a filesystem
    os.rename(tmp_path, filename)
    logging.info('Q: saved {0}'.format(len(q)))
def getBackgroundKnowledge(df, periods):
    """Add moving-average feature columns to *df* in place and return it.

    For each period ``x`` in *periods*, adds boolean columns based on the
    rolling mean of the HLC typical price: ``ma_x_bullish`` (MA rising),
    ``ma_x_divergence`` (MA slope growing), ``ma_x_magnitude`` (slope above
    its own mean). For each pair ``x < y`` adds the analogous crossover
    columns comparing the two MAs.

    Fix: ``pd.rolling_mean`` was removed from pandas long ago — use the
    ``Series.rolling(w).mean()`` API instead (identical values: default
    ``min_periods`` equals the window in both APIs).
    """
    logging.info('Background knowledge: retrieving...')
    # HLC "typical price", vectorized (same values as the old row-wise apply)
    hlc = (df['high'] + df['low'] + df['close']) / 3
    # compute each period's rolling mean once instead of re-deriving it
    # inside the nested crossover loop
    avgs = {x: hlc.rolling(x).mean() for x in periods}
    for x in periods:
        avg_x = avgs[x]
        avg_x_yesterday = avg_x.shift(+1)
        df['ma_{0}_bullish'.format(x)] = avg_x >= avg_x_yesterday
        avg_x_delta = abs(avg_x - avg_x_yesterday)
        avg_x_delta_yesterday = avg_x_delta.shift(+1)
        df['ma_{0}_divergence'.format(x)] = avg_x_delta >= avg_x_delta_yesterday
        df['ma_{0}_magnitude'.format(x)] = avg_x_delta > avg_x_delta.mean()
    for x in periods:
        for y in periods:
            if y <= x:
                continue
            logging.info('MA for {0} and {1}'.format(x, y))
            avg_x = avgs[x]
            avg_y = avgs[y]
            df['ma_{0}_crossover_{1}_bullish'.format(x, y)] = avg_x >= avg_y
            ma_diff = avg_x - avg_y
            ma_diff_yesterday = avg_x.shift(+1) - avg_y.shift(+1)
            df['ma_{0}_crossover_{1}_divergence'.format(x, y)] = ma_diff >= ma_diff_yesterday
            df['ma_{0}_crossover_{1}_magnitude'.format(x, y)] = ma_diff >= ma_diff.mean()
    logging.info('Background knowledge: retrieved')
    return df
def copyBatch():
    """Copy every regular file from the models directory into the batch directory."""
    here = os.path.realpath(os.path.dirname(__file__))
    src = '{0}/models'.format(here)
    dest = '{0}/batch'.format(here)
    for file_name in os.listdir(src):
        full_file_name = os.path.join(src, file_name)
        if not os.path.isfile(full_file_name):
            # skip subdirectories and other non-file entries
            continue
        shutil.copy(full_file_name, dest)
def summarizeActions(q):
    """Log a per-action summary of the Q table, sorted by average value.

    *q* maps ``(state, action)`` tuples to float values. For each action
    we keep a halving running average of its values and a count of states,
    then log one line per action in ascending order of the average.

    Fix: ``dict.iteritems()`` is Python-2-only and raises AttributeError
    on Python 3; ``dict.items()`` behaves identically on both.
    """
    summary_total = {}
    summary_count = {}
    for (state, action), value in q.items():
        # halving running average: each new value gets weight 1/2
        action_total = summary_total.get(action, 0)
        action_total += value
        action_total /= 2
        summary_total[action] = action_total
        action_count = summary_count.get(action, 0)
        action_count += 1
        summary_count[action] = action_count
    summary_sorted = sorted(summary_total.items(), key=operator.itemgetter(1))
    for action, info in summary_sorted:
        # error level is used deliberately so the summary shows at default config
        logging.error('{0:10s} after {2} states with {1:.4f} avg'.format(action, info, summary_count[action]))
def calculateActions(min_trail):
    """Return paired buy/sell action labels for trailing-stop sizes.

    Generates ``'buy-n'``/``'sell-n'`` for n from *min_trail* up to
    *min_trail* + 35 inclusive in steps of 5 (8 sizes, 16 actions).

    Fix: ``xrange`` is Python-2-only and raises NameError on Python 3;
    ``range`` behaves identically here on both versions.
    """
    actions = []
    for n in range(min_trail, min_trail + 36, 5):
        actions.append('buy-{0}'.format(n))
        actions.append('sell-{0}'.format(n))
    return actions
|
{"hexsha": "9c74b89a295f131310f7100de41d93a0bfb6249d", "size": 4034, "ext": "py", "lang": "Python", "max_stars_repo_path": "16_rf_ma/stable-35-6/main.py", "max_stars_repo_name": "Tjorriemorrie/trading", "max_stars_repo_head_hexsha": "aafa15a6c564bfa86948ab30e33d554172b38a3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-07-02T09:06:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-11T04:23:14.000Z", "max_issues_repo_path": "16_rf_ma/stable-35-6/main.py", "max_issues_repo_name": "Tjorriemorrie/trading", "max_issues_repo_head_hexsha": "aafa15a6c564bfa86948ab30e33d554172b38a3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-31T19:14:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T23:34:32.000Z", "max_forks_repo_path": "16_rf_ma/stable-35-6/main.py", "max_forks_repo_name": "Tjorriemorrie/trading", "max_forks_repo_head_hexsha": "aafa15a6c564bfa86948ab30e33d554172b38a3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-03-29T07:51:16.000Z", "max_forks_repo_forks_event_max_datetime": "2016-10-30T04:53:58.000Z", "avg_line_length": 34.775862069, "max_line_length": 125, "alphanum_fraction": 0.6234506693, "include": true, "reason": "import numpy", "num_tokens": 1059}
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_DF_Controller.py
Organization: RISC Lab, Utah State University
Notes:
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#=====================#
#    Gain Matrices    #
#=====================#
# LQR gain matrix: maps the 7-element error state
# [x, y, z, xdot, ydot, zdot, psi] to the 4 control channels
# [phi, theta, thrust, yaw-rate] in Basic_Controller.
K = np.matrix([[ 1.8, 0, 0, 1.4, 0, 0, 0],\
               [ 0, 1.8, 0, 0, 1.4, 0, 0],\
               [ 0, 0, 3, 0, 0, 5, 0],\
               [ 0, 0, 0, 0, 0, 0,.5]])
#========================#
#        Globals         #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
# scale/trim constants converting desired angles to autopilot command units
# (calibration values — presumably determined experimentally; confirm units)
phi_scale = 3.053261127645355
phi_trim = 0.0#0.058941904209906
theta_scale = 3.815398742249453
theta_trim = 0.0#-0.091216767651723
ctrl_status = False
# latest motion-capture state and trajectory reference, filled by callbacks
states = Cortex()
states.Obj = [States()]*1
traj = Trajectories()
traj.Obj = [Trajectory()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz control-loop frequency
image = 0
start_time = 0
#==================#
#    Publishers    #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
    """Cortex subscriber callback: cache the latest state message globally."""
    global states
    states = S
#=====================#
# Get Trajectory #
#=====================#
def GetTraj(S):
    """Trajectory subscriber callback: cache the latest reference globally."""
    global traj
    traj = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
    """Battery-status callback: refresh the nominal hover-thrust estimate.

    Maps the remaining battery percentage to the thrust needed to hover
    via a fourth-order polynomial fit (determined 11 May 2015 by
    Spencer Maughan and Ishmaal Erekson).
    """
    global nominal_thrust
    batt = S.battery_remaining
    # fourth-order fit coefficients
    c0 = 0.491674747062374
    c1 = -0.024809293286468
    c2 = 0.000662710609466
    c3 = -0.000008160593348
    c4 = 0.000000033699651
    nominal_thrust = c0+c1*batt+c2*batt**2+c3*batt**3+c4*batt**4
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
    """Controller-status callback: cache the Bool message payload globally."""
    global ctrl_status
    ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
    """Compute and publish attitude/thrust commands for one IRIS quad.

    Reads the latest Cortex state and trajectory reference from module
    globals (filled by the subscriber callbacks), forms an LQR control
    on the 7-element error state, rotates it into the vehicle-1 frame,
    normalizes by thrust, and publishes a Controls message on /controls.
    Publishes nothing when the vehicle is not visible to Cortex.
    """
    global states, euler_max, max_yaw_rate, pub_ctrl,K,traj
    Ctrl = Controls()
    Ctrl.Obj = [Control()]*1
    Ctrl.header.stamp = states.header.stamp
    g = 9.80665 # average value of earth's gravitational constant m/s^2
    m = 1.282 # IRIS mass in kg
    #===================================#
    #    Get State Trajectory Errors    #
    #===================================#
    if states.Obj[0].visible:
        # error state: position (0-2), velocity (3-5), yaw (6).
        # Cortex yaw arrives in degrees, hence the pi/180 conversion.
        X = np.asmatrix(np.zeros((7,1)))
        X[0] = traj.Obj[0].x-states.Obj[0].x
        X[1] = traj.Obj[0].y-states.Obj[0].y
        X[2] = traj.Obj[0].z-states.Obj[0].z
        X[3] = traj.Obj[0].xdot-states.Obj[0].u
        X[4] = traj.Obj[0].ydot-states.Obj[0].v
        X[5] = traj.Obj[0].zdot-states.Obj[0].w
        X[6] = traj.Obj[0].psi-states.Obj[0].psi*np.pi/180
        #============================================#
        #     Differential Flatness Control Input    #
        #============================================#
        # LQR input
        utilde = -K*X
        # required input (feed-forward placeholder — all zeros here);
        # gravity is subtracted on the third (vertical) channel
        u_r = np.asmatrix(np.zeros((4,1)))
        u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])
        #==================================#
        #     Rotate to Vehicle 1 Frame    #
        #==================================#
        psi = states.Obj[0].psi*np.pi/180
        rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
        # Cart flips y and z axes (NED-style sign convention — confirm frames)
        Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
        u[:-1] = Cart*rotZ*u[:-1]
        #===================================#
        #     Normalize given the Thrust    #
        #===================================#
        # T is the magnitude of the translational command; dividing the
        # first three channels by -T leaves unit direction components
        T = sqrt(u[0:3].T*u[0:3])
        u[:-1] = np.divide(u[:-1],-T)
        #==================#
        #    Set Controls  #
        #==================#
        # Controls for Ardrone
        # -phi = right... +phi = left
        # -theta = back... +theta = forward
        # -psi = right... +psi = left
        global phi_trim,theta_trim,phi_scale,theta_scale
        # desired roll/pitch recovered from the normalized direction
        phi_d = (asin(u[1,-1]))
        theta_d = (-asin(u[0,-1]))
        ctrl = Control()
        ctrl.name = states.Obj[0].name
        ctrl.phi = phi_trim + phi_scale*phi_d
        ctrl.theta = theta_trim + theta_scale*theta_d
        ctrl.psi = -u[3,-1]/max_yaw_rate
        global nominal_thrust
        # battery-compensated hover thrust plus normalized thrust error
        T_d = nominal_thrust+(T-g)/g
        ctrl.T = T_d
        Ctrl.Obj[0] = ctrl
        Ctrl.header = states.header
        #rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
        pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
    import sys
    rospy.init_node('IRIS_DF_Controller')
    #=====================================#
    #    Set up Publish/Subscribe Loop    #
    #=====================================#
    # Fix: subscribers were previously created INSIDE the while loop,
    # re-registering all four subscriptions every cycle at 45 Hz.
    # Register them once; rospy delivers callbacks in the background.
    sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**24)
    sub_traj = rospy.Subscriber('/trajectory' , Trajectories, GetTraj, queue_size=1, buff_size=2**24)
    sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt)
    sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus)
    r = rospy.Rate(rate)
    while not rospy.is_shutdown():
        Basic_Controller()
        r.sleep()
|
{"hexsha": "40029c8f1caeff06f4611276973391395a0d3d7b", "size": 6110, "ext": "py", "lang": "Python", "max_stars_repo_path": "risc_control/src/IRIS_DF_Controller.py", "max_stars_repo_name": "riscmaster/risc_maap", "max_stars_repo_head_hexsha": "48b0ab79c1938bc3ed36442894dd4bf3091a2942", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-04-30T19:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-30T19:33:37.000Z", "max_issues_repo_path": "risc_control/src/IRIS_DF_Controller.py", "max_issues_repo_name": "riscmaster/risc_maap", "max_issues_repo_head_hexsha": "48b0ab79c1938bc3ed36442894dd4bf3091a2942", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-12T02:29:39.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-12T02:29:39.000Z", "max_forks_repo_path": "risc_control/src/IRIS_DF_Controller.py", "max_forks_repo_name": "riscmaster/risc_maap", "max_forks_repo_head_hexsha": "48b0ab79c1938bc3ed36442894dd4bf3091a2942", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-03-07T00:47:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-06T17:43:10.000Z", "avg_line_length": 30.0985221675, "max_line_length": 106, "alphanum_fraction": 0.4428805237, "include": true, "reason": "import numpy", "num_tokens": 1693}
|
[STATEMENT]
lemma [simp]:
"(- grd (step ClassicMark)) loop = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (- grd (step ClassicMark)) loop = {}
[PROOF STEP]
apply safe
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. (a, aa, ab, ac, ad, b) \<in> (- grd (step ClassicMark)) loop \<Longrightarrow> (a, aa, ab, ac, ad, b) \<in> {}
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<forall>j. (a, aa, ab, ac, ad, b) \<notin> grd (ClassicMark (loop, j)) \<Longrightarrow> False
[PROOF STEP]
apply (frule_tac x = "final" in spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<lbrakk>\<forall>j. (a, aa, ab, ac, ad, b) \<notin> grd (ClassicMark (loop, j)); (a, aa, ab, ac, ad, b) \<notin> grd (ClassicMark (loop, final))\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (drule_tac x = "loop" in spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<lbrakk>(a, aa, ab, ac, ad, b) \<notin> grd (ClassicMark (loop, final)); (a, aa, ab, ac, ad, b) \<notin> grd (ClassicMark (loop, loop))\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (unfold ClassicMark_def QQ1_a_def QQ2_a_def QQ3_a_def QQ4_a_def QQ5_a_def QQ6_a_def QQ7_a_def QQ8_a_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<lbrakk>(a, aa, ab, ac, ad, b) \<notin> grd (case (loop, final) of (i, j) \<Rightarrow> case (i, j) of (init, loop) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . root = nil \<and> p' = nil \<and> t' = nil \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . root \<noteq> nil \<and> p' = root \<and> t' = nil \<and> mrk' = mrk \<union> {root} \<and> left' = left \<and> right' = right \<and> atom' = atom :] | (init, _) \<Rightarrow> \<top> | (loop, init) \<Rightarrow> \<top> | (loop, loop) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> gg mrk atom left p \<and> p' = left p \<and> t' = p \<and> mrk' = mrk \<union> {left p} \<and> left' = left(p := t) \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> gg mrk atom right p \<and> p' = right p \<and> t' = p \<and> mrk' = mrk \<union> {right p} \<and> left' = left \<and> right' = right(p := t) \<and> atom' = atom(p := True) :] \<sqinter> ([: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t \<noteq> nil \<and> \<not> atom t \<and> p' = t \<and> t' = left t \<and> mrk' = mrk \<and> left' = left(t := p) \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . 
p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t \<noteq> nil \<and> atom t \<and> p' = t \<and> t' = right t \<and> mrk' = mrk \<and> left' = left \<and> right' = right(t := p) \<and> atom' = atom(t := False) :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t = nil \<and> p' = nil \<and> t' = t \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :]) | (loop, final) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p = nil \<and> p' = p \<and> t' = t \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :] | (final, b) \<Rightarrow> \<top>); (a, aa, ab, ac, ad, b) \<notin> grd (case (loop, loop) of (i, j) \<Rightarrow> case (i, j) of (init, loop) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . root = nil \<and> p' = nil \<and> t' = nil \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . root \<noteq> nil \<and> p' = root \<and> t' = nil \<and> mrk' = mrk \<union> {root} \<and> left' = left \<and> right' = right \<and> atom' = atom :] | (init, _) \<Rightarrow> \<top> | (loop, init) \<Rightarrow> \<top> | (loop, loop) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> gg mrk atom left p \<and> p' = left p \<and> t' = p \<and> mrk' = mrk \<union> {left p} \<and> left' = left(p := t) \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . 
p \<noteq> nil \<and> gg mrk atom right p \<and> p' = right p \<and> t' = p \<and> mrk' = mrk \<union> {right p} \<and> left' = left \<and> right' = right(p := t) \<and> atom' = atom(p := True) :] \<sqinter> ([: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t \<noteq> nil \<and> \<not> atom t \<and> p' = t \<and> t' = left t \<and> mrk' = mrk \<and> left' = left(t := p) \<and> right' = right \<and> atom' = atom :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t \<noteq> nil \<and> atom t \<and> p' = t \<and> t' = right t \<and> mrk' = mrk \<and> left' = left \<and> right' = right(t := p) \<and> atom' = atom(t := False) :] \<sqinter> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p \<noteq> nil \<and> \<not> gg mrk atom left p \<and> \<not> gg mrk atom right p \<and> t = nil \<and> p' = nil \<and> t' = t \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :]) | (loop, final) \<Rightarrow> [: p, t, left, right, atom, mrk \<leadsto> p', t', left', right', atom', mrk' . p = nil \<and> p' = p \<and> t' = t \<and> mrk' = mrk \<and> left' = left \<and> right' = right \<and> atom' = atom :] | (final, b) \<Rightarrow> \<top>)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a aa ab ac ad b. \<lbrakk>a \<noteq> nil; \<not> gg b ad ab a \<and> \<not> gg b ad ac a \<and> (gg b ad ab a \<or> gg b ad ac a \<or> aa = nil \<or> ad aa) \<and> (ad aa \<longrightarrow> gg b ad ab a \<or> gg b ad ac a \<or> aa = nil) \<and> (aa = nil \<longrightarrow> gg b ad ab a \<or> gg b ad ac a)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (case_tac "a \<noteq> nil")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a aa ab ac ad b. \<lbrakk>a \<noteq> nil; \<not> gg b ad ab a \<and> \<not> gg b ad ac a \<and> (gg b ad ab a \<or> gg b ad ac a \<or> aa = nil \<or> ad aa) \<and> (ad aa \<longrightarrow> gg b ad ab a \<or> gg b ad ac a \<or> aa = nil) \<and> (aa = nil \<longrightarrow> gg b ad ab a \<or> gg b ad ac a); a \<noteq> nil\<rbrakk> \<Longrightarrow> False
2. \<And>a aa ab ac ad b. \<lbrakk>a \<noteq> nil; \<not> gg b ad ab a \<and> \<not> gg b ad ac a \<and> (gg b ad ab a \<or> gg b ad ac a \<or> aa = nil \<or> ad aa) \<and> (ad aa \<longrightarrow> gg b ad ab a \<or> gg b ad ac a \<or> aa = nil) \<and> (aa = nil \<longrightarrow> gg b ad ab a \<or> gg b ad ac a); \<not> a \<noteq> nil\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by auto
|
{"llama_tokens": 3135, "file": "GraphMarkingIBP_DSWMark", "length": 8}
|
[GOAL]
p : ℕ
inst✝ : Fact (Prime p)
hp : p % 4 ≠ 3
⊢ ∃ a b, a ^ 2 + b ^ 2 = p
[PROOFSTEP]
apply sq_add_sq_of_nat_prime_of_not_irreducible p
[GOAL]
p : ℕ
inst✝ : Fact (Prime p)
hp : p % 4 ≠ 3
⊢ ¬Irreducible ↑p
[PROOFSTEP]
rwa [PrincipalIdealRing.irreducible_iff_prime, prime_iff_mod_four_eq_three_of_nat_prime p]
[GOAL]
R : Type u_1
inst✝ : CommRing R
a b x y u v : R
ha : a = x ^ 2 + y ^ 2
hb : b = u ^ 2 + v ^ 2
⊢ a * b = (x * u - y * v) ^ 2 + (x * v + y * u) ^ 2
[PROOFSTEP]
rw [ha, hb]
[GOAL]
R : Type u_1
inst✝ : CommRing R
a b x y u v : R
ha : a = x ^ 2 + y ^ 2
hb : b = u ^ 2 + v ^ 2
⊢ (x ^ 2 + y ^ 2) * (u ^ 2 + v ^ 2) = (x * u - y * v) ^ 2 + (x * v + y * u) ^ 2
[PROOFSTEP]
ring
[GOAL]
a b x y u v : ℕ
ha : a = x ^ 2 + y ^ 2
hb : b = u ^ 2 + v ^ 2
⊢ ∃ r s, a * b = r ^ 2 + s ^ 2
[PROOFSTEP]
zify at ha hb ⊢
[GOAL]
a b x y u v : ℕ
ha : ↑a = ↑x ^ 2 + ↑y ^ 2
hb : ↑b = ↑u ^ 2 + ↑v ^ 2
⊢ ∃ r s, ↑a * ↑b = ↑r ^ 2 + ↑s ^ 2
[PROOFSTEP]
obtain ⟨r, s, h⟩ := _root_.sq_add_sq_mul ha hb
[GOAL]
case intro.intro
a b x y u v : ℕ
ha : ↑a = ↑x ^ 2 + ↑y ^ 2
hb : ↑b = ↑u ^ 2 + ↑v ^ 2
r s : ℤ
h : ↑a * ↑b = r ^ 2 + s ^ 2
⊢ ∃ r s, ↑a * ↑b = ↑r ^ 2 + ↑s ^ 2
[PROOFSTEP]
refine' ⟨r.natAbs, s.natAbs, _⟩
[GOAL]
case intro.intro
a b x y u v : ℕ
ha : ↑a = ↑x ^ 2 + ↑y ^ 2
hb : ↑b = ↑u ^ 2 + ↑v ^ 2
r s : ℤ
h : ↑a * ↑b = r ^ 2 + s ^ 2
⊢ ↑a * ↑b = ↑(Int.natAbs r) ^ 2 + ↑(Int.natAbs s) ^ 2
[PROOFSTEP]
simpa only [Int.coe_natAbs, sq_abs]
[GOAL]
m n : ℕ
hd : m ∣ n
hs : IsSquare (-1)
⊢ IsSquare (-1)
[PROOFSTEP]
let f : ZMod n →+* ZMod m := ZMod.castHom hd _
[GOAL]
m n : ℕ
hd : m ∣ n
hs : IsSquare (-1)
f : ZMod n →+* ZMod m := castHom hd (ZMod m)
⊢ IsSquare (-1)
[PROOFSTEP]
rw [← RingHom.map_one f, ← RingHom.map_neg]
[GOAL]
m n : ℕ
hd : m ∣ n
hs : IsSquare (-1)
f : ZMod n →+* ZMod m := castHom hd (ZMod m)
⊢ IsSquare (↑f (-1))
[PROOFSTEP]
exact hs.map f
[GOAL]
m n : ℕ
hc : Nat.coprime m n
hm : IsSquare (-1)
hn : IsSquare (-1)
⊢ IsSquare (-1)
[PROOFSTEP]
have : IsSquare (-1 : ZMod m × ZMod n) :=
by
rw [show (-1 : ZMod m × ZMod n) = ((-1 : ZMod m), (-1 : ZMod n)) from rfl]
obtain ⟨x, hx⟩ := hm
obtain ⟨y, hy⟩ := hn
rw [hx, hy]
exact ⟨(x, y), rfl⟩
[GOAL]
m n : ℕ
hc : Nat.coprime m n
hm : IsSquare (-1)
hn : IsSquare (-1)
⊢ IsSquare (-1)
[PROOFSTEP]
rw [show (-1 : ZMod m × ZMod n) = ((-1 : ZMod m), (-1 : ZMod n)) from rfl]
[GOAL]
m n : ℕ
hc : Nat.coprime m n
hm : IsSquare (-1)
hn : IsSquare (-1)
⊢ IsSquare (-1, -1)
[PROOFSTEP]
obtain ⟨x, hx⟩ := hm
[GOAL]
case intro
m n : ℕ
hc : Nat.coprime m n
hn : IsSquare (-1)
x : ZMod m
hx : -1 = x * x
⊢ IsSquare (-1, -1)
[PROOFSTEP]
obtain ⟨y, hy⟩ := hn
[GOAL]
case intro.intro
m n : ℕ
hc : Nat.coprime m n
x : ZMod m
hx : -1 = x * x
y : ZMod n
hy : -1 = y * y
⊢ IsSquare (-1, -1)
[PROOFSTEP]
rw [hx, hy]
[GOAL]
case intro.intro
m n : ℕ
hc : Nat.coprime m n
x : ZMod m
hx : -1 = x * x
y : ZMod n
hy : -1 = y * y
⊢ IsSquare (x * x, y * y)
[PROOFSTEP]
exact ⟨(x, y), rfl⟩
[GOAL]
m n : ℕ
hc : Nat.coprime m n
hm : IsSquare (-1)
hn : IsSquare (-1)
this : IsSquare (-1)
⊢ IsSquare (-1)
[PROOFSTEP]
simpa only [RingEquiv.map_neg_one] using this.map (ZMod.chineseRemainder hc).symm
[GOAL]
p n : ℕ
hpp : Prime p
hp : p ∣ n
hs : IsSquare (-1)
⊢ p % 4 ≠ 3
[PROOFSTEP]
obtain ⟨y, h⟩ := ZMod.isSquare_neg_one_of_dvd hp hs
[GOAL]
case intro
p n : ℕ
hpp : Prime p
hp : p ∣ n
hs : IsSquare (-1)
y : ZMod p
h : -1 = y * y
⊢ p % 4 ≠ 3
[PROOFSTEP]
rw [← sq, eq_comm, show (-1 : ZMod p) = -1 ^ 2 by ring] at h
[GOAL]
p n : ℕ
hpp : Prime p
hp : p ∣ n
hs : IsSquare (-1)
y : ZMod p
h : y ^ 2 = -1
⊢ -1 = -1 ^ 2
[PROOFSTEP]
ring
[GOAL]
case intro
p n : ℕ
hpp : Prime p
hp : p ∣ n
hs : IsSquare (-1)
y : ZMod p
h : y ^ 2 = -1 ^ 2
⊢ p % 4 ≠ 3
[PROOFSTEP]
haveI : Fact p.Prime := ⟨hpp⟩
[GOAL]
case intro
p n : ℕ
hpp : Prime p
hp : p ∣ n
hs : IsSquare (-1)
y : ZMod p
h : y ^ 2 = -1 ^ 2
this : Fact (Prime p)
⊢ p % 4 ≠ 3
[PROOFSTEP]
exact ZMod.mod_four_ne_three_of_sq_eq_neg_sq' one_ne_zero h
[GOAL]
n : ℕ
hn : Squarefree n
⊢ IsSquare (-1) ↔ ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
refine' ⟨fun H q hqp hqd => hqp.mod_four_ne_three_of_dvd_isSquare_neg_one hqd H, fun H => _⟩
[GOAL]
n : ℕ
hn : Squarefree n
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
⊢ IsSquare (-1)
[PROOFSTEP]
induction' n using induction_on_primes with p n hpp ih
[GOAL]
case h₀
n : ℕ
hn✝ : Squarefree n
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
hn : Squarefree 0
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ 0 → q % 4 ≠ 3
⊢ IsSquare (-1)
[PROOFSTEP]
exact False.elim (hn.ne_zero rfl)
[GOAL]
case h₁
n : ℕ
hn✝ : Squarefree n
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
hn : Squarefree 1
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ 1 → q % 4 ≠ 3
⊢ IsSquare (-1)
[PROOFSTEP]
exact ⟨0, by simp only [Fin.zero_mul, neg_eq_zero, Fin.one_eq_zero_iff]⟩
[GOAL]
n : ℕ
hn✝ : Squarefree n
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
hn : Squarefree 1
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ 1 → q % 4 ≠ 3
⊢ -1 = 0 * 0
[PROOFSTEP]
simp only [Fin.zero_mul, neg_eq_zero, Fin.one_eq_zero_iff]
[GOAL]
case h
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
⊢ IsSquare (-1)
[PROOFSTEP]
haveI : Fact p.Prime := ⟨hpp⟩
[GOAL]
case h
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
this : Fact (Nat.Prime p)
⊢ IsSquare (-1)
[PROOFSTEP]
have hcp : p.coprime n := by
by_contra hc
exact hpp.not_unit (hn p <| mul_dvd_mul_left p <| hpp.dvd_iff_not_coprime.mpr hc)
[GOAL]
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
this : Fact (Nat.Prime p)
⊢ Nat.coprime p n
[PROOFSTEP]
by_contra hc
[GOAL]
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
this : Fact (Nat.Prime p)
hc : ¬Nat.coprime p n
⊢ False
[PROOFSTEP]
exact hpp.not_unit (hn p <| mul_dvd_mul_left p <| hpp.dvd_iff_not_coprime.mpr hc)
[GOAL]
case h
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
this : Fact (Nat.Prime p)
hcp : Nat.coprime p n
⊢ IsSquare (-1)
[PROOFSTEP]
have hp₁ := ZMod.exists_sq_eq_neg_one_iff.mpr (H hpp (dvd_mul_right p n))
[GOAL]
case h
n✝ : ℕ
hn✝ : Squarefree n✝
H✝ : ∀ {q : ℕ}, Nat.Prime q → q ∣ n✝ → q % 4 ≠ 3
p n : ℕ
hpp : Nat.Prime p
ih : Squarefree n → (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → IsSquare (-1)
hn : Squarefree (p * n)
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ p * n → q % 4 ≠ 3
this : Fact (Nat.Prime p)
hcp : Nat.coprime p n
hp₁ : IsSquare (-1)
⊢ IsSquare (-1)
[PROOFSTEP]
exact ZMod.isSquare_neg_one_mul hcp hp₁ (ih hn.of_mul_right fun hqp hqd => H hqp <| dvd_mul_of_dvd_right hqd _)
[GOAL]
n : ℕ
hn : Squarefree n
⊢ IsSquare (-1) ↔ ∀ {q : ℕ}, q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
have help : ∀ a b : ZMod 4, a ≠ 3 → b ≠ 3 → a * b ≠ 3 := by decide
[GOAL]
n : ℕ
hn : Squarefree n
⊢ ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
[PROOFSTEP]
decide
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
⊢ IsSquare (-1) ↔ ∀ {q : ℕ}, q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
rw [ZMod.isSquare_neg_one_iff hn]
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
⊢ (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) ↔ ∀ {q : ℕ}, q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
refine' ⟨_, fun H q _ => H⟩
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
⊢ (∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3) → ∀ {q : ℕ}, q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
intro H
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
⊢ ∀ {q : ℕ}, q ∣ n → q % 4 ≠ 3
[PROOFSTEP]
refine' @induction_on_primes _ _ _ (fun p q hp hq hpq => _)
[GOAL]
case refine'_1
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
⊢ 0 ∣ n → 0 % 4 ≠ 3
[PROOFSTEP]
exact fun _ => by norm_num
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
x✝ : 0 ∣ n
⊢ 0 % 4 ≠ 3
[PROOFSTEP]
norm_num
[GOAL]
case refine'_2
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
⊢ 1 ∣ n → 1 % 4 ≠ 3
[PROOFSTEP]
exact fun _ => by norm_num
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
x✝ : 1 ∣ n
⊢ 1 % 4 ≠ 3
[PROOFSTEP]
norm_num
[GOAL]
case refine'_3
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hp : Nat.Prime p
hq : q ∣ n → q % 4 ≠ 3
hpq : p * q ∣ n
⊢ p * q % 4 ≠ 3
[PROOFSTEP]
replace hp := H hp (dvd_of_mul_right_dvd hpq)
[GOAL]
case refine'_3
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hq : q ∣ n → q % 4 ≠ 3
hpq : p * q ∣ n
hp : p % 4 ≠ 3
⊢ p * q % 4 ≠ 3
[PROOFSTEP]
replace hq := hq (dvd_of_mul_left_dvd hpq)
[GOAL]
case refine'_3
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : p % 4 ≠ 3
hq : q % 4 ≠ 3
⊢ p * q % 4 ≠ 3
[PROOFSTEP]
rw [show 3 = 3 % 4 by norm_num, Ne.def, ← ZMod.nat_cast_eq_nat_cast_iff'] at hp hq ⊢
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : p % 4 ≠ 3
hq : q % 4 ≠ 3
⊢ 3 = 3 % 4
[PROOFSTEP]
norm_num
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : p % 4 ≠ 3 % 4
hq : q % 4 ≠ 3
⊢ 3 = 3 % 4
[PROOFSTEP]
norm_num
[GOAL]
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : p % 4 ≠ 3 % 4
hq : q % 4 ≠ 3 % 4
⊢ 3 = 3 % 4
[PROOFSTEP]
norm_num
[GOAL]
case refine'_3
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : ¬↑p = ↑3
hq : ¬↑q = ↑3
⊢ ¬↑(p * q) = ↑3
[PROOFSTEP]
rw [Nat.cast_mul]
[GOAL]
case refine'_3
n : ℕ
hn : Squarefree n
help : ∀ (a b : ZMod 4), a ≠ 3 → b ≠ 3 → a * b ≠ 3
H : ∀ {q : ℕ}, Nat.Prime q → q ∣ n → q % 4 ≠ 3
p q : ℕ
hpq : p * q ∣ n
hp : ¬↑p = ↑3
hq : ¬↑q = ↑3
⊢ ¬↑p * ↑q = ↑3
[PROOFSTEP]
exact help p q hp hq
[GOAL]
n : ℕ
h : IsSquare (-1)
⊢ ∃ x y, n = x ^ 2 + y ^ 2
[PROOFSTEP]
induction' n using induction_on_primes with p n hpp ih
[GOAL]
case h₀
n : ℕ
h✝ : IsSquare (-1)
h : IsSquare (-1)
⊢ ∃ x y, 0 = x ^ 2 + y ^ 2
[PROOFSTEP]
exact ⟨0, 0, rfl⟩
[GOAL]
case h₁
n : ℕ
h✝ : IsSquare (-1)
h : IsSquare (-1)
⊢ ∃ x y, 1 = x ^ 2 + y ^ 2
[PROOFSTEP]
exact ⟨0, 1, rfl⟩
[GOAL]
case h
n✝ : ℕ
h✝ : IsSquare (-1)
p n : ℕ
hpp : Prime p
ih : IsSquare (-1) → ∃ x y, n = x ^ 2 + y ^ 2
h : IsSquare (-1)
⊢ ∃ x y, p * n = x ^ 2 + y ^ 2
[PROOFSTEP]
haveI : Fact p.Prime := ⟨hpp⟩
[GOAL]
case h
n✝ : ℕ
h✝ : IsSquare (-1)
p n : ℕ
hpp : Prime p
ih : IsSquare (-1) → ∃ x y, n = x ^ 2 + y ^ 2
h : IsSquare (-1)
this : Fact (Prime p)
⊢ ∃ x y, p * n = x ^ 2 + y ^ 2
[PROOFSTEP]
have hp : IsSquare (-1 : ZMod p) := ZMod.isSquare_neg_one_of_dvd ⟨n, rfl⟩ h
[GOAL]
case h
n✝ : ℕ
h✝ : IsSquare (-1)
p n : ℕ
hpp : Prime p
ih : IsSquare (-1) → ∃ x y, n = x ^ 2 + y ^ 2
h : IsSquare (-1)
this : Fact (Prime p)
hp : IsSquare (-1)
⊢ ∃ x y, p * n = x ^ 2 + y ^ 2
[PROOFSTEP]
obtain ⟨u, v, huv⟩ := Nat.Prime.sq_add_sq (ZMod.exists_sq_eq_neg_one_iff.mp hp)
[GOAL]
case h.intro.intro
n✝ : ℕ
h✝ : IsSquare (-1)
p n : ℕ
hpp : Prime p
ih : IsSquare (-1) → ∃ x y, n = x ^ 2 + y ^ 2
h : IsSquare (-1)
this : Fact (Prime p)
hp : IsSquare (-1)
u v : ℕ
huv : u ^ 2 + v ^ 2 = p
⊢ ∃ x y, p * n = x ^ 2 + y ^ 2
[PROOFSTEP]
obtain ⟨x, y, hxy⟩ := ih (ZMod.isSquare_neg_one_of_dvd ⟨p, mul_comm _ _⟩ h)
[GOAL]
case h.intro.intro.intro.intro
n✝ : ℕ
h✝ : IsSquare (-1)
p n : ℕ
hpp : Prime p
ih : IsSquare (-1) → ∃ x y, n = x ^ 2 + y ^ 2
h : IsSquare (-1)
this : Fact (Prime p)
hp : IsSquare (-1)
u v : ℕ
huv : u ^ 2 + v ^ 2 = p
x y : ℕ
hxy : n = x ^ 2 + y ^ 2
⊢ ∃ x y, p * n = x ^ 2 + y ^ 2
[PROOFSTEP]
exact Nat.sq_add_sq_mul huv.symm hxy
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
⊢ IsSquare (-1)
[PROOFSTEP]
obtain ⟨u, v, huv⟩ : IsCoprime x n :=
by
have hc2 : IsCoprime (x ^ 2) (y ^ 2) := hc.pow
rw [show y ^ 2 = n + -1 * x ^ 2 by rw [h]; ring] at hc2
exact (IsCoprime.pow_left_iff zero_lt_two).mp hc2.of_add_mul_right_right
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
⊢ IsCoprime x n
[PROOFSTEP]
have hc2 : IsCoprime (x ^ 2) (y ^ 2) := hc.pow
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
hc2 : IsCoprime (x ^ 2) (y ^ 2)
⊢ IsCoprime x n
[PROOFSTEP]
rw [show y ^ 2 = n + -1 * x ^ 2 by rw [h]; ring] at hc2
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
hc2 : IsCoprime (x ^ 2) (y ^ 2)
⊢ y ^ 2 = n + -1 * x ^ 2
[PROOFSTEP]
rw [h]
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
hc2 : IsCoprime (x ^ 2) (y ^ 2)
⊢ y ^ 2 = x ^ 2 + y ^ 2 + -1 * x ^ 2
[PROOFSTEP]
ring
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
hc2 : IsCoprime (x ^ 2) (n + -1 * x ^ 2)
⊢ IsCoprime x n
[PROOFSTEP]
exact (IsCoprime.pow_left_iff zero_lt_two).mp hc2.of_add_mul_right_right
[GOAL]
case intro.intro
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
⊢ IsSquare (-1)
[PROOFSTEP]
have H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v) := by
linear_combination -u ^ 2 * h + (n * v - u * x - 1) * huv
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
⊢ u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
[PROOFSTEP]
linear_combination -u ^ 2 * h + (n * v - u * x - 1) * huv
[GOAL]
case intro.intro
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ IsSquare (-1)
[PROOFSTEP]
refine' ⟨u * y, _⟩
[GOAL]
case intro.intro
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ -1 = ↑u * ↑y * (↑u * ↑y)
[PROOFSTEP]
conv_rhs => tactic => norm_cast
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
| ↑u * ↑y * (↑u * ↑y)
[PROOFSTEP]
tactic => norm_cast
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
| ↑u * ↑y * (↑u * ↑y)
[PROOFSTEP]
tactic => norm_cast
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
| ↑u * ↑y * (↑u * ↑y)
[PROOFSTEP]
tactic => norm_cast
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ ↑u * ↑y * (↑u * ↑y) = ?m.37150
[PROOFSTEP]
norm_cast
[GOAL]
case intro.intro
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ -1 = ↑(u * y * (u * y))
[PROOFSTEP]
rw [(by norm_cast : (-1 : ZMod n.natAbs) = (-1 : ℤ))]
[GOAL]
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ -1 = ↑(-1)
[PROOFSTEP]
norm_cast
[GOAL]
case intro.intro
n x y : ℤ
h : n = x ^ 2 + y ^ 2
hc : IsCoprime x y
u v : ℤ
huv : u * x + v * n = 1
H : u * y * (u * y) - -1 = n * (-v ^ 2 * n + u ^ 2 + 2 * v)
⊢ ↑(-1) = ↑(u * y * (u * y))
[PROOFSTEP]
exact (ZMod.int_cast_eq_int_cast_iff_dvd_sub _ _ _).mpr (Int.natAbs_dvd.mpr ⟨_, H⟩)
[GOAL]
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hc : Nat.coprime x y
⊢ IsSquare (-1)
[PROOFSTEP]
zify at h
[GOAL]
n x y : ℕ
hc : Nat.coprime x y
h : ↑n = ↑x ^ 2 + ↑y ^ 2
⊢ IsSquare (-1)
[PROOFSTEP]
exact ZMod.isSquare_neg_one_of_eq_sq_add_sq_of_isCoprime h hc.isCoprime
[GOAL]
n : ℕ
⊢ (∃ x y, n = x ^ 2 + y ^ 2) ↔ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
constructor
[GOAL]
case mp
n : ℕ
⊢ (∃ x y, n = x ^ 2 + y ^ 2) → ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
rintro ⟨x, y, h⟩
[GOAL]
case mp.intro.intro
n x y : ℕ
h : n = x ^ 2 + y ^ 2
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
by_cases hxy : x = 0 ∧ y = 0
[GOAL]
case pos
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : x = 0 ∧ y = 0
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
exact
⟨0, 1, by rw [h, hxy.1, hxy.2, zero_pow zero_lt_two, add_zero, zero_mul],
⟨0, by rw [zero_mul, neg_eq_zero, Fin.one_eq_zero_iff]⟩⟩
[GOAL]
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : x = 0 ∧ y = 0
⊢ n = 0 ^ 2 * 1
[PROOFSTEP]
rw [h, hxy.1, hxy.2, zero_pow zero_lt_two, add_zero, zero_mul]
[GOAL]
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : x = 0 ∧ y = 0
⊢ -1 = 0 * 0
[PROOFSTEP]
rw [zero_mul, neg_eq_zero, Fin.one_eq_zero_iff]
[GOAL]
case neg
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : ¬(x = 0 ∧ y = 0)
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
have hg := Nat.pos_of_ne_zero (mt Nat.gcd_eq_zero_iff.mp hxy)
[GOAL]
case neg
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : ¬(x = 0 ∧ y = 0)
hg : 0 < gcd x y
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
obtain ⟨g, x₁, y₁, _, h₂, h₃, h₄⟩ := Nat.exists_coprime' hg
[GOAL]
case neg.intro.intro.intro.intro.intro.intro
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : ¬(x = 0 ∧ y = 0)
hg : 0 < gcd x y
g : ℕ
x₁ y₁ : ℕ
left✝ : 0 < g
h₂ : coprime x₁ y₁
h₃ : x = x₁ * g
h₄ : y = y₁ * g
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
exact ⟨g, x₁ ^ 2 + y₁ ^ 2, by rw [h, h₃, h₄]; ring, ZMod.isSquare_neg_one_of_eq_sq_add_sq_of_coprime rfl h₂⟩
[GOAL]
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : ¬(x = 0 ∧ y = 0)
hg : 0 < gcd x y
g : ℕ
x₁ y₁ : ℕ
left✝ : 0 < g
h₂ : coprime x₁ y₁
h₃ : x = x₁ * g
h₄ : y = y₁ * g
⊢ n = g ^ 2 * (x₁ ^ 2 + y₁ ^ 2)
[PROOFSTEP]
rw [h, h₃, h₄]
[GOAL]
n x y : ℕ
h : n = x ^ 2 + y ^ 2
hxy : ¬(x = 0 ∧ y = 0)
hg : 0 < gcd x y
g : ℕ
x₁ y₁ : ℕ
left✝ : 0 < g
h₂ : coprime x₁ y₁
h₃ : x = x₁ * g
h₄ : y = y₁ * g
⊢ (x₁ * g) ^ 2 + (y₁ * g) ^ 2 = g ^ 2 * (x₁ ^ 2 + y₁ ^ 2)
[PROOFSTEP]
ring
[GOAL]
case mpr
n : ℕ
⊢ (∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)) → ∃ x y, n = x ^ 2 + y ^ 2
[PROOFSTEP]
rintro ⟨a, b, h₁, h₂⟩
[GOAL]
case mpr.intro.intro.intro
n a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
⊢ ∃ x y, n = x ^ 2 + y ^ 2
[PROOFSTEP]
obtain ⟨x', y', h⟩ := Nat.eq_sq_add_sq_of_isSquare_mod_neg_one h₂
[GOAL]
case mpr.intro.intro.intro.intro.intro
n a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
x' y' : ℕ
h : b = x' ^ 2 + y' ^ 2
⊢ ∃ x y, n = x ^ 2 + y ^ 2
[PROOFSTEP]
exact ⟨a * x', a * y', by rw [h₁, h]; ring⟩
[GOAL]
n a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
x' y' : ℕ
h : b = x' ^ 2 + y' ^ 2
⊢ n = (a * x') ^ 2 + (a * y') ^ 2
[PROOFSTEP]
rw [h₁, h]
[GOAL]
n a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
x' y' : ℕ
h : b = x' ^ 2 + y' ^ 2
⊢ a ^ 2 * (x' ^ 2 + y' ^ 2) = (a * x') ^ 2 + (a * y') ^ 2
[PROOFSTEP]
ring
[GOAL]
n : ℕ
⊢ (∃ x y, n = x ^ 2 + y ^ 2) ↔ ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
[PROOFSTEP]
rcases n.eq_zero_or_pos with (rfl | hn₀)
[GOAL]
case inl
⊢ (∃ x y, 0 = x ^ 2 + y ^ 2) ↔ ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q 0)
[PROOFSTEP]
exact
⟨fun _ q _ _ => (@padicValNat.zero q).symm ▸ even_zero, fun _ => ⟨0, 0, rfl⟩⟩
-- now `0 < n`
[GOAL]
case inr
n : ℕ
hn₀ : n > 0
⊢ (∃ x y, n = x ^ 2 + y ^ 2) ↔ ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
[PROOFSTEP]
rw [Nat.eq_sq_add_sq_iff_eq_sq_mul]
[GOAL]
case inr
n : ℕ
hn₀ : n > 0
⊢ (∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)) ↔ ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
[PROOFSTEP]
refine' ⟨fun H q hq h => _, fun H => _⟩
[GOAL]
case inr.refine'_1
n : ℕ
hn₀ : n > 0
H : ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
q : ℕ
hq : Prime q
h : q % 4 = 3
⊢ Even (padicValNat q n)
[PROOFSTEP]
obtain ⟨a, b, h₁, h₂⟩ := H
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
⊢ Even (padicValNat q n)
[PROOFSTEP]
have hqb := padicValNat.eq_zero_of_not_dvd fun hf => (hq.mod_four_ne_three_of_dvd_isSquare_neg_one hf h₂) h
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
⊢ Even (padicValNat q n)
[PROOFSTEP]
have hab : a ^ 2 * b ≠ 0 := h₁ ▸ hn₀.ne'
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
⊢ Even (padicValNat q n)
[PROOFSTEP]
have ha₂ := left_ne_zero_of_mul hab
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
ha₂ : a ^ 2 ≠ 0
⊢ Even (padicValNat q n)
[PROOFSTEP]
have ha := mt sq_eq_zero_iff.mpr ha₂
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
ha₂ : a ^ 2 ≠ 0
ha : ¬a = 0
⊢ Even (padicValNat q n)
[PROOFSTEP]
have hb := right_ne_zero_of_mul hab
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
ha₂ : a ^ 2 ≠ 0
ha : ¬a = 0
hb : b ≠ 0
⊢ Even (padicValNat q n)
[PROOFSTEP]
haveI hqi : Fact q.Prime := ⟨hq⟩
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
ha₂ : a ^ 2 ≠ 0
ha : ¬a = 0
hb : b ≠ 0
hqi : Fact (Prime q)
⊢ Even (padicValNat q n)
[PROOFSTEP]
simp_rw [h₁, padicValNat.mul ha₂ hb, padicValNat.pow 2 ha, hqb, add_zero]
[GOAL]
case inr.refine'_1.intro.intro.intro
n : ℕ
hn₀ : n > 0
q : ℕ
hq : Prime q
h : q % 4 = 3
a b : ℕ
h₁ : n = a ^ 2 * b
h₂ : IsSquare (-1)
hqb : padicValNat q b = 0
hab : a ^ 2 * b ≠ 0
ha₂ : a ^ 2 ≠ 0
ha : ¬a = 0
hb : b ≠ 0
hqi : Fact (Prime q)
⊢ Even (2 * padicValNat q a)
[PROOFSTEP]
exact even_two_mul _
[GOAL]
case inr.refine'_2
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
obtain ⟨b, a, hb₀, ha₀, hab, hb⟩ := Nat.sq_mul_squarefree_of_pos hn₀
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
⊢ ∃ a b, n = a ^ 2 * b ∧ IsSquare (-1)
[PROOFSTEP]
refine' ⟨a, b, hab.symm, (ZMod.isSquare_neg_one_iff hb).mpr fun {q} hqp hqb hq4 => _⟩
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
q : ℕ
hqp : Prime q
hqb : q ∣ b
hq4 : q % 4 = 3
⊢ False
[PROOFSTEP]
refine' Nat.odd_iff_not_even.mp _ (H hqp hq4)
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
q : ℕ
hqp : Prime q
hqb : q ∣ b
hq4 : q % 4 = 3
⊢ Odd (padicValNat q n)
[PROOFSTEP]
have hqb' : padicValNat q b = 1 :=
b.factorization_def hqp ▸
le_antisymm (Nat.Squarefree.factorization_le_one _ hb) ((hqp.dvd_iff_one_le_factorization hb₀.ne').mp hqb)
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
q : ℕ
hqp : Prime q
hqb : q ∣ b
hq4 : q % 4 = 3
hqb' : padicValNat q b = 1
⊢ Odd (padicValNat q n)
[PROOFSTEP]
haveI hqi : Fact q.Prime := ⟨hqp⟩
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
q : ℕ
hqp : Prime q
hqb : q ∣ b
hq4 : q % 4 = 3
hqb' : padicValNat q b = 1
hqi : Fact (Prime q)
⊢ Odd (padicValNat q n)
[PROOFSTEP]
simp_rw [← hab, padicValNat.mul (pow_ne_zero 2 ha₀.ne') hb₀.ne', hqb', padicValNat.pow 2 ha₀.ne']
[GOAL]
case inr.refine'_2.intro.intro.intro.intro.intro
n : ℕ
hn₀ : n > 0
H : ∀ {q : ℕ}, Prime q → q % 4 = 3 → Even (padicValNat q n)
b a : ℕ
hb₀ : 0 < b
ha₀ : 0 < a
hab : a ^ 2 * b = n
hb : Squarefree b
q : ℕ
hqp : Prime q
hqb : q ∣ b
hq4 : q % 4 = 3
hqb' : padicValNat q b = 1
hqi : Fact (Prime q)
⊢ Odd (2 * padicValNat q a + 1)
[PROOFSTEP]
exact odd_two_mul_add_one _
|
{"mathlib_filename": "Mathlib.NumberTheory.SumTwoSquares", "llama_tokens": 14858}
|
cdis Forecast Systems Laboratory
cdis NOAA/OAR/ERL/FSL
cdis 325 Broadway
cdis Boulder, CO 80303
cdis
cdis Forecast Research Division
cdis Local Analysis and Prediction Branch
cdis LAPS
cdis
cdis This software and its documentation are in the public domain and
cdis are furnished "as is." The United States government, its
cdis instrumentalities, officers, employees, and agents make no
cdis warranty, express or implied, as to the usefulness of the software
cdis and documentation for any purpose. They assume no responsibility
cdis (1) for the use of the software and documentation; or (2) to provide
cdis technical support to users.
cdis
cdis Permission to use, copy, modify, and distribute this software is
cdis hereby granted, provided that the entire disclaimer notice appears
cdis in all copies. All modifications to this software must be clearly
cdis documented, and are solely the responsibility of the agent making
cdis the modifications. If significant modifications or enhancements
cdis are made to this software, the FSL Software Policy Manager
cdis (softwaremgr@fsl.noaa.gov) should be notified.
cdis
cdis
cdis
cdis
cdis
cdis
cdis
c
c
      subroutine read_obs_i(infile,maxstns,atime,num_meso,
     &   num_saos,num_sfc,stations,lat_s,lon_s,elev_s,wx_s,cover_s,
     &   hgt_ceil,hgt_low,t_s,td_s,dd_s,ff_s,ddg_s,ffg_s,pr_s,sr_s,
     &   istatus)
c
c*******************************************************************************
c
c       Routine to read SAO and Mesonet surface obs written by the LAPS
c       'lapsdata' program.
c
c       Changes:
c               P.A. Stamus     11-29-88        Original version.
c                               02-01-90        Version for interactive MDAT.
c                               02-14-91        Add solar radiation.
c
c       Input/Output:
c
c        Variable     Var type   I/O   Description
c       ----------   ---------- ----- -------------
c        infile          A*70     I    Directory where input data is.
c        maxstns          I       I    Max number of stations allowed
c        atime           A*24     O    Data time: dd-mmm-yyyy hh:mm
c        num_meso         I       O    Number of mesonet stations in file
c        num_saos         I       O    Number of SAO stations in file
c        num_sfc          I       O    Total number of surface obs.
c        stations        A*3 A    O    Array of the station names
c        lat_s            RA      O    Latitude of the stations
c        lon_s            RA      O    Longitude of the stations
c        elev_s           RA      O    Elevation of the stations (m)
c        wx_s            A*8 A    O    Array of observed weather
c        cover_s          RA      O    Cloud cover (tenths)
c        hgt_ceil         RA      O    Ceiling height (m)
c        hgt_low          RA      O    Height lowest cloud (m)
c        t_s              RA      O    Temperature (F)
c        td_s             RA      O    Dewpoint (F)
c        dd_s             RA      O    Wind direction (deg)
c        ff_s             RA      O    Wind speed (kt)
c        ddg_s            RA      O    Gust wind direction (deg)
c        ffg_s            RA      O    Gust wind speed (kt)
c        pr_s             RA      O    Pressure variable - see note #2.
c        sr_s             RA      O    Solar radiation.
c        istatus          I       O    Status flag:  1 = normal
c                                                   -1 = file not found
c
c       User Notes:
c
c       1.  Arrays should be dimensioned 'maxstns' in the calling program,
c           with maxstns >= 60 for this routine.
c
c       2.  The pressure variable for the Mesonet data is station pressure
c           in millibars.  The pressure variable for SAOs is altimeter setting
c           in millibars.  The mesonet stations are always the first 'num_meso'
c           stations in the pr_s array, and SAOs are the rest.  No corrections,
c           changes, reductions, etc., are made to the data in this routine.
c
c       3.  INFILE is now passed in, complete and ready for the open statement.
c
c*******************************************************************************
c
c..... Output arrays; all dimensioned 'maxstns' by the caller (see note 1).
        real lat_s(maxstns), lon_s(maxstns), elev_s(maxstns)
        real cover_s(maxstns), hgt_ceil(maxstns), hgt_low(maxstns)
        real t_s(maxstns), td_s(maxstns), pr_s(maxstns), sr_s(maxstns)
        real dd_s(maxstns), ff_s(maxstns), ddg_s(maxstns), ffg_s(maxst
     1ns)
c
        character stations(maxstns)*3, wx_s(maxstns)*8
        character atime*24, infile*256
c
        istatus = 0
c
c.....  Open the OBS data file.  Check for a
c.....  'file not found', and notify the user if necessary.
c
        open(1,iostat=ios,file=infile,status='old')
c.....  NOTE(review): iostat value 29 as 'file not found' is
c.....  compiler-specific (VMS/legacy convention) - confirm on the
c.....  target compiler; other error codes fall through silently.
        if(ios .eq. 29) then    !file not found
          print *,
     &     ' +++++ OBS file (SAO & Mesonet data) not available. +++++'
          istatus = -1
          return
        endif
c
c.....  Now read the time and number of stations in the file, then read
c.....  the data.
c
        read(1,901) atime,num_meso,num_sfc
901     format(1x,a24,2i6)
c
c.....  One record per station; mesonet stations come first (note 2).
        do k=1,num_sfc
          read(1,902)stations(k),lat_s(k),lon_s(k),elev_s(k),wx_s(k),
     &               cover_s(k),hgt_ceil(k),hgt_low(k),t_s(k),td_s(k),
     &               dd_s(k),ff_s(k),ddg_s(k),ffg_s(k),pr_s(k),sr_s(k)
        enddo !k
902     format(1x,A3,2f7.2,1x,f5.0,1x,a8,1x,f5.1,2(1x,f7.1),1x,2f6.1,
     &         1X,4(1X,f5.0),1X,F6.1,1x,f6.1)
c
        num_saos = num_sfc - num_meso
        istatus = 1             ! normal return
c
        return
        end
|
{"hexsha": "dcaf96acc585c7e3a71aec0d0861b07d9cc3aff9", "size": 5767, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/newreadobs.f", "max_stars_repo_name": "maxinye/laps-mirror", "max_stars_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_stars_repo_licenses": ["Intel", "Unlicense", "OLDAP-2.2.1", "NetCDF"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-05T12:28:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T06:37:29.000Z", "max_issues_repo_path": "src/lib/newreadobs.f", "max_issues_repo_name": "longwosion/laps-mirror", "max_issues_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_issues_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/newreadobs.f", "max_forks_repo_name": "longwosion/laps-mirror", "max_forks_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_forks_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-27T12:51:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T13:57:44.000Z", "avg_line_length": 43.0373134328, "max_line_length": 80, "alphanum_fraction": 0.5536674181, "num_tokens": 1571}
|
Sandra and Joe Proudman are a husband-and-wife duo available throughout Davis and surrounding areas for event, portrait, and pet photography.
|
{"hexsha": "eb9c8b2347393d6381d7aa0526ba55fefd178498", "size": 157, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Sandra_and_Joe_Proudman_Photography.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Sandra_and_Joe_Proudman_Photography.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Sandra_and_Joe_Proudman_Photography.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.7, "max_line_length": 148, "alphanum_fraction": 0.7770700637, "num_tokens": 31}
|
from functools import reduce
import numpy as np
def _fired_rules(instance, rule_list, threshold=0.001):
    """Returns the rules fired by the instance given a threshold
    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and pertenence
        degrees to each fuzzy set
    rule_list : list(Rule)
        List of candidate rules to form part of the factual
    threshold : float, optional
        Activation threshold above which (strictly) a rule is
        considered to be fired by the instance, by default 0.001
    Returns
    -------
    list(Rule)
        List of fired rules
    """
    return [rule for rule in rule_list if rule.matching(instance) > threshold]
def _get_class_value_rules(rule_list, class_val):
return [rule for rule in rule_list if rule.consequent == class_val]
def _robust_threshold(instance, rule_list, class_val):
"""Obtain the robust threshold as explained in [ref]"""
other_classes = np.unique([rule.consequent for rule in rule_list if rule.consequent != class_val])
all_th = []
for cv in other_classes:
th = 0
for rule in rule_list:
if rule.consequent == cv:
th += rule.matching(instance) * rule.weight
all_th.append(th)
return max(all_th)
def FID3_factual(instance, rule_list, threshold=0.01):
    """Returns the factual extracted for the Fuzzy ID3
    tree in this package
    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and pertenence
        degrees to each fuzzy set
    rule_list : list(Rule)
        List of candidate rules to form part of the factual
    threshold : float, optional
        Activation threshold with which a rule is
        considered to be fired by the instance, by default 0.01
    Returns
    -------
    Rule
        The single fired rule with the highest matching degree.
    Raises
    ------
    ValueError
        If no rule fires above the threshold (``max`` of an empty
        sequence).
    """
    fired_rules = _fired_rules(instance, rule_list, threshold)
    return max(fired_rules, key=lambda rule: rule.matching(instance))
def m_factual(instance, rule_list, class_val):
    """Returns the factual associated to the mean generated
    as explained in [ref]

    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and pertenence
        degrees to each fuzzy set
    rule_list : list(Rule)
        List of candidate rules to form part of the factual
    class_val : str
        Predicted value that the factual will explain

    Returns
    -------
    list(Rule)
        Fired rules of ``class_val`` whose association degree
        (``matching * weight``) is at or above the mean; empty when no
        fired rule predicts ``class_val``.
    """
    fired_rules = _fired_rules(instance, rule_list)
    class_fired_rules = _get_class_value_rules(fired_rules, class_val)
    # Guard the empty case: previously this raised ZeroDivisionError when
    # no fired rule had `class_val` as its consequent.
    if not class_fired_rules:
        return []
    class_fired_rules.sort(key=lambda rule: rule.matching(instance) * rule.weight,
                           reverse=True)
    # Mean association degree over the class's fired rules; `sum` replaces
    # the original `functools.reduce` for readability.
    avg = sum(rule.matching(instance) * rule.weight
              for rule in class_fired_rules) / len(class_fired_rules)
    return [rule for rule in class_fired_rules
            if rule.matching(instance) * rule.weight >= avg]
def mr_factual(instance, rule_list, class_val):
    """Return the minimum robust factual generated as explained in [ref].

    Fired rules predicting ``class_val`` are consumed in decreasing order
    of association degree (``matching * weight``) until their accumulated
    degree exceeds the robust threshold.

    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and
        pertenence degrees to each fuzzy set.
    rule_list : list(Rule)
        List of candidate rules to form part of the factual.
    class_val : str
        Predicted value that the factual will explain.

    Returns
    -------
    list(Rule)
        List of factual rules.
    """
    class_rules = _get_class_value_rules(_fired_rules(instance, rule_list),
                                         class_val)
    class_rules.sort(key=lambda r: r.matching(instance) * r.weight,
                     reverse=True)
    threshold = _robust_threshold(instance, rule_list, class_val)
    factual = []
    accumulated = 0
    for rule in class_rules:
        # Stop once the accumulated association degree beats the threshold.
        if accumulated > threshold:
            break
        factual.append(rule)
        accumulated += rule.matching(instance) * rule.weight
    return factual
def c_factual(instance, rule_list, class_val, lam, beta=None):
    """Return the lambda-quotient factual as explained in [ref].

    Rules predicting ``class_val`` are taken in decreasing order of
    association degree (``matching * weight``).  Iteration stops at the
    first gap where consecutive degrees differ by a factor greater than
    ``1 + lam`` — unless ``beta`` is given and the accumulated degree has
    not yet reached it (minimum-mass factual).

    Parameters
    ----------
    instance : dict, {feature: {set_1: pert_1, set_2: pert_2, ...}, ...}
        Fuzzy representation of the instance with all the features and
        pertenence degrees to each fuzzy set.
    rule_list : list(Rule)
        List of candidate rules to form part of the factual.
    class_val : str
        Predicted value that the factual will explain.
    lam : float, between (0,1)
        Lambda quotient to determine which rules form part of the factual.
    beta : float, between (0,1), optional
        If passed, minimum accumulated association degree required before
        the lambda cut applies, by default None.

    Returns
    -------
    list(Rule)
        List of factual rules.
    """
    class_rules = _get_class_value_rules(_fired_rules(instance, rule_list),
                                         class_val)
    class_rules.sort(key=lambda r: r.matching(instance) * r.weight,
                     reverse=True)
    factual = [class_rules[0]]
    previous = class_rules[0].matching(instance) * class_rules[0].weight
    mass = previous
    for rule in class_rules[1:]:
        current = rule.matching(instance) * rule.weight
        # Cut at a lambda-sized gap, but only once the beta mass (if any)
        # has been reached.
        if previous / current > 1 + lam and (beta is None or beta <= mass):
            break
        previous = current
        mass += current
        factual.append(rule)
    return factual
|
{"hexsha": "94ec192b23ef4f554adf431266a95a6399ab0953", "size": 5949, "ext": "py", "lang": "Python", "max_stars_repo_path": "teacher/explanation/_factual.py", "max_stars_repo_name": "Kaysera/fuzzy-lore", "max_stars_repo_head_hexsha": "128131e0f41f480d509b63c5e75d0ce58f07bae4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-09T16:54:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T13:28:31.000Z", "max_issues_repo_path": "teacher/explanation/_factual.py", "max_issues_repo_name": "Kaysera/fuzzy-lore", "max_issues_repo_head_hexsha": "128131e0f41f480d509b63c5e75d0ce58f07bae4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-17T16:30:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T17:54:08.000Z", "max_forks_repo_path": "teacher/explanation/_factual.py", "max_forks_repo_name": "Kaysera/fuzzy-lore", "max_forks_repo_head_hexsha": "128131e0f41f480d509b63c5e75d0ce58f07bae4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.387283237, "max_line_length": 115, "alphanum_fraction": 0.6673390486, "include": true, "reason": "import numpy", "num_tokens": 1415}
|
% Demo: run SIMLR clustering on four single-cell test datasets and report
% NMI against the true labels, plus a 2-D scatter visualization per dataset.
clear
clc
close all
addpath('data')
addpath('src')
% datasets shipped in data/ as Test_<name>.mat (no semicolon: echoed on purpose)
dataset = {'1_mECS', '2_Kolod', '3_Pollen', '4_Usoskin'}
for i = 1:4
    % perform the analysis for the current dataset
    load(['Test_' dataset{i}]);
    C = max(true_labs); %%% number of clusters
    rng(i,'twister'); %%% for reproducibility
    % SIMLR returns cluster labels y, similarity S, latent F, 2-D embedding
    % ydata and kernel weights alpha; 10 is the number of neighbors.
    [y, S, F, ydata,alpha] = SIMLR(in_X,C,10);
    % report NMI values
    NMI_i = Cal_NMI(y,true_labs);
    fprintf(['The NMI value for dataset ' dataset{i} ' is %f\n'], NMI_i);
    % visualization
    figure;
    gscatter(ydata(:,1),ydata(:,2),true_labs);
end
|
{"author": "BatzoglouLabSU", "repo": "SIMLR", "sha": "bf44967cd40d9d4c789ecf866b3aae15ae6190f5", "save_path": "github-repos/MATLAB/BatzoglouLabSU-SIMLR", "path": "github-repos/MATLAB/BatzoglouLabSU-SIMLR/SIMLR-bf44967cd40d9d4c789ecf866b3aae15ae6190f5/MATLAB/Matlab_main_demo_SIMLR.m"}
|
#!/usr/bin/env python
from __future__ import division
from builtins import range
from .._externals.srm import SRM
from .procrustes import procrustes
import numpy as np
from .format_data import format_data as formatter
from .._shared.helpers import memoize
import warnings
@memoize
def align(data, align='hyper', normalize=None, ndims=None, method=None,
          format_data=True):
    """
    Aligns a list of arrays

    This function takes a list of high dimensional arrays and 'hyperaligns' them
    to a 'common' space, or coordinate system following the approach outlined by
    Haxby et al, 2011. Hyperalignment uses linear transformations (rotation,
    reflection, translation, scaling) to register a group of arrays to a common
    space. This can be useful when two or more datasets describe an identical
    or similar system, but may not be in same coordinate system. For example,
    consider the example of fMRI recordings (voxels by time) from the visual
    cortex of a group of subjects watching the same movie: The brain responses
    should be highly similar, but the coordinates may not be aligned.

    Haxby JV, Guntupalli JS, Connolly AC, Halchenko YO, Conroy BR, Gobbini
    MI, Hanke M, and Ramadge PJ (2011)  A common, high-dimensional model of
    the representational space in human ventral temporal cortex.  Neuron 72,
    404 -- 416. (used to implement hyperalignment, see https://github.com/PyMVPA/PyMVPA)

    Brain Imaging Analysis Kit, http://brainiak.org. (used to implement Shared Response Model [SRM], see https://github.com/IntelPNI/brainiak)

    Parameters
    ----------
    data : numpy array, pandas df, or list of arrays/dfs
        A list of Numpy arrays or Pandas Dataframes

    align : str or dict
        If str, either 'hyper' or 'SRM'.  If 'hyper', alignment algorithm will be
        hyperalignment. If 'SRM', alignment algorithm will be shared response
        model.  You can also pass a dictionary for finer control, where the 'model'
        key is a string that specifies the model and the params key is a dictionary
        of parameter values (default : 'hyper').

    format_data : bool
        Whether or not to first call the format_data function (default: True).

    normalize : None
        Deprecated argument.  Please use new analyze function to perform
        combinations of transformations

    ndims : None
        Deprecated argument.  Please use new analyze function to perform
        combinations of transformations

    Returns
    ----------
    aligned : list
        An aligned list of numpy arrays
    """
    # if model is None, just return data
    if align is None:
        return data
    elif isinstance(align, dict):
        if align['model'] is None:
            return data
        # NOTE(review): a dict with a non-None 'model' falls through both
        # alignment branches below and the function returns None — confirm
        # whether dict dispatch was ever implemented.
    else:
        if method is not None:
            warnings.warn('The method argument will be deprecated. Please use align. See the API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align')
            align = method
        if align is True:
            warnings.warn("Setting align=True will be deprecated. Please specify the \
                type of alignment, i.e. align='hyper'. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align")
            align = 'hyper'

    # common format
    if format_data:
        data = formatter(data, ppca=True)

    # BUG FIX: was `len(data) is 1` — `is` compares object identity, not
    # value, and is invalid for int comparison (SyntaxWarning in 3.8+).
    if len(data) == 1:
        warnings.warn('Data in list of length 1 can not be aligned. '
                      'Skipping the alignment.')

    if data[0].shape[1] >= data[0].shape[0]:
        warnings.warn('The number of features exceeds number of samples. This can lead \
            to overfitting. We recommend reducing the dimensionality to be \
            less than the number of samples prior to hyperalignment.')

    if (align == 'hyper') or (method == 'hyper'):
        ## STEP 0: STANDARDIZE SIZE AND SHAPE ##
        # Trim every array to the smallest row count and zero-pad columns
        # up to the largest column count so all arrays share one shape.
        sizes_0 = [x.shape[0] for x in data]
        sizes_1 = [x.shape[1] for x in data]
        R = min(sizes_0)
        C = max(sizes_1)
        # Build each standardized array independently (the original used
        # `[np.empty(...)] * len(data)`, a list of aliases to one array).
        m = [None] * len(data)
        for idx, x in enumerate(data):
            y = x[0:R, :]
            missing = C - y.shape[1]
            add = np.zeros((y.shape[0], missing))
            m[idx] = np.append(y, add, axis=1)

        ## STEP 1: TEMPLATE ##
        # Iteratively procrustes-align each array to the running average.
        for x in range(0, len(m)):
            if x == 0:
                template = np.copy(m[x])
            else:
                # `nxt` avoids shadowing the builtin `next`
                nxt = procrustes(m[x], template / (x + 1))
                template += nxt
        template /= len(m)

        ## STEP 2: NEW COMMON TEMPLATE ##
        # align each subj to the template from STEP 1
        template2 = np.zeros(template.shape)
        for x in range(0, len(m)):
            nxt = procrustes(m[x], template)
            template2 += nxt
        template2 /= len(m)

        # STEP 3 (below): ALIGN TO NEW TEMPLATE
        aligned = [procrustes(x, template2) for x in m]
        return aligned

    elif (align == 'SRM') or (method == 'SRM'):
        # SRM expects (features x samples); transpose in and back out.
        data = [i.T for i in data]
        srm = SRM(features=np.min([i.shape[0] for i in data]))
        fit = srm.fit(data)
        return [i.T for i in srm.transform(data)]
|
{"hexsha": "846a7423e82a5385e61f8007de3d707e81b644dd", "size": 5648, "ext": "py", "lang": "Python", "max_stars_repo_path": "hypertools/tools/align.py", "max_stars_repo_name": "mewbak/hypertools", "max_stars_repo_head_hexsha": "bc2947737be8bd5a6e2a3bdca84132f6fee8989c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1681, "max_stars_repo_stars_event_min_datetime": "2017-01-28T00:28:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T00:57:13.000Z", "max_issues_repo_path": "hypertools/tools/align.py", "max_issues_repo_name": "mewbak/hypertools", "max_issues_repo_head_hexsha": "bc2947737be8bd5a6e2a3bdca84132f6fee8989c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 170, "max_issues_repo_issues_event_min_datetime": "2017-01-27T22:59:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T03:47:46.000Z", "max_forks_repo_path": "hypertools/tools/align.py", "max_forks_repo_name": "mewbak/hypertools", "max_forks_repo_head_hexsha": "bc2947737be8bd5a6e2a3bdca84132f6fee8989c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 180, "max_forks_repo_forks_event_min_datetime": "2017-02-01T04:34:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T15:46:23.000Z", "avg_line_length": 39.7746478873, "max_line_length": 213, "alphanum_fraction": 0.6099504249, "include": true, "reason": "import numpy", "num_tokens": 1330}
|
###########################################################################################
##      Nighttime light 1992-2018 in Mexican states'                                     ##
##                                                                                       ##
##      Code to clip worldwide files using Mexican states' poligons                      ##
##      Harmonized luminosity rasters from https://www.nature.com/articles/s41597-020-0510-y ##
##                                                                                       ##
##      Prepared by Eric Magar 27apr2021                                                 ##
##      emagar at itam dot mx                                                            ##
###########################################################################################

## # OJO: when using spTranform in script, use line below for google earth, or next line for OSM/google maps
#x.map <- spTransform(x.map, CRS("+proj=longlat +datum=WGS84"))
#x.map <- spTransform(x.map, osm()) # project to osm native Mercator

# to use osm backgrounds
library(rJava)
library(OpenStreetMap)
library(raster)

rm(list = ls())

# Two-to-three letter codes for the 32 Mexican states; list order fixes the
# order of every per-state list and of the exported raster files below.
edos <- c("ags", "bc", "bcs", "cam", "coa", "col", "cps", "cua", "df", "dgo", "gua", "gue", "hgo", "jal", "mex", "mic", "mor", "nay", "nl", "oax", "pue", "que", "qui", "san", "sin", "son", "tab", "tam", "tla", "ver", "yuc", "zac")

wd <- c("~/Dropbox/data/elecs/MXelsCalendGovt/redistrict/ife.ine/mapasComparados/loc/maps/0code/")
setwd(wd)
dd <- c("~/Dropbox/data/elecs/MXelsCalendGovt/elecReturns/")

# geospatial data
library(spdep);
library(maptools)
# used to determine what datum rojano data has
library(rgdal)
#gpclibPermit()

# Read all state borders (2018 federal districting shapefiles), one per state.
# The original script repeated this stanza 32 times verbatim; a loop over
# `edos` produces the same ed.map list in the same order.
ed.map <- list()
for (e in edos) {
    tmp <- paste("../../../fed/shp/disfed2018/", e, sep = "")  # archivo con mapas 2018
    ## tmp <- paste(md, e, sep = "")                           # archivo con mapas rojano
    tmp <- readOGR(dsn = tmp, layer = 'ENTIDAD')
    # projects to a different datum with long and lat
    tmp <- spTransform(tmp, osm())  # project to osm native Mercator
    ed.map[[e]] <- tmp
}

# open worldwide raster layer downloaded from source
setwd("~/Dropbox/data/mapas/luminosity/raster/") # archivo de luminosidad
#r <- raster("Harmonized_DN_NTL_2014_simVIIRS.tif") # filenames 2014-
r <- raster("Harmonized_DN_NTL_1998_calDMSP.tif")   # filenames 1992-2013
xtnt <- extent(-130,-70,10,40) # select rectangle closer to mexico
r <- crop(r, xtnt)
# projects to a different datum with long and lat
r <- projectRaster(r, crs=osm()) # project to osm native Mercator
#plot(r)

# crop luminosity by state (again a loop instead of 32 repeated lines)
l1998 <- list()
for (e in edos) {
    l1998[[e]] <- crop(r, ed.map[[e]])
}

# export one clipped raster per state
for (i in 1:32) writeRaster(l1998[[i]], filename = paste("~/Dropbox/data/mapas/luminosity/raster", edos[i], "l1998.tif", sep = "/"))

# quick visual check of one state
# FIX: the original called plot(l2012$bc), but no object l2012 exists in this
# script -- the cropped-luminosity list is l1998.
plot(l1998$bc)
plot(ed.map$bc, add = TRUE)
plot(ed.map$son, add = TRUE)
|
{"hexsha": "61b505738d32392550ad430c0e370d52f0cba06f", "size": 12433, "ext": "r", "lang": "R", "max_stars_repo_path": "code/clip-estados.r", "max_stars_repo_name": "emagar/luminosity", "max_stars_repo_head_hexsha": "db40e79902ed9bf6ad517317f296f0fb82fd01d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-01T21:26:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-01T21:26:10.000Z", "max_issues_repo_path": "code/clip-estados.r", "max_issues_repo_name": "emagar/luminosity", "max_issues_repo_head_hexsha": "db40e79902ed9bf6ad517317f296f0fb82fd01d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/clip-estados.r", "max_forks_repo_name": "emagar/luminosity", "max_forks_repo_head_hexsha": "db40e79902ed9bf6ad517317f296f0fb82fd01d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7220447284, "max_line_length": 230, "alphanum_fraction": 0.6136089439, "num_tokens": 3935}
|
module TestSweep

using Test
using TimeZoneLookup
using TimeZoneLookup: V

# Exercise the custom ordering of V points: the first coordinate dominates,
# and ties on it are broken in reverse by the second coordinate.
@testset "Points comparison" begin
    @test V(1, 2) > V(1, 3)
    @test V(1, 2) < V(2, 2)
end

end
|
{"hexsha": "db92a2c8fec4a09aa8d18a884b93b325c4e2fe77", "size": 175, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test03_sweep.jl", "max_stars_repo_name": "Arkoniak/TimeZoneLookup.jl", "max_stars_repo_head_hexsha": "68c18ef9f4875c7c836b7fdb70b4c13c00de6b7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test03_sweep.jl", "max_issues_repo_name": "Arkoniak/TimeZoneLookup.jl", "max_issues_repo_head_hexsha": "68c18ef9f4875c7c836b7fdb70b4c13c00de6b7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-13T21:22:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-13T21:23:09.000Z", "max_forks_repo_path": "test/test03_sweep.jl", "max_forks_repo_name": "Arkoniak/TimeZoneLookup.jl", "max_forks_repo_head_hexsha": "68c18ef9f4875c7c836b7fdb70b4c13c00de6b7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.4615384615, "max_line_length": 34, "alphanum_fraction": 0.6685714286, "num_tokens": 66}
|
# A measure obtained by restricting `base` to the set where predicate `f`
# holds. `f` maps a point to a Bool; `base` is the underlying measure.
struct RestrictedMeasure{F,M} <: AbstractMeasure
    f::F      # indicator predicate of the restriction set
    base::M   # the measure being restricted
end
# Log-density of the restriction w.r.t. its base measure: zero on the
# allowed set, -Inf (density zero) outside it.
@inline function logdensity(d::RestrictedMeasure, x)
    return d.f(x) ? 0.0 : -Inf
end
# Density of the restriction w.r.t. its base measure: the indicator of `d.f`.
function density(d::RestrictedMeasure, x)
    return d.f(x) ? 1.0 : 0.0
end
# The underlying measure that the restriction is defined over.
basemeasure(μ::RestrictedMeasure) = μ.base
|
{"hexsha": "5500405a8d7c52ab73cf1ff3fb0d0005642d6af2", "size": 304, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/combinators/restricted.jl", "max_stars_repo_name": "theogf/MeasureBase.jl", "max_stars_repo_head_hexsha": "917304dc4f13e8d353306d599ffc89b2f8b2acac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/combinators/restricted.jl", "max_issues_repo_name": "theogf/MeasureBase.jl", "max_issues_repo_head_hexsha": "917304dc4f13e8d353306d599ffc89b2f8b2acac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/combinators/restricted.jl", "max_forks_repo_name": "theogf/MeasureBase.jl", "max_forks_repo_head_hexsha": "917304dc4f13e8d353306d599ffc89b2f8b2acac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.8823529412, "max_line_length": 52, "alphanum_fraction": 0.6776315789, "num_tokens": 97}
|
import jax
import jax.numpy as jnp
import chex
from typing import Tuple
from ..strategy import Strategy
class SimAnneal(Strategy):
    """Simulated Annealing (Rasdi Rere et al., 2015).

    Reference: https://www.sciencedirect.com/science/article/pii/S1877050915035759

    Maintains a single mean point; `ask` perturbs it with Gaussian noise,
    `tell` replaces it by the generation's best member on improvement or via
    the Metropolis acceptance rule, with geometrically decaying temperature
    and step size.
    """

    def __init__(self, num_dims: int, popsize: int):
        """Simulated Annealing (Rasdi Rere et al., 2015)
        Reference: https://www.sciencedirect.com/science/article/pii/S1877050915035759
        """
        super().__init__(num_dims, popsize)
        self.strategy_name = "SimAnneal"

    @property
    def params_strategy(self) -> chex.ArrayTree:
        """Return default parameters of evolution strategy."""
        return {
            "init_min": 0.0,
            "init_max": 0.0,
            "temp_init": 1.0,
            "temp_limit": 0.1,
            "temp_decay": 0.999,
            "boltzmann_const": 5.0,
            "sigma_init": 0.05,
            "sigma_limit": 0.001,
            "sigma_decay": 0.999,
        }

    def initialize_strategy(
        self, rng: chex.PRNGKey, params: chex.ArrayTree
    ) -> chex.ArrayTree:
        """`initialize` the evolution strategy.

        Draws the initial mean uniformly from [init_min, init_max] and sets
        the starting temperature and perturbation scale. `replace_rng` holds
        a uniform draw consumed by the acceptance test in `tell_strategy`.
        """
        rng_init, rng_rep = jax.random.split(rng)
        initialization = jax.random.uniform(
            rng_init,
            (self.num_dims,),
            minval=params["init_min"],
            maxval=params["init_max"],
        )
        state = {
            "mean": initialization,
            "sigma": params["sigma_init"],
            "temp": params["temp_init"],
            "replace_rng": jax.random.uniform(rng_rep, ()),
        }
        return state

    def ask_strategy(
        self, rng: chex.PRNGKey, state: chex.ArrayTree, params: chex.ArrayTree
    ) -> Tuple[chex.Array, chex.ArrayTree]:
        """`ask` for new proposed candidates to evaluate next.

        Perturbs the current mean with isotropic N(0, sigma^2) noise and
        refreshes the uniform draw used by the next acceptance test.
        """
        rng_init, rng_rep = jax.random.split(rng)
        # Sampling of N(0, 1) noise
        z = jax.random.normal(
            rng_init,
            (self.popsize, self.num_dims),
        )
        state["replace_rng"] = jax.random.uniform(rng_rep, ())
        x = state["mean"] + state["sigma"] * z
        return x, state

    def tell_strategy(
        self,
        x: chex.Array,
        fitness: chex.Array,
        state: chex.ArrayTree,
        params: chex.ArrayTree,
    ) -> chex.ArrayTree:
        """`tell` update to ES state.

        NOTE(review): reads state["best_fitness"], which this class never
        sets - presumably maintained by the `Strategy` base class; confirm.
        """
        best_in_gen = jnp.argmin(fitness)
        gen_fitness, gen_member = fitness[best_in_gen], x[best_in_gen]
        # Positive when the generation's best beats the best seen so far
        improve_diff = state["best_fitness"] - gen_fitness
        improved = improve_diff > 0
        # Metropolis acceptance probability exp(diff / (T * k)): < 1 for a
        # worsening move, and shrinking as the temperature cools.
        metropolis = jnp.exp(
            improve_diff / (state["temp"] * params["boltzmann_const"])
        )
        # Replace mean either if improvement or random metropolis acceptance.
        # FIX: accept when the uniform draw falls BELOW the acceptance
        # probability (u < exp(diff/(T*k))). The original compared with `>`,
        # which inverted the Metropolis criterion (acceptance of worse moves
        # became MORE likely as temperature dropped).
        rand_replace = jnp.logical_or(
            improved, state["replace_rng"] < metropolis
        )
        # Note: We replace by best member in generation (not completely random)
        state["mean"] = jax.lax.select(rand_replace, gen_member, state["mean"])
        # Geometrically decay the perturbation scale down to sigma_limit
        state["sigma"] = jax.lax.select(
            state["sigma"] > params["sigma_limit"],
            state["sigma"] * params["sigma_decay"],
            state["sigma"],
        )
        # Geometrically cool the temperature down to temp_limit
        state["temp"] = jax.lax.select(
            state["temp"] > params["temp_limit"],
            state["temp"] * params["temp_decay"],
            state["temp"],
        )
        return state
|
{"hexsha": "671c4abe24a91a76f60fb0c9435d564d2fb1a565", "size": 3496, "ext": "py", "lang": "Python", "max_stars_repo_path": "evosax/strategies/sim_anneal.py", "max_stars_repo_name": "RobertTLange/evosax", "max_stars_repo_head_hexsha": "2646be9053f08c068347cda27736c1399454eedd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 102, "max_stars_repo_stars_event_min_datetime": "2022-02-17T07:55:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:21:13.000Z", "max_issues_repo_path": "evosax/strategies/sim_anneal.py", "max_issues_repo_name": "RobertTLange/evosax", "max_issues_repo_head_hexsha": "2646be9053f08c068347cda27736c1399454eedd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2022-03-06T14:20:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T13:31:13.000Z", "max_forks_repo_path": "evosax/strategies/sim_anneal.py", "max_forks_repo_name": "RobertTLange/evosax", "max_forks_repo_head_hexsha": "2646be9053f08c068347cda27736c1399454eedd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2022-02-24T14:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T21:20:24.000Z", "avg_line_length": 33.9417475728, "max_line_length": 86, "alphanum_fraction": 0.5700800915, "include": true, "reason": "import jax", "num_tokens": 853}
|
#!/usr/bin/env python
import numpy as np
import rospy
import matplotlib.pyplot as plt
from sensor_msgs.msg import NavSatFix
f = plt.figure()
# Accumulated (latitude, longitude) rows from incoming NavSatFix messages.
filter_points = np.empty((0, 2), float)


def callback(nav_sat_fix):
    """Append the fix's (latitude, longitude) to the global point buffer.

    FIX: removed a stray, incomplete ``def callback1()`` line (no colon, no
    body) that was a SyntaxError and prevented this script from running.
    """
    global filter_points
    lat = nav_sat_fix.latitude
    lon = nav_sat_fix.longitude
    # np.append copies the whole array each call (O(n) per message); fine for
    # short runs, but switch to a list + one final np.array for long logs.
    filter_points = np.append(filter_points, np.array([[lat, lon]]), axis=0)
if __name__ == '__main__':
    # Subscribe to the filtered GPS fix topic and collect points until the
    # node shuts down, then scatter-plot everything gathered.
    rospy.init_node('imu_rotate', anonymous=False)
    rospy.Subscriber("gps/filtered", NavSatFix, callback)
    # rospy.Subscriber("gps/fix", NavSatFix, callback)
    try:
        rospy.spin()  # blocks until shutdown (e.g. Ctrl-C)
        # NOTE(review): given the append order in callback, x is latitude and
        # y is longitude - confirm this orientation is intended for the plot.
        plt.scatter(filter_points[:, 0], filter_points[:, 1])
        plt.show()
    except rospy.ROSInterruptException:
        pass
|
{"hexsha": "0d50973596ea1a9208402b647dd88a9854aab568", "size": 751, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_sat.py", "max_stars_repo_name": "HMellor/LIO-SAM", "max_stars_repo_head_hexsha": "bf08441fee7de68b5d3a0efe8f0ea2e4d70ca2e9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-27T23:41:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-27T23:41:02.000Z", "max_issues_repo_path": "src/plot_sat.py", "max_issues_repo_name": "HMellor/LIO-SAM", "max_issues_repo_head_hexsha": "bf08441fee7de68b5d3a0efe8f0ea2e4d70ca2e9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_sat.py", "max_forks_repo_name": "HMellor/LIO-SAM", "max_forks_repo_head_hexsha": "bf08441fee7de68b5d3a0efe8f0ea2e4d70ca2e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-11T20:56:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T20:56:05.000Z", "avg_line_length": 24.2258064516, "max_line_length": 76, "alphanum_fraction": 0.689747004, "include": true, "reason": "import numpy", "num_tokens": 185}
|
# Set up and load data

# Includes
import sys
import os  # FIX: was imported twice; one import suffices
import numpy as np
import json

# Setup paths containing utility module
curr_folder = os.getcwd()
sys.path.insert(0, os.path.join(curr_folder, '../app'))

# Load the data (import deliberately deferred until after the sys.path tweak)
from utils import load_SQuAD_train
arts = load_SQuAD_train()
|
{"hexsha": "d5b9050d95886e046bdb433fac150e234f745d31", "size": 290, "ext": "py", "lang": "Python", "max_stars_repo_path": "play/test.py", "max_stars_repo_name": "davestanley/animated-succotash", "max_stars_repo_head_hexsha": "174f08063c222ead153bf9db67c75e2843301912", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "play/test.py", "max_issues_repo_name": "davestanley/animated-succotash", "max_issues_repo_head_hexsha": "174f08063c222ead153bf9db67c75e2843301912", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "play/test.py", "max_forks_repo_name": "davestanley/animated-succotash", "max_forks_repo_head_hexsha": "174f08063c222ead153bf9db67c75e2843301912", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0588235294, "max_line_length": 54, "alphanum_fraction": 0.7586206897, "include": true, "reason": "import numpy", "num_tokens": 76}
|
!************************************************************************
MODULE si3d_procedures
!************************************************************************
!
! Purpose: Procedures for the semi-implicit 3-D (si3d) hydrodynamic
! model.
!
!-------------------------------------------------------------------------
USE omp_lib
USE si3d_Types
USE si3d_ecomod
USE si3d_BoundaryConditions
USE si3d_Mixing
USE si3d_Utils
IMPLICIT NONE
SAVE
CONTAINS
!***********************************************************************
SUBROUTINE init
!***********************************************************************
!
!  Purpose: To define initial conditions for the simulation.
!
!  Operates entirely on module-level state (free surface s/sp/spp, layer
!  thicknesses hp/hup/hvp and their p/pp copies, top-wet-layer indices
!  k1z/k1u/k1v, velocity and eddy-coefficient arrays), declared in the
!  USEd modules (si3d_Types et al.).
!
!-----------------------------------------------------------------------

   !.....Local variables.....
   INTEGER :: i, j, k, l, ios, &
              kmx , kmy , kms, &
              nwlsp, nwlup, nwlvp, js, c
   REAL :: x, rhoz, salz, zeta0, amp, deltZ

   ! ... Initialize time step counter, time in seconds and hours from &
   !     start of simulations (these are global variables defined in
   !     si3d_types)
   n = 0; its = 0; thrs = 0.0E0

   !.....Define initial water surface elevations.....
   SELECT CASE (testcase)
   CASE(1)
     ! Surface-seiche test: cosine free-surface profile along x
     PRINT *, '**** Free surface initilized for SW test case ****'
     amp = 0.05;
     DO i = 1, im1
       x = REAL(i*idx) - 1.5*dx
       zeta0 = amp*COS(pi*x/xl)
       deltZ = zlevel(k1+1); ! Move initial free surface to first interface
       DO j = 1, jm1
         sp (ij2l(i,j)) = zeta0 - deltZ;
         spp (ij2l(i,j)) = zeta0 - deltZ;
         s (ij2l(i,j)) = zeta0 - deltZ;
       END DO
     END DO
   CASE DEFAULT
     ! Uniform initial free surface elevation read from input
     s = zetainit;
     sp = zetainit;
     spp = zetainit;
   END SELECT

   ! ... Define thickness of cells at s-, u- & v-points
   hup = ZERO;
   hvp = ZERO;
   hp = ZERO;
   k1z = km1;
   k1u = km1;
   k1v = km1;
   DO l = 1, lm

     ! ... Map 3D-(i,j) from 2D-l indexes
     i = l2i(l); j = l2j(l);

     ! ... At zeta-points: thickness clipped between layer interfaces,
     !     bottom depth hhs and free surface -sp; count wet layers and
     !     record the shallowest wet layer index in k1z
     kms = kmz(l)
     nwlsp = 0
     DO k = k1, kms
       hp (k,l)=AMIN1(zlevel(k+1),hhs(l)) - &
              & AMAX1(zlevel( k),-sp(l))
       IF(hp(k,l) > HMIN) THEN
         nwlsp = nwlsp + 1;
         IF(nwlsp==1) k1z(l) = k
       ELSE
         hp(k,l)=ZERO;
       ENDIF
     ENDDO

     ! Set zeta = hhs(i,j) for columns with mask2d = TRUE (i.e.
     ! potentially wet) but initially dry (k1z = km1).
     IF (k1z(l) == km1) THEN
       s (l) = -hhs(l)+HMIN;
       sp (l) = -hhs(l)+HMIN;
       spp(l) = -hhs(l)+HMIN;
     ENDIF

     ! ... At u-points (only where the east neighbour is wet); free surface
     !     taken as the average of the two adjacent columns
     IF (mask2d(i+1,j)) THEN
       kmx = MIN(kmz(l),kmz(lEC(l)))
       nwlup = 0
       DO k = k1, kmx
         hup(k,l)=AMIN1(zlevel(k+1),hhu(l)) - &
                & AMAX1(zlevel( k),-(sp(l)+sp(lEC(l)))/2.)
         IF(hup(k,l) > HMIN) THEN
           nwlup = nwlup + 1;
           IF(nwlup==1) k1u(l) = k
         ELSE
           hup(k,l)=ZERO;
         ENDIF
       ENDDO
     ENDIF

     ! ... At v-points (only where the north neighbour is wet)
     IF (mask2d(i,j+1)) THEN
       kmy = MIN(kmz(l),kmz(lNC(l)))
       nwlvp = 0
       DO k = k1, kmy
         hvp(k,l)=AMIN1(zlevel(k+1),hhv(l)) - &
                & AMAX1(zlevel( k),-(sp(l)+sp(lNC(l)))/2.)
         IF(hvp(k,l) > HMIN) THEN
           nwlvp = nwlvp + 1;
           IF(nwlvp==1) k1v(l) = k
         ELSE
           hvp(k,l)=ZERO;
         ENDIF
       ENDDO
     ENDIF
   ENDDO

   ! ... Copy present thicknesses to the past (pp) and working arrays
   hupp = hup; hu = hup;
   hvpp = hvp; hv = hvp;
   hpp = hp ; h = hp
   contNG=0;

   !.....Initialize 1-d arrays.....
   uout = 0.0;
   vout = 0.0;
   wout = 0.0;
   uhout = 0.0
   Avout = 0.0;
   Dvout = 0.0;
   sal1 = 0.0;
   ds = 0.0;

   !.....Initialize solution arrays.....
   eagx = 0.0;
   eagy = 0.0;
   earx = 0.0;
   eary = 0.0
   sx = 0.0;
   sy = 0.0;
   dd = 0.0;
   qq = 0.0;
   rr = 1.0;

   !.....Initialize velocity arrays.....
   u=u0; up=u0; upp=u0; uh=u0h0; uhp=u0h0; uhpp=u0h0
   v=v0; vp=v0; vpp=v0; vh=v0h0; vhp=v0h0; vhpp=v0h0
   wp=w0

   ! ... Initialize eddy coefficient arrays .....
   Av=0.0;
   Dv=0.0;

   !.....Initialize arrays used in the soln of the matrix mom eq.....
   ex=0.0; agx = 0.0; arx = 0.0; agy = 0.0; ary = 0.0

   !.....Initialize 3D active scalar and density arrays.....
   CALL InitializeScalarFields

   ! ... Inialize turbulence quanties for Turbulence Model
   CALL InitializeTurbulenceModel

  END SUBROUTINE init
!************************************************************************
SUBROUTINE InitializeScalarFields
!************************************************************************
!
!  Purpose: Initialize scalar fields - The initial condition field
!           is either read from an ASCII file (si3d_init.txt) or it
!           is initialized using fields which will excite specific
!           hydrodynamic responses in the lake (test case = 2).
!           For test case 1 the initial conditions are also hardcoded,
!           using uniform temperature in that case.
!
!-------------------------------------------------------------------------

   !.....Local variables.....
   INTEGER :: i, j, k, l, ios, imm1, jmm1, kmm1, ncols, ncols1, nc, &
              nsets, ia, ib, nn, ntr1
   REAL :: Vamp, rhoamp, Ts, Tb, & ! Used to initialize IW-problem
           NBV, meandepth, length, &
           rhohere, x, z, rhos, rhob
   CHARACTER(LEN=18) :: initfmt
   ! NOTE(review): the SELECT CASE below dispatches on this hard-coded local
   ! parameter (= 4, i.e. always "read from file"); confirm the shadowing of
   ! any like-named module variable is intended.
   INTEGER, PARAMETER :: InitProc = 4
   REAL, ALLOCATABLE, DIMENSION(:,:) :: Scalardepthile

   SELECT CASE (initproc)

   ! ... OPTION 1 - Surface Seiche (use uniform temperatures) ------
   CASE (1)

     salp = 15.
     sal = salp;
     salpp = salp;

     ! ... Initialize Non-active scalar fields
     IF (ntr > 0) THEN
       DO nn = 1, ntr;
         tracer(:,:,nn) = sal;
       ENDDO
       tracerpp = tracer;
     ENDIF

   ! ... OPTION 2 - Internal Seiche (use analytical solution) ------
   CASE (2)

     ! Linear internal-wave mode: density perturbation amplitude derived
     ! from velocity amplitude Vamp and the Brunt-Vaisala frequency
     Vamp = 0.10;
     Ts = 25.00;
     Tb = 15.00; ! Parameters used to define the solution
     meandepth = zl; ! FLOAT(km-k1+1)*ddz
     length = FLOAT(im-i1+1)*dx
     rhos = 1028.*(1.-1.7E-4*(Ts-10.)); ! Surface density
     rhob = 1028.*(1.-1.7E-4*(Tb-10.)); ! Bottom density
     drho = rhos - rhob; ! Change in density from top to bottom
     NBV=SQRT(-g/rhos*drho/meandepth); ! Brunt-Vaisala frequency
     rhoamp=rhos*Vamp*NBV/g;
     DO l = 1, lm
       i = l2i(l); j = l2j(l);
       x = FLOAT(i) * dx - 1.5 * dx
       DO k = k1, km
         z = zlevel(k+1) - 0.5 * hp(k,l); ! FLOAT(km-k1+1)*ddz
         rhohere = rhos -z*drho/meandepth+rhoamp*COS(pi*x/length)*SIN(pi*z/meandepth);
         ! Invert the linear equation of state to recover the scalar value
         salp(k,l)= 10.-((rhohere-1028.)/1028.)/1.7E-4;
       ENDDO
     END DO
     sal = salp;
     salpp = salp;

     ! ... Initialize Non-active scalar fields
     IF (ntr > 0) THEN
       DO nn = 1, ntr;
         tracer(:,:,nn) = sal;
       ENDDO
       tracerpp = tracer;
     ENDIF
     PRINT *, '**** Scalar field initilized for IW test case ****'

   ! ... OPTION 3 - Use analytical solution in half a closed basin to test
   !     the nesting algorithms nesting. All variables defining the basin
   !     & the IW need to be the same in the fine & coarse grid -
   !     In the fine grid we only modify the length and x.
   CASE (3)

     Vamp = 0.10; Ts = 25.00; Tb = 15.0; ! Make sure these constants are as in CASE (1)
     meandepth = zl; ! FLOAT(km-k1+1)*ddz
     length = FLOAT(im-i1+1)*dx; length = length * 2.;
     rhos = 1028.*(1.-1.7E-4*(Ts-10.)); ! Surface density
     rhob = 1028.*(1.-1.7E-4*(Tb-10.)); ! Bottom density
     drho = rhos - rhob; ! Change in density from top to bottom
     NBV=SQRT(-g/rhos*drho/meandepth);
     rhoamp=rhos*Vamp*NBV/g;
     DO l = 1, lm
       i = l2i(l); j = l2j(l);
       ! x is offset by half the (doubled) basin length for the fine grid
       x = FLOAT(i) * dx - 1.5 * dx; x = x + length/2.;
       DO k = k1, km
         z = zlevel(k+1) - 0.5 * hp(k,l); ! z = FLOAT(k) * ddz - 1.5 * ddz
         rhohere = rhos -z*drho/meandepth+rhoamp*COS(pi*x/length)*SIN(pi*z/meandepth);
         salp(k,l)= 10.-((rhohere-1028.)/1028.)/1.7E-4;
       ENDDO
     END DO
     sal = salp;
     salpp = salp;

     ! ... Initialize Non-active scalar fields
     IF (ntr > 0) THEN
       DO nn = 1, ntr;
         tracer(:,:,nn) = sal;
       ENDDO
       tracerpp = tracer;
     ENDIF

   ! ... All other options - Initialize from file ---------------------
   CASE DEFAULT

     !.....Open initial condition file.....
     sal_ic_file = 'si3d_init.txt'
     OPEN (UNIT=i4, FILE='si3d_init.txt', STATUS="OLD", FORM="FORMATTED", IOSTAT=ios)
     IF(ios /= 0) CALL open_error ( "Error opening "//sal_ic_file, ios )

     !.....Allocate space for local variables used to read IC ...
     ! One column for the active scalar plus one per tracer
     ALLOCATE ( Scalardepthile (km1, ntr+1), STAT = ios )
     IF (ios /= 0) THEN; PRINT *, 'Error alloc. init. arrays'; STOP; ENDIF

     ! Skip over first five header records in open boundary condition file
     READ (UNIT=i4, FMT='(/////)', IOSTAT=ios)
     IF (ios /= 0) CALL input_error ( ios, 13 )

     ! Write the format of the data records into an internal file
     WRITE (UNIT=initfmt, FMT='("(10X,",I3,"G11.2)")') ntr+1

     ! Read data array and store it in array Scalardepthile
     ! (debug prints left in place)
     print *,"km1:",km1
     DO k = 1, km1
       print *,"leo:",k
       READ (UNIT=i4, FMT=initfmt, IOSTAT=ios) &
            (Scalardepthile(k,nn), nn = 1, ntr+1)
       IF (ios /= 0) CALL input_error ( ios, 14 )
     END DO

     ! ... Initialize the active scalar field (allways)
     salp = 0.0
     DO k = 1, km1;
       salp(k,:) = Scalardepthile(k,1)
     END DO;
     sal = salp;
     salpp = salp;

     ! ... Initialize non-active scalar fields (if requested)
     IF (ntr > 0) THEN
       tracer = 0.0;
       IF (ecomod < 0 ) THEN
         CALL InitTracerCloud
       ELSE
         DO nn = 1, ntr
           DO k = 1, km1
             tracer(k,:,nn) = Scalardepthile(k,nn+1)
           ENDDO
         END DO ! ... End loop over tracers
       END IF
       tracerpp = tracer;
     ENDIF

     ! ... Deallocate array holding scalar concs.
     DEALLOCATE ( Scalardepthile )

     ! ... Close io file
     CLOSE (i4)

   END SELECT

   ! ... Initialize density field at time n-1 & n
   DO l = 1, lm1; DO k = k1, km1;
     rhop(k,l) = densty_s ( salp(k,l), t0 ) - 1000.
   END DO; END DO

  END SUBROUTINE InitializeScalarFields
!***********************************************************************
SUBROUTINE fd(n,t_exmom2,t_matmom2,t_matcon2,Bhaxpp,Bhaypp,Bth,Bth1,Bstart,Bend, &
            & lSCH,lNCH,lWCH,lECH,Bex,Bth2,Beagx,Bearx,Bagx,Barx,Beagy,Beary,Bagy,Bary,Bsx,Bsy, &
            & Bdd,Bqq,Brr,Bth3,Bth4,istep,lastiter, &
            & ShearProduction,BuoyancyProduction, Dissipation, TKinE, &
            & Qsw,Qn,Qlw,eta,Ta,Pa,RH,Cc,Qsw2dB,Qlw2dB,Ta2dB,RH2dB,Cc2dB,uair2dB,vair2dB, &
            & uairB,vairB,cdwB,heatSourceB,QswFrB,iter,bclncxB,hupdrhoB,its,thrs)
!***********************************************************************
!
!  Purpose: fd is the supervisory subroutine for the finite
!           difference method. It advances the solution one time
!           step. Vertical diffusion is treated implicitly.
!
!  Notes for maintainers:
!  - This routine is executed inside an OpenMP parallel region: the
!    `!$omp barrier` directives below separate phases that read arrays
!    written by other threads in the previous phase. Do NOT reorder the
!    CALLs or remove barriers without re-checking those dependencies.
!  - The B* dummy arrays are dimensioned over columns Bstart:Bend+1;
!    they appear to be the calling thread's private slice of the
!    corresponding global work arrays (e.g. Bagx <-> agx) -- presumed
!    from naming and usage in matmom/matcon; confirm against caller.
!  - n, istep, iter, lastiter: time-step / trapezoidal-iteration
!    bookkeeping supplied by the caller; thrs/its: simulation time
!    passed through to the boundary-condition routines.
!
!-----------------------------------------------------------------------
  REAL, INTENT(INOUT) :: t_exmom2,t_matmom2,t_matcon2
  REAL, INTENT(IN) :: thrs, its
  INTEGER, INTENT(IN) :: Bstart,Bend,istep,n,lastiter,iter
  REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bhaxpp, Bhaypp, Bth, Bth1, Bex, heatSourceB,QswFrB
  REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bth2, Bagx, Barx,Bagy,Bary,Bth3,Bth4
  REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Beagx, Bearx,Beagy,Beary,Bsx,Bsy
  REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Bdd, Bqq,Brr,uairB,vairB,cdwB
  INTEGER, DIMENSION (Bstart:Bend+1), INTENT(IN) :: lSCH,lNCH,lWCH,lECH
  REAL, INTENT(INOUT) :: Qsw,Qn,Qlw,eta,Ta,Pa,RH,Cc
  REAL(real_G1), INTENT(INOUT) :: ShearProduction,BuoyancyProduction, Dissipation, TKinE
  REAL, DIMENSION (nmetstat), INTENT(INOUT) :: Qsw2dB,Qlw2dB,Ta2dB,RH2dB,Cc2dB,uair2dB,vair2dB
  REAL,DIMENSION (1:km1), INTENT(INOUT) :: bclncxB,hupdrhoB
  ! ... Local variables
  INTEGER :: itr,l,lol,liter
  REAL, EXTERNAL :: TIMER
  REAL :: tsbar,tebar
  ! ... Define tz used in all following routines ...............
  ! tz scales dt-dependent terms by the reciprocal of istep
  ! (istep distinguishes leapfrog vs. trapezoidal sub-steps).
  tz = 1.0/istep
  !print *,"hihihi:",omp_get_thread_num()
  !$omp barrier
  ! ... Read in nested boundary conditions, if specified .......
  CALL readbcNGB(thrs)
  !$omp barrier
  !print *,"hihihi2:",omp_get_thread_num()
  !.....Assign new values of s or u/v along open boundaries.....
  CALL openbcUVH(thrs)
  !$omp barrier
  !print *,"hihihi3:",omp_get_thread_num()
  !.... Find magnitude of sources and sinks and their scalar loads
  IF (iopss > 0) CALL PointSourceSinkSolve(n,istep,thrs)
  !$omp barrier
  ! ... Assign boundary conditions at free surface .............
  CALL surfbc(n,istep,thrs)
  !$omp barrier
  !print *,"hihihi4:",omp_get_thread_num()
  !.....Assign horizontal eddy viscosity and diffusivity at n ...........
  CALL UpdateHorizontalMixingCoefficients
  !$omp barrier
  !print *,"hihihi5:",omp_get_thread_num()
  !.....Evaluate explicit terms in x-momentum eq................
  CALL exmom(1)
  !$omp barrier
  !print *,"ex1:",sum(ex(:,:))
  !print *,"hihihi6:",omp_get_thread_num()
  !.....Solve a tridiagonal system at each horizontal node
  !     to obtain the matrices for the x-momentum equations.....
  CALL matmom(1,t_matmom2,Bstart,Bend,Bex,Beagx,Bearx,Bagx,Barx,Beagy,Beary,Bagy,Bary,uairB,vairB,cdwB,bclncxB,hupdrhoB)
  !$omp barrier
  !print *,"hihihi7:",omp_get_thread_num()
  !.....Evaluate explicit terms in y-momentum eq................
  CALL exmom(2)
  !$omp barrier
  !print *,"hihihi8:",omp_get_thread_num()
  !$omp barrier
  !! if(omp_get_thread_num() .EQ. 0) THEN
  !!  print *,"ex2:",sum(ex(:,:))
  !! end if
  !eagy=0
  !Beagy=0
  !!  DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  !!        l = id_column(liter)
  !!  Bagy(:,l) = 0.0
  !!  END DO
  !.....Solve a tridiagonal system at each horizontal node
  !     to obtain the matrices for the y-momentum equations.....
  CALL matmom(2,t_matmom2,Bstart,Bend,Bex,Beagx,Bearx,Bagx,Barx,Beagy,Beary,Bagy,Bary,uairB,vairB,cdwB,bclncxB,hupdrhoB)
  !print *,"hihihi9:",omp_get_thread_num()
  !$omp barrier
  CALL matcon(t_matcon2,Bstart,Bend,lWCH,lSCH,Beagx,Bearx,Beagy,Beary,Bsx,Bsy,Bdd,Bqq,Brr)
  !print *,"hihihi10:",omp_get_thread_num()
  !$omp barrier
  !.....Solve implicit system of equations for zeta............
  !CALL SolverBlock ! Original formulation written by P.E. Smith
  CALL SolverSparse(n,Bstart,Bend,lWCH,lSCH,Bsx,Bsy,Bqq,Brr,iter,istep,thrs) ! Formulation by F.J. Rueda
  !print *,"hihihi11:",omp_get_thread_num()
  !.....Reassign new values of s or u/v along open boundaries.....
  CALL openbcUVH(thrs)
  !
  !! if(omp_get_thread_num() .EQ. 0) THEN
  !!  print *,"vhavel:",sum(vh(:,:))
  !! vh(:,:)=0.0
  !! end if
  !!  DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  !!        l = id_column(liter)
  !!  ary(:,l)=Bary(:,l)
  !!  agy(:,l)=Bagy(:,l)
  !!  END DO
  !$omp barrier
  !! if(omp_get_thread_num() .EQ. 0) THEN
  !!  print *,"ary:",sum(ary(:,:))
  !!  print *,"agy:",sum(agy(:,:))
  !! end if
  !.....Solve for velocities explicitly. If DRYING occurs
  !     at this point, dry cells are removed and k1z/k1u/k1v
  !     recalculated (wetting is done after finishing the
  !     calculations for a given time step or iteration) ......
  CALL vel(Bstart,Bend,Bagx,Barx,Bagy,Bary)
  !print *,"hihihi12:",omp_get_thread_num()
  !$omp barrier
  !! if(omp_get_thread_num() .EQ. 0) THEN
  !!  print *,"vhdvel:",sum(vh(:,:))
  !! end if
  ! ... Solve for active scalar transport
  IF (isal /= 0) THEN
    CALL exsal(Bstart,Bend,lSCH,lNCH,lECH,lWCH,Bhaxpp,Bhaypp,Bth3,Bth4,Bth2,Bex,thrs)
    !$omp barrier
    CALL imsal(Bstart,Bend,Bex,heatSourceB)
    !$omp barrier
    CALL openbcSCA(thrs)
  END IF
  !print *,"hihihi13:",omp_get_thread_num()
  !.....Solve for non-active scalar transport
  ! Tracers are only advanced on the last trapezoidal iteration
  ! (lastiter == 1) and only when iterations are enabled (niter > 0).
  IF (ntr > 0    .AND. &
      niter > 0  .AND. &
      lastiter == 1 ) THEN
    ! ... Source/sink model selected by ecomod (<0/0: generic,
    !     1: water quality, 2: size structure, 3: sediment)
    IF (ecomod <  0) THEN
      CALL srcsnk00
    ELSE IF (ecomod == 0) THEN
      CALL srcsnk00
    ELSE IF (ecomod == 1) THEN
      CALL srcsnkWQ
    ELSE IF (ecomod == 2) THEN
      CALL srcsnkSZ
    ELSE IF (ecomod == 3) THEN
      CALL srcsnkSD
    ENDIF
    DO itr = 1, ntr
      ! For ecomod < 0, skip tracers outside their active window [trct0,trctn]
      IF (ecomod < 0 .AND. ( trct0(itr) > n .OR. trctn(itr) < n ) ) CYCLE
      CALL exTracer (itr,Bstart,Bend,Bhaxpp,Bhaypp,Bth3,Bth4,Bth2,lSCH,lNCH,lECH,lWCH,Bex,thrs)
      CALL imTracer (itr,Bstart,Bend,Bex)
      CALL openbctracer (itr,thrs)
    ENDDO
  ENDIF
  !$omp barrier
  !! if(omp_get_thread_num() .EQ. 0) THEN
  !!  print *,"u:",sum(u(:,:))
  !!  print *,"v:",sum(v(:,:))
  !!  print *,"wp:",sum(wp(:,:))
  !!  print *,"vhp:",sum(vhp(:,:))
  !!  print *,"hvp:",sum(hvp(:,:))
  !!  print *,"vh:",sum(vh(:,:))
  !! end if
  !print *,"hihihi14:",omp_get_thread_num()
  !.....Smooth solution on leapfrog step if ismooth>=1.........
  IF (ismooth >= 1 .AND. istep == 1) CALL smooth
  !.....Assign eddy viscosity and diffusivity at n+1 ...........
  CALL UpdateMixingCoefficients(Bstart,Bend,istep,uairB,vairB,cdwB, &
       & ShearProduction,BuoyancyProduction, Dissipation, TKinE)
  !print *,"hihihi15:",omp_get_thread_num()
  !$omp barrier
END SUBROUTINE fd
!***********************************************************************
SUBROUTINE exmom ( ieq )
!***********************************************************************
!
!  Purpose: To evaluate the explicit terms (advection, Coriolis, and
!           horizontal diffusion) in the momentum equations. The sum
!           of these terms are saved in the 3-D array ex(i,j,k)
!           which is the primary output from this subroutine. The
!           horizontal advection terms can be evaluated by either upwind
!           or centered differencing.
!           (note: ex(i,j,k) is a temporary workspace array that is
!           used for storing the explicit terms from both the x- and y-
!           momentum eqs and is also used later for storing the explicit
!           terms from the salinity eq in SUBR. exsal.)
!
!  Dummy argument:
!  ieq    = Parameter indicating whether the explicit terms in the
!           X or Y momentum equation are to be evaluated.
!           (1=x-momentum, 2=y-momentum)
!
!  Side effects (module arrays): besides ex, this routine fills the
!  horizontal-diffusion coefficient arrays haxpp/haypp and the vertical
!  advection weights th/th1 for the columns owned by the calling thread.
!  Runs inside an OpenMP parallel region: columns are partitioned per
!  thread via lhi/lhf (and lhiW/lhfW, which appear to index columns on
!  the western edge of each thread's sub-domain -- presumed from usage;
!  confirm against the decomposition setup).
!
!-----------------------------------------------------------------------
  !.....Argument.....
  INTEGER, INTENT(IN) :: ieq
  !.....Local variables.....
  REAL :: twodt1
  REAL :: corx, cory, advx, advy, hdx, hdy, uE, uW, vN, vS, wU, wD, &
          scW, scE, scN, scS, scU, scD
  INTEGER :: i, j, k, l, istat, kmx, kmy, k1x, k1y, k1ne,liter
  INTEGER :: kmne, nwlayers, nn, is, ie, js, je,no
  !.....Timing.....
  REAL, EXTERNAL :: TIMER
  REAL :: btime, etime
  btime = TIMER(0.0)
  n_exmom = n_exmom + 1
  !.....Constant.....
  ! twodt1 = 2*dt scaled by tz (leapfrog/trapezoidal sub-step factor set in fd)
  twodt1 = twodt*tz
  !.....Choose to evaluate explicit terms for either the x- or y-mom eq.....
  SELECT CASE (ieq)
  ! -----X-momentum equation-----
  CASE (1)
    !.....Calculate coefficient arrays haxpp&haypp for use
    !     in horizontal diffusion term & th1,th for use
    !     in vertical advection term in the x-momentum
    haxpp(:,lm1) = 0.0;
    haypp(:,lm1) = 0.0
    ! print *, "lEC127:",lEC(127),"maskij:",mask2d(l2i(127)+1,l2j(127))
    ! ... First pass: columns owned by this thread (lhi..lhf)
    DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
      l = id_column(liter)
      !.....Compute layer number for the bottom wet u-pt.......
      kmx = MIN(kmz(lEC(l)), kmz(l))
      k1x = k1u(l)
      ! Zero coefficients above the top wet layer and below the bottom wet layer
      if(k1x > 1) THEN
        haypp(1:k1x-1,l) = 0.0;
        haxpp(1:k1x-1,l) = 0.0;
        th  (1:k1x-1,l) = 0.0;
        th1 (1:k1x-1,l) = 0.0;
      end if
      if(kmx < km1) THEN
        haypp(kmx+1:km1,l) = 0.0;
        haxpp(kmx+1:km1,l) = 0.0;
        th  (kmx+1:km1,l) = 0.0;
        th1 (kmx+1:km1,l) = 0.0;
      end if
      ! ... Map 3D-(i,j) from 2D-l indexes
      i = l2i(l); j = l2j(l);
      ! ... Cycle if E-column is dry
      IF ( .NOT. mask2d(i+1,j) ) THEN
        haypp(k1x:kmx,l) = 0.0;
        haxpp(k1x:kmx,l) = 0.0;
        th  (k1x:kmx,l) = 0.0;
        th1 (k1x:kmx,l) = 0.0;
        CYCLE
      end if
      ! ... Horizontal diffusion ...........................
      IF ( ihd == 1 ) THEN ! Constant
        DO k = k1x,kmx
          haxpp(k,l)=Ax0*MIN(hupp(k,lEC(l)),hupp(k,l))
          haypp(k,l)=Ay0*MIN(hupp(k,lNC(l)),hupp(k,l))
        ENDDO
      ELSEIF ( ihd > 1) THEN ! Smagorinsky
        DO k = k1x,kmx
          haxpp(k,l)= kh(k,lEC(l))*MIN(hupp(k,lEC(l)),hupp(k,l))
          haypp(k,l)=(kh(k,lEC( l ))+ &
                      kh(k,     l  )+ &
                      kh(k,lNC( l ))+ &
                      kh(k,lEC(lNC(l))) )* &
                      0.25 * MIN(hupp(k,lNC(l)),hupp(k,l))
        ENDDO
      ENDIF
      !.....Calculate weighting arrays for vertical advection
      ! NOTE(review): this loop starts at k1x, unlike the two sibling
      ! loops below which start at k1x+1. At k = k1x it reads
      ! hup(k1x-1,l), which is out of range when k1x == 1; the value
      ! written is overwritten immediately below, so starting at
      ! k1x+1 looks both safe and equivalent -- confirm and align.
      DO k = k1x, kmx
        th (k,l) = hup(k-1,l)/(hup(k-1,l)+hup(k,l))
        th1(k,l) = 1.-th(k,l)
      ENDDO
      !.....Set th=th1 at the free surface & bottom
      th (k1x  ,l) = 0.0;
      th1(k1x  ,l) = 0.0;
      th (kmx+1,l) = 0.5;
      th1(kmx+1,l) = 0.5;
    END DO
    ! ... Second pass: columns on this thread's western boundary and
    !     their western neighbours (owned by the previous thread), so
    !     the stencil below has valid coefficients. Thread 0 has no
    !     western neighbour and skips this.
    IF(omp_get_thread_num ( )>0)THEN
      DO liter = lhiW(omp_get_thread_num ( )+1), lhfW(omp_get_thread_num ( )+1)
        l = id_column(liter)
        !.....Compute layer number for the bottom wet u-pt.......
        kmx = MIN(kmz(lEC(l)), kmz(l))
        k1x = k1u(l)
        if(k1x > 1) THEN
          haypp(1:k1x-1,l) = 0.0;
          haxpp(1:k1x-1,l) = 0.0;
          th  (1:k1x-1,l) = 0.0;
          th1 (1:k1x-1,l) = 0.0;
        end if
        if(kmx < km1) THEN
          haypp(kmx+1:km1,l) = 0.0;
          haxpp(kmx+1:km1,l) = 0.0;
          th  (kmx+1:km1,l) = 0.0;
          th1 (kmx+1:km1,l) = 0.0;
        end if
        ! ... Map 3D-(i,j) from 2D-l indexes
        i = l2i(l); j = l2j(l);
        ! ... Cycle if E-column is dry
        IF ( .NOT. mask2d(i+1,j) ) THEN
          haypp(k1x:kmx,l) = 0.0;
          haxpp(k1x:kmx,l) = 0.0;
          th  (k1x:kmx,l) = 0.0;
          th1 (k1x:kmx,l) = 0.0;
          CYCLE
        end if
        ! ... Horizontal diffusion ...........................
        IF ( ihd == 1 ) THEN ! Constant
          DO k = k1x,kmx
            haxpp(k,l)=Ax0*MIN(hupp(k,lEC(l)),hupp(k,l))
            haypp(k,l)=Ay0*MIN(hupp(k,lNC(l)),hupp(k,l))
          ENDDO
        ELSEIF ( ihd > 1) THEN ! Smagorinsky
          DO k = k1x,kmx
            haxpp(k,l)= kh(k,lEC(l))*MIN(hupp(k,lEC(l)),hupp(k,l))
            haypp(k,l)=(kh(k,lEC( l ))+ &
                        kh(k,     l  )+ &
                        kh(k,lNC( l ))+ &
                        kh(k,lEC(lNC(l))) )* &
                        0.25 * MIN(hupp(k,lNC(l)),hupp(k,l))
          ENDDO
        ENDIF
        !.....Calculate weighting arrays for vertical advection
        DO k = k1x+1, kmx
          th (k,l) = hup(k-1,l)/(hup(k-1,l)+hup(k,l))
          th1(k,l) = 1.-th(k,l)
        ENDDO
        !.....Set th=th1 at the free surface & bottom
        th (k1x  ,l) = 0.0;
        th1(k1x  ,l) = 0.0;
        th (kmx+1,l) = 0.5;
        th1(kmx+1,l) = 0.5;
        ! ... Repeat for the column immediately to the west
        l=lWC(l)
        if(l .EQ. lm1) CYCLE
        !.....Compute layer number for the bottom wet u-pt.......
        kmx = MIN(kmz(lEC(l)), kmz(l))
        k1x = k1u(l)
        if(k1x > 1) THEN
          haypp(1:k1x-1,l) = 0.0;
          haxpp(1:k1x-1,l) = 0.0;
          th  (1:k1x-1,l) = 0.0;
          th1 (1:k1x-1,l) = 0.0;
        end if
        if(kmx < km1) THEN
          haypp(kmx+1:km1,l) = 0.0;
          haxpp(kmx+1:km1,l) = 0.0;
          th  (kmx+1:km1,l) = 0.0;
          th1 (kmx+1:km1,l) = 0.0;
        end if
        ! ... Map 3D-(i,j) from 2D-l indexes
        i = l2i(l); j = l2j(l);
        IF ( .NOT. mask2d(i+1,j) .OR. .NOT. mask2d(i,j) ) THEN
          haypp(k1x:kmx,l) = 0.0;
          haxpp(k1x:kmx,l) = 0.0;
          th  (k1x:kmx,l) = 0.0;
          th1 (k1x:kmx,l) = 0.0;
          CYCLE
        END IF
        ! ... Horizontal diffusion ...........................
        IF ( ihd == 1 ) THEN ! Constant
          DO k = k1x,kmx
            haxpp(k,l)=Ax0*MIN(hupp(k,lEC(l)),hupp(k,l))
            haypp(k,l)=Ay0*MIN(hupp(k,lNC(l)),hupp(k,l))
          ENDDO
        ELSEIF ( ihd > 1) THEN ! Smagorinsky
          DO k = k1x,kmx
            haxpp(k,l)= kh(k,lEC(l))*MIN(hupp(k,lEC(l)),hupp(k,l))
            haypp(k,l)=(kh(k,lEC( l ))+ &
                        kh(k,     l  )+ &
                        kh(k,lNC( l ))+ &
                        kh(k,lEC(lNC(l))) )* &
                        0.25 * MIN(hupp(k,lNC(l)),hupp(k,l))
          ENDDO
        ENDIF
        !.....Calculate weighting arrays for vertical advection
        DO k = k1x+1, kmx
          th (k,l) = hup(k-1,l)/(hup(k-1,l)+hup(k,l))
          th1(k,l) = 1.-th(k,l)
        ENDDO
        !.....Set th=th1 at the free surface & bottom
        th (k1x  ,l) = 0.0;
        th1(k1x  ,l) = 0.0;
        th (kmx+1,l) = 0.5;
        th1(kmx+1,l) = 0.5;
      end do
    end if
    !......Calculate the explicit terms by sweeping over interior u-pts.....
    DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
      If(liter == 0) CYCLE
      l = id_column(liter)
      ! Compute the layer number for the bottom wet u-pt
      kmx = MIN(kmz(lEC(l)), kmz(l))
      k1x = k1u(l)
      if(k1x > 1) THEN
        ex(1:k1x-1,l) = 0.0;
      end if
      if(kmx < km1) THEN
        ex(kmx+1:km1,l) = 0.0;
      end if
      ! ... Map 2D-l into 3D-(i,j) indexes
      i = l2i(l); j = l2j(l);
      ! ... Cycle if E-column is dry
      IF ( .NOT. mask2d(i+1,j) ) THEN
        ex(k1x:kmx,l)=0.0
        CYCLE
      END IF
      ! Compute explicit term
      DO k = k1x,kmx
        ! ... For u-layers connecting wet & dry cells neglect
        !     contribution from advective, coriolis & diffusion
        IF ( hp(k,l) <= ZERO .OR. hp(k,lEC(l)) <= ZERO) THEN
          ex(k,l) = uhpp(k,l)
          CYCLE
        ENDIF
        !.....Coriolis.....
        corx = 0.25 * f * (vhp(k,    lEC(l) ) + vhp(k,     l ) &
             &            +vhp(k, lSC(lEC(l))) + vhp(k,lSC(l)))
        !.....Advection
        ! Face fluxes at the E/W/N/S/up/down faces of the u control volume
        uE = uhp(k,     lEC(l) ) +uhp(k,     l )
        uW = uhp(k,     l      ) +uhp(k,lWC(l))
        vN = vhp(k,     lEC(l) ) +vhp(k,     l )
        vS = vhp(k, lSC(lEC(l))) +vhp(k,lSC(l))
        wU = wp (k,     lEC(l) ) +wp (k,     l ); IF ( k == k1x ) wU = 0.0;
        wD = wp (k+1,   lEC(l) ) +wp (k+1   ,l ); IF ( k == kmx ) wD = 0.0;
        ! itrmom selects the discretization of momentum advection
        SELECT CASE (itrmom)
        CASE (1) ! Centered differences with th & th1
          scE = up(k,lEC(l))+up(k,   l  )
          scW = up(k,lWC(l))+up(k,   l  )
          scN = up(k,lNC(l))+up(k,   l  )
          scS = up(k,lSC(l))+up(k,   l  )
          advx = (uE * scE - uW * scW ) / fourdx + &
                 (vN * scN - vS * scS ) / fourdy
          advx=advx+(wU*(th (k  ,l)* up(k  ,l)  + &
                         th1(k  ,l)* up(k-1,l)) - &
                     wD*(th (k+1,l)* up(k+1,l)  + &
                         th1(k+1,l)* up(k  ,l)) ) / 2.
        CASE (2) ! Upwinding all
          advx = ( (uE+ABS(uE))* upp(k,   l   ) + &
                   (uE-ABS(uE))* upp(k,lEC(l)) - &
                   (uW+ABS(uW))* upp(k,lWC(l)) - &
                   (uW-ABS(uW))* upp(k,   l   ) ) / fourdx  &
                +( (vN+ABS(vN))* upp(k,   l   ) + &
                   (vN-ABS(vN))* upp(k,lNC(l)) - &
                   (vS+ABS(vS))* upp(k,lSC(l)) - &
                   (vS-ABS(vS))* upp(k,   l   ) ) / fourdy  &
                +( (wU+ABS(wU)) * upp(k  ,l)   + &
                   (wU-ABS(wU)) * upp(k-1,l)) / 4.          &
                -((wD+ABS(wD)) * upp(k+1,l)   + &
                   (wD-ABS(wD)) * upp(k  ,l)) / 4.
        CASE (3) ! Centered differences - avoid computation of th1 and th
          scE = up(k,lEC(l))+up(k,   l  )
          scW = up(k,lWC(l))+up(k,   l  )
          scN = up(k,lNC(l))+up(k,   l  )
          scS = up(k,lSC(l))+up(k,   l  )
          scU = (up(k  ,l)*hup(k  ,l)+ &
                 up(k-1,l)*hup(k-1,l))/ &
                (hup(k  ,l)+hup(k-1,l))
          scD = (up(k  ,l)*hup(k  ,l)+ &
                 up(k+1,l)*hup(k+1,l))/ &
                (hup(k  ,l)+hup(k+1,l))
          advx = (uE * scE - uW * scW ) / fourdx + &
                 (vN * scN - vS * scS ) / fourdy + &
                 (wU * scU - wD * scD ) / 2.
        CASE (4) ! Upwinding for horizontal & centered for vertical
          advx = ( (uE+ABS(uE))* upp(k,   l   ) + &
                   (uE-ABS(uE))* upp(k,lEC(l)) - &
                   (uW+ABS(uW))* upp(k,lWC(l)) - &
                   (uW-ABS(uW))* upp(k,   l   ) ) / fourdx  &
                +( (vN+ABS(vN))* upp(k,   l   ) + &
                   (vN-ABS(vN))* upp(k,lNC(l)) - &
                   (vS+ABS(vS))* upp(k,lSC(l)) - &
                   (vS-ABS(vS))* upp(k,   l   ) ) / fourdy
          advx=advx+(wU*(th (k  ,l)* up(k  ,l)  + &
                         th1(k  ,l)* up(k-1,l)) - &
                     wD*(th (k+1,l)* up(k+1,l)  + &
                         th1(k+1,l)* up(k  ,l)) ) / 2.
        END SELECT
        !.....Horizontal diffusion.....
        IF ( ihd == 0) THEN
          hdx = 0.0E0
        ELSEIF ( ihd == 1) THEN
          hdx = (haypp(k,   l  )*(upp(k,lNC(l))-upp(k,   l  ))   &
              & -haypp(k,lSC(l))*(upp(k,   l  )-upp(k,lSC(l))))/dydy &
              & + (haxpp(k,   l  )*(upp(k,lEC(l))-upp(k,   l  ))   &
              & -haxpp(k,lWC(l))*(upp(k,   l  )-upp(k,lWC(l))))/dxdx
        ELSEIF ( ihd > 1) THEN
          hdx = 2.* (haxpp(k,  (l))*(upp(k,lEC( l ))-upp(k,   l  ))   &
              &    - haxpp(k,lWC(l))*(upp(k,   l   )-upp(k,lWC(l))))/dxdx &
              &   + (haypp(k,   l  )*(upp(k,lNC( l ))-upp(k,   l  ))   &
              &    - haypp(k,lSC(l))*(upp(k,   l   )-upp(k,lSC(l))))/dydy &
              &   + (haypp(k,   l  )*(vpp(k,lEC( l ))-vpp(k,   l  ))   &
              &    - haypp(k,lSC(l))*(vpp(k,lEC(lSC(l)))-vpp(k,lSC(l))))/dxdy
        ENDIF
        ! ... Adjust terms to account for boundary conditions - HardCoded for NortheDelta Study
        !IF(( j>= jm1 - 20 ) .OR. &
        !   (i<=591 .AND. j<= 60 ) .OR. &
        !   (i<=20 ) .OR. &
        !   (i>=990 .AND. i<=1010 .AND. j>= 74 .AND. j <= 81 ) .OR. &
        !   ( j<= 20 ) .OR. &
        !   (i>=678 .AND. i<= 682 .AND. j>= 100 .AND. j <= 115 )) THEN
        !  corx = 0.0
        !  advx = 0.0
        !  hdx  = 2.*hdx
        !ENDIF
        !IF ((i<=86 .AND. j>991 ) .OR. & !CINTIA (SUTTER SLOUGH)
        !    (i>=613 .AND. j<=27 ) .OR. & !CINTIA (SUTTER GEO)
        !    (i>679 .AND. j<=114 )) THEN !CINTIA (SUTTER DCC)
        !  !corx = 0.0
        !  !advx = 0.0
        !  hdx  = 2.*hdx
        !ENDIF
        ! ... Needed to keep simulations stable near the boundaries - TAHOE MAC
        !IF(i >= 2 .AND. i <= 10) THEN
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF(j >= 2 .AND. j <= 13) THEN
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF(i >= 2 .AND. i <= 7) THEN
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF(i == 2) THEN
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF(j == 2) THEN
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF(j == 23) THEN !13
        !  hdx = 4.*hdx
        !  advx = 0.0
        !  corx = 0.
        !ENDIF
        !IF (i >= (im1 - 20) ) THEN; !BeznarCOLA
        !  hdx = 10.*hdx;
        !  corx = 0.0;
        !  advx = 0.0;
        !end IF
        !.....Final explicit term.....
        ! iadv toggles the advection contribution on/off
        ex(k,l) = uhpp(k,l) - twodt1*(advx*iadv-corx-hdx)
      ENDDO
    END DO
    ! ... Recalculate ex for near bdry. cells
    IF (nopen > 0 ) CALL MODexmom4openBCX
  ! -----Y-momentum equation-----
  CASE (2)
    !.....Calculate coefficient arrays haxpp&haypp for use
    !     in horizontal diffusion term & th1,th for use
    !     in vertical advection term in the y-momentum
    haxpp(:,lm1) = 0.0;
    haypp(:,lm1) = 0.0
    DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
      If(liter == 0) CYCLE
      l = id_column(liter)
      !.....Compute layer number for the bottom wet u-pt.......
      kmy = MIN(kmz(lNC(l)), kmz(l))
      k1y = k1v(l)
      if(k1y > 1) THEN
        haypp(1:k1y-1,l) = 0.0;
        haxpp(1:k1y-1,l) = 0.0;
        th  (1:k1y-1,l) = 0.0;
        th1 (1:k1y-1,l) = 0.0;
      end if
      if(kmy < km1) THEN
        haypp(kmy+1:km1,l) = 0.0;
        haxpp(kmy+1:km1,l) = 0.0;
        th  (kmy+1:km1,l) = 0.0;
        th1 (kmy+1:km1,l) = 0.0;
      end if
      ! ... Map 3D-(i,j) from 2D-l indexes
      i = l2i(l); j = l2j(l);
      ! ... Cycle if N-column is dry
      IF ( .NOT. mask2d(i,j+1) ) THEN
        haypp(k1y:kmy,l) = 0.0;
        haxpp(k1y:kmy,l) = 0.0;
        th  (k1y:kmy,l) = 0.0;
        th1 (k1y:kmy,l) = 0.0;
        CYCLE
      end if
      ! ... Horizontal diffusion
      IF ( ihd == 1 ) THEN ! Constant
        DO k = k1y, kmy
          haypp(k,l)=Ay0*MIN(hvpp(k,lNC(l)),hvpp(k,l))
          haxpp(k,l)=Ax0*MIN(hvpp(k,lEC(l)),hvpp(k,l))
        ENDDO
      ELSEIF ( ihd > 1) THEN ! Smagorinsky
        DO k = k1y, kmy
          haypp(k,l)=kh(k,lNC(l))*MIN(hvpp(k,lNC(l)),hvpp(k,l))
          haxpp(k,l)=(kh(k,lEC( l )) + &
                      kh(k,     l  ) + &
                      kh(k,lNC( l )) + &
                      kh(k,lEC(lNC(l))))* &
                      0.25 *MIN(hvpp(k,lEC(l)),hvpp(k,l))
        ENDDO
      ENDIF
      !.....Calculate weighting arrays for vertical advection
      ! NOTE(review): starts at k1y and reads hvp(k1y-1,l) (out of
      ! range when k1y == 1); value is overwritten just below, so
      ! starting at k1y+1 as in the x-branch looks equivalent -- confirm.
      DO k = k1y, kmy
        th (k,l) = hvp(k-1,l)/(hvp(k-1,l)+hvp(k,l))
        th1(k,l) = 1.-th(k,l)
      ENDDO
      !.....Set th=th1 at the free surface & bottom
      th (k1y  ,l) = 0.0;
      th1(k1y  ,l) = 0.0;
      th (kmy+1,l) = 0.5;
      th1(kmy+1,l) = 0.5;
    END DO
    !......Calculate the explicit terms by sweeping over interior v-pts.....
    DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
      l = id_column(liter)
      ex(:,l) = 0.0
      ! ... Map 2D-l into 3D-(i,j) indexes
      i = l2i(l); j = l2j(l);
      ! ... Cycle if N-column is dry
      IF ( .NOT. mask2d(i,j+1) ) CYCLE
      ! Compute the layer number for top & bottom wet v-pt
      kmy = MIN(kmz(lNC(l)), kmz(l))
      k1y = k1v(l)
      ! Compute explicit term
      DO k = k1y,kmy
        ! ... For v-layers connecting wet & dry cells neglect
        !     contribution from advective, coriolis & diffusion
        IF ( hp(k,l) <= ZERO .OR. hp(k,lNC(l)) <= ZERO) THEN
          ex(k,l) = vhpp(k,l)
          CYCLE
        ENDIF
        !.....Coriolis.....
        cory = 0.25 * f * (uhp(k,    lNC(l) ) + uhp(k,     l ) &
             &            +uhp(k, lWC(lNC(l))) + uhp(k,lWC(l)))
        !.....Advection
        uE = uhp(k,     lNC(l) ) +uhp(k,     l );
        uW = uhp(k,lWC(lNC(l)) ) +uhp(k,lWC(l));
        vN = vhp(k,     lNC(l) ) +vhp(k,     l );
        vS = vhp(k,     l      ) +vhp(k,lSC(l));
        wU = wp (k,     lNC(l) ) +wp (k,     l ); IF ( k == k1y ) wU = 0.0;
        wD = wp (k+1,   lNC(l) ) +wp (k+1   ,l ); IF ( k == kmy ) wD = 0.0;
        SELECT CASE ( itrmom)
        CASE (1) ! Centered differences using th & th1 factors
          scE = vp(k,lEC(l)) + vp(k,   l  )
          scW = vp(k,lWC(l)) + vp(k,   l  )
          scN = vp(k,lNC(l)) + vp(k,   l  )
          scS = vp(k,lSC(l)) + vp(k,   l  )
          advy = (uE * scE - uW * scW ) / fourdx + &
                 (vN * scN - vS * scS ) / fourdy
          advy = advy +                            &
                 (wU*(th (k  ,l)* vp(k  ,l)  +     &
                      th1(k  ,l)* vp(k-1,l)) -     &
                  wD*(th (k+1,l)* vp(k+1,l)  +     &
                      th1(k+1,l)* vp(k  ,l)) ) / 2.
        CASE(2)  ! Upwinding
          advy = ( (uE+ABS(uE)) * vpp(k,   l   ) +  &
               &   (uE-ABS(uE)) * vpp(k,lEC(l)) -  &
               &   (uW+ABS(uW)) * vpp(k,lWC(l)) -  &
               &   (uW-ABS(uW)) * vpp(k,   l   ) ) / fourdx   &
               & +( (vN+ABS(vN)) * vpp(k,   l   ) +  &
               &   (vN-ABS(vN)) * vpp(k,lNC(l)) -  &
               &   (vS+ABS(vS)) * vpp(k,lSC(l)) -  &
               &   (vS-ABS(vS)) * vpp(k,   l   ) ) / fourdy   &
               & +( (wU+ABS(wU)) * vpp(k  ,l)   +  &
                   (wU-ABS(wU)) * vpp(k-1,l)) / 4. &
                 -( (wD+ABS(wD)) * vpp(k+1,l)   +  &
                   (wD-ABS(wD)) * vpp(k  ,l)) / 4.
        CASE (3) ! Centered differences - avoid computation of th1 and th
          scE = vp(k,lEC(l)) + vp(k,   l  )
          scW = vp(k,lWC(l)) + vp(k,   l  )
          scN = vp(k,lNC(l)) + vp(k,   l  )
          scS = vp(k,lSC(l)) + vp(k,   l  )
          scU = (vp(k  ,l)*hvp(k  ,l)+ &
                 vp(k-1,l)*hvp(k-1,l))/ &
                (hvp(k  ,l)+hvp(k-1,l))
          scD = (vp(k  ,l)*hvp(k  ,l)+ &
                 vp(k+1,l)*hvp(k+1,l))/ &
                (hvp(k  ,l)+hvp(k+1,l))
          advy = (uE * scE - uW * scW ) / fourdx + &
                 (vN * scN - vS * scS ) / fourdy + &
                 (wU * scU - wD * scD ) / 2.
        CASE(4)  ! Upwinding only for horizontal advection
          advy = ( (uE+ABS(uE))* vpp(k,   l   ) + &
               &   (uE-ABS(uE))* vpp(k,lEC(l)) - &
               &   (uW+ABS(uW))* vpp(k,lWC(l)) - &
               &   (uW-ABS(uW))* vpp(k,   l   ) ) / fourdx  &
               & +( (vN+ABS(vN))* vpp(k,   l   ) + &
               &   (vN-ABS(vN))* vpp(k,lNC(l)) - &
               &   (vS+ABS(vS))* vpp(k,lSC(l)) - &
               &   (vS-ABS(vS))* vpp(k,   l   ) ) / fourdy
          advy = advy +                            &
                 (wU*(th (k  ,l)* vp(k  ,l)  +     &
                      th1(k  ,l)* vp(k-1,l)) -     &
                  wD*(th (k+1,l)* vp(k+1,l)  +     &
                      th1(k+1,l)* vp(k  ,l)) ) / 2.
        END SELECT
        !.....Horizontal diffusion.....
        IF ( ihd == 0) THEN
          hdy = 0.0E0
        ELSEIF ( ihd == 1) THEN
          hdy = (haypp(k,   l  )*(vpp(k,lNC(l))-vpp(k,   l  ))   &
              & -haypp(k,lSC(l))*(vpp(k,   l  )-vpp(k,lSC(l))))/dydy &
              & + (haxpp(k,   l  )*(vpp(k,lEC(l))-vpp(k,   l  ))   &
              & -haxpp(k,lWC(l))*(vpp(k,   l  )-vpp(k,lWC(l))))/dxdx
        ELSEIF ( ihd > 1) THEN
          hdy = 2.* (haypp(k,  (l))*(vpp(k,lNC( l ))-vpp(k,   l  ))   &
              &    - haypp(k,lSC(l))*(vpp(k,   l   )-vpp(k,lSC(l))))/dydy &
              &   + (haxpp(k,   l  )*(vpp(k,lEC(l ))-vpp(k,   l  ))   &
              &    - haxpp(k,lWC(l))*(vpp(k,   l   )-vpp(k,lWC(l))))/dxdx &
              &   + (haxpp(k,   l  )*(upp(k,lNC( l ))-upp(k,   l  ))   &
              &    - haxpp(k,lWC(l))*(upp(k,lNC(lWC(l)))-upp(k,lWC(l))))/dxdy
        ENDIF
        ! ... Adjust terms to account for boundary conditions - HardCoded for NorthDelta Study
        !IF(( j>= jm1 - 20 ) .OR. &
        !   (i<=591 .AND. j<= 60 ) .OR. &
        !   (i<=20 ) .OR. &
        !   (i>=990 .AND. i<=1010 .AND. j>= 74 .AND. j <= 81 ) .OR. &
        !   ( j<= 20 ) .OR. &
        !   (i>=678 .AND. i<= 682 .AND. j>= 100 .AND. j <= 115 )) THEN
        !  cory = 0.0
        !  advy = 0.0
        !  hdy  = 2.*hdy
        !ENDIF
        !IF ((i<=86 .AND. j>991 ) .OR. & !CINTIA (SUTTER SLOUGH)
        !    (i>=613 .AND. j<=27 ) .OR. & !CINTIA (SUTTER GEO)
        !    (i>679 .AND. j<=114 )) THEN !CINTIA (SUTTER DCC)
        !  !cory = 0.0
        !  !advy = 0.0
        !  hdy  = 2.*hdy
        !ENDIF
        ! ... Needed to keep simulations stable near the boundaries - Cayuga
        !IF( i >= im1 - 20) THEN;
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(j >= 297 .AND. j <= 301) THEN
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(j >= 2 .AND. j <= 6) THEN
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(i >= 2 .AND. i <= 6) THEN
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(j == 23) THEN !13
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(j == 2) THEN
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        !IF(i == 2) THEN
        !  hdy = 4.*hdy
        !  advy = 0.0
        !  cory = 0.
        !ENDIF
        ! ... Needed to keep simulations stable near the boundaries - Beznar Cola
        !IF( i >= im1 - 20) THEN;
        !  hdy = 10.*hdy
        !  advy = 0.0
        !  cory = 0.0
        !ENDIF
        !.....Final explicit term.....
        ! Note the Coriolis sign: +cory here vs. -corx in the x-branch
        ex(k,l) = vhpp(k,l) - twodt1*(advy*iadv+cory-hdy)
      END DO
    END DO
    ! ... Recalculate ex for near bdry. cells
    IF (nopen > 0 ) CALL MODexmom4openBCY
  END SELECT
  !.....Compute CPU time spent in subroutine.....
  etime = TIMER(0.0)
  t_exmom = t_exmom + (etime - btime)
END SUBROUTINE exmom
!***********************************************************************
SUBROUTINE matmom ( ieq, t_matmom2,Bstart, Bend, Bex,Beagx,Bearx,Bagx,Barx,Beagy,Beary,Bagy,Bary,uairB,vairB,cdwB,bclncxB,hupdrhoB )
!***********************************************************************
!
! Purpose: To define the matrices for the momentum equations.
!
! Algorithm: The x-momentum equations at each vertical array of
! u-pts are first expressed in the compact matrix form
!
! [aa] [uh] = [gg] - g*dt/dx*rho*(s(i+1,j)-s(i,j))*[hh]
!
! by defining the three matrices [hh], [gg], and [aa].
! (Because the [aa] matrix is tridiagonal, only the
! diagonals are stored.) The above system of equations is
! then rearranged into the form
!
! [uh] = [ag] - g*dt/dx*rho*(s(i+1,j)-s(i,j))*[ar]
!
! by indirect solution using the tridiagonal solver trid.
! The matrices [ag] and [ar] are the output from the trid
! subroutine along with their summation over the depth at
! each horizontal node point, [eag] and [ear]. The matrices
! for the x-momentum eq are stored in the fortran arrays
! agx, arx, eagx, and earx. Everything is similar for
! the y-momentum eq.
!
! Dummy argument:
! ieq = Parameter indicating whether the matrices for the
! x or y momentum equation are to be evaluated.
! (1=x-momentum, 2=y-momentum)
!
! 23/04/2008 F.J. Rueda Recompute baroclinic term at bottom
! 23/04/2008 F.J. Rueda Do not set to zero baroclinic term at
! wett/dry cells near the surface
!-----------------------------------------------------------------------
!.....Argument.....
INTEGER, INTENT(IN) :: ieq, Bstart, Bend
REAL, INTENT(INOUT) :: t_matmom2
REAL, DIMENSION(1:km1), INTENT(INOUT) :: bclncxB,hupdrhoB
REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bex,Bagx,Barx,Bagy,Bary
REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Beagx,Bearx,Beagy,Beary,uairB,vairB,cdwB
!.....Local variables.....
INTEGER :: i, j, k, l, kmx, kmy, k1x, k1y, nwlayers, inn, liter, innH
REAL :: twodt1, wsx0, wsy0, tausx, taubx, tausy, tauby, &
hpmin, usurf, vsurf,Uhvalue, Usource, Vsource, &
Vhvalue, cwx, cwy,lol
REAL :: aaux,aaux2,aaux3
REAL, DIMENSION(km1) :: vdiffx, vdiffy, &
& deltaz, Avxdudz, Avydvdz, Avx, Avy, &
& rhopx, rhopy, gg, hh, ar, ag
REAL,DIMENSION(km1) :: hvpdrho,bclncy,bclncx,hupdrho
REAL, DIMENSION(3,km1) :: aa
REAL, PARAMETER :: rhop0 = 1000.
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
!print *,"mattt:",omp_get_thread_num()
!.....Constants.....
twodt1 = twodt*tz;
SELECT CASE ( ieq )
! -----X-momentum equation-----
CASE (1)
!.....Loop over interior u-pts .....
DO liter = lhiWCE(omp_get_thread_num ( )+1), lhfCE(omp_get_thread_num ( )+1)
If(liter == 0) CYCLE
l = id_columnCE(liter)
! ... Map 2D-l into 3D-(i,j) indexes - FJR uncomment - needed for pss - CHECK mario!!!!
i = l2i(l);
j = l2j(l);
! ... Skip for dry u-pts
!IF (.NOT.mask(c2l(lEC(l)))) THEN
!Beagx(l) = 0.0;
!Bearx(l) = 0.0;
!CYCLE
!END IF
! ... Compute the layer number for top & bottom wet u-pt
kmx = MIN(kmz(lEC(l)), kmz(l))
k1x = k1u(l)
nwlayers = (kmx-k1x) + 1
IF(nwlayers < 1) CYCLE
! ... Compute eddy viscosity at interfaces vertically between u-pts
Avx = 0.0
DO k = k1x+1,kmx
Avx(k) = 0.5 * (Av(k,lEC(l))+Av(k,l))
ENDDO
! ... Define average layer density at u-pt (in kg/m**3) ...........
rhopx(k1x:kmx) = 1000. ! Neglect vertical density variations
! ... Compute explicit portion of water surface slope term ........
wsx0 = rhopx(k1x) * gdtdx * (spp(lEC(l)) - spp(l))
! ... Compute baroclinic term .....................................
SELECT CASE (ibc)
CASE (0) ! No baroclinic term
bclncx(k1x:kmx) = 0.0
CASE (1:)
DO k = k1x, kmx
hupdrho(k) = gdtdx*hup(k,l)*(rhop(k,lEC(l))-rhop(k,l))
!IF(hp(k,l) <= ZERO .OR. hp(k,lEC(l)) <= ZERO) hupdrho(k) = 0.0
ENDDO
bclncx(k1x) = hupdrho(k1x)
IF (kmx > k1x) THEN ! Two or more wet layers
! Recompute bottom layer baroclinic term along horizontal plane
CALL bclnc_km (l, kmx, 1, hupdrho(kmx) )
DO k = k1x+1, kmx
aaux = hupdrho(k-1) + hupdrho(k)
aaux2 = bclncx(k-1) + aaux
bclncx(k) = aaux2
END DO
END IF
END SELECT
! ... Compute explicit portion of vertical diffusion term ........
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
vdiffx(k1x) = 0.0
CASE (2:) ! Two or more wet layers (hupp->hup)
DO k = k1x+1,kmx
deltaz(k) = hup(k-1,l) + hup(k,l)
Avxdudz(k)= Avx(k)*(upp(k-1,l)-upp(k,l))/deltaz(k)
ENDDO
Avxdudz(k1x) = 0.0 ! Set value at free surface to zero
Avxdudz(kmx+1) = 0.0 ! Set value at bottom boundary to zero
vdiffx(k1x:kmx) = twodt*(Avxdudz(k1x:kmx) - Avxdudz(k1x+1:kmx+1)) &
*2.*(1.-theta) ! theta accounts for semi-implicitness
END SELECT
!.....Form [hh] matrix...........................................
hh (k1x:kmx) = hup(k1x:kmx,l)/rhopx(k1x:kmx)
!.....Form [gg] matrix............................................
gg(k1x:kmx) = ex(k1x:kmx,l) &
-hh(k1x:kmx ) * (bclncx(k1x:kmx)+wsx0) * tz &
+vdiffx(k1x:kmx) * tz
!.....Form [aa] matrix............................................
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
aa(2,k1x) = 1.0
CASE (2:) ! Two or more wet layers (hu->hup)
! Define upper diagonal terms
aa(3,k1x:kmx-1)= -twodt1*Avx(k1x+1:kmx)/(hup(k1x+1:kmx,l)* &
(hup(k1x:kmx-1,l)+hup(k1x+1:kmx,l)))*2.*theta
aa(3,kmx) = 0.0
! Define lower diagonal terms
aa(1,k1x+1:kmx)= -twodt1*Avx(k1x+1:kmx)/(hup(k1x:kmx-1,l)* &
(hup(k1x:kmx-1,l)+hup(k1x+1:kmx,l)))*2.*theta
aa(1,k1x) = 0.0
! Define center diagonal terms
aa(2,k1x:kmx) = 1.0 &
-(hup(k1x-1:kmx-1,l)/hup(k1x:kmx,l))*aa(1,k1x:kmx) &
-(hup(k1x+1:kmx+1,l)/hup(k1x:kmx,l))*aa(3,k1x:kmx)
END SELECT
! ... Top boundary conditions......................................
! a. Form wind stress term
usurf = up(k1x,l)
vsurf =(vp(k1v(l ), l ) + &
vp(k1v(lSC(l)), lSC(l) ) + &
vp(k1v(lEC(l)), lEC(l) ) + &
vp(k1v(lSC(lEC(l))),lSC(lEC(l))))/4.
cwx = cdw(l)*rhoair*SQRT((vair(l)-vsurf)**2.+ &
(uair(l)-usurf)**2.)
! b. Modify [gg] matrix
tausx = cwx/rhopx(k1x)*uair(l)
gg( k1x) = gg( k1x) + tausx*twodt1
! c. Modify [aa] matrix
tausx = cwx/rhopx(k1x)/hup(k1x,l)
aa(2,k1x) = aa(2,k1x) + tausx*twodt1
! ... Bottom boundary conditions...................................
! a. Form bottom stress term
taubx = cd*SQRT((uhpp(kmx, l)*uhpp(kmx, l)) + &
& ((0.25*(vhpp(kmx,lEC(l))+vhpp(kmx, l) &
& +vhpp(kmx,lSC(l))+vhpp(kmx,lSC(lEC(l)))))**2)) &
& /(hup(kmx,l)*hup(kmx,l))
! b. Modify [aa] matrix
aa(2,kmx) = aa(2,kmx) + taubx*twodt1
! .... Point sources and sinks ....................................
IF ( iopssH(omp_get_thread_num ( )+1) > 0) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
IF (i == ipss(inn) .AND. &
j == jpss(inn) ) THEN
DO k = k1x,kmx
IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
! ... Strength of Source - here it is assumed that
! only half of the flow shows up in the control volume
! used in the momentum equation -> factor 2 below
Usource = ABS(Qpss(k,inn))/(dx*dy*hup(k,l))*twodt1/2.
IF(ptype(iodev(inn)) == -2) Usource = 1.E2
! ... Velocity of the source in E direction (positive
! towards east if a source; negative or towards west if
! a sink) - idetr = 1 by default;
Uhvalue = (Qpss(k,inn)*uEpss(iodev(inn))/dy)*idetr(iodev(inn)) !mod ACC oct11
aa(2,k) = aa(2,k) + Usource
gg( k) = gg( k) + Usource * Uhvalue
ENDDO
ENDIF
IF (i == ipss(inn)-1 .AND. &
j == jpss(inn) ) THEN
DO k = k1x,kmx
IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
! ... Strength of Source - here it is assumed that
! only half of the flow shows up in the control volume
! used in the momentum equation -> factor 2 below
Usource = ABS(Qpss(k,inn))/(dx*dy*hup(k,l))*twodt1/2.
IF(ptype(iodev(inn)) == -2) Usource = 1.E2
! ... Velocity of the source in N direction (positive
! towards north if a source; negative or towards south if
! a sink) - idetr = 1 by default;
Uhvalue = -(Qpss(k,inn)*uWpss(iodev(inn))/dy)*idetr(iodev(inn)) !mod ACC oct11
aa(2,k) = aa(2,k) + Usource
gg( k) = gg( k) + Usource * Uhvalue
ENDDO
ENDIF
ENDDO
ENDIF
!.....Solve tridiagonal system for [ag] and [ar] arrays........
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
ag(k1x) = gg(k1x)/aa(2,k1x)
ar(k1x) = hh(k1x)/aa(2,k1x)
CASE (2:) ! Two or more wet layers
CALL trid ( aa, gg, hh, ag, ar, k1x, kmx, kmx+1, nwlayers )
END SELECT
!.....Save [ag] and [ar] arrays and sum them over
! depth for use in solution of continuity equation.............
Bagx(k1x:kmx,l) = ag(k1x:kmx)
Barx(k1x:kmx,l) = ar(k1x:kmx)
Beagx(l) = SUM(ag(k1x:kmx))
Bearx(l) = SUM(ar(k1x:kmx))
!.....End loop over u-pts.....
END DO
! -----Y-momentum equation-----
CASE (2)
!.....Loop over interior v-pts
!print *,"mattt2:",omp_get_thread_num()
DO liter = lhiCN(omp_get_thread_num ( )+1), lhfCN(omp_get_thread_num ( )+1)
l = id_columnCN(liter)
! ... Map 2D-l into 3D-(i,j) indexes - uncomment - needed
i = l2i(l);
j = l2j(l);
!print *,"mcol:",l,":",omp_get_thread_num()
! ... Skip if North Column is dry
!IF (.NOT. mask2d(i,j+1)) THEN
!Beagy(l) = 0.0;
!Beary(l) = 0.0;
!CYCLE
!END IF
! ... Compute layer numbers of wet v-pt
kmy = MIN(kmz(lNC(l)), kmz(l))
k1y = k1v(l)
nwlayers = (kmy-k1y) + 1
IF(nwlayers < 1) CYCLE
! .... Compute eddy viscosity at interfaces between v-pts)
Avy = 0.0
DO k = k1y, kmy
Avy(k) = 0.5*(Av(k,lNC(l))+Av(k,l))
ENDDO
! .... Define average layer density at v-pts (in kg/m**3) .........
rhopy(k1y:kmy) = 1000. ! Neglect vertical density variations
!.....Compute explicit part of water surface slope term ...........
wsy0 = rhopy(k1y) * gdtdy *(spp(lNC(l)) - spp(l))
!.... Compute baroclinic term .....................................
SELECT CASE (ibc)
CASE (0) ! No baroclinic term
bclncy(k1:kmy) = 0.0
CASE (1:)
DO k = k1y, kmy
hvpdrho(k) = gdtdy*hvp(k,l)*(rhop(k,lNC(l))-rhop(k,l))
! IF(hp(k,l)<=ZERO .OR. hp(k,lNC(l))<=ZERO) hvpdrho(k) = 0.0
ENDDO
bclncy(k1y) = hvpdrho(k1y)
IF (kmy > k1y) THEN ! Two or more wet layers
! Recompute bottom layer baroclinic term along horizontal plane
CALL bclnc_km (l, kmy, 2, hvpdrho(kmy) )
DO k = k1y+1, kmy
bclncy(k) = bclncy(k-1) + hvpdrho(k-1) + hvpdrho(k)
END DO
END IF
END SELECT
! ... Compute explicit portion of vertical diffusion term ........
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
vdiffy(k1y) = 0.0
CASE (2:) ! Two or more wet layers (hvpp->hvp)
DO k = k1y+1 , kmy
deltaz(k) = hvp(k-1,l) + hvp(k,l)
Avydvdz(k)= Avy(k)*(vpp(k-1,l)-vpp(k,l))/deltaz(k)
ENDDO
Avydvdz(k1y) = 0.0 ! Set value at free surface to zero
Avydvdz(kmy+1) = 0.0 ! Set value at bottom boundary to zero
vdiffy(k1y:kmy) = twodt*(Avydvdz(k1y:kmy) - Avydvdz(k1y+1:kmy+1)) &
*2.*(1.-theta) ! This factor accounts for semi-implicitness
END SELECT
!.....Form [hh] matrix...........................................
hh (k1y:kmy) = hvp(k1y:kmy,l)/rhopy(k1y:kmy)
!.....Compute [gg] matrix .......................................
gg(k1y:kmy) = ex(k1y:kmy,l) &
- hh(k1y:kmy ) * (bclncy(k1y:kmy)+wsy0) * tz &
+ vdiffy(k1y:kmy) * tz
!.....Form [aa] matrix...........................................
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
aa(2,k1y) = 1.0
CASE (2:) ! Two or more wet layers (hv->hvp)
! Define upper diagonal terms
aa(3,k1y:kmy-1)=-twodt1*Avy(k1y+1:kmy)/(hvp(k1y+1:kmy,l)* &
& (hvp(k1y:kmy-1,l)+hvp(k1y+1:kmy,l)))*2.*theta
aa(3,kmy) = 0.0
! Define lower diagonal terms
aa(1,k1y+1:kmy)=-twodt1*Avy(k1y+1:kmy)/(hvp(k1y:kmy-1,l)* &
& (hvp(k1y:kmy-1,l)+hvp(k1y+1:kmy,l)))*2.*theta
aa(1,k1y) = 0.0
! Define center diagonal terms
aa(2,k1y:kmy) = 1.0 &
-(hvp(k1y-1:kmy-1,l)/hvp(k1y:kmy,l))*aa(1,k1y:kmy) &
-(hvp(k1y+1:kmy+1,l)/hvp(k1y:kmy,l))*aa(3,k1y:kmy)
END SELECT
! ... Top boundary conditions .....................................
! a. Form wind stress term
vsurf = vp(k1y,l)
usurf =(up(k1u(l), l ) + &
up(k1u(lWC(l)), lWC(l) ) + &
up(k1u(lNC(l)), lNC(l) ) + &
up(k1u(lWC(lNC(l))),lWC(lNC(l))))/4.
cwy = cdw(l)*rhoair*SQRT((uair(l)-usurf)**2.+ &
(vair(l)-vsurf)**2.)
! b. Modify [gg] matrix
tausy = cwy/rhopy(k1y)*vair(l)
gg(k1y) = gg( k1y) + tausy*twodt1
! c. Modify [aa] matrix
tausy = cwy/rhopy(k1y)/hvp(k1y,l)
aa(2,k1y) = aa(2,k1y) + tausy*twodt1
! ... Bottom boundary conditions ..................................
! a. Form bottom stress term
tauby = cd*SQRT((vhpp(kmy,l)*vhpp(kmy,l)) + &
& ((0.25*(uhpp(kmy,lNC(l))+uhpp(kmy,lWC(lNC(l))) &
& +uhpp(kmy,lWC(l))+uhpp(kmy,l)))**2)) &
& /(hvp(kmy,l)*hvp(kmy,l))
! b. Modify [aa] matrix
aa(2,kmy) = aa(2,kmy) + tauby*twodt1
! .... Point sources and sinks ....................................
IF ( iopssH(omp_get_thread_num ( )+1) > 0) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
IF (i == ipss(inn) .AND. &
j == jpss(inn) ) THEN
DO k = k1y,kmy
IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
! ... Strength of Source - here it is assumed that
! only half of the flow shows up in the control volume
! used in the momentum equation -> factor 2 below
Vsource = ABS(Qpss(k,inn))/(dx*dy*hvp(k,l))*twodt1/2.
IF(ptype(iodev(inn)) == -2) Vsource = 1.E2
! ... Velocity of the source in N direction (positive
! towards north if a source; negative or towards south if
! a sink) - idetr = 1 by default;
Vhvalue = (Qpss(k,inn)*vNpss(iodev(inn))/dx)*idetr(iodev(inn)) !mod ACC oct11
aa(2,k) = aa(2,k) + Vsource
gg( k) = gg( k) + Vsource * Vhvalue
ENDDO
ENDIF
IF (i == ipss(inn) .AND. &
j == jpss(inn)-1 ) THEN
DO k = k1y,kmy
IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
! ... Strength of Source - here it is assumed that
! only half of the flow shows up in the control volume
! used in the momentum equation -> factor 2 below
Vsource = ABS(Qpss(k,inn))/(dx*dy*hvp(k,l))*twodt1/2.
IF(ptype(iodev(inn)) == -2) Vsource = 1.E2
! ... Velocity of the source in S direction (negative
! towards south if a source; positive or towards north if
! a sink) - idetr = 1 by default;
Vhvalue = -(Qpss(k,inn)*vSpss(iodev(inn))/dx)*idetr(iodev(inn)) !mod ACC oct11
aa(2,k) = aa(2,k) + Vsource
gg( k) = gg( k) + Vsource * Vhvalue
ENDDO
ENDIF
ENDDO
ENDIF
!.....Solve tridiagonal system for [ag] and [ar] arrays........
SELECT CASE (nwlayers)
CASE (1) ! Single wet layer
ag(k1y) = gg(k1y)/aa(2,k1y)
ar(k1y) = hh(k1y)/aa(2,k1y)
CASE (2:) ! Two or more wet layers
CALL trid ( aa, gg, hh, ag, ar, k1y, kmy, km1, nwlayers )
END SELECT
!.....Save [ag] and [ar] arrays and sum them over
! depth for use in solution of continuity equation..............
Bagy(k1y:kmy,l) = ag(k1y:kmy)
Bary(k1y:kmy,l) = ar(k1y:kmy)
Beagy(l) = SUM(ag(k1y:kmy))
Beary(l) = SUM(ar(k1y:kmy))
!.....End loop over v-pts.....
END DO
END SELECT
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_matmom2 = t_matmom2 + (etime - btime)
END SUBROUTINE matmom
!***********************************************************************
SUBROUTINE bclnc_km (l, kb, ieq, huvpdrho )
!***********************************************************************
!
!  Purpose: To recompute the baroclinic term for a bottom layer of
!           variable thickness. The densities at the pressure points
!           on either side of the velocity point (east/west for the
!           x-momentum equation, north/south for the y-momentum
!           equation) are interpolated - or extrapolated - to the
!           horizontal level of the velocity point before the density
!           difference is formed, so that the pressure gradient is
!           evaluated along a horizontal plane.
!
!  Arguments:
!    l        - 2D column index of the velocity point
!    kb       - index of the bottom wet layer
!    ieq      - 1 -> x-momentum term; 2 -> y-momentum term
!    huvpdrho - baroclinic term; only overwritten when at least one
!               neighbouring pressure-point layer thickness differs
!               from the thickness at the velocity point
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!    18/09/00        P.E. Smith        Original f90 code
!
!-----------------------------------------------------------------------

  !.....Arguments.....
  INTEGER, INTENT(IN   ) :: l, kb, ieq
  REAL   , INTENT(INOUT) :: huvpdrho

  !.....Local variables.....
  REAL, PARAMETER :: eps = EPSILON(0.0)
  REAL    :: halfdz_nbr, halfdz_vel
  REAL    :: rho_east, rho_west, rho_north, rho_south
  REAL    :: hup_hi, hup_lo, hvp_hi, hvp_lo
  LOGICAL :: redo_east, redo_west, redo_north, redo_south

  !.....Select which momentum equation is being assembled.............
  SELECT CASE ( ieq )

  ! -----Recompute the x-momentum term-----
  CASE (1)

    ! ... Tolerance band around the layer thickness at the u-pt
    hup_hi = hup(kb,l)+eps; hup_lo = hup(kb,l)-eps

    ! ... East side: if the pressure-point thickness differs from the
    !     thickness at the u-pt, interpolate (or extrapolate) the
    !     density to the horizontal level of the u-pt
    IF((hp(kb,lEC(l)) > hup_hi) .OR. (hp(kb,lEC(l)) < hup_lo)) THEN
      redo_east  = .TRUE.
      halfdz_nbr = 0.5*(hp(kb-1,lEC(l)) + hp (kb,lEC(l)))
      halfdz_vel = 0.5*(hp(kb-1,lEC(l)) + hup(kb, l ))
      rho_east   = rhop(kb-1,lEC(l)) + (halfdz_vel/halfdz_nbr)* &
                 & (rhop(kb,lEC(l))-rhop(kb-1,lEC(l)))
    ELSE
      redo_east  = .FALSE.
      rho_east   = rhop(kb,lEC(l))
    END IF

    ! ... West side: same treatment with the local pressure point
    IF((hp(kb,l) > hup_hi) .OR. (hp(kb,l) < hup_lo)) THEN
      redo_west  = .TRUE.
      halfdz_nbr = 0.5*(hp(kb-1,l) + hp (kb,l))
      halfdz_vel = 0.5*(hp(kb-1,l) + hup(kb,l))
      rho_west   = rhop(kb-1,l) + (halfdz_vel/halfdz_nbr)* &
                 & (rhop(kb,l)-rhop(kb-1,l))
    ELSE
      redo_west  = .FALSE.
      rho_west   = rhop(kb,l)
    END IF

    ! ... Recompute the x-direction baroclinic term on a horizontal
    !     plane only if one of the sides actually required it
    IF ( redo_east .OR. redo_west ) THEN
      huvpdrho = gdtdx*hup(kb,l)*(rho_east-rho_west)
    END IF

  ! -----Recompute the y-momentum term-----
  CASE (2)

    ! ... Tolerance band around the layer thickness at the v-pt
    hvp_hi = hvp(kb,l)+eps; hvp_lo = hvp(kb,l)-eps

    ! ... North side: interpolate (or extrapolate) density to the
    !     horizontal level of the v-pt when thicknesses differ
    IF((hp(kb,lNC(l)) > hvp_hi) .OR. (hp(kb,lNC(l)) < hvp_lo)) THEN
      redo_north = .TRUE.
      halfdz_nbr = 0.5*(hp(kb-1,lNC(l)) + hp (kb,lNC(l)))
      halfdz_vel = 0.5*(hp(kb-1,lNC(l)) + hvp(kb, l ))
      rho_north  = rhop(kb-1,lNC(l)) + (halfdz_vel/halfdz_nbr)* &
                 & (rhop(kb,lNC(l))-rhop(kb-1,lNC(l)))
    ELSE
      redo_north = .FALSE.
      rho_north  = rhop(kb,lNC(l))
    END IF

    ! ... South side: same treatment with the local pressure point
    IF((hp(kb,l) > hvp_hi) .OR. (hp(kb,l) < hvp_lo)) THEN
      redo_south = .TRUE.
      halfdz_nbr = 0.5*(hp(kb-1,l) + hp (kb,l))
      halfdz_vel = 0.5*(hp(kb-1,l) + hvp(kb,l))
      rho_south  = rhop(kb-1,l) + (halfdz_vel/halfdz_nbr)* &
                 & (rhop(kb,l)-rhop(kb-1,l))
    ELSE
      redo_south = .FALSE.
      rho_south  = rhop(kb,l)
    END IF

    ! ... Recompute the y-direction baroclinic term on a horizontal
    !     plane only if one of the sides actually required it
    IF ( redo_north .OR. redo_south ) THEN
      huvpdrho = gdtdy*hvp(kb,l)*(rho_north-rho_south)
    END IF

  END SELECT

END SUBROUTINE bclnc_km
!***********************************************************************
SUBROUTINE matcon(t_matcon2,Bstart,Bend,lWCH,lSCH,Beagx,Bearx,Beagy,Beary,Bsx,Bsy,Bdd,Bqq,Brr)
!***********************************************************************
!
!  Purpose: To calculate the matrix coefficients for solving the
!           continuity equation for zeta.
!
!  Arguments:
!    t_matcon2   - accumulated CPU time spent in this routine (updated)
!    Bstart,Bend - column-index bounds of this thread's block
!    lWCH,lSCH   - west/south neighbour column maps for the block
!    Beagx,Bearx,Beagy,Beary - depth-summed [ag]/[ar] sums produced by
!                  subr. matmom for the x- and y-momentum equations
!    Bsx,Bsy     - out: [sx]/[sy] coefficients at u-/v-points
!    Bdd,Bqq,Brr - out: explicit divergence [dd], right-hand side [qq]
!                  and diagonal [rr] terms at zeta-points
!
!-----------------------------------------------------------------------
  REAL, INTENT(INOUT) :: t_matcon2
  INTEGER, INTENT(IN) :: Bstart,Bend
  REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Beagx,Bearx,Beagy,Beary,Bsx,Bsy,Bdd,Bqq,Brr
  INTEGER, DIMENSION(Bstart:Bend+1), INTENT(IN) :: lWCH,lSCH
  !.....Local variables.....
  REAL :: cx, cy, dt1, dtdx1, dtdy1, rho4sx, rho4sy
  INTEGER :: i, j, k, l, k1s, kms, inn, liter, innH
  !.....Timing.....
  REAL, EXTERNAL :: TIMER
  REAL :: btime, etime
  btime = TIMER(0.0)
  !.....Constants (tz carries the semi-implicitness weighting).....
  cx = gdt2dx2*tz*tz;
  cy = gdt2dy2*tz*tz;
  dtdx1 = dtdx*tz;
  dtdy1 = dtdy*tz;
  dt1 = dt*tz;
  rho4sx = 1000. ! Neglect density variations
  rho4sy = 1000. ! Neglect density variations
  !.....Calculate [sx] matrix at u-pts & [sy] matrix at v-pts ....
  ! Zero the Bend+1 slot so neighbour lookups that land on it
  ! (presumably dry/out-of-block columns - confirm) contribute nothing
  Bsx(Bend+1) = 0.0; Bsy(Bend+1) = 0.0;
  ! ... u-pts loop uses the extended (lhiW) range so west-neighbour
  !     values needed by this thread's block are also filled
  DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
    If(liter == 0) CYCLE
    l = id_column(liter)
    ! ... Map 2D-l into 3D-(i,j) indexes
    i = l2i(l); j = l2j(l);
    ! ... u-pts (only when the east column is wet)
    IF (mask2d(i+1,j)) THEN
      Bsx(l)= cx * rho4sx * Bearx(l)
    ELSE
      Bsx(l)= 0.0
    ENDIF
  ENDDO
  DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
    l = id_column(liter)
    i = l2i(l); j = l2j(l);
    ! ... v-pts (only when the north column is wet)
    IF (mask2d(i,j+1)) THEN
      Bsy(l)= cy * rho4sy * Beary(l)
    ELSE
      Bsy(l)= 0.0
    ENDIF
  ENDDO
  !.....Calculate [dd], [qq], and [rr] matrices at zeta-pts.....
  ! dd(lm1) = 0.0; qq(lm1) = 0.0; rr(lm1) = 1.0
  DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
    l = id_column(liter)
    ! ... Map 2D-l into 3D-(i,j) indexes
    ! i = l2i(l); j = l2j(l);
    ! ... Top & bottom cells
    k1s = k1
    kms = kmz(l)
    ! ... Form matrices
    !     Bdd: depth-summed explicit horizontal flux divergence
    Bdd(l) = dt1*SUM((uhpp(k1s:kms, l) - &
           & uhpp(k1s:kms,lWC(l))) /dx &
           & +(vhpp(k1s:kms, l) - &
           & vhpp(k1s:kms,lSC(l)))/dy)
    !     Bqq: right-hand side of the zeta system
    Bqq(l) = spp(l) - (dtdx1)*(Beagx(l)-Beagx(lWCH(l))) &
           & - (dtdy1)*(Beagy(l)-Beagy(lSCH(l))) &
           & - Bdd(l)
    !     Brr: diagonal coefficient of the zeta system
    Brr(l) = 1 + Bsx(l) + Bsx(lWCH(l)) + Bsy(l) + Bsy(lSCH(l))
  END DO;
  ! .... Modify qq matrices to incorporate sources/sinks
  IF ( iopssH(omp_get_thread_num ( )+1) > 0 ) THEN
    DO innH = 1, iopssH(omp_get_thread_num ( )+1)
      inn = ioph2iop(innH,omp_get_thread_num ( )+1)
      ! NOTE(review): only devices with ptype <= 0 add a net volume
      ! flux here; the meaning of the ptype codes is not visible in
      ! this file - confirm against the device-setup code
      IF ( ptype(iodev(inn)) > 0) CYCLE
      i = ipss(inn);
      j = jpss(inn);
      Bqq(ij2l(i,j)) = Bqq(ij2l(i,j)) + SUM(Qpss(:,inn))/(dx*dy)*twodt*tz
    ENDDO
  ENDIF
  !DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  !l = id_column(liter)
  !qq(l)=Bqq(l)
  !dd(l)=Bdd(l)
  !eagx(l)=Beagx(l)
  !eagy(l)=Beagy(l)
  !END DO
  !print *,"qq:",sum(qq(:))
  !print *,"spp:",sum(spp(:))
  !print *,"dd:",sum(dd(:))
  !print *,"eagx:",sum(eagx(:))
  !print *,"eagy:",sum(eagy(:))
  !.....Adjust qq & sx/sy matrices for open boundary conditions.....
  IF (nopen > 0 ) CALL MODqqddrr4openBC(Bstart,Bend,Bqq,Bsx,Bsy)
  !print *,"qqmod:",sum(qq(:))
  !.....Compute CPU time spent in subroutine.....
  etime = TIMER(0.0)
  t_matcon2 = t_matcon2 + (etime - btime)
END SUBROUTINE matcon
!***********************************************************************
SUBROUTINE SolverSparse(n,Bstart,Bend,lWCH,lSCH,Bsx,Bsy,Bqq,Brr,iter,istep,thrs)
!***********************************************************************
!
!  Purpose: To solve the system matrix for zeta using the
!           preconditioned conjugate gradient method. It uses
!           Storage format 1 (i.e. ELLPACK or iparm(12) = 1;
!           in this manner a considerable amount of time is saved
!           as we do not have to store an imxjm by imxjm matrix,
!           pentadiagonal but very large; instead we only store
!           lmxlm matrix; this is extremely useful in sparse
!           bathymetries such as in rivers. Each row in the matrix
!           has at most 5 non-zero elements, which are stored in coeffA;
!           the location of the coefficients in the matrix is stored
!           in jcoefA - see instructions.
!
!  Arguments:
!    n           - current step counter (used in log messages)
!    Bstart,Bend - column-index bounds of this thread's block
!    lWCH,lSCH   - west/south neighbour column maps for the block
!    Bsx,Bsy     - [sx]/[sy] coefficients from subr. matcon
!    Bqq,Brr     - right-hand side and diagonal terms from subr. matcon
!    iter,istep  - iteration/step counters (not referenced in the body)
!    thrs        - simulation time in hours (for error reports)
!
!  Threading: all threads fill their slice of coeffA/jcoefA/rhs/zeta;
!  only thread 0 allocates workspace and calls nspcg, with !$omp
!  barriers separating the fill, solve, and read-back phases.
!
!-----------------------------------------------------------------------
  INTEGER, INTENT(IN) :: Bstart,Bend,n,iter,istep
  REAL, INTENT(IN) :: thrs
  REAL, DIMENSION (Bstart:Bend+1), INTENT(IN) :: Bsx,Bsy,Bqq,Brr
  INTEGER, DIMENSION(Bstart:Bend+1), INTENT(IN) :: lWCH,lSCH
  !.....Local variables.....
  EXTERNAL mic1, jac1, cg, si
  INTEGER :: nw1a, inw1a, maxnz1a, i, j, m, &
             ier, nrA, ncA, istat, nfirstA=0, ios,aux_indice,liter
  ! (nfirstA is implicitly SAVEd by its initializer: first-entry flag)
  INTEGER, SAVE:: i895 = 895
  CHARACTER(LEN=25):: solverfile="SolverAMODE.txt"
  !.....Timing.....
  REAL, EXTERNAL :: TIMER
  REAL :: btime, etime
  btime = TIMER(0.0)
  !.....Set matrix solution variables and workspace
  !     parameters on first entry into subroutine.....
  !IF ( nfirstA == 0 ) THEN
  !  nfirstA = 1
  !  ! Compute the number of rows of active cells in the grid
  !  nrA = jlast-jfirst + 1;
  !  ! Compute the number of columns of active cells in the grid
  !  ncA = ilast-ifirst + 1
  !  ! Compute number of equations to be solved
  !  ndimA = nrA*ncA ! NorthDelta
  !  ! Define the matrix bandwidth
  !  ibdwdA = nrA
  !  ! Define column width of 'coef' matrix
  !  maxnzA = mdimA
  !  ! Liberally estimate workspace requirements
  !  nwA = 20*ndimA; inwA = 5*ndimA
  !  ! Open file
  !  OPEN (UNIT=i895, FILE=solverfile, IOSTAT=ios)
  !END IF
  IF(omp_get_thread_num ( )==0) THEN
  !.....Set matrix solution variables and workspace
  !     parameters on first entry into subroutine (thread 0 only).....
  IF ( nfirstA == 0 ) THEN
    nfirstA = 1
    ! Compute number of equations to be solved
    ndimA = lm
    ! Define column width of 'coef' matrix
    maxnzA = mdimA
    ! Liberally estimate workspace requirements
    nwA = 20*ndimA; inwA = 5*ndimA
    ! Open file
    !OPEN (UNIT=i895, FILE=solverfile, IOSTAT=ios)
  END IF
  !.....Reset workspace parameters.....
  nw1a = nwA+(2*ndimA); inw1a = inwA; maxnz1a = maxnzA
  !.....Allocate arrays.....
  ALLOCATE (wksp (nw1A ) , &
  &         iwksp (inw1A) , STAT=istat)
  IF (istat /= 0) CALL allocate_error ( istat, 23 )
  END IF
  !.....Define coef, jcoef1 and rhs arrays in
  !     preparation for calling (each thread fills its own columns).....
  DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
    m = id_column(liter)
    ! i = l2i(m);
    ! j = l2j(m);
    ! ... Row m of the pentadiagonal system: centre, W, S, N, E
    coeffA (m,1) = Brr(m)
    coeffA (m,2) = -Bsx(lWCH(m))
    coeffA (m,3) = -Bsy(lSCH(m))
    coeffA (m,4) = -Bsy(m)
    coeffA (m,5) = -Bsx(m)
    jcoefA (m,1) = m
    jcoefA (m,2) = lWC(m)
    jcoefA (m,3) = lSC(m)
    jcoefA (m,4) = lNC(m)
    jcoefA (m,5) = lEC(m)
    ! ... A neighbour index equal to lm1 marks "no neighbour";
    !     nspcg expects 0 in that case
    IF(jcoefA(m,2)==lm1) jcoefA(m,2)=0;
    IF(jcoefA(m,3)==lm1) jcoefA(m,3)=0;
    IF(jcoefA(m,4)==lm1) jcoefA(m,4)=0;
    IF(jcoefA(m,5)==lm1) jcoefA(m,5)=0;
    rhs(m) = Bqq(m)
    ! initial guess at zeta
    zeta(m) = sp(m)
  END DO
  !if(nopen > 0) CALL MODcoef4openBC
  !$omp barrier
  IF(omp_get_thread_num ( )==0) THEN
  !print *,"coeff1",sum(coeffA(:,1))
  !print *,"coeff4",sum(coeffA(:,4))
  !print *,"coeff5",sum(coeffA(:,5))
  !print *,"rhs",sum(rhs(:))
  !print *,"zeta",sum(zeta(:))
  !.....Set parameter defaults.....
  CALL dfault ( iparm, rparm )
  !.....Reset some default parameter values.....
  iparm(1) = 2      ! Use the default for DCC runs
  iparm(2) = 200    ! Limit maximum number of iterations to 200
  iparm(3) = 1      ! Warning messages and minimum output
  iparm(4) = i6     ! Define fortran unit number for output
  !iparm(21)= 0     ! Use scalar algorithm for matrix factorization
  rparm(1) = 1.E-6  ! Try default stopping test value
  iparm(12) = 1;    ! Storage mode use (1 = Primary format)
  !.....Solve for zeta.....
  WRITE (UNIT=i6,FMT='("**Enter nspcg n = ", I6)') n
  CALL nspcg (mic1,cg,ndimA,mdimA,ndimA,maxnz1A,coeffA,jcoefA,jp,ip,zeta, &
  &           ubar,rhs,wksp,iwksp,nw1A,inw1A,iparm,rparm,ier)
  WRITE (UNIT=i6,FMT='("**Exit nspcg n = ", I6)') n
  IF(ier /= 0) WRITE(UNIT=i6,FMT='("**ERROR", I5, " from nspcg")') ier
  !.....STOP program execution if a fatal error is encountered in nspcg.....
  ! (negative ier values are fatal; positive ones are warnings)
  IF (ier < 0 ) THEN
    PRINT *, " "
    PRINT '(" Fatal error in matrix solution on time step = ", I7)', n
    PRINT '(" **ERROR", I5, " from nspcg")', ier
    PRINT '(" Time = ", F10.4, " hours")', thrs
    PRINT *, " "
    PRINT *, " "
    PRINT *, " ****STOPPING si3d due to fatal error in matrix solution"
    WRITE (UNIT=i6,FMT='(" ****STOPPING si3d due to fatal matrix error")' )
    WRITE (UNIT=i6,FMT='(" Time = ", F10.4, " hours")') thrs
    STOP
  END IF
  ! print *,sum(zeta(:))
  ! if( n .EQ. 300) THEN
  !   do m=1,lm
  !
  !     print *,zeta(m)
  !
  !   end do
  ! end if
  END IF
  !$omp barrier
  !.....Load matrix solution into zeta array.....
  ! Each thread copies its own slice (the last thread uses lhf,
  ! the others the extended lhfE bound)
  IF(omp_get_thread_num ( )+1 == num_threads)THEN
    aux_indice=lhf(omp_get_thread_num ( )+1)
  ELSE
    aux_indice=lhfE(omp_get_thread_num ( )+1)
  END IF
  DO liter = lhi(omp_get_thread_num ( )+1), aux_indice
    m = id_column(liter)
    ! i = l2i(m);
    ! j = l2j(m);
    s(m) = zeta(m)
  END DO
  !.....Load matrix solution into zeta array.....
  !DO m = 1, ndimA
  !  i = (m-1)/ibdwdA + ifirst
  !  j = m + (jfirst-1) - (i-ifirst)*ibdwdA
  !  s(i,j) = zeta(m)
  !END DO
  ! ... Calculate surface layer thickness at time n+1
  CALL slayer_h
  !.....Save the workspace parameters, slightly over-dimensioning it
  !     Otherwise one gets some errors. - FJR: I really do not get this
  IF(omp_get_thread_num ( )==0) THEN
  nwA = FLOOR(nw1A*1.5); inwA = FLOOR(inw1A*1.5); maxnzA = maxnz1A
  !.....Deallocate arrays.....
  DEALLOCATE (wksp, iwksp )
  END IF
  !.....Compute CPU time spent in subroutine.....
  etime = TIMER(0.0)
  t_solver = t_solver + (etime - btime)
END SUBROUTINE SolverSparse
!***********************************************************************
SUBROUTINE slayer_h
!***********************************************************************
!
!  Purpose: To recompute the new values for the surface layer thicknesses
!           (h, hu, hv) after the zeta array is redefined. Note that
!           cells may become dry at time n+1, but no new cells will appear
!           at this time (wetting). The indexes for the surface layers
!           are not updated at this time, since they are used in subr. vel
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: i, j, k, l,liter
REAL :: haux
! ... Initialize layer thickness for time n+1
!     (column lm1 carried over unchanged - presumably the ghost/dry
!      column; confirm)
h(:,lm1)=hp(:,lm1)
hu(:,lm1)=hup(:,lm1)
hv(:,lm1)=hvp(:,lm1)
! ... Redo calculations for surface cells
!     1.- If drying occurs redo calcs. at cell k1s+1
!     2.- If cell k1s becomes thicker than its nominal size
!         just ignore. Wetting is not done at this time
!     Arrays storing surface cells are not modified at this
!     time since they are used in subr. vel
DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  If(liter == 0) CYCLE
  l = id_column(liter)
  ! ... Copy the layers that are NOT recomputed below straight from
  !     the time-n values: for h and hv the surface layer and the one
  !     below it are recomputed (copy ranges skip k1z..k1z+1 and
  !     k1v..k1v+1); for hu only the surface layer k1u is recomputed
  h(1:k1z(l)-1,l)=hp(1:k1z(l)-1,l)
  hu(1:k1u(l)-1,l)=hup(1:k1u(l)-1,l)
  hv(1:k1v(l)-1,l)=hvp(1:k1v(l)-1,l)
  h(k1z(l)+2:km1,l)=hp(k1z(l)+2:km1,l)
  hu(k1u(l)+1:km1,l)=hup(k1u(l)+1:km1,l)
  hv(k1v(l)+2:km1,l)=hvp(k1v(l)+2:km1,l)
  ! ... Map 3D-(i,j) from 2D-l indexes
  ! i = l2i(l); j = l2j(l);
  ! ... At zeta-points: if the surface layer dries (<= HMIN), zero it
  !     and recompute the layer below with the new free surface
  k = k1z(l)
  haux = AMIN1(zlevel(k+1),hhs(l)) + s(l)
  IF(haux <= HMIN) THEN
    h(k,l) = ZERO
    k = k + 1
    haux = AMIN1(zlevel(k+1),hhs(l)) + s(l)
    IF(haux <= HMIN) THEN
      h(k,l) = ZERO
    ELSE
      h(k,l) = haux
    END IF
  ELSE
    h(k,l) = haux
  END IF
  ! ... At u-points
  IF (mask(c2l(lEC(l)))) THEN
    k = k1u(l)
    haux=AMIN1(zlevel(k+1),hhu(l)) + &
    &    MAX(s(l),s(lEC(l)))
    IF(haux <= HMIN) THEN
      hu(k,l) = ZERO
      ! NOTE(review): k is incremented and then immediately reset to
      ! k1u(l), so the layer below the surface is never examined here
      ! (it is instead copied from hup by the k1u+1:km1 range above).
      ! This differs from the zeta- and v-point branches - confirm
      ! this asymmetry is intentional.
      k = k + 1; k = k1u(l)
      haux =AMIN1(zlevel(k+1),hhu(l)) + &
      &     MAX(s(l),s(lEC(l)))
      IF(haux <= HMIN) THEN
        hu(k,l) = ZERO
      ELSE
        hu(k,l) = haux
      END IF
    ELSE
      hu(k,l) = haux
    END IF
  ELSE
    hu(k1u(l):k1u(l)+1,l) = hup(k1u(l):k1u(l)+1,l)
  END IF
  ! ... At v-points
  IF (mask(c2l(lNC(l)))) THEN
    k = k1v(l)
    haux=AMIN1(zlevel(k+1),hhv(l)) + &
    &    MAX(s(l),s(lNC(l)))
    IF(haux <= HMIN) THEN
      hv(k,l) = ZERO
      k = k + 1;
      haux =AMIN1(zlevel(k+1),hhv(l)) + &
      &     MAX(s(l),s(lNC(l)))
      IF(haux <= HMIN) THEN
        hv(k,l) = ZERO
      ELSE
        hv(k,l) = haux
      END IF
    ELSE
      hv(k,l) = haux
    END IF
  ELSE
    hv(k1v(l):k1v(l)+1,l) = hvp(k1v(l):k1v(l)+1,l)
  END IF
ENDDO
END SUBROUTINE slayer_h
!***********************************************************************
SUBROUTINE layer_h
!***********************************************************************
!
!  Purpose: To recompute the surface layer thicknesses (h, hu, hv)
!           after the zeta array (s) has been redefined. Any layer
!           whose thickness does not exceed HMIN is flagged dry by
!           setting it to ZERO. (Note: hu and hv on closed boundaries
!           should be considered undefined.)
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: kk, lcol, kbot, lit
!.....Loop over the wet columns assigned to this thread..............
DO lit = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  lcol = id_column(lit)
  ! ... Layer thicknesses at zeta-points
  kbot = kmz(lcol)
  DO kk = k1, kbot
    h (kk,lcol) = AMIN1(zlevel(kk+1),hhs(lcol)) - &
                & AMAX1(zlevel( kk),-s (lcol))
    IF (h (kk,lcol) <= HMIN) h (kk,lcol) = ZERO
  ENDDO
  ! ... Layer thicknesses at u-points (only if east column is wet)
  IF (mask(c2l(lEC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lEC(lcol)))
    DO kk = k1, kbot
      hu(kk,lcol) = AMIN1(zlevel(kk+1),hhu(lcol)) - &
                  & AMAX1(zlevel( kk),-MAX(s(lcol),s(lEC(lcol))))
      IF (hu(kk,lcol) <= HMIN) hu(kk,lcol) = ZERO
    ENDDO
  ENDIF
  ! ... Layer thicknesses at v-points (only if north column is wet)
  IF (mask(c2l(lNC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lNC(lcol)))
    DO kk = k1, kbot
      hv(kk,lcol) = AMIN1(zlevel(kk+1),hhv(lcol)) - &
                  & AMAX1(zlevel( kk),-MAX(s(lcol),s(lNC(lcol))))
      IF (hv(kk,lcol) <= HMIN) hv(kk,lcol) = ZERO
    ENDDO
  ENDIF
ENDDO
END SUBROUTINE layer_h
!***********************************************************************
SUBROUTINE layer_hp2
!***********************************************************************
!
!  Purpose: To recompute the old-time surface layer thicknesses
!           (hp, hup, hvp) after the zeta array (sp) has been smoothed.
!           Any layer whose thickness does not exceed HMIN is flagged
!           dry by setting it to ZERO. (Note: hup and hvp on closed
!           boundaries should be considered undefined.)
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: icol, jcol, kk, lcol, kbot, lit
!.....Loop over the wet columns assigned to this thread..............
DO lit = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  lcol = id_column(lit)
  ! ... Map 2D-l index into (i,j) grid indexes (needed for mask2d)
  icol = l2i(lcol); jcol = l2j(lcol)
  ! ... Layer thicknesses at zeta-points
  kbot = kmz(lcol)
  DO kk = k1, kbot
    hp (kk,lcol) = AMIN1(zlevel(kk+1),hhs(lcol)) - &
                 & AMAX1(zlevel( kk),-sp(lcol))
    IF (hp (kk,lcol) <= HMIN) hp (kk,lcol) = ZERO
  ENDDO
  ! ... Layer thicknesses at u-points (only if east column is wet)
  IF (mask2d(icol+1,jcol)) THEN
    kbot = MIN(kmz(lcol),kmz(lEC(lcol)))
    DO kk = k1, kbot
      hup(kk,lcol) = AMIN1(zlevel(kk+1),hhu(lcol)) - &
                   & AMAX1(zlevel( kk),-MAX(sp(lcol),sp(lEC(lcol))))
      IF (hup(kk,lcol) <= HMIN) hup(kk,lcol) = ZERO
    ENDDO
  ENDIF
  ! ... Layer thicknesses at v-points (only if north column is wet)
  IF (mask2d(icol,jcol+1)) THEN
    kbot = MIN(kmz(lcol),kmz(lNC(lcol)))
    DO kk = k1, kbot
      hvp(kk,lcol) = AMIN1(zlevel(kk+1),hhv(lcol)) - &
                   & AMAX1(zlevel( kk),-MAX(sp(lcol),sp(lNC(lcol))))
      IF (hvp(kk,lcol) <= HMIN) hvp(kk,lcol) = ZERO
    ENDDO
  ENDIF
ENDDO
END SUBROUTINE layer_hp2
!***********************************************************************
SUBROUTINE layer_hp3
!***********************************************************************
!
!  Purpose: To recompute the old-time surface layer thicknesses
!           (hp, hup, hvp) after the zeta array (sp) has been smoothed.
!           Each thickness is evaluated into a scratch variable first,
!           so every stored value is written exactly once per layer.
!           (Note: hup and hvp on closed boundaries should be
!           considered undefined.)
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: kk, lcol, kbot, lit
REAL    :: thick
!.....Loop over the wet columns assigned to this thread..............
DO lit = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  lcol = id_column(lit)
  ! ... Layer thicknesses at zeta-points
  kbot = kmz(lcol)
  DO kk = k1, kbot
    thick = AMIN1(zlevel(kk+1),hhs(lcol)) - &
          & AMAX1(zlevel( kk),-sp(lcol))
    IF (thick <= HMIN) thick = ZERO
    hp (kk,lcol) = thick
  ENDDO
  ! ... Layer thicknesses at u-points (only if east column is wet)
  IF (mask(c2l(lEC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lEC(lcol)))
    DO kk = k1, kbot
      thick = AMIN1(zlevel(kk+1),hhu(lcol)) - &
            & AMAX1(zlevel( kk),-MAX(sp(lcol),sp(lEC(lcol))))
      IF (thick <= HMIN) thick = ZERO
      hup(kk,lcol) = thick
    ENDDO
  ENDIF
  ! ... Layer thicknesses at v-points (only if north column is wet)
  IF (mask(c2l(lNC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lNC(lcol)))
    DO kk = k1, kbot
      thick = AMIN1(zlevel(kk+1),hhv(lcol)) - &
            & AMAX1(zlevel( kk),-MAX(sp(lcol),sp(lNC(lcol))))
      IF (thick <= HMIN) thick = ZERO
      hvp(kk,lcol) = thick
    ENDDO
  ENDIF
ENDDO
END SUBROUTINE layer_hp3
!***********************************************************************
SUBROUTINE TopLayerIndexp2
!***********************************************************************
!
!  Purpose: To locate the index of the top-most wet layer (k1z, k1u,
!           k1v) in each column, given that hp, hup and hvp have
!           already been computed. A column with no wet layer keeps
!           the sentinel value km1.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: kk, lcol, kbot, lit
!.....Loop over the wet columns assigned to this thread..............
DO lit = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  lcol = id_column(lit)
  ! ... Default: no wet layer found (sentinel)
  k1z(lcol) = km1
  k1u(lcol) = km1
  k1v(lcol) = km1
  ! ... First wet layer at zeta-points
  kbot = kmz(lcol)
  DO kk = k1, kbot
    IF (hp (kk,lcol) > ZERO) THEN
      k1z(lcol) = kk
      EXIT
    ENDIF
  ENDDO
  ! ... First wet layer at u-points (only if east column is wet)
  IF (mask(c2l(lEC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lEC(lcol)))
    DO kk = k1, kbot
      IF (hup(kk,lcol) > ZERO) THEN
        k1u(lcol) = kk
        EXIT
      ENDIF
    ENDDO
  ENDIF
  ! ... First wet layer at v-points (only if north column is wet)
  IF (mask(c2l(lNC(lcol)))) THEN
    kbot = MIN(kmz(lcol),kmz(lNC(lcol)))
    DO kk = k1, kbot
      IF (hvp(kk,lcol) > ZERO) THEN
        k1v(lcol) = kk
        EXIT
      ENDIF
    ENDDO
  ENDIF
ENDDO
END SUBROUTINE TopLayerIndexp2
!***********************************************************************
SUBROUTINE vel(Bstart,Bend,Bagx,Barx,Bagy,Bary)
!***********************************************************************
!
!  Purpose: To solve the momentum equations explicitly for velocity.
!
!  Arguments:
!    Bstart,Bend - column-index bounds of this thread's block
!    Bagx,Barx   - per-layer [ag]/[ar] arrays for the x-momentum
!                  equation (from subr. matmom)
!    Bagy,Bary   - per-layer [ag]/[ar] arrays for the y-momentum
!                  equation (from subr. matmom)
!
!-----------------------------------------------------------------------
INTEGER, INTENT(IN) :: Bstart,Bend
REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bagx,Barx,Bagy,Bary
!.....Local variables.....
REAL :: gthx1, gthy1, rho4cxx, rho4cyy, vvtemp, uutemp, cxx, cyy
INTEGER :: i, j, k, l, istat, kmx, kmy, kms, k0x, k0y, k0s, k1s, k1ss,liter
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
!.....Constants.....
gthx1 = gdtdx*tz; gthy1 = gdtdy*tz
! -----X-momentum equation-----
! ... Define constants: Ignore density variations in z
rho4cxx = 1000.
rho4cyy = 1000.
! ... Loop over cells
DO liter = lhiWCE(omp_get_thread_num ( )+1), lhfCE(omp_get_thread_num ( )+1)
  if(liter==0) CYCLE
  l = id_columnCE(liter)
  ! .... Map l into 2D-xy space
  ! i = l2i(l);
  ! j = l2j(l);
  ! ... Skip if East Column is dry
  !IF(.NOT.mask(c2l(lEC(l)))) CYCLE
  !.....Top & Bottom wett u-points
  kmx = MIN(kmz(lEC(l)),kmz(l))
  k0x = k1u(l)
  !.....Solve for the water surface slope portion of the
  !     x-mom eq and save the result in the cxx array.....
  cxx = gthx1 * rho4cxx * (s(lEC(l))-s(l))
  !.....Solve the x-momentum equation for uh.....
  DO k = k0x, kmx
    uh(k,l) = Bagx(k,l) - cxx*Barx(k,l)
  ENDDO
  ! ... Redo near surface flux calcs. if Drying occurs
  !     hu is calculated after the solution of zeta
  !     in subr. solver. The top most layer during
  !     time n (n+1/2) remains the same through the
  !     calculations to predict n+1 from n or n+1/2
  k = k0x;
  IF (hu(k ,l) <= ZERO) THEN
    ! ... Update fluxes: merge the dried surface-layer flux into the
    !     layer below so the depth-summed transport is preserved
    uh(k+1,l) = uh(k,l)+uh(k+1,l)
    uh(k ,l) = 0.0
    ! ... Update surface array
    k1u(l) = k0x+1
  ENDIF
END DO
! -----Y-momentum equation-----
! ... Loop over cells
DO liter = lhiCN(omp_get_thread_num ( )+1), lhfCN(omp_get_thread_num ( )+1)
  l = id_columnCN(liter)
  ! .... Map l into 2D-xy space
  ! i = l2i(l);
  ! j = l2j(l);
  ! ... Skip if North Column is dry
  !IF(.NOT.mask(c2l(lNC(l)))) CYCLE
  !.....Top & Bottom wett v-points .....
  kmy = MIN(kmz(lNC(l)),kmz(l))
  k0y = k1v(l)
  !.....Solve for the water surface slope portion of the
  !     y-mom eq and save the result in the cyy array.....
  cyy = gthy1 * rho4cyy * (s(lNC(l))-s(l))
  !.....Solve the y-momentum equation for vh.....
  DO k = k0y, kmy
    vh(k,l) = Bagy(k,l) - cyy*Bary(k,l)
  ENDDO
  ! ... Redo near surface flux calcs. if Drying occurs
  !     hu is calculated after the solution of zeta
  !     in subr. solver. The top most layer during
  !     time n (n+1/2) remains the same through the
  !     calculations to predict n+1 from n or n+1/2
  k = k0y;
  IF (hv(k ,l) <= ZERO) THEN
    ! ... Update fluxes: merge the dried surface-layer flux into the
    !     layer below so the depth-summed transport is preserved
    vh(k+1,l) = vh(k,l)+vh(k+1,l)
    vh(k ,l) = 0.0
    ! ... Update surface array
    k1v(l) = k0y+1
  ENDIF
END DO
!.... Calculate vertical velocities from uh & uhpp values
!     to be used in the solution of scalar transport eq.
CALL continuity(1)
! ... Update surface array at s-points if drying occurs
DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  IF(liter==0) CYCLE
  l = id_column(liter)
  ! .... Map l into 2D-xy space
  ! i = l2i(l); j = l2j(l);
  ! ... Top wett s-points
  k0s = k1z(l);
  ! ... Recalculate surface cell if water column
  !     is wett.
  !IF (k0s<km1 .AND. h(k0s,l)<=ZERO) k1z(i,j)=k0s+1
  IF (h (k0s,l)<=ZERO) k1z(l)=MIN(k0s+1,km1)
ENDDO
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_vel = t_vel + (etime - btime)
END SUBROUTINE vel
!***********************************************************************
SUBROUTINE vel2
!***********************************************************************
!
!  Purpose: To recompute velocities at the new time level when wetting
!           occurs: if the layer above the current surface layer has
!           become wet, the surface flux is split between the two
!           layers in proportion to their thicknesses.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!-----------------------------------------------------------------------
!.....Local variables.....
REAL    :: uhfrac, vhfrac
INTEGER :: kk, lcol, lit
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
! ... Recalculate layer thicknesses including wetting & drying
CALL layer_h
! ... Loop over the columns assigned to this thread
DO lit = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
  lcol = id_column(lit)
  ! ... u-points (only if the east column is wet): redistribute the
  !     surface flux when the layer above has become wet
  IF (mask(c2l(lEC(lcol)))) THEN
    kk = k1u(lcol)
    IF (hu(kk-1,lcol) > ZERO) THEN
      uhfrac = uh(kk,lcol)/(hu(kk,lcol)+hu(kk-1,lcol))
      uh(kk  ,lcol) = uhfrac * hu(kk  ,lcol)
      uh(kk-1,lcol) = uhfrac * hu(kk-1,lcol)
    ENDIF
  ENDIF
  ! ... v-points (only if the north column is wet): same treatment
  IF (mask(c2l(lNC(lcol)))) THEN
    kk = k1v(lcol)
    IF (hv(kk-1,lcol) > ZERO) THEN
      vhfrac = vh(kk,lcol)/(hv(kk,lcol)+hv(kk-1,lcol))
      vh(kk  ,lcol) = vhfrac * hv(kk  ,lcol)
      vh(kk-1,lcol) = vhfrac * hv(kk-1,lcol)
    ENDIF
  ENDIF
END DO
!.....No need to recalculate vertical velocities since
!     they are calculated in either save or setrap routines for
!     next iteration or next time setp
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_vel = t_vel + (etime - btime)
END SUBROUTINE vel2
!***********************************************************************
SUBROUTINE continuity (ist)
!***********************************************************************
!
!  Purpose: To compute wp from horizontal components of the velocity
!           field, integrating the discrete continuity equation upward
!           from the bottom (wp = 0 at the bed).
!
!  Arguments:
!    ist = 1 -> wp from time-averaged fluxes (uh+uhpp)/2, (vh+vhpp)/2,
!               used in the solution of the scalar transport equation
!    ist = 2 -> wp from the n+1/2 fluxes uhp & vhp, used to compute
!               velocities at the next time step
!    ist = 3 -> as ist = 2, but the west-face flux is zeroed when the
!               u-face is dry (hup <= ZERO)
!
!-----------------------------------------------------------------------
! ... Arguments
INTEGER, INTENT(IN):: ist
! ... Local variables
INTEGER :: i, j, k, l, k1s, kms, inn,liter,innH
REAL :: uhp_aux
SELECT CASE (ist)
CASE (1) ! Compute wp from uh, uhpp, vh and vhpp
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! .... Map l into 2D-xy space
! i = l2i(l); j = l2j(l);
! ... Bottom & top wet s-points
kms = kmz(l);
k1s = k1z(l);
! ... Cycle if water column is dry
IF( k1s > kms ) CYCLE
! .... Loop over cells in water column, from bottom to surface, to
!      estimate vertical velocities which are consistent with the
!      formulation of continuity (mass conservation). These
!      velocities are then used for scalar transport calculations.
!      Fluxes are time-averaged over n-1 and n+1 (hence /twodx,/twody).
wp(:,l) = 0.0
DO k = kms,k1s,-1
wp(k,l) = wp (k+1,l) &
& -(uh (k ,l)-uh (k ,lWC(l))+ &
uhpp(k ,l)-uhpp(k ,lWC(l)))/twodx &
& -(vh (k ,l)-vh (k ,lSC(l))+ &
vhpp(k ,l)-vhpp(k ,lSC(l)))/twody
ENDDO
! ... Correct wp estimates for surface cell, due to
!     advective flux through faces of (now dry) cells above it
DO k = k1,k1s-1
wp(k1s,l) = wp (k1s,l) &
& -(uh (k ,l)-uh (k ,lWC(l))+ &
uhpp(k ,l)-uhpp(k ,lWC(l)))/twodx &
& -(vh (k ,l)-vh (k ,lSC(l))+ &
vhpp(k ,l)-vhpp(k ,lSC(l)))/twody
ENDDOIF
ENDDO
! .... Modify w estimates to incorporate sources/sinks (PSS)
IF ( iopssH(omp_get_thread_num ( )+1) > 0 ) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
i = ipss(inn)
j = jpss(inn)
l = ij2l(i,j);
kms = kmz(l);
k1s = k1z(l);
DO k = kms,k1s,-1
wp(k,l) = wp(k,l) + SUM(Qpss(k:kms,inn))/(dx*dy)
ENDDO
ENDDO
ENDIF
! ... Calculate wp in terms of uhp and vhp values - for computations
!     of velocities at next time step
CASE (2)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! .... Map l into 2D-xy space
! i = l2i(l); j = l2j(l);
! ... Bottom & top wet s-points
kms = kmz(l);
k1s = k1z(l);
! ... Cycle if water column is dry
IF( k1s > kms ) CYCLE
! .... Loop over cells in water column (bottom to surface) to
!      estimate vertical velocities from the n+1/2 fluxes
wp(:,l) = 0.0
DO k = kms,k1s,-1
wp(k,l) = wp(k+1,l)-(uhp(k,l)-uhp(k,lWC(l)))/dx &
& -(vhp(k,l)-vhp(k,lSC(l)))/dy
END DO
! ... Correct wp estimates for surface cell, due to
!     advective flux through faces of cells above it
DO k = k1,k1s-1
wp(k1s,l) = wp(k1s,l)-(uhp(k,l)-uhp(k,lWC(l)))/dx &
& -(vhp(k,l)-vhp(k,lSC(l)))/dy
END DO
END DO
! .... Modify w estimates to incorporate sources/sinks (PSS)
IF ( iopssH(omp_get_thread_num ( )+1) > 0 ) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
i = ipss(inn)
j = jpss(inn)
l = ij2l(i,j);
kms = kmz(l);
k1s = k1z(l);
DO k = kms,k1s,-1
wp(k,l) = wp(k,l) + SUM(Qpss(k:kms,inn))/(dx*dy)
ENDDO
ENDDO
ENDIF
CASE (3)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! .... Map l into 2D-xy space
! i = l2i(l); j = l2j(l);
! ... Bottom & top wet s-points
kms = kmz(l);
k1s = k1z(l);
! ... Cycle if water column is dry
IF( k1s > kms ) CYCLE
! .... As CASE (2), but the west-neighbour flux is replaced by zero
!      when the local u-face is dry.
!      NOTE(review): the wet branch reads uh (time n+1) while the
!      local flux is uhp (n+1/2), and the test is on hup(k,l) rather
!      than the west face thickness - looks inconsistent with
!      CASE (2); confirm against the original formulation.
wp(:,l) = 0.0
DO k = kms,k1s,-1
if(hup(k,l)>ZERO)THEN
uhp_aux=uh(k,lWC(l))
else
uhp_aux=0.0
end if
wp(k,l) = wp(k+1,l)-(uhp(k,l)-uhp_aux)/dx &
& -(vhp(k,l)-vhp(k,lSC(l)))/dy
END DO
! ... Correct wp estimates for surface cell, due to
!     advective flux through faces of cells above it
DO k = k1,k1s-1
if(hup(k,l)>ZERO)THEN
uhp_aux=uh(k,lWC(l))
else
uhp_aux=0.0
end if
wp(k1s,l) = wp(k1s,l)-(uhp(k,l)-uhp_aux)/dx &
& -(vhp(k,l)-vhp(k,lSC(l)))/dy
END DO
END DO
! .... Modify w estimates to incorporate sources/sinks (PSS)
IF ( iopssH(omp_get_thread_num ( )+1) > 0 ) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
i = ipss(inn)
j = jpss(inn)
l = ij2l(i,j);
kms = kmz(l);
k1s = k1z(l);
DO k = kms,k1s,-1
wp(k,l) = wp(k,l) + SUM(Qpss(k:kms,inn))/(dx*dy)
ENDDO
ENDDO
ENDIF
END SELECT
! ... Modify velocity estimates near the boundaries to
!     account for open boundaries.
IF (nopen > 0) CALL MODvel4openBC
END SUBROUTINE continuity
!***********************************************************************
SUBROUTINE exsal(Bstart,Bend,lSCH,lNCH,lECH,lWCH,Bhaxpp,Bhaypp,Bth3,Bth4,Bth2,Bex,thrs)
!***********************************************************************
!
!  Purpose: To evaluate the explicit terms (advection & horizontal
!           diffusion) in the scalar transport equation using flux
!           limiter methods. The sum of these terms is saved in the
!           array Bex(k,l), which is the primary output of this
!           subroutine.
!
!  Arguments:
!    Bstart,Bend      - first/last column index of this thread's block
!    lSCH,lNCH,lECH,lWCH - per-block south/north/east/west neighbour
!                       column maps (Bend+1 is a ghost column)
!    Bhaxpp,Bhaypp    - work arrays: horizontal diffusivity*thickness
!                       at u- and v-points (filled here)
!    Bth2,Bth4,Bth3   - work arrays: EW, NS and vertical (UD) advective
!                       fluxes of the scalar (filled here)
!    Bex              - output: explicit r.h.s. of the transport eq.
!    thrs             - time in hours, passed to the open-bc routine
!
!-----------------------------------------------------------------------
INTEGER, INTENT(IN) :: Bstart,Bend
REAL, INTENT(IN) :: thrs
REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bhaxpp, Bhaypp,Bth3,Bth4,Bth2,Bex
INTEGER, DIMENSION (Bstart:Bend+1), INTENT(IN) :: lSCH,lNCH,lWCH,lECH
! ... Local variables
INTEGER :: i, j, k, l, k1s, kms, gamma1, istat,liter
REAL :: vel, ratio, C_f, delz, twodt1, hd
REAL, DIMENSION (4 ) :: ss
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
! ... Constants used in solution (twodt1 = effective 2*dt)
twodt1 = twodt*tz
! ... Calculate hdxpp & hdypp arrays for diffusion terms
!     (ghost column Bend+1 zeroed so neighbour lookups are safe)
Bhaxpp(:,Bend+1) = 0.0;
Bhaypp(:,Bend+1) = 0.0;
! ... NOTE: this loop starts at lhiW - presumably it includes the
!     extra westward halo columns needed by the flux stencils; confirm.
DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
If(liter == 0) CYCLE
l = id_column(liter)
! ... 3D-(i,j) indexes for l
! i = l2i(l); j = l2j(l);
! ... Retrieve top & bottom wet sal-pts .................
kms = kmz(l)
k1s = k1z(l)
Bhaxpp(1:k1-1,l) = 0.0
Bhaxpp(kms+1:km1,l) = 0.0
Bhaypp(1:k1-1,l) = 0.0
Bhaypp(kms+1:km1,l) = 0.0
! ... Calculate hdxpp & hdypp array at u-&v- pts ........
!     Interfaces connecting wet & dry cells will not
!     have diffusive transport in the present formulation
!     (hupp/hvpp are zero there)
DO k = k1, kms
Bhaxpp(k,l) = Ax0*hupp(k,l)
Bhaypp(k,l) = Ay0*hvpp(k,l)
ENDDO
END DO
Bth3(:,Bend+1) = 0
! Bth2 = 0
! ... Initialize ex & flux arrays to zeros
Bex(:,Bend+1) = 0.0; Bth4(:,Bend+1) = 0.0;! fluxXsal(:,lm1)= 0.0;
Bth2(:,Bend+1) = 0.0;
! Bth3 = 0.0; Bth2 = 0.0; Bex = 0.0; Bth4 = 0.0;
! ... Pass 1: EW advective fluxes (Bth2) at u-points, using a
!     limited Lax-Wendroff/upwind blend (Roe's Superbee limiter)
DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
If(liter == 0) CYCLE
l = id_column(liter)
! ... Map l- into (i,j)-indexes .........................
! i = l2i(l); j = l2j(l);
! ... Retrieve top & bottom wet sal-pts .................
kms = kmz(l)
k1s = k1z(l)
DO k = k1s, kms;
! ... EW fluxes .......................................
IF (hup(k,l)> ZERO) THEN
! ... Velocities at time n+1/2
vel = (uhpp(k,l) + uh(k,l))/2.
! ... Define 4-point stencil for scalar transport;
!     dry neighbours are replaced by the nearest wet value
ss(2) = salpp(k, l );
ss(3) = salpp(k,lEC(l));
IF (hpp(k, lWC(l) )<=ZERO) THEN; ss(1) = ss(2);
ELSE; ss(1)=salpp(k, lWC(l) ); ENDIF;
IF (hpp(k,lEC(lEC(l)))<=ZERO) THEN; ss(4) = ss(3);
ELSE; ss(4)=salpp(k,lEC(lEC(l))); ENDIF;
! ... Calculate Cf for flux computation
!     (gamma1 selects the upwind gradient: -1 for vel>0, +1 for vel<0)
C_f = 0.0;
gamma1 = -SIGN (1., vel)
IF (ss(3) - ss(2) /= 0 ) THEN
ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
! MC flux limiter (VanLeer, 1977)
!C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
! ... Roe's Superbee Limiter
C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
ENDIF
! ... Calculate fluxes
Bth2(k,l) = vel/2.*(ss(3)+ss(2))- &
& ((1.-C_f)*ABS(vel)+vel**2.*twodt1/dx*C_f)*(ss(3)-ss(2))/2.
ELSE
Bth2(k,l) = 0.0
ENDIF
ENDDO;
ENDDO;
! ... Pass 2: NS fluxes (Bth4) at v-points and vertical UD fluxes
!     (Bth3) at layer interfaces, same limiter
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map l- into (i,j)-indexes .........................
! i = l2i(l); j = l2j(l);
! ... Retrieve top & bottom wet sal-pts .................
kms = kmz(l)
k1s = k1z(l)
DO k = k1s, kms;
! ... NS fluxes .......................................
IF (hvp(k,l)> ZERO) THEN
! ... Velocities at time n+1/2
vel = (vhpp(k,l) + vh(k,l))/2.
! ... Define stencil for scalar transport
ss(2) = salpp(k, l );
ss(3) = salpp(k, lNC(l) );
IF (hpp(k, lSC(l) )<= ZERO) THEN; ss(1) = ss(2);
ELSE; ss(1) = salpp(k, lSC(l) ); ENDIF;
IF (hpp(k,lNC(lNC(l)))<= ZERO) THEN; ss(4) = ss(3);
ELSE; ss(4) = salpp(k,lNC(lNC(l))); ENDIF;
! ... Calculate Cf for flux computation
C_f = 0.0
gamma1 = -SIGN (1., vel)
IF (ss(3) - ss(2) /= 0 ) THEN
ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
! MC flux limiter (VanLeer, 1977)
!C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
! ... Roe's Superbee Limiter
C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
ENDIF
! ... Calculate fluxes
!     NOTE(review): the antidiffusive term divides by dx although
!     this is a NS flux - presumably it should be dy; this is
!     harmless only on square grids (dx == dy). Confirm.
Bth4(k,l) = vel/2.*(ss(3)+ss(2))- &
& ((1.-C_f)*ABS(vel)+vel**2.*twodt1/dx*C_f)*(ss(3)-ss(2))/2.
ELSE
Bth4(k,l) = 0.0
ENDIF
! ... UD fluxes .......................................
IF (hp(k-1,l) > ZERO) THEN
! ... Velocities at time n + 1 (no flux through the free surface)
vel = wp(k,l); IF (k == k1s) vel = 0.0;
! ... Define stencil for scalar transport
ss(2) = salpp(k ,l);
ss(3) = salpp(k-1,l);
IF (hpp(k-2,l)<=ZERO) THEN ; ss(4)=ss(3);
ELSE; ss(4)=salpp(k-2,l); ENDIF;
IF (hpp(k+1,l)<=ZERO) THEN ; ss(1)=ss(2);
ELSE; ss(1)=salpp(k+1,l); ENDIF;
! ... Define C_f for flux computations
C_f = 1.0 ! Default method is Lax-Wendroff
gamma1 = -SIGN (1., vel)
IF (ss(3) - ss(2) /= 0 ) THEN
ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
! MC flux limiter (VanLeer, 1977)
!C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
! ... Roe's Superbee Limiter
C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
ENDIF
! ... Calculate fluxes (delz = distance between cell centres)
delz = (hp(k,l) + hp(k-1,l))/2.
Bth3(k,l) = vel/2.*(ss(3)+ss(2))- &
& ((1.-C_f)*ABS(vel)+vel**2.*twodt1/delz*C_f)*(ss(3)-ss(2))/2.
ELSE
Bth3(k,l) = 0.0
ENDIF
ENDDO;
ENDDO;
! ... Pass 3: assemble the explicit r.h.s. Bex from the storage term,
!     the three flux divergences and (optionally) horizontal diffusion
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map l- into (i,j)-indexes .........................
! i = l2i(l); j = l2j(l);
! ... Retrieve top & bottom wet sal-pts .................
kms = kmz(l)
k1s = k1z(l)
Bex(1:k1s-1,l) = 0.0
Bex(kms+1:km1,l) = 0.0
DO k = k1s, kms;
!.....Horizontal diffusion.....
hd= (Bhaxpp(k, l )*(salpp(k,lEC(l)) - salpp(k, l )) &
& -Bhaxpp(k,lWCH(l))*(salpp(k, l ) - salpp(k,lWC(l))))/dxdx &
&+(Bhaypp(k, l )*(salpp(k,lNC(l)) - salpp(k, l )) &
& -Bhaypp(k,lSCH(l))*(salpp(k, l ) - salpp(k,lSC(l))))/dydy
!.....Sum all terms
Bex(k,l) = hpp(k,l)*salpp(k,l)/twodt1 &
- (Bth2(k,l) - Bth2(k,lWCH(l))) / dx &
- (Bth4(k,l) - Bth4(k,lSCH(l))) / dy &
- (Bth3(k,l) - Bth3(k+1,l )) !+ hd * ihd
IF (ihd>0) Bex(k,l) = Bex(k,l) + hd ! Changed 12/2010 SWA
ENDDO;
ENDDO
CALL MODexsal4openbc(Bstart,Bend,Bex,thrs)
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_exsal = t_exsal + (etime - btime)
END SUBROUTINE exsal
!***********************************************************************
SUBROUTINE imsal(Bstart,Bend,Bex,heatSourceB)
!***********************************************************************
!
!  Purpose: To solve for active scalar concentration. The explicit
!           terms (Bex, from SUBROUTINE exsal) form the r.h.s.; vertical
!           diffusion is treated implicitly, yielding one tridiagonal
!           system per water column, solved with trid1.
!
!  29-may-2009 (F.J.Rueda) Include tpload (temp. load associated to rwps)
!
!  NOTE(review): the dummy argument heatSourceB is not referenced in
!  this routine - the r.h.s. uses the module array HeatSource instead.
!  Presumably they alias the same data for this thread block; confirm.
!
!-----------------------------------------------------------------------
INTEGER, INTENT(IN) :: Bstart,Bend
REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bex,heatSourceB
!.....Local variables.....
REAL :: twodt1, Tsource, Qsource
INTEGER :: i, j, k, l, k1s, kms, kt, nwlayers, inn, kk, noc,liter,lol,innH
REAL, DIMENSION (1:km1) :: hn
REAL, DIMENSION (3,1:km1) :: aa
REAL, DIMENSION (1:km) :: dsal
REAL, DIMENSION (1:ndz) :: sal1
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
! ... Constants used in solution (twodt1 = effective 2*dt)
twodt1 = twodt*tz
!.....Loop over interior sal-pts to solve for
!     matrix from the active scalar equation.....
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... 3D-(i,j) indexes for l - FJR - uncomment
i = l2i(l); j = l2j(l);
!.....Compute top & bottom layer numbers & No. of layers ....
kms = kmz(l);
k1s = k1z(l);
nwlayers = kms-k1s+1
! print *,"l:",l,"kmz:",kmz(l),"k1z:",k1z(l),"Hilo:",omp_get_thread_num ( )
! ... Define layer thickness at time n - The corrections for
!     surface and recently submerged cells are needed to
!     keep mass conservation - The test used to check
!     mass conservation is that of a surface seiche with
!     an equilibrium water surface level at the level
!     where grid cells change from level k to k+1
hn(k1s+1:kms) = h(k1s+1:kms,l)
hn(k1s ) = twodt1*wp(k1s,l)+hpp(k1s,l)
IF (hpp(k1s,l)<= ZERO) THEN
hn(k1s+1) = hpp(k1s+1,l)
ENDIF
SELECT CASE (nwlayers)
!.....Calculate active scalar for case of a single layer.....
CASE (1)
! ... Use h(k1s,l) instead of hn(k1s) -
aa( 2,k1s) = hn(k1s)/twodt1
dsal( k1s) = Bex(k1s,l) + HeatSource(k1s,l)
sal(k1s,l) = dsal(k1s )/aa(2,k1s)
! ... For one-layer columns that become dry - The
!     value of the threshold (1.E-2 in the code) is
!     arbitrary but small - it is used to avoid the
!     occurrence of errors in scalar conc. estimates
!     arising from errors in dividing ds by aa -
!     Note that the error allowed in estimating zeta
!     is of O(10-6) - see SUB. SOLVER
IF (h(k1s,l) < 1.E-2) sal(k1s,l) = salpp(k1s,l)
!.....Calculate active scalar for case of two or more layers.....
CASE (2:)
!.....Form coefficient matrix [aa]
!     Define upper diagonal terms (vertical diffusion, implicit)
aa(3,k1s:kms-1) = -Dv(k1s+1:kms,l)/(hn(k1s:kms-1)+hn(k1s+1:kms))*2.
aa(3,kms) = 0.0
!     Define lower diagonal terms
aa(1,k1s+1:kms) = -Dv(k1s+1:kms,l)/(hn(k1s:kms-1)+hn(k1s+1:kms))*2.
aa(1,k1s) = 0.0
!     Define center diagonal terms
aa(2,k1s:kms) = hn(k1s:kms)/twodt1-aa(1,k1s:kms)-aa(3,k1s:kms)
!.....form r.h.s. matrix [ds].....
DO k = k1s, kms
dsal(k) = Bex(k,l) + HeatSource(k,l)
ENDDO
!.....Solve tridiagonal system for the
!     vertical distribution of active scalar.....
! ....Modify matrices to take into account mixing action of plumes
IF ( iopssH(omp_get_thread_num ( )+1) > 0) THEN
DO innH = 1, iopssH(omp_get_thread_num ( )+1)
inn = ioph2iop(innH,omp_get_thread_num ( )+1)
IF ( j /= jpss(inn) .OR. i /=ipss(inn) ) CYCLE
DO k = k1s, kms
IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
Qsource = Qpss(k,inn)/(dx*dy)
Tsource = Tpss(k,inn)
dsal(k) = dsal(k)+Qsource*Tsource
ENDDO
ENDDO
ENDIF
CALL trid1 (aa, dsal, sal1, k1s, kms, km1, nwlayers)
!.....Define scalars at new time step....
!     (cells above the surface layer inherit the surface value)
sal(k1s:kms ,l) = sal1(1:nwlayers)
sal(k1 :k1s-1,l) = sal1(1 )
END SELECT
!.....End loop over scalar-pts.....
END DO
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_salin = t_salin + (etime - btime)
END SUBROUTINE imsal
!***********************************************************************
SUBROUTINE trid ( acoef, g, r, ag, ar, k1, km, km1, n )
!***********************************************************************
!
!  Purpose: Thomas (double-sweep) tridiagonal solver for the momentum
!           equations. The same coefficient matrix is solved for the
!           two right-hand sides g and r simultaneously; the solutions
!           are returned in ag and ar over layers k1..km (n unknowns).
!
!-----------------------------------------------------------------------
!.....Dimensioning parameter.....
INTEGER, PARAMETER :: kmax = 500
!.....Arguments.....
INTEGER, INTENT(IN) :: k1, km, km1, n
REAL, DIMENSION(km1), INTENT(IN) :: g, r
REAL, DIMENSION(km1), INTENT(INOUT) :: ag, ar
REAL, DIMENSION(3,km1), INTENT(IN) :: acoef
!.....Local variables.....
INTEGER, AUTOMATIC :: k, kk
REAL, AUTOMATIC :: denom
REAL, AUTOMATIC, DIMENSION(kmax) :: sub, dia, sup, rg, rr, csup, dg, dr, xg, xr
! ... Keep a running count of calls for performance accounting
n_trid = n_trid + 1
! ... Gather the three diagonals and both right-hand sides into
!     compact, 1-based work arrays
DO kk = k1, km
k = kk - k1 + 1
sub(k) = acoef(1,kk)
dia(k) = acoef(2,kk)
sup(k) = acoef(3,kk)
rg (k) = g(kk)
rr (k) = r(kk)
END DO
! ... Forward elimination: reduce the system to upper bidiagonal form
csup(1) = sup(1)/dia(1)
dg (1) = rg(1)/dia(1)
dr (1) = rr(1)/dia(1)
DO k = 2, n
denom = dia(k) - sub(k)*csup(k-1)
csup(k) = sup(k)/denom
dg (k) = (rg(k) - sub(k)*dg(k-1))/denom
dr (k) = (rr(k) - sub(k)*dr(k-1))/denom
END DO
! ... Back substitution for both right-hand sides
xg(n) = dg(n)
xr(n) = dr(n)
DO k = n-1, 1, -1
xg(k) = dg(k) - csup(k)*xg(k+1)
xr(k) = dr(k) - csup(k)*xr(k+1)
END DO
! ... Scatter the solutions back to the km1-sized output arrays
DO kk = k1, km
ag(kk) = xg(kk-k1+1)
ar(kk) = xr(kk-k1+1)
END DO
END SUBROUTINE trid
!***********************************************************************
SUBROUTINE trid1 ( acoef, dsal, sal, k1, km, km1, n )
!***********************************************************************
!
!  Purpose: Thomas (double-sweep) tridiagonal solver for the scalar
!           transport equation. Solves for the n = km-k1+1 unknowns of
!           one water column; the solution is returned in sal(1:n).
!
!-----------------------------------------------------------------------
!.....Dimensioning parameter.....
INTEGER, PARAMETER :: kmax = 500
!.....Arguments.....
INTEGER, INTENT(IN) :: k1, km, km1, n
REAL, DIMENSION(km), INTENT(INOUT) :: dsal
REAL, DIMENSION(n), INTENT(INOUT) :: sal
REAL, DIMENSION(3,km1), INTENT(IN) :: acoef
!.....Local variables.....
INTEGER :: k, kk
REAL, DIMENSION(kmax) :: lo, dia, up, rhs, ee, ff
! Note: n = number of 3-d layers (also number of unknowns)
! ... Gather diagonals & r.h.s. into compact 1-based work arrays
k = 0
DO kk = k1, km
k = k + 1
lo (k) = acoef(1,kk)
dia(k) = acoef(2,kk)
up (k) = acoef(3,kk)
rhs(k) = dsal(kk)
END DO
! ... Forward sweep: build the recurrence vectors ee & ff
!     (the loop body never executes when n == 2)
ee(1) = -up(1)/dia(1)
ff(1) = rhs(1)/dia(1)
DO k = 2, n-1
ee(k) = -up(k)/(dia(k)+lo(k)*ee(k-1))
ff(k) = (rhs(k)-lo(k)*ff(k-1))/(dia(k)+lo(k)*ee(k-1))
END DO
! ... Bottom unknown closes the elimination
sal(n) = (rhs(n)-lo(n)*ff(n-1))/(dia(n)+lo(n)*ee(n-1))
! ... Backward sweep recovers the remaining unknowns
DO k = n-1, 1, -1
sal(k) = ee(k)*sal(k+1) + ff(k)
END DO
END SUBROUTINE trid1
!***********************************************************************
SUBROUTINE smooth
!***********************************************************************
!
!  Purpose: To smooth the solution from the leapfrog step with the
!           Asselin time filter (Mon. Weather Rev., v. 100, 1972,
!           p. 487-490). Smoothing is only performed if ismooth>=1.
!           The degree of smoothing is determined from the parameter
!           beta. Beta=0.05 is recommended. Values as high as 1.0
!           can be used. The choices for ismooth are:
!             If ismooth = 0 --> no smoothing
!             If ismooth = 1 --> smooth zeta and velocity
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: i, j, k, l, kmx, kmy, kms, k1s, k1x, k1y,liter
REAL :: wght, wghtpp, scC, scCpp
!.....Smooth zeta (and then recalculate hp, hup, and hvp).....
!     Asselin filter: x_p <- x_p + beta2*(x - 2*x_p + x_pp)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! i = l2i(l); j = l2j(l);
sp(l) = sp(l) + (beta2)*(s(l)-2.*sp(l)+spp(l))
ENDDO
!$omp barrier
CALL layer_hp3
!.....Smooth horizontal velocity components.....
!     Fluxes (uhp,vhp) are filtered; velocities are then rederived
!     from the filtered flux and the (updated) layer thickness
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map l- into (i,j)-indexes
! i = l2i(l); j = l2j(l);
! ... At u-points
IF(mask(c2l(lEC(l)))) THEN
kmx = MIN(kmz(l),kmz(lEC(l)))
DO k = k1, kmx
IF(hup(k,l)<=0.0) CYCLE
uhp(k,l)= uhp(k,l)+beta2*(uh(k,l)-2.*uhp(k,l)+uhpp(k,l))
up (k,l)= uhp(k,l)/hup(k,l)
ENDDO
ENDIF
! ... At v-points
IF(mask(c2l(lNC(l)))) THEN
kmy = MIN(kmz(l),kmz(lNC(l)))
DO k = k1, kmy
IF(hvp(k,l)<=0.0) CYCLE
vhp(k,l)= vhp(k,l)+beta2*(vh(k,l)-2.*vhp(k,l)+vhpp(k,l))
vp (k,l)= vhp(k,l)/hvp(k,l)
ENDDO
ENDIF
ENDDO
!$omp barrier
!.....No need to recalculate vertical velocity components
!     since these values should be stored in wpp either in save or
!     in settrap, which are not used in any computations -
END SUBROUTINE smooth
!***********************************************************************
SUBROUTINE settrap
!***********************************************************************
!
!  Purpose: To setup the arrays at the n and n+1/2 time levels
!           for use in the first iteration of the trapezoidal step:
!           the current p-level values are saved into the pp arrays
!           (time n) and the p arrays are replaced by the average of
!           the n and n+1 levels (time n+1/2).
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
!.....Local variables.....
INTEGER :: i, j, k, l, kmx, kmy, kms, k1x, k1y, k1s,liter,no,ide_t,is,ie,js,je,nn
REAL :: uutemp, vvtemp, wght, wghtpp, scC, scCpp
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
ide_t = omp_get_thread_num ( )+1
! ... Ghost-column (lm1) copies so neighbour lookups stay valid
spp(lm1)=sp(lm1)
hpp(:,lm1)=hp(:,lm1)
hupp(:,lm1)=hup(:,lm1)
hvpp(:,lm1)=hvp(:,lm1)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
!....Zeta array: save n level, then form n+1/2 average.....
spp(l) = sp(l)
sp(l) = 0.5*(s(l) + spp(l))
! ... Save layer thickness at time n
hpp(:,l) = hp(:,l);
hupp(:,l) = hup(:,l);
hvpp(:,l) = hvp(:,l);
END DO
!$omp barrier
! ... Define layer thickness at time n+1/2 &
!     recompute top layer index
CALL layer_hp2
CALL TopLayerIndexp2
! ... Define variable values at time n+1/2.
!     Only define values at cells that at n+1/2
!     are wet.
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map 3D-(i,j) from 2D-l indexes
i = l2i(l); j = l2j(l);
! ... At s-points: scalar & density at n+1/2
kms = kmz(l)
DO k = k1, kms
salpp(k,l) = salp(k,l);
salp (k,l)=(sal(k,l)+salpp(k,l))/2.
rhop (k,l)=densty_s(salp(k,l),t0)-1000.
ENDDO
! ... At u-points
IF (mask2d(i+1,j)) THEN
kmx = MIN(kmz(l),kmz(lEC(l)))
DO k = k1, kmx
uhpp(k,l) = uhp(k,l)
upp (k,l) = up(k,l)
IF (hup(k,l)>ZERO) THEN
uhp (k,l) = 0.5*(uh(k,l) + uhpp(k,l))
up  (k,l) = uhp(k,l)/hup(k,l)
ELSE
uhp (k,l) = 0.0
up  (k,l) = 0.0
ENDIF
ENDDO
! ... Redo near surface flux calcs. at n+1/2: fold the flux of a
!     layer that is wet at only one of the two time levels into
!     the surface layer
k = k1u(l);
! a. Wetting occurs from n+1/2 to n+1
IF (hu (k-1,l) > ZERO) THEN
uhp(k,l) = uhp(k,l)+uh(k-1,l)/2.
up (k,l) = uhp(k,l) / hup(k,l)
ENDIF
! b. Drying occurs from n to n+1/2
IF (hupp(k-1,l) > ZERO) THEN
uhp (k ,l) = uhp (k,l)+uhpp(k-1,l)/2.
up  (k ,l) = uhp(k,l) / hup(k,l)
ENDIF
ENDIF
! ... At v-points (same treatment as u-points)
IF (mask2d(i,j+1)) THEN
kmy = MIN(kmz(l),kmz(lNC(l)))
DO k = k1, kmy
vhpp(k,l) = vhp(k,l)
vpp (k,l) = vp(k,l)
IF (hvp(k,l)>ZERO) THEN
vhp (k,l) = 0.5*(vh(k,l) + vhpp(k,l))
vp  (k,l) = vhp(k,l)/hvp(k,l)
ELSE
vhp (k,l) = 0.0
vp  (k,l) = 0.0
ENDIF
ENDDO
! ... Redo near surface flux calcs. at n+1/2
k = k1v(l);
! a. Wetting occurs from n+1/2 to n+1
IF (hv (k-1,l) > ZERO) THEN
vhp(k,l) = vhp(k,l)+vh(k-1,l)/2.
vp (k,l) = vhp(k,l) / hvp(k,l)
ENDIF
! b. Drying occurs from n to n+1/2
IF (hvpp(k-1,l) > ZERO) THEN
vhp (k,l) = vhp (k,l)+vhpp(k-1,l)/2.
vp  (k,l) = vhp (k,l) / hvp (k,l)
ENDIF
ENDIF
ENDDO
!$omp barrier
!.....Recalculate vertical velocity at n+1/2 - used in
!     computing horizontal fluxes at n+1
CALL continuity(2)
! ... Save bndry. variables from n-1 into n and form n+1/2 averages
IF (nopenH(ide_t) > 0) THEN
do nn=1,nopenH(ide_t)
no = noh2no(nn,ide_t)
SELECT CASE ( iside(no) )
! West boundary
CASE (1)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBpp(:,j) = uhEBp(:,j); huEBpp(:,j) = huEBp(:,j);
uhWBpp(:,j) = uhWBp(:,j); huWBpp(:,j) = huWBp(:,j);
uhEBp(:,j) = (uhEBpp(:,j) + uhEB(:,j))/2.; huEBp(:,j) = (huEBp(:,j)+huEB(:,j))/2.;
uhWBp(:,j) = (uhWBpp(:,j) + uhWB(:,j))/2.; huWBp(:,j) = (huWBp(:,j)+huWB(:,j))/2.;
END DO
! North boundary
CASE (2)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBpp(:,i) = vhNBp(:,i); hvNBpp(:,i) = hvNBp(:,i);
vhSBpp(:,i) = vhSBp(:,i); hvSBpp(:,i) = hvSBp(:,i);
vhNBp(:,i) = (vhNBpp(:,i) + vhNB(:,i))/2.; hvNBp(:,i) = (hvNBp(:,i)+hvNB(:,i))/2.;
vhSBp(:,i) = (vhSBpp(:,i) + vhSB(:,i))/2.; hvSBp(:,i) = (hvSBp(:,i)+hvSB(:,i))/2.;
END DO
! East boundary
CASE (3)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBpp(:,j) = uhEBp(:,j); huEBpp(:,j) = huEBp(:,j);
uhWBpp(:,j) = uhWBp(:,j); huWBpp(:,j) = huWBp(:,j);
uhEBp(:,j) = (uhEBpp(:,j) + uhEB(:,j))/2.; huEBp(:,j) = (huEBp(:,j)+huEB(:,j))/2.;
uhWBp(:,j) = (uhWBpp(:,j) + uhWB(:,j))/2.; huWBp(:,j) = (huWBp(:,j)+huWB(:,j))/2.;
END DO
! South boundary
CASE (4)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBpp(:,i) = vhNBp(:,i); hvNBpp(:,i) = hvNBp(:,i);
vhSBpp(:,i) = vhSBp(:,i); hvSBpp(:,i) = hvSBp(:,i);
vhNBp(:,i) = (vhNBpp(:,i) + vhNB(:,i))/2.; hvNBp(:,i) = (hvNBp(:,i)+hvNB(:,i))/2.;
vhSBp(:,i) = (vhSBpp(:,i) + vhSB(:,i))/2.; hvSBp(:,i) = (hvSBp(:,i)+hvSB(:,i))/2.;
END DO
END SELECT
end do
end if
! ... Work with turbulence quantities (TurbModel)
IF (iturb>0) CALL settrap_2EqTVars
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_settrap = t_settrap + (etime - btime)
END SUBROUTINE settrap
!***********************************************************************
SUBROUTINE save(istep)
!***********************************************************************
!
!  Purpose: Save solution for next time step.
!           istep = 2 (after a trapezoidal step): copy the n+1 level
!           into the p arrays only.
!           istep = 1 (after a leapfrog step): shift p -> pp and
!           n+1 -> p for zeta, layer thicknesses, scalars and fluxes.
!
!  Fix: in CASE (1) the ghost-column (lm1) updates previously ran in
!  the wrong order - `spp(lm1)=spp(lm1)` was a self-assignment no-op,
!  and hpp/hupp/hvpp at lm1 were copied AFTER hp/hup/hvp had already
!  been overwritten with the n+1 values. The updates now mirror the
!  per-column loop below (save the old p level first, then advance it).
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
INTEGER,INTENT(IN) :: istep
!.....Local variables.....
INTEGER :: i, j, k, l, liter,nn,no,js,je,is,ie,ide_t
!.....Timing.....
REAL, EXTERNAL :: TIMER
REAL :: btime, etime
btime = TIMER(0.0)
ide_t = omp_get_thread_num ( )+1
SELECT CASE (istep)
! -----After a trapezoidal step (istep=2)-----
CASE (2)
! ... Ghost-column (lm1) copies so neighbour lookups stay valid
sp(lm1)=s(lm1)
hp(:,lm1)=h(:,lm1)
hup(:,lm1)=hu(:,lm1)
hvp(:,lm1)=hv(:,lm1)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
!.....Save zeta.....
sp(l) = s(l)
! ... Save layer thicknesses (h is calculated from s in solver)
hp(:,l) = h(:,l) ;
hup(:,l) = hu(:,l);
hvp(:,l) = hv(:,l);
end do
! ... Retrieve index for surface layer for next step
CALL TopLayerIndexp2
! ... Save state variables
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map 2D-l into 3D-(i,j) indexes
i = l2i(l); j = l2j(l)
DO k = k1, km;
! ... At s-points: scalar & density
salp (k,l) = sal(k,l)
rhop (k,l) = densty_s ( salp(k,l), t0 ) - 1000.
ENDDO
DO k = k1, km;
! ... At u-points
IF (hup(k,l)>ZERO)THEN
uhp (k,l) = uh (k,l)           ! For horiz. scalar & momentum advection
up  (k,l) = uhp(k,l)/hup(k,l)  ! For horiz. momentum advection
u   (k,l) = up (k,l)           ! For output purposes
ELSE
uhp (k,l) = 0.0
up  (k,l) = 0.0
u   (k,l) = 0.0
ENDIF
ENDDO
DO k = k1, km;
! ... At v-points
IF (hvp(k,l)>ZERO)THEN
vhp (k,l) = vh (k,l)           ! For horiz. scalar & momentum advection
vp  (k,l) = vhp(k,l)/hvp(k,l)  ! For horiz. momentum advection
v   (k,l) = vp (k,l)           ! For output purposes
ELSE
vhp (k,l) = 0.0
vp  (k,l) = 0.0
v   (k,l) = 0.0
ENDIF
ENDDO
END DO;
!$omp barrier
!.....Recalculate vertical velocity wp to be used
!     in calculation of velocities at next time step
CALL continuity(2)
! ... Save bndry. variables from n-1 into n
IF (nopenH(ide_t) > 0) THEN
do nn=1,nopenH(ide_t)
no = noh2no(nn,ide_t)
SELECT CASE ( iside(no) )
! West boundary
CASE (1)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBp(:,j) = uhEB(:,j) ; huEBp(:,j) = huEB(:,j) ;
uhWBp(:,j) = uhWB(:,j) ; huWBp(:,j) = huWB(:,j) ;
END DO
! North boundary
CASE (2)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBp(:,i) = vhNB(:,i) ; hvNBp(:,i) = hvNB(:,i) ;
vhSBp(:,i) = vhSB(:,i) ; hvSBp(:,i) = hvSB(:,i) ;
END DO
! East boundary
CASE (3)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBp(:,j) = uhEB(:,j) ; huEBp(:,j) = huEB(:,j) ;
uhWBp(:,j) = uhWB(:,j) ; huWBp(:,j) = huWB(:,j) ;
END DO
! South boundary
CASE (4)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBp(:,i) = vhNB(:,i) ; hvNBp(:,i) = hvNB(:,i) ;
vhSBp(:,i) = vhSB(:,i) ; hvSBp(:,i) = hvSB(:,i) ;
END DO
END SELECT
end do
end if
! ... Save Turbulence variables
IF (iturb>0) CALL save_2EqTVars(istep)
! ... Save tracers
IF (ntr > 0) THEN
tracerpp = tracer;
ENDIF
! -----After a leapfrog step (istep=1)-----
CASE (1)
! ... Ghost-column (lm1): save the n level into pp BEFORE overwriting
!     the p level with the n+1 values (mirrors the per-column loop
!     below; the previous order left spp(lm1) stale and copied the
!     already-updated n+1 thicknesses into the pp arrays)
spp(lm1)=sp(lm1)
sp(lm1)=s(lm1)
hpp(:,lm1)=hp(:,lm1)
hupp(:,lm1)=hup(:,lm1)
hvpp(:,lm1)=hvp(:,lm1)
hp(:,lm1)=h(:,lm1)
hup(:,lm1)=hu(:,lm1)
hvp(:,lm1)=hv(:,lm1)
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
!.....Save zeta.....
spp(l) = sp(l)
sp(l) = s(l)
!.....Save layer thickness at time n
hpp(:,l) = hp(:,l)
hupp(:,l) = hup(:,l)
hvpp(:,l) = hvp(:,l)
! ... Save layer thickness at time n+1
hp(:,l) = h(:,l)
hup(:,l) = hu(:,l)
hvp(:,l) = hv(:,l)
end do
!$omp barrier
! ... Retrieve index for surface layer for next step
CALL TopLayerIndexp2
! .... Save other variables
DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
l = id_column(liter)
! ... Map l- into (i,j)-indexes
! i = l2i(l); j = l2j(l);
DO k = k1, km;
! ... At s-points: shift scalar levels & rederive density
salpp(k,l) = salp(k,l)
salp (k,l) = sal (k,l);
rhop (k,l) = densty_s ( salp(k,l), t0 ) - 1000.
ENDDO
DO k = k1, km;
! ... At u- and v- points: shift flux & velocity levels
uhpp(k,l) = uhp(k,l)
vhpp(k,l) = vhp(k,l)
upp (k,l) = up (k,l)
vpp (k,l) = vp (k,l)
ENDDO
DO k = k1, km;
! ... At u-points
IF (hup(k,l)>ZERO)THEN
uhp (k,l) = uh (k,l)
up  (k,l) = uhp(k,l)/hup(k,l)
u   (k,l) = up (k,l)           ! For output purposes
ELSE
uhp (k,l) = 0.0
up  (k,l) = 0.0
u   (k,l) = 0.0
ENDIF
ENDDO
DO k = k1, km;
! ... At v-points
IF (hvp(k,l)>ZERO)THEN
vhp (k,l) = vh (k,l)
vp  (k,l) = vhp(k,l)/hvp(k,l)
v   (k,l) = vp (k,l)           ! For output purposes
ELSE
vhp (k,l) = 0.0
vp  (k,l) = 0.0
v   (k,l) = 0.0
ENDIF
END DO
END DO
!$omp barrier
!.....Recalculate vertical velocity wp to be used
!     in calculation of velocities at next time step
CALL continuity(2)
! ... Save bndry. variables
IF (nopenH(ide_t) > 0) THEN
do nn=1,nopenH(ide_t)
no = noh2no(nn,ide_t)
SELECT CASE ( iside(no) )
! West boundary
CASE (1)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBpp(:,j) = uhEBp(:,j); huEBpp(:,j) = huEBp(:,j);
uhWBpp(:,j) = uhWBp(:,j); huWBpp(:,j) = huWBp(:,j);
uhEBp(:,j) = uhEB(:,j) ; huEBp(:,j) = huEB(:,j) ;
uhWBp(:,j) = uhWB(:,j) ; huWBp(:,j) = huWB(:,j) ;
END DO
! North boundary
CASE (2)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBpp(:,i) = vhNBp(:,i); hvNBpp(:,i) = hvNBp(:,i);
vhSBpp(:,i) = vhSBp(:,i); hvSBpp(:,i) = hvSBp(:,i);
vhNBp(:,i) = vhNB(:,i) ; hvNBp(:,i) = hvNB(:,i) ;
vhSBp(:,i) = vhSB(:,i) ; hvSBp(:,i) = hvSB(:,i) ;
END DO
! East boundary
CASE (3)
i = isbcHH(no,ide_t); js = jsbcH(no,ide_t); je = jebcH(no,ide_t)
DO j = js, je
uhEBpp(:,j) = uhEBp(:,j); huEBpp(:,j) = huEBp(:,j);
uhWBpp(:,j) = uhWBp(:,j); huWBpp(:,j) = huWBp(:,j);
uhEBp(:,j) = uhEB(:,j) ; huEBp(:,j) = huEB(:,j) ;
uhWBp(:,j) = uhWB(:,j) ; huWBp(:,j) = huWB(:,j) ;
END DO
! South boundary
CASE (4)
j = jsbcH(no,ide_t); is = isbcHH(no,ide_t); ie = iebcHH(no,ide_t)
DO i = is, ie
vhNBpp(:,i) = vhNBp(:,i); hvNBpp(:,i) = hvNBp(:,i);
vhSBpp(:,i) = vhSBp(:,i); hvSBpp(:,i) = hvSBp(:,i);
vhNBp(:,i) = vhNB(:,i) ; hvNBp(:,i) = hvNB(:,i) ;
vhSBp(:,i) = vhSB(:,i) ; hvSBp(:,i) = hvSB(:,i) ;
END DO
END SELECT
end do
end if
! ... Save Turbulence variables
IF (iturb>0) CALL save_2EqTVars(istep)
! ... Save tracers
IF (ntr > 0) THEN
tracerpp = tracer;
ENDIF
CASE DEFAULT
PRINT *, "Invalid value of ISTEP in subroutine save"
END SELECT
!.....Compute CPU time spent in subroutine.....
etime = TIMER(0.0)
t_save = t_save + (etime - btime)
END SUBROUTINE save
!***********************************************************************
SUBROUTINE settrap2
!***********************************************************************
!
!  Purpose: To setup the arrays at the n+1/2 time level for use in
!           the second and subsequent iterations of the trapezoidal
!           step. Do not use smoothing as in the version of the code
!           by P.E. Smith.
!
!  NOTE(review): executed inside an OpenMP parallel region - each thread
!           updates only the water columns it owns (index ranges lhi..lhf
!           into id_column), and an explicit barrier precedes the call to
!           continuity(2) so all half-step layer data is in place.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
   !.....Local variables.....
   INTEGER :: i,j,k,l,kms,kmy,kmx,lol,liter,aux_indice
   REAL :: wght, wghtpp
   !....Zeta array.....
   sp(lm1)=0.5*(s(lm1)+spp(lm1))
   ! The last thread has no east halo, so its loop bound is lhf; interior
   ! threads extend through their east-halo columns (lhfE).
   IF(omp_get_thread_num ( )+1 == num_threads)THEN
       aux_indice=lhf(omp_get_thread_num ( )+1)
   ELSE
       aux_indice=lhfE(omp_get_thread_num ( )+1)
   END IF
   DO liter = lhi(omp_get_thread_num ( )+1),aux_indice
     l = id_column(liter)
     !....Zeta array.....
     ! Free-surface elevation at n+1/2 = average of n (spp) and n+1 (s)
     sp(l) = 0.5*(s(l) + spp(l))
   end do
   ! ...Define layers thickness at time n+1/2
   CALL layer_hp2
   CALL TopLayerIndexp2
   ! ... 3d arrays
   DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     l = id_column(liter)
     ! ... Map 3D-(i,j) from 2D-l indexes
     i = l2i(l); j = l2j(l);
     ! ... At s-points
     kms = kmz(l)
     DO k = k1, kms
       ! Salinity and density at n+1/2 (simple trapezoidal average)
       salp (k,l)= (sal(k,l)+salpp(k,l))/2.
       rhop (k,l)=densty_s(salp(k,l),t0)-1000.
     ENDDO
     ! ... At u-points
     IF (mask2d(i+1,j)) THEN
       kmx = MIN(kmz(l),kmz(lEC(l)))
       DO k = k1, kmx
         IF (hup(k,l)>ZERO) THEN
           uhp (k,l) = 0.5*(uh(k,l) + uhpp(k,l))
           up  (k,l) = uhp(k,l)/hup(k,l)
         ELSE
           uhp (k,l) = 0.0
           up  (k,l) = 0.0
         ENDIF
       ENDDO
       ! ... Redo near surface flux calcs.
       k = k1u(l);
       ! a. Wetting occurs from n+1/2 to n+1
       ! (the layer above the half-step top layer is wet at n+1: fold its
       !  flux into the top layer so mass is conserved)
       IF (hu  (k-1,l) > ZERO) THEN
         uhp(k,l) = uhp(k,l)+uh(k-1,l)/2.
         up (k,l) = uhp(k,l) / hup(k,l)
       ENDIF
       ! b. Drying occurs from n to n+1/2
       IF (hupp(k-1,l) > ZERO) THEN
         uhp (k,l) = uhp (k,l)+uhpp(k-1,l)/2.
         up  (k,l) = uhp(k,l) / hup(k,l)
       ENDIF
     ENDIF
     ! ... At v-points
     IF (mask2d(i,j+1)) THEN
       kmy = MIN(kmz(l),kmz(lNC(l)))
       DO k = k1, kmy
         IF (hvp(k,l)>ZERO) THEN
           vhp (k,l) = 0.5*(vh(k,l) + vhpp(k,l))
           vp  (k,l) = vhp(k,l)/hvp(k,l)
         ELSE
           vhp (k,l) = 0.0
           vp  (k,l) = 0.0
         ENDIF
       ENDDO
       ! ... Redo near surface flux calcs.
       k = k1v(l);
       ! a. Wetting occurs from n+1/2 to n+1
       IF (hv  (k-1,l) > ZERO) THEN
         vhp(k,l) = vhp(k,l)+ vh(k-1,l)/2.
         vp (k,l) = vhp(k,l) / hvp(k,l)
       ENDIF
       ! b. Drying occurs from n to n+1/2
       IF (hvpp(k-1,l) > ZERO) THEN
         vhp (k,l) = vhp (k,l)+vhpp(k-1,l)/2.
         vp  (k,l) = vhp (k,l) / hvp (k,l)
       ENDIF
     ENDIF
   ENDDO
   !$omp barrier
   CALL continuity(2)
   !.....Recalculate vertical velocity components to be used in
   !     calculating horizontal velocity at next iteration
!   DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
!     l = id_column(liter)
!     i = l2i(l); j = l2j(l);
!     kms = kmz(l);
!     wp(kms+1,l) = 0.0
!     DO k = kms,k1,-1
!       wp(k,l) = wp(k+1,l)-(uhp(k,l)-uhp(k,lWC(l)))/dx  &
!                          -(vhp(k,l)-vhp(k,lSC(l)))/dy
!     END DO
!   END DO
   ! ... Work with turbulence quantities (TurbModel)
   IF (iturb>0) CALL settrap2_2EqTVars
END SUBROUTINE settrap2
!***********************************************************************
SUBROUTINE exTracer (nt,Bstart,Bend,Bhaxpp,Bhaypp,Bth3,Bth4,Bth2,lSCH,lNCH,lECH,lWCH,Bex,thrs)
!***********************************************************************
!
!  Purpose: To solve transport equation for tracer, using Flux-limiters.
!           nt denotes tracer number to be solved
!
!  Arguments (review notes):
!    nt             - index of the tracer to transport
!    Bstart,Bend    - range of column indexes owned by the calling thread
!    Bhaxpp,Bhaypp  - work arrays: horizontal diffusivity * layer thickness
!    Bth2,Bth4,Bth3 - work arrays: advective fluxes at x-, y- and z-faces
!    lSCH,lNCH,lECH,lWCH - thread-local neighbour-column maps
!    Bex            - (out) explicit RHS of the transport equation
!    thrs           - presumably simulation time in hours; only passed
!                     through to MODextracer4openbc (verify at caller)
!
!-----------------------------------------------------------------------
   ! ... Arguments
   INTEGER, INTENT (IN) :: nt
   REAL, INTENT(IN) :: thrs
   INTEGER, INTENT(IN) :: Bstart,Bend
   REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bhaxpp, Bhaypp,Bth3,Bth4,Bth2,Bex
   INTEGER, DIMENSION (Bstart:Bend+1), INTENT(IN) :: lSCH,lNCH,lWCH,lECH
   ! ... Local variables
   INTEGER :: i, j, k, l, k1s, kms, gamma1, istat,liter
   REAL :: vel, ratio, C_f, delz, twodt1, hd
   REAL, DIMENSION (4 ) :: ss
   !.....Timing.....
   REAL, EXTERNAL :: TIMER
   REAL :: btime, etime
   btime = TIMER(0.0)
   ! ... Constants used in solution
   twodt1 = twodt*tz
   ! ... Calculate hdxpp & hdypp arrays for diffusion terms &
   Bhaxpp(:,Bend+1) = 0.0; Bhaypp(:,Bend+1) = 0.0;
   ! West-halo columns are included (lhiW); liter==0 marks "no halo".
   DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     If(liter == 0) CYCLE
     l = id_column(liter)
     ! ... 3D-(i,j) indexes for l
     ! i = l2i(l); j = l2j(l);
     ! ... Retrieve top & bottom wet sal-pts .................
     kms = kmz(l)
     k1s = k1z(l)
     ! ... Calculate hdxpp & hdypp array at u-&v- pts ........
     DO k = k1, kms
       Bhaxpp(k,l) = Ax0*hupp(k,l)
       Bhaypp(k,l) = Ay0*hvpp(k,l)
     ENDDO
   END DO
   Bth3(:,Bend+1) = 0.0
   ! ... Initialize ex & flux arrays to zeros
   Bex(:,Bend+1) = 0.0; Bth2(:,Bend+1)= 0.0; Bth4(:,Bend+1) = 0.0;
   DO liter = lhiW(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     If(liter == 0) CYCLE
     l = id_column(liter)
     ! ... Map l- into (i,j)-indexes .........................
     ! i = l2i(l); j = l2j(l);
     ! ... Retrieve top & bottom wet sal-pts .................
     kms = kmz(l)
     k1s = k1z(l)
     DO k = k1s, kms;
       ! ... EW fluxes .......................................
       IF (hup(k,l)> ZERO) THEN
         ! ... Velocities at time n+1/2
         vel = (uhpp(k,l) + uh(k,l))/2.
         ! ... Define stencil for scalar transport
         ! (dry neighbours duplicate the nearest wet value)
         ss(2) = tracerpp(k, l ,nt);
         ss(3) = tracerpp(k,lEC(l),nt);
         IF (hpp(k,lWC(l))<=ZERO)THEN; ss(1)=ss(2);
         ELSE; ss(1)=tracerpp(k,lWC(l),nt); ENDIF
         IF (hpp(k,lEC(lEC(l)))<=ZERO)THEN; ss(4)=ss(3);
         ELSE; ss(4)=tracerpp(k,lEC(lEC(l)),nt); ENDIF
         ! ... Calculate Cf factor to use in flux calculations
         ! gamma1 picks the upwind side of the interface
         gamma1 = -SIGN (1., vel)
         C_f = 0.0;
         IF (ss(3) - ss(2) /= 0 ) THEN
           ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
           ! MC flux limiter (VanLeer, 1977)
           !C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
           ! ... Roe's Superbee Limiter
           C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
         ENDIF
         ! ... Calculate fluxes at x-faces
         ! (C_f = 0 -> pure upwind; C_f = 1 -> Lax-Wendroff)
         Bth2(k,l) = vel/2.*(ss(3)+ss(2))- &
         & ((1.-C_f)*ABS(vel)+vel**2.*twodt1/dx*C_f)*(ss(3)-ss(2))/2.
       ELSE
         Bth2(k,l) = 0.0
       ENDIF
     ENDDO;
   ENDDO;
   DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     l = id_column(liter)
     ! ... Map l- into (i,j)-indexes .........................
     ! i = l2i(l); j = l2j(l);
     ! ... Retrieve top & bottom wet sal-pts .................
     kms = kmz(l)
     k1s = k1z(l)
     DO k = k1s, kms;
       ! ... NS fluxes .......................................
       IF (hvp(k,l)> ZERO) THEN
         ! ... Velocities at time n+1/2
         vel = (vhpp(k,l) + vh(k,l))/2.
         ! ... Define stencil for scalar transport
         ss(2) = tracerpp(k, l ,nt );
         ss(3) = tracerpp(k, lNC(l) ,nt );
         IF (hpp(k,lSC(l))<=ZERO)THEN; ss(1)=ss(2);
         ELSE; ss(1)=tracerpp(k,lSC(l),nt); ENDIF
         IF (hpp(k,lNC(lNC(l)))<=ZERO)THEN; ss(4)=ss(3);
         ELSE; ss(4)=tracerpp(k,lNC(lNC(l)),nt); ENDIF
         ! ... Calculate Cf factor to use in flux calculations
         C_f = 0.0; ! Default value is for upwinding
         gamma1 = -SIGN (1., vel)
         IF (ss(3) - ss(2) /= 0 ) THEN
           ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
           ! MC flux limiter (VanLeer, 1977)
           !C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
           ! ... Roe's Superbee Limiter
           C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
         ENDIF
         ! ... Calculate fluxes at y-faces
         ! NOTE(review): the limiter correction below divides by dx even
         ! though these are y-face fluxes; this is only correct when
         ! dx == dy - verify against the grid definition (the x-face
         ! expression above uses dx, the z-face one below uses delz).
         Bth4(k,l) = vel/2.*(ss(3)+ss(2))- &
         & ((1.-C_f)*ABS(vel)+vel**2.*twodt1/dx*C_f)*(ss(3)-ss(2))/2.
       ELSE
         Bth4(k,l) = 0.0
       ENDIF
       ! ... UD fluxes .......................................
       IF (hp(k-1,l)> ZERO) THEN
         ! ... Velocities at time n+1/2 (include settling velocity)
         vel = wp(k,l) ; IF (k == k1s) vel = 0.0;
         ! ... Define stencil for scalar transport
         ss(2) = tracerpp(k,l,nt);
         ss(3) = tracerpp(k-1,l,nt)
         IF(hpp(k-2,l)<=ZERO)THEN;ss(4)=ss(3);
         ELSE;ss(4)=tracerpp(k-2,l,nt);ENDIF
         IF(hpp(k+1,l)<=ZERO)THEN;ss(1)=ss(2);
         ELSE;ss(1)=tracerpp(k+1,l,nt);ENDIF;
         ! ... Calculate ratio of slope of solution across interfaces &
         !     estimate flux limiter
         C_f = 1.0 ! Default method is Lax-Wendroff
         gamma1 = -SIGN (1., vel)
         IF (ss(3) - ss(2) /= 0 ) THEN
           ratio =(ss(3+gamma1)-ss(2+gamma1))/(ss(3)-ss(2))
           ! MC flux limiter (VanLeer, 1977)
           ! C_f = MAX(0., MIN( 2*ratio, (1+ratio)/2., 2. ))
           ! ... Roe's Superbee Limiter
           C_f = MAX(0., MIN(1.,2.*ratio),MIN(2.,ratio))
         ENDIF
         ! ... Calculate fluxes at z-faces
         delz = (hp(k,l) + hp(k-1,l))/2.
         Bth3(k,l) = vel/2.*(ss(3)+ss(2))- &
         & ((1.-C_f)*ABS(vel)+vel**2.*twodt1/delz*C_f)*(ss(3)-ss(2))/2.
       ELSE
         Bth3(k,l) = 0.0
       ENDIF
     ENDDO;
   ENDDO;
   ! ... Update ex array with divergence of advective fluxes & diffusion
   DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     l = id_column(liter)
     ! ... Map l- into (i,j)-indexes .........................
     ! i = l2i(l); j = l2j(l);
     ! ... Retrieve top & bottom wet sal-pts .................
     kms = kmz(l)
     k1s = k1z(l)
     DO k = k1s, kms;
       !.....Horizontal diffusion.....
       hd= (Bhaxpp(k, l )*(tracerpp(k,lEC(l),nt) - tracerpp(k, l ,nt)) &
       & -Bhaxpp(k,lWCH(l))*(tracerpp(k, l ,nt) - tracerpp(k,lWC(l),nt)))/dxdx &
       &+(Bhaypp(k, l )*(tracerpp(k,lNC(l),nt) - tracerpp(k, l ,nt)) &
       & -Bhaypp(k,lSCH(l))*(tracerpp(k, l ,nt) - tracerpp(k,lSC(l),nt)))/dydy
       ! RHS = storage at n-1 plus flux divergences (+ optional diffusion)
       Bex(k,l) = hpp(k,l)*tracerpp(k,l,nt)/twodt1 &
       - (Bth2(k,l) - Bth2(k,lWCH(l))) / dx &
       - (Bth4(k,l) - Bth4(k,lSCH(l))) / dy &
       - (Bth3(k,l) - Bth3(k+1,l )) !+ hd * ihd
       IF (ihd>0) Bex(k,l) = Bex(k,l) + hd ! Changed 12/2010 SWA
     ENDDO;
   ENDDO
   ! ... Modify explicit term to account for flow boundary conditions
   CALL MODextracer4openbc (nt,Bstart,Bend,Bex,thrs)
   !.....Compute CPU time spent in subroutine.....
   etime = TIMER(0.0)
   t_exsal = t_exsal + (etime - btime)
END SUBROUTINE ExTracer
!***********************************************************************
SUBROUTINE ImTracer (nt,Bstart,Bend,Bex)
!***********************************************************************
!
!  Purpose: To solve for active scalar concentration.
!
!  NOTE(review): implicit vertical-diffusion step of the tracer solve.
!  For each water column owned by the calling thread a tridiagonal
!  system is assembled from the layer thicknesses and the vertical
!  diffusivity Dv, with the explicit RHS Bex produced by exTracer,
!  and solved by trid1. Point sources/sinks (Qpss/Rpss) are added to
!  the RHS before the solve.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
   ! ... Arguments
   INTEGER, INTENT (IN) :: nt
   INTEGER, INTENT(IN) :: Bstart,Bend
   REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bex
   !.....Local variables.....
   REAL :: twodt1, Osource, Qsource
   INTEGER :: i, j, k, l, k1s, kms, kt, nwlayers, inn, kk, noc,liter,innH
   REAL, DIMENSION (1:km1) :: hn
   REAL, DIMENSION (3,1:km1) :: aa
   REAL, DIMENSION (1:km) :: ds
   REAL, DIMENSION (1:ndz) :: sal1
   !.....Timing.....
   REAL, EXTERNAL :: TIMER
   REAL :: btime, etime
   btime = TIMER(0.0)
   ! ... Constants used in solution
   twodt1 = twodt*tz
   !.....Loop over interior sal-pts to solve for
   !     matrix from the active scalar equation.....
   DO liter = lhi(omp_get_thread_num ( )+1), lhf(omp_get_thread_num ( )+1)
     l = id_column(liter)
     ! ... 3D-(i,j) indexes for l - FJR - uncomment
     i = l2i(l); j = l2j(l);
     !.....Compute top & bottom layer numbers & No. of layers ....
     kms = kmz(l)
     k1s = k1z(l)
     nwlayers = (kms-k1s)+1
     ! ... Define layer thikness at time n - The corrections for
     !     surface and recently submerged cells are needed to
     !     keep mass conservation
     hn(k1s+1:kms) = h(k1s+1:kms,l)
     hn(k1s ) = twodt1*wp(k1s,l)+hpp(k1s,l)
     IF (hpp(k1s,l)<= ZERO) THEN
       hn(k1s+1) = hpp(k1s+1,l)
     ENDIF
     !.....Calculate active scalar for case of a single layer.....
     SELECT CASE (nwlayers)
     CASE (1)
       ! Single layer: no vertical diffusion, solve directly
       aa( 2,k1s) = hn(k1s)/twodt1
       ds( k1s) = Bex(k1s,l)
       tracer(k1s,l,nt) = ds(k1s )/aa(2,k1s)
     !.....Calculate active scalar for case of two or more layers.....
     CASE (2:)
       !.....Form coefficient matrix [aa]
       ! Define upper diagonal terms
       aa(3,k1s:kms-1) = -Dv(k1s+1:kms,l)/(hn(k1s:kms-1)+hn(k1s+1:kms))*2.
       aa(3,kms) = 0.0
       ! Define lower diagonal terms
       aa(1,k1s+1:kms) = -Dv(k1s+1:kms,l)/(hn(k1s:kms-1)+hn(k1s+1:kms))*2.
       aa(1,k1s) = 0.0
       ! Define center diagonal terms
       aa(2,k1s:kms) = hn(k1s:kms)/twodt1-aa(1,k1s:kms)-aa(3,k1s:kms)
       !.....form r.h.s. matrix [ds].....
       DO k = k1s, kms
         ds(k) = Bex(k,l)
       ENDDO
       ! ... Modify transport eqs. to accont for sources & sinks.
       IF ( iopssH(omp_get_thread_num ( )+1) > 0 ) THEN
         DO innH = 1, iopssH(omp_get_thread_num ( )+1)
           inn = ioph2iop(innH,omp_get_thread_num ( )+1)
           ! Only the source located in this (i,j) column applies
           IF ( j /= jpss(inn) .OR. i /=ipss(inn) ) CYCLE
           DO k = k1s, kms
             IF (ABS(Qpss(k,inn))<1.E-10) CYCLE
             Qsource = Qpss(k,inn)/(dx*dy) ! Inflow per unit area (m/s)
             Osource = Rpss(k,inn,nt) ! Concentration (kg/m3)
             ds(k)=ds(k)+Qsource*Osource ! kg/m2/s = conc.* thickness / time
           ENDDO
         ENDDO
         ! ... Include SOD when modelling oxygen plumes -
         IF (nt == ntr) ds(kms) = ds(kms) - k4sod
       ENDIF
       !.....Solve tridiagonal system for the
       !     vertical distribution of active scalar.....
       CALL trid1 (aa, ds, sal1, k1s, kms, km1, nwlayers)
       !.....Define scalars at new time step....
       tracer(k1s:kms ,l,nt) = sal1(1:nwlayers)
       ! Layers above the top wet layer inherit the surface value
       tracer(k1 :k1s-1,l,nt) = sal1(1 )
     END SELECT
   !.....End loop over scalar-pts.....
   END DO
   !.....Compute CPU time spent in subroutine.....
   etime = TIMER(0.0)
   t_salin = t_salin + (etime - btime)
END SUBROUTINE ImTracer
!***********************************************************************
SUBROUTINE ConfigThreads (depth)
!***********************************************************************
!
!  Purpose: to distribute the workload across the available threads,
!           indicating the number of columns assigned to each thread
!
!  NOTE(review): workload is balanced by total wet-cell count ("depth"),
!  not by column count. The recount block further below only spells out
!  threads 1..4 explicitly, so this routine silently mishandles
!  num_threads > 4 - confirm the run configuration. The print statements
!  look like leftover debug output.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
   !Arguments
   INTEGER, INTENT(INOUT) :: depth
   !Local Variables
   INTEGER :: p, counter, ind, aux, aux2, index_thread, id_thread
   INTEGER :: aux3, aux4, depth_aux, aux5, i, j
!   aux=int(im1/num_threads)
!   aux2=MOD(im1,num_threads)
!   lh(1:num_threads)=aux*jm1
!   lh_aux(1:num_threads)=aux
!   index_thread = 1
!   IF(aux2>0) THEN
!     do p=aux2, 1, -1
!       lh(index_thread)=lh(index_thread) + jm1
!       lh_aux(index_thread)=lh_aux(index_thread) + 1
!       index_thread=index_thread + 1
!     end do
!   END IF
   !to distribute the workload bases on the depth
   !solve the number of rows from east to west are assigned to each thread
   ! Total number of wet cells in the whole domain
   depth = 0
   do p=1,lm
     depth = depth + (kmz(p) - 1)
   end do
   ! Target number of wet cells per thread
   depth_aux = int(depth/num_threads)
   print *,"depth_aux:",depth_aux
   aux = int(lm/num_threads)
   aux2 = 0
   aux3 = 0
   aux5 = 0
   ph(1:num_threads) = 0
   lh(1:num_threads) = 0
   lh_aux(1:num_threads) = 0
   ! Assign consecutive i-rows to each thread until its wet-cell count
   ! (ph) reaches the per-thread target; lh counts columns, lh_aux rows.
   DO p=1,num_threads-1
     if(p .EQ. 1) then
       aux4 = 1
     else
       aux4 = aux4 + lh_aux(p-1)
     end if
     print *,"aux4:",aux4,p
     DO i=aux4,im1
       lh_aux(p)=lh_aux(p)+1
       DO j=1,jm1
         IF(mask2d(i,j)) THEN
           lh(p) = lh(p) + 1
           ph(p) = ph(p) + (kmz(ij2l(i,j)) - 1)
         END IF
       END DO
       print *,"ph:",ph(p),p
       IF(ph(p) >= depth_aux) THEN
         aux2 = aux2 + lh(p)
         aux3 = aux3 + lh_aux(p)
         aux5 = aux5 + ph(p)
         print *,"aux2:",aux2,"aux3:",aux3,"aux5:",aux5,p
         EXIT
       END IF
     END DO
   END DO
   ! Last thread takes whatever remains
   lh(num_threads) = lm - aux2
   lh_aux(num_threads) = im - aux3
   ph(num_threads) = depth - aux5
!   lh_aux(1) = 47
!   lh_aux(2) = 16
!   lh_aux(3) = 17
!   lh_aux(4) = 29
!   lh_aux(1) = 51
!   lh_aux(2) = 12
!   lh_aux(3) = 22
!   lh_aux(4) = 24
   ! Recount lh/ph from the final row split.
   ! NOTE(review): only threads 1..4 are handled explicitly here.
   lh = 0
   ph = 0
   IF(num_threads > 1) THEN
     DO i=1,lh_aux(1)
       DO j=1,jm1
         IF(mask2d(i,j)) THEN
           lh(1) = lh(1) + 1
           ph(1) = ph(1) + (kmz(ij2l(i,j)) - 1)
         END IF
       END DO
     END DO
     DO i=lh_aux(1)+1,lh_aux(1)+lh_aux(2)
       DO j=1,jm1
         IF(mask2d(i,j)) THEN
           lh(2) = lh(2) + 1
           ph(2) = ph(2) + (kmz(ij2l(i,j)) - 1)
         END IF
       END DO
     END DO
     IF(num_threads > 2) THEN
       DO i=lh_aux(1)+lh_aux(2)+1,lh_aux(1)+lh_aux(2)+lh_aux(3)
         DO j=1,jm1
           IF(mask2d(i,j)) THEN
             lh(3) = lh(3) + 1
             ph(3) = ph(3) + (kmz(ij2l(i,j)) - 1)
           END IF
         END DO
       END DO
       DO i=lh_aux(1)+lh_aux(2)+lh_aux(3)+1,lh_aux(1)+lh_aux(2)+lh_aux(3)+lh_aux(4)
         DO j=1,jm1
           IF(mask2d(i,j)) THEN
             lh(4) = lh(4) + 1
             ph(4) = ph(4) + (kmz(ij2l(i,j)) - 1)
           END IF
         END DO
       END DO
     END IF
   END IF
   !to create the array id_column where stores the index of all columns
   !assigned to each thread even those belonging to the border east or west
   ! lhi/lhf delimit each thread's own columns; lhiW/lhfW and lhiE/lhfE
   ! delimit the west/east halo columns shared with the neighbour thread.
   counter = 0
   ind = 1
   lhiE = 0
   lhfE = 0
   lhiW = 0
   lhfW = 0
   do p=1,num_threads
     if(p > 1)THEN
       lhiW(p)=counter+1
       do j=1,jm1
         if(mask2d(ind-1,j) .AND. mask2d(ind,j))THEN
           counter=counter+1
           id_column(counter)= ij2l(ind-1,j)
         end if
       end do
       lhfW(p)=counter
     end if
     lhi(p)=counter+1
     do i=ind,lh_aux(p)+ind-1
       do j=1,jm1
         IF(mask2d(i,j)) THEN
           counter = counter + 1
           id_column(counter) = ij2l(i,j)
         END IF
       end do
     end do
     ind = lh_aux(p) + ind
     lhf(p)=counter
     if(p < num_threads)THEN
       lhiE(p)=counter+1
       do j=1,jm1
         if(mask2d(ind,j) .AND. mask2d(ind-1,j))THEN
           counter=counter+1
           id_column(counter)= ij2l(ind,j)
         end if
       end do
       lhfE(p)=counter
     end if
   end do
   !As above but it must also satisfy that the east column is not dry
   counter = 0
   ind = 1
   lhiECE = 0
   lhfECE = 0
   lhiWCE = 0
   lhfWCE = 0
   do p=1,num_threads
     if(p > 1)THEN
       lhiWCE(p)=counter+1
       do j=1,jm1
         if(mask2d(ind-1,j) .AND. mask2d(ind,j))THEN
           counter=counter+1
           id_columnCE(counter)= ij2l(ind-1,j)
         end if
       end do
       lhfWCE(p)=counter
     end if
     lhiCE(p)=counter+1
     do i=ind,lh_aux(p)+ind-1
       do j=1,jm1
         IF(mask2d(i,j) .AND. mask2d(i+1,j)) THEN
           counter = counter + 1
           id_columnCE(counter) = ij2l(i,j)
         END IF
       end do
     end do
     ind = lh_aux(p) + ind
     lhfCE(p)=counter
     if(p < num_threads)THEN
       lhiECE(p)=counter+1
       do j=1,jm1
         if(mask2d(ind,j) .AND. mask2d(ind-1,j) .AND. mask2d(ind+1,j))THEN
           counter=counter+1
           id_columnCE(counter)= ij2l(ind,j)
         end if
       end do
       lhfECE(p)=counter
     end if
   end do
   !As above but it must also satisfy that the north column is not dry
   counter = 0
   ind = 1
   lhiECN = 0
   lhfECN = 0
   lhiWCN = 0
   lhfWCN = 0
   do p=1,num_threads
     if(p > 1)THEN
       lhiWCN(p)=counter+1
       do j=1,jm1
         if(mask2d(ind-1,j) .AND. mask2d(ind,j) .AND. mask2d(ind-1,j+1))THEN
           counter=counter+1
           id_columnCN(counter)= ij2l(ind-1,j)
         end if
       end do
       lhfWCN(p)=counter
     end if
     lhiCN(p)=counter+1
     do i=ind,lh_aux(p)+ind-1
       do j=1,jm1
         IF(mask2d(i,j) .AND. mask2d(i,j+1)) THEN
           counter = counter + 1
           id_columnCN(counter) = ij2l(i,j)
         END IF
       end do
     end do
     ind = lh_aux(p) + ind
     lhfCN(p)=counter
     if(p < num_threads)THEN
       lhiECN(p)=counter+1
       do j=1,jm1
         if(mask2d(ind,j) .AND. mask2d(ind-1,j) .AND. mask2d(ind,j+1))THEN
           counter=counter+1
           id_columnCN(counter)= ij2l(ind,j)
         end if
       end do
       lhfECN(p)=counter
     end if
   end do
END SUBROUTINE ConfigThreads
!***********************************************************************
SUBROUTINE BorderThreads(Bstart,Bend,ide_thread,mincol)
!***********************************************************************
!
!  Purpose: to set the private variables used by the threads,
!           dynamically allocates memory and initializes these variables
!
!  NOTE(review): computes the [Bstart,Bend] column-index window of
!  thread ide_thread, widened to include halo columns when the thread
!  has a west (lhiW) or east (lhiE) border. The print at the end looks
!  like leftover debug output.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
   ! Arguments
   INTEGER, INTENT(INOUT) :: Bstart,Bend,ide_thread,mincol
   ! Local Variables
   INTEGER :: i,j
   Bstart = 0; Bend = 0;
   ! Set the first column of each thread
   if(lhiW(ide_thread) .EQ. 0) THEN
     Bstart = id_column(lhi(ide_thread))
   ELSE
     ! With a west halo, take the smallest l-index in the halo row
     i = l2i(id_column(lhiW(ide_thread))) - 1
     mincol = id_column(lhiW(ide_thread))
     do j=1,jm1
       if(ij2l(i,j) < mincol) mincol = ij2l(i,j)
     end do
     Bstart = mincol
   end if
   ! Set the last column of each thread
   if(lhiE(ide_thread) .EQ. 0) THEN
     Bend = id_column(lhf(ide_thread))
   ELSE
     Bend = id_column(lhfE(ide_thread))
   end if
   print *,"Bstart:",Bstart,"Bend:",Bend,"h:",omp_get_thread_num()
END SUBROUTINE BorderThreads
!***********************************************************************
SUBROUTINE InitThreads(t_exmom2,t_matmom2,t_matcon2,Bhaxpp,Bhaypp,Bth,Bth1,Bstart, &
&          Bend,ide_thread,lNCH,lSCH,lECH,lWCH,Bex, Bth2,Beagx,Bearx,Bagx,Barx,Beagy, &
&          Beary,Bagy,Bary,Bsx,Bsy,Bdd,Bqq,Brr,Bth3,Bth4,uhp2,uhp3,mincol,ShearProduction, &
&          BuoyancyProduction, Dissipation, TKinE,heatSourceB,uairB,vairB,cdwB,eta, &
&          QswFrB,Qsw2dB,Qlw2dB,Ta2dB,RH2dB,Cc2dB,uair2dB,vair2dB,Qsw,Qn,Qlw,Ta,Pa,RH,Cc)
!***********************************************************************
!
!  Purpose: to set the private variables used by the threads,
!           dynamically allocates memory and initializes these variables
!
!  NOTE(review): clones the global neighbour maps (lWC/lNC/lSC/lEC) into
!  thread-private copies, redirecting out-of-block neighbours (lm1) to
!  the local halo slot Bend+1, then zero-initializes the thread-private
!  work arrays and copies in the surface-forcing fields.
!
!  Revisions:
!    Date            Programmer        Description of revision
!    ----            ----------        -----------------------
!
!-----------------------------------------------------------------------
   ! Arguments
   REAL, INTENT(INOUT) :: t_exmom2,t_matmom2,t_matcon2
   INTEGER, INTENT(IN) :: Bstart,Bend,ide_thread,mincol
   REAL(real_G1), INTENT(INOUT) :: ShearProduction,BuoyancyProduction, Dissipation, TKinE
   REAL, DIMENSION (1:km1,1:lm1), INTENT(INOUT) :: uhp3,uhp2
   REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bhaxpp, Bhaypp, Bth, Bth1, Bex, heatSourceB,QswFrB
   REAL, DIMENSION (1:km1,Bstart:Bend+1), INTENT(INOUT) :: Bth2, Bagx, Barx,Bagy,Bary,Bth3,Bth4
   REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Beagx, Bearx,Beagy,Beary,Bsx,Bsy
   REAL, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: Bdd, Bqq,Brr,uairB,vairB,cdwB
   INTEGER, DIMENSION (Bstart:Bend+1), INTENT(INOUT) :: lSCH,lNCH,lWCH,lECH
   REAL, INTENT(INOUT) :: Qsw,Qn,Qlw,eta,Ta,Pa,RH,Cc
   REAL, DIMENSION (nmetstat), INTENT(INOUT) :: Qsw2dB,Qlw2dB,Ta2dB,RH2dB,Cc2dB,uair2dB,vair2dB
   ! Local Variables
   INTEGER :: i,j
   ! Make a copy of lWC, lSC, lEC, lNC to each thread
   ! (lm1 is the global "outside" column; map it to the local Bend+1 slot)
   do i=Bstart,Bend
     if(lWC(i) .EQ. lm1) THEN
       lWCH(i) = Bend + 1
     ELSE
       lWCH(i) = lWC(i)
     end if
     if(lNC(i) .EQ. lm1) THEN
       lNCH(i) = Bend + 1
     ELSE
       lNCH(i) = lNC(i)
     end if
     if(lSC(i) .EQ. lm1) THEN
       lSCH(i) = Bend + 1
     ELSE
       lSCH(i) = lSC(i)
     end if
     if(lEC(i) .EQ. lm1) THEN
       lECH(i) = Bend + 1
     ELSE
       lECH(i) = lEC(i)
     end if
   end do
   ! Initialize variables
   t_exmom2 = 0; t_matmom2 = 0; t_matcon2 = 0;
   Bth = 0; Bth1 = 0; Bex = 0; Bth2 = 0; Bth3 = 0; Bth4 = 0;
   Bhaxpp = 0; Bhaypp = 0;
   Beagx = 0; Bearx = 0; Bagx(:,Bend+1) = 0; Barx(:,Bend+1) = 0;
   Beagy = 0; Beary = 0; Bagy(:,Bend+1) = 0; Bary(:,Bend+1) = 0;
   ! NOTE(review): Bsx is zeroed over its full extent while Bsy/Bdd/Bqq/Brr
   ! only reset the halo element Bend+1 - confirm the asymmetry is intended.
   Bsx = 0.0; Bsy(Bend+1) = 0; Bdd(Bend+1) = 0; Bqq(Bend+1) = 0; Brr(Bend+1) = 1.0;
   uhp2 = 0; uhp3 = 0;
   ShearProduction = 0; BuoyancyProduction= 0; Dissipation = 0; TKinE = 0;
   ! Copy surface forcing into the thread-private halo-padded arrays
   uairB(Bstart:Bend) = uair(Bstart:Bend); vairB(Bstart:Bend) = vair(Bstart:Bend);
   cdwB(Bstart:Bend) = cdw(Bstart:Bend); heatSourceB(:,Bstart:Bend) = heatSource(:,Bstart:Bend);
   uairB(Bend+1) = 0; vairB(Bend+1) = 0; cdwB(Bend+1) = 0; heatSourceB(:,Bend+1) = 0;
   QswFrB(:,Bstart:Bend) = QswFr(:,Bstart:Bend);
   Qsw2dB = Qsw2d; Qlw2dB = Qlw2d; Ta2dB = Ta2d; RH2dB = RH2d; uair2dB = uair2d; vair2dB = vair2d;
   ! Cc2dB = Cc2d;
END SUBROUTINE InitThreads
END MODULE si3d_procedures
|
{"hexsha": "da6bb31a4bb98eac86075d5fd6ae7286c14d8c59", "size": 162079, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "PSi3DL/si3d_procedures.f90", "max_stars_repo_name": "savalbuena/2021WR030666_3D_Flow_Structures_During_Upwelling_Events_in_Lakes_of_Moderate_Size", "max_stars_repo_head_hexsha": "3f066a270da84b8ba4c8159d041e9b65fabcea43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PSi3DL/si3d_procedures.f90", "max_issues_repo_name": "savalbuena/2021WR030666_3D_Flow_Structures_During_Upwelling_Events_in_Lakes_of_Moderate_Size", "max_issues_repo_head_hexsha": "3f066a270da84b8ba4c8159d041e9b65fabcea43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PSi3DL/si3d_procedures.f90", "max_forks_repo_name": "savalbuena/2021WR030666_3D_Flow_Structures_During_Upwelling_Events_in_Lakes_of_Moderate_Size", "max_forks_repo_head_hexsha": "3f066a270da84b8ba4c8159d041e9b65fabcea43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.246974359, "max_line_length": 132, "alphanum_fraction": 0.4598313168, "num_tokens": 54657}
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 09_utils.ipynb (unless otherwise specified).
# Public API of this autogenerated module (exported via `from nangs.utils import *`).
__all__ = ['checkIsListOfStr', 'checkUnique', 'checkNoRepeated', 'checkValidArray', 'checkValidDict', 'checkDictArray']
# Cell
def checkIsListOfStr(l):
    "Make sure that l is a list containing only strings"
    # The container itself must be a list.
    if not isinstance(l, list):
        raise Exception(str(l) + ' must be a list of strings')
    # ... and every element must be a string.
    for item in l:
        if isinstance(item, str):
            continue
        raise Exception(str(item) + ' must be a string')
# Cell
def checkUnique(l):
    "Make sure that l does not contain repeated elements"
    # Compare each element only against the items that follow it: the
    # original all-pairs scan visited every (i, j) twice (and i == j),
    # doing double the work for the same first-duplicate report.
    # A set-based O(n) check is avoided on purpose: elements are not
    # guaranteed hashable here.
    for i, item1 in enumerate(l):
        for item2 in l[i + 1:]:
            if item1 == item2:
                raise Exception('Repeated item ' + str(item1))
# Cell
def checkNoRepeated(l1, l2):
    "Make sure there are no repeated elements in both lists"
    # Report the first element of l1 that also appears in l2.
    for shared in (item for item in l1 if item in l2):
        raise Exception('Repeated item ' + str(shared))
# Cell
import numpy as np
def checkValidArray(a):
    "Make sure an array is a numpy array with only 1 dimension"
    # Type check first, then dimensionality.
    if isinstance(a, np.ndarray):
        if a.ndim != 1:
            raise Exception('Arrays must have only one dimension !')
    else:
        raise Exception('Values must be numpy arrays !')
# Cell
def checkValidDict(d):
    "Make sure d is a dict with valid arrays (numpy arrays with only 1 dimension)"
    # The container must be a dict ...
    if not isinstance(d, dict):
        raise Exception('Values must be a dictionary !')
    # ... and every value a valid 1-d numpy array.
    for value in d.values():
        checkValidArray(value)
# Cell
def checkDictArray(d, a):
    "Make sure that every key in d is present in a"
    # Iterating a dict yields its keys, so `.keys()` is redundant;
    # `k not in a` is the idiomatic membership-negation form.
    for k in d:
        if k not in a:
            raise Exception(k + ' is not present in ' + str(a))
|
{"hexsha": "3d472b99ac97aa51c1e6fecdc46fac79189f9876", "size": 1695, "ext": "py", "lang": "Python", "max_stars_repo_path": "nangs/utils.py", "max_stars_repo_name": "smatkovi/nangs", "max_stars_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-26T17:44:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-05T10:27:44.000Z", "max_issues_repo_path": "nangs/utils.py", "max_issues_repo_name": "smatkovi/nangs", "max_issues_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nangs/utils.py", "max_forks_repo_name": "smatkovi/nangs", "max_forks_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5961538462, "max_line_length": 119, "alphanum_fraction": 0.6377581121, "include": true, "reason": "import numpy", "num_tokens": 432}
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from sg2im.utils import timeit
"""
Functions for performing differentiable bilinear cropping of images, for use in
the object discriminator
"""
def crop_bbox_batch(feats, bbox, bbox_to_feats, HH, WW=None, backend='cudnn'):
  """
  Crop one box per entry of bbox out of a batch of feature maps.

  Inputs:
  - feats: FloatTensor of shape (N, C, H, W)
  - bbox: FloatTensor of shape (B, 4) giving bounding box coordinates
  - bbox_to_feats: LongTensor of shape (B,) mapping boxes to feature maps;
    each element is in the range [0, N) and bbox_to_feats[b] = i means that
    bbox[b] will be cropped from feats[i].
  - HH, WW: Size of the output crops (WW defaults to HH)
  - backend: 'cudnn' delegates to the grid_sample-based implementation

  Returns:
  - crops: FloatTensor of shape (B, C, HH, WW) where crops[i] uses bbox[i] to
    crop from feats[bbox_to_feats[i]].
  """
  if backend == 'cudnn':
    return crop_bbox_batch_cudnn(feats, bbox, bbox_to_feats, HH, WW)
  N, C, H, W = feats.size()
  B = bbox.size(0)
  if WW is None: WW = HH
  dtype, device = feats.dtype, feats.device
  crops = torch.zeros(B, C, HH, WW, dtype=dtype, device=device)
  for i in range(N):
    # Indices of the boxes assigned to feature map i.
    idx = (bbox_to_feats.data == i).nonzero()
    # BUGFIX: nonzero() returns a 2-D (num_matches, 1) tensor, so its .dim()
    # is 2 even when there are no matches and the old `idx.dim() == 0` guard
    # never fired; test the element count so empty groups are truly skipped.
    if idx.numel() == 0:
      continue
    idx = idx.view(-1)
    n = idx.size(0)
    # Expand feats[i] so a single crop_bbox call handles all n of its boxes.
    cur_feats = feats[i].view(1, C, H, W).expand(n, C, H, W).contiguous()
    cur_bbox = bbox[idx]
    cur_crops = crop_bbox(cur_feats, cur_bbox, HH, WW)
    crops[idx] = cur_crops
  return crops
def _invperm(p):
N = p.size(0)
eye = torch.arange(0, N).type_as(p)
pp = (eye[:, None] == p).nonzero()[:, 1]
return pp
def crop_bbox_batch_cudnn(feats, bbox, bbox_to_feats, HH, WW=None):
  """
  grid_sample-based implementation of crop_bbox_batch; see that function for
  the meaning of the arguments and the return value.

  Groups the boxes by the feature map they crop from so that one batched
  crop_bbox call serves all boxes, then restores the caller's box order.
  """
  N, C, H, W = feats.size()
  B = bbox.size(0)
  if WW is None: WW = HH
  feats_flat, bbox_flat, all_idx = [], [], []
  for i in range(N):
    # Indices of the boxes assigned to feature map i.
    idx = (bbox_to_feats.data == i).nonzero()
    # BUGFIX: nonzero() returns a 2-D (num_matches, 1) tensor, so its .dim()
    # is 2 even with zero matches and the old `idx.dim() == 0` guard never
    # fired; test the element count so empty groups are truly skipped.
    if idx.numel() == 0:
      continue
    idx = idx.view(-1)
    n = idx.size(0)
    cur_feats = feats[i].view(1, C, H, W).expand(n, C, H, W).contiguous()
    cur_bbox = bbox[idx]
    feats_flat.append(cur_feats)
    bbox_flat.append(cur_bbox)
    all_idx.append(idx)
  feats_flat = torch.cat(feats_flat, dim=0)
  bbox_flat = torch.cat(bbox_flat, dim=0)
  crops = crop_bbox(feats_flat, bbox_flat, HH, WW, backend='cudnn')

  # If the crops were sequential (all_idx is identity permutation) then we can
  # simply return them; otherwise we need to permute crops by the inverse
  # permutation from all_idx.
  all_idx = torch.cat(all_idx, dim=0)
  eye = torch.arange(0, B).type_as(all_idx)
  if (all_idx == eye).all():
    return crops
  return crops[_invperm(all_idx)]
def crop_bbox(feats, bbox, HH, WW=None, backend='cudnn'):
  """
  Take differentiable crops of feats specified by bbox.

  Inputs:
  - feats: Tensor of shape (N, C, H, W)
  - bbox: Bounding box coordinates of shape (N, 4) in the format
    [x0, y0, x1, y1] in the [0, 1] coordinate space.
  - HH, WW: Size of the output crops (WW defaults to HH).
  - backend: 'cudnn' uses F.grid_sample; 'jj' uses the hand-rolled
    bilinear_sample below.

  Returns:
  - crops: Tensor of shape (N, C, HH, WW) where crops[i] is the portion of
    feats[i] specified by bbox[i], reshaped to (HH, WW) using bilinear sampling.

  NOTE(review): a backend value other than 'jj' or 'cudnn' falls off the end
  and implicitly returns None.
  """
  N = feats.size(0)
  assert bbox.size(0) == N
  assert bbox.size(1) == 4
  if WW is None: WW = HH
  if backend == 'cudnn':
    # Change box from [0, 1] to [-1, 1] coordinate system
    bbox = 2 * bbox - 1
  x0, y0 = bbox[:, 0], bbox[:, 1]
  x1, y1 = bbox[:, 2], bbox[:, 3]
  # Per-box sampling grids: X varies along the width axis, Y along height.
  X = tensor_linspace(x0, x1, steps=WW).view(N, 1, WW).expand(N, HH, WW)
  Y = tensor_linspace(y0, y1, steps=HH).view(N, HH, 1).expand(N, HH, WW)
  if backend == 'jj':
    return bilinear_sample(feats, X, Y)
  elif backend == 'cudnn':
    grid = torch.stack([X, Y], dim=3)
    # NOTE(review): recent PyTorch versions changed grid_sample's default to
    # align_corners=False (with a warning); if the original align_corners=True
    # behavior is required it must be requested explicitly - verify against
    # the torch version in use.
    return F.grid_sample(feats, grid)
def uncrop_bbox(feats, bbox, H, W=None, fill_value=0):
  """
  Inverse operation to crop_bbox; construct output images where the feature maps
  from feats have been reshaped and placed into the positions specified by bbox.

  Inputs:
  - feats: Tensor of shape (N, C, HH, WW)
  - bbox: Bounding box coordinates of shape (N, 4) in the format
    [x0, y0, x1, y1] in the [0, 1] coordinate space.
  - H, W: Size of output (W defaults to H).
  - fill_value: Portions of the output image that are outside the bounding box
    will be filled with this value.

  Returns:
  - out: Tensor of shape (N, C, H, W) where the portion of out[i] given by
    bbox[i] contains feats[i], reshaped using bilinear sampling.
  """
  N, C = feats.size(0), feats.size(1)
  assert bbox.size(0) == N
  assert bbox.size(1) == 4
  # BUGFIX: the original read `if W is None: H = W`, which clobbered H with
  # None whenever W was omitted; defaulting W to H (square output) is the
  # intended behavior, mirroring crop_bbox's `if WW is None: WW = HH`.
  if W is None: W = H
  x0, y0 = bbox[:, 0], bbox[:, 1]
  x1, y1 = bbox[:, 2], bbox[:, 3]
  ww = x1 - x0
  hh = y1 - y0

  x0 = x0.contiguous().view(N, 1).expand(N, H)
  x1 = x1.contiguous().view(N, 1).expand(N, H)
  ww = ww.view(N, 1).expand(N, H)

  y0 = y0.contiguous().view(N, 1).expand(N, W)
  y1 = y1.contiguous().view(N, 1).expand(N, W)
  hh = hh.view(N, 1).expand(N, W)

  # Map output pixel coordinates into each box's local [0, 1] frame.
  X = torch.linspace(0, 1, steps=W).view(1, W).expand(N, W).to(feats)
  Y = torch.linspace(0, 1, steps=H).view(1, H).expand(N, H).to(feats)

  X = (X - x0) / ww
  Y = (Y - y0) / hh

  # For ByteTensors, (x + y).clamp(max=1) gives logical_or
  X_out_mask = ((X < 0) + (X > 1)).view(N, 1, W).expand(N, H, W)
  Y_out_mask = ((Y < 0) + (Y > 1)).view(N, H, 1).expand(N, H, W)
  out_mask = (X_out_mask + Y_out_mask).clamp(max=1)
  out_mask = out_mask.view(N, 1, H, W).expand(N, C, H, W)

  X = X.view(N, 1, W).expand(N, H, W)
  Y = Y.view(N, H, 1).expand(N, H, W)

  out = bilinear_sample(feats, X, Y)
  # Everything outside the box gets the fill value.
  out[out_mask] = fill_value
  return out
def bilinear_sample(feats, X, Y):
    """
    Perform bilinear sampling on the features in feats using the sampling grid
    given by X and Y.

    Inputs:
    - feats: Tensor holding input feature map, of shape (N, C, H, W)
    - X, Y: Tensors holding x and y coordinates of the sampling grids;
      both have shape (N, HH, WW) and contain elements in the range [0, 1].

    Returns:
    - out: Tensor of shape (N, C, HH, WW) where out[i] is computed by
      sampling from feats[i] using the sampling grid (X[i], Y[i]).
    """
    N, C, H, W = feats.size()
    assert X.size() == Y.size()
    assert X.size(0) == N
    _, HH, WW = X.size()

    # Scale normalized coordinates up to pixel space.
    X = X.mul(W)
    Y = Y.mul(H)

    # Integer coordinates of the four neighboring samples, clamped in-range.
    x0 = X.floor().clamp(min=0, max=W - 1)
    y0 = Y.floor().clamp(min=0, max=H - 1)
    x1 = (x0 + 1).clamp(min=0, max=W - 1)
    y1 = (y0 + 1).clamp(min=0, max=H - 1)

    # gather() indexes a single dimension at a time, so collapse (H, W) into
    # H * W and address each corner through a linear index.
    flat = feats.view(N, C, H * W)

    def corner(yc, xc):
        idx = (W * yc + xc).view(N, 1, HH * WW).expand(N, C, HH * WW)
        return flat.gather(2, idx.long()).view(N, C, HH, WW)

    v00 = corner(y0, x0)
    v10 = corner(y1, x0)
    v01 = corner(y0, x1)
    v11 = corner(y1, x1)

    # Bilinear interpolation weight for each corner sample.
    def weight(wx, wy):
        return (wx * wy).view(N, 1, HH, WW).expand(N, C, HH, WW)

    w00 = weight(x1 - X, y1 - Y)
    w10 = weight(x1 - X, Y - y0)
    w01 = weight(X - x0, y1 - Y)
    w11 = weight(X - x0, Y - y0)

    # Blend the four corner samples with their weights.
    return w00 * v00 + w10 * v10 + w01 * v01 + w11 * v11
def tensor_linspace(start, end, steps=10):
    """
    Vectorized version of torch.linspace.

    Inputs:
    - start: Tensor of any shape
    - end: Tensor of the same shape as start
    - steps: Integer

    Returns:
    - out: Tensor of shape start.size() + (steps,), such that
      out.select(-1, 0) == start, out.select(-1, -1) == end, and the other
      elements of out linearly interpolate between start and end.
    """
    assert start.size() == end.size()
    out_size = start.size() + (steps,)
    w_size = (1,) * start.dim() + (steps,)

    # Weights along the new trailing axis: start_w runs 1 -> 0 while end_w
    # runs 0 -> 1, so each slice is a convex combination of start and end.
    start_w = torch.linspace(1, 0, steps=steps).to(start).view(w_size).expand(out_size)
    end_w = torch.linspace(0, 1, steps=steps).to(start).view(w_size).expand(out_size)

    lo = start.contiguous().view(start.size() + (1,)).expand(out_size)
    hi = end.contiguous().view(end.size() + (1,)).expand(out_size)
    return start_w * lo + end_w * hi
if __name__ == '__main__':
    import numpy as np
    # NOTE(review): scipy.misc.imread/imsave/imresize were removed from
    # modern SciPy (>= 1.3); this demo needs an old SciPy with PIL installed,
    # or a port to imageio/PIL -- confirm the pinned environment.
    from scipy.misc import imread, imsave, imresize

    # Smoke test: crop boxes out of two images and write the crops to disk.
    cat = imresize(imread('cat.jpg'), (256, 256))
    dog = imresize(imread('dog.jpg'), (256, 256))
    # Stack the two HWC uint8 images into one NCHW float batch.
    feats = torch.stack([
        torch.from_numpy(cat.transpose(2, 0, 1).astype(np.float32)),
        torch.from_numpy(dog.transpose(2, 0, 1).astype(np.float32))],
        dim=0)
    boxes = torch.FloatTensor([
        [0, 0, 1, 1],
        [0.25, 0.25, 0.75, 0.75],
        [0, 0, 0.5, 0.5],
    ])
    # box_to_feats[i] selects which image of the batch box i is cropped from.
    box_to_feats = torch.LongTensor([1, 0, 1]).cuda()
    feats, boxes = feats.cuda(), boxes.cuda()
    # crop_bbox_batch_cudnn is defined elsewhere in this file; requires CUDA.
    crops = crop_bbox_batch_cudnn(feats, boxes, box_to_feats, 128)
    for i in range(crops.size(0)):
        crop_np = crops.data[i].cpu().numpy().transpose(1, 2, 0).astype(np.uint8)
        imsave('out%d.png' % i, crop_np)
|
{"hexsha": "8a8100390c92b3d7acd7f0594e0ceb0746314459", "size": 10375, "ext": "py", "lang": "Python", "max_stars_repo_path": "sg2im/bilinear.py", "max_stars_repo_name": "peter-rich/Conditional-Imitation-bedroom", "max_stars_repo_head_hexsha": "f3ee95f64d4e27c67cbcbadd08754f7bcdd0699e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1271, "max_stars_repo_stars_event_min_datetime": "2018-06-29T16:34:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:25:23.000Z", "max_issues_repo_path": "sg2im/bilinear.py", "max_issues_repo_name": "peter-rich/Conditional-Imitation-bedroom", "max_issues_repo_head_hexsha": "f3ee95f64d4e27c67cbcbadd08754f7bcdd0699e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2018-06-30T21:20:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:22:06.000Z", "max_forks_repo_path": "sg2im/bilinear.py", "max_forks_repo_name": "Spichon/sg2im", "max_forks_repo_head_hexsha": "44df7f67d8c8ee5d05024ea1b7e6a9103a0b69d9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 245, "max_forks_repo_forks_event_min_datetime": "2018-06-29T22:43:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T23:38:39.000Z", "avg_line_length": 34.0163934426, "max_line_length": 83, "alphanum_fraction": 0.6342168675, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3469}
|
import numpy as np
from scipy.interpolate import interp1d
from scipy import integrate
from scipy.stats import norm
from sphericosmo.cosmocontainer import *
from sphericosmo.sphericalpower import *
def SetupPiTau(piOption,zLimits,cosmoCont):
    """Build the selection function Pi(tau) on the conformal-time grid of
    cosmoCont for sources inside the redshift window zLimits.

    piOption selects the assumed redshift distribution:
      1 - uniform dN/dz inside zLimits
      2 - profile proportional to (tau_last - tau)^2, normalized over tau
      3 - pre-set dN/dz via dN_dzNorm (see NOTE in that branch)
      4 - Gaussian dN/dz with the window interpreted as a +-3 sigma interval

    Returns Pi_tau; raises ValueError for any other piOption.
    """
    zCurve=cosmoCont.zCurve
    tauCurve=cosmoCont.taus
    withinRange=getIndicesInRedshiftRange(zLimits, cosmoCont)
    if piOption==1:
        ###Option 1
        #Here we have a uniform dN/dz
        dzCurve=zCurve[1:]-zCurve[:-1]
        dzCentral=np.zeros(len(zCurve))
        # Central grid spacing: interior points get the mean of the two
        # adjacent spacings; the endpoints are left at 0.
        for j in range(len(zCurve)):
            if j-1>=0 and j<len(dzCurve):
                dzCentral[j]=(dzCurve[j-1]+dzCurve[j])/2.0
        zRange=np.sum(dzCentral[withinRange])
        Pi_z=np.zeros(len(tauCurve))
        for j in withinRange:
            Pi_z[j]=1.0/zRange
        # Change of variables dN/dz -> dN/dtau with the Jacobian dz/dtau.
        Pi_tau=Pi_z*np.gradient(zCurve,edge_order=2)/np.gradient(tauCurve,edge_order=2)
    elif piOption==2:
        ###Option 2
        #Here Pi_tau is used in place of r^2 n_C, but normalized to be integrated over tau
        Pi_tau=np.zeros(len(tauCurve))
        Pi_tau[withinRange]=1.0
        Pi_tau/=integrate.trapz((tauCurve[-1]-tauCurve)**2*Pi_tau,tauCurve)
        Pi_tau*=(tauCurve[-1]-tauCurve)**2
    elif piOption==3:
        ###Option 3
        #Here we use a pre-set dN/dz
        # NOTE(review): zCutIndex is not defined in this function's scope and
        # dN_dzNorm must come from the star imports above -- confirm this
        # branch actually works before relying on it.
        Pi_tau=dN_dzNorm(zCurve,zCutIndex)*np.gradient(zCurve,edge_order=2)/np.gradient(tauCurve,edge_order=2)
    elif piOption==4:
        ###Option 4
        #Here we use a Gaussian dN/dz
        # Window midpoint is the mean; half-width corresponds to 3 sigma.
        mu=(zLimits[1]+zLimits[0])/2.0
        sigma=(zLimits[1]-zLimits[0])/2.0/3.0
        Pi_z=np.zeros(len(zCurve))
        Pi_z[withinRange]=norm.pdf(zCurve[withinRange], loc=mu, scale=sigma)
        Pi_z/=integrate.trapz(Pi_z[withinRange],zCurve[withinRange])
        Pi_tau=Pi_z*np.gradient(zCurve,edge_order=2)/np.gradient(tauCurve,edge_order=2)
    else:
        raise ValueError('Invalid piOption value provided. Valid options: 1,2,3,4')
    return Pi_tau
def SetupPiTauFromZHist(zBinLimits, countHist, cosmoCont):
    """Build Pi(tau) from a histogram of source counts in redshift bins.

    zBinLimits holds the bin edges and countHist the per-bin counts; the
    histogram is converted to a step-function dN/dz, normalized over the
    covered redshift range, and transformed to the tau grid of cosmoCont.
    """
    z_grid = cosmoCont.zCurve
    tau_grid = cosmoCont.taus
    bin_widths = zBinLimits[1:] - zBinLimits[:-1]
    # A cubic interpolation of the bin centers was considered here; the
    # step ('previous') interpolation of the left edges is used instead.
    dN_dz = interp1d(zBinLimits, np.concatenate([countHist / bin_widths, [0.0]]),
                     kind='previous', bounds_error=False, fill_value=0.0)
    in_range = getIndicesInRedshiftRange([zBinLimits[0], zBinLimits[-1]], cosmoCont)
    pi_z = np.zeros(len(z_grid))
    pi_z[in_range] = dN_dz(z_grid[in_range])
    # Normalize dN/dz to unit integral over the covered range, then convert
    # to dN/dtau with the Jacobian dz/dtau.
    pi_z /= integrate.trapz(pi_z[in_range], z_grid[in_range])
    return pi_z * np.gradient(z_grid, edge_order=2) / np.gradient(tau_grid, edge_order=2)
def SetupPiTauForBinnedSN(piOption, zLimitList, cosmoCont):
    """Build a Pi(tau) normalized to unit integral (over tau) within each
    redshift bin of zLimitList.

    Only piOption == 1 (uniform dN/dz per bin) is implemented; any other
    value raises ValueError.
    """
    z_grid = cosmoCont.zCurve
    tau_grid = cosmoCont.taus
    if piOption != 1:
        raise ValueError('Invalid piOption value provided. Valid options: 1')
    ###Option 1: uniform dN/dz inside each bin
    pi_z = np.zeros(len(tau_grid))
    for limits in zLimitList:
        bin_idx = getIndicesInRedshiftRange(limits, cosmoCont)
        # Flat profile on the bin interior (the two endpoint samples stay 0),
        # then normalize this bin to unit integral over z.
        pi_z[bin_idx[1:-1]] = 1.0
        pi_z[bin_idx] /= integrate.trapz(pi_z[bin_idx], z_grid[bin_idx])
    # Convert dN/dz to dN/dtau with the Jacobian dz/dtau.
    return pi_z * np.gradient(z_grid, edge_order=2) / np.gradient(tau_grid, edge_order=2)
|
{"hexsha": "f675b53dc66c17ff85f68bfc46c03c6a75f3982c", "size": 3747, "ext": "py", "lang": "Python", "max_stars_repo_path": "sphericosmo/pitau.py", "max_stars_repo_name": "beckrob/SpheriCosmo", "max_stars_repo_head_hexsha": "a961c70763ce29112cfc2d69bd330601608d55e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sphericosmo/pitau.py", "max_issues_repo_name": "beckrob/SpheriCosmo", "max_issues_repo_head_hexsha": "a961c70763ce29112cfc2d69bd330601608d55e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sphericosmo/pitau.py", "max_forks_repo_name": "beckrob/SpheriCosmo", "max_forks_repo_head_hexsha": "a961c70763ce29112cfc2d69bd330601608d55e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6030534351, "max_line_length": 115, "alphanum_fraction": 0.6495863357, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1096}
|
#!/usr/bin/env python3
import os
import time
import cv2
import pycuda.autoinit # For initializing CUDA driver
import pycuda.driver as cuda
from utils.yolo_classes import get_cls_dict
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO
import rospy
import rospkg
from yolov4_trt_ros.msg import Detector2DArray
from yolov4_trt_ros.msg import Detector2D
from vision_msgs.msg import BoundingBox2D
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class yolov4(object):
    """ROS node wrapper around a TensorRT YOLOv4 engine: subscribes to an
    image topic, runs detection, and publishes Detector2DArray results plus
    an overlay image."""

    def __init__(self):
        """ Constructor """
        self.bridge = CvBridge()
        self.init_params()
        self.init_yolo()
        self.cuda_ctx = cuda.Device(0).make_context()
        self.trt_yolo = TrtYOLO(
            (self.model_path + self.model), (self.h, self.w), self.category_num)

    def __del__(self):
        """ Destructor """
        self.cuda_ctx.pop()
        del self.trt_yolo
        del self.cuda_ctx

    def clean_up(self):
        """ Backup destructor: Release cuda memory """
        if self.trt_yolo is not None:
            self.cuda_ctx.pop()
            del self.trt_yolo
            del self.cuda_ctx

    def init_params(self):
        """ Initializes ros parameters """
        rospack = rospkg.RosPack()
        package_path = rospack.get_path("yolov4_trt_ros")
        self.video_topic = rospy.get_param("/video_topic", "/video_source/raw")
        self.model = rospy.get_param("/model", "yolov4Custom")
        self.model_path = rospy.get_param(
            "/model_path", package_path + "/yolo/")
        self.category_num = rospy.get_param("/category_number", 10)
        self.input_shape = rospy.get_param("/input_shape", "416")
        self.conf_th = rospy.get_param("/confidence_threshold", 0.5)
        self.show_img = rospy.get_param("/show_image", True)
        # buff_size sized for one full-HD BGR frame so stale frames are
        # dropped rather than queued.
        self.image_sub = rospy.Subscriber(
            self.video_topic, Image, self.img_callback, queue_size=1, buff_size=1920*1080*3)
        self.detection_pub = rospy.Publisher(
            "detections", Detector2DArray, queue_size=1)
        self.overlay_pub = rospy.Publisher(
            "/result/overlay", Image, queue_size=1)

    def init_yolo(self):
        """ Initialises yolo parameters required for trt engine """
        # Normalize the model name to "<name>-<dim>" and parse the input size.
        if self.model.find('-') == -1:
            self.model = self.model + "-" + self.input_shape
        yolo_dim = self.model.split('-')[-1]
        if 'x' in yolo_dim:
            dim_split = yolo_dim.split('x')
            if len(dim_split) != 2:
                raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
            self.w, self.h = int(dim_split[0]), int(dim_split[1])
        else:
            self.h = self.w = int(yolo_dim)
        # YOLO input dimensions must be multiples of 32.
        if self.h % 32 != 0 or self.w % 32 != 0:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
        cls_dict = get_cls_dict(self.category_num)
        self.vis = BBoxVisualization(cls_dict)

    def img_callback(self, ros_img):
        """Continuously capture images from camera and do object detection """
        tic = time.time()
        # converts from ros_img to cv_img for processing
        try:
            cv_img = self.bridge.imgmsg_to_cv2(
                ros_img, desired_encoding="bgr8")
            rospy.logdebug("ROS Image converted for processing")
        except CvBridgeError as e:
            rospy.loginfo("Failed to convert image %s", str(e))
            # BUG FIX: cv_img was left undefined after a conversion failure,
            # raising NameError below; skip this frame instead.
            return
        if cv_img is not None:
            boxes, confs, clss = self.trt_yolo.detect(cv_img, self.conf_th)
            cv_img = self.vis.draw_bboxes(cv_img, boxes, confs, clss)
            toc = time.time()
            fps = 1.0 / (toc - tic)
            self.publisher(boxes, confs, clss)
            if self.show_img:
                cv_img = show_fps(cv_img, fps)
                cv2.imshow("YOLOv4 DETECTION RESULTS", cv_img)
                cv2.waitKey(1)
        # converts back to ros_img type for publishing
        try:
            overlay_img = self.bridge.cv2_to_imgmsg(
                cv_img, encoding="passthrough")
            rospy.logdebug("CV Image converted for publishing")
            self.overlay_pub.publish(overlay_img)
        except CvBridgeError as e:
            rospy.loginfo("Failed to convert image %s", str(e))

    def publisher(self, boxes, confs, clss):
        """ Publishes to detector_msgs
        Parameters:
        boxes (List(List(int))) : Bounding boxes of all objects
        confs (List(double)) : Probability scores of all objects
        clss (List(int)) : Class ID of all classes
        """
        detection2d = Detector2DArray()
        detection2d.header.stamp = rospy.Time.now()
        detection2d.header.frame_id = "camera"  # change accordingly
        for i in range(len(boxes)):
            # BUG FIX: the original nested a redundant `for _ in boxes` loop
            # and reused a single Detector2D instance, so the array contained
            # len(boxes)^2 references to the same (last-written) message.
            # Build one fresh message per box instead.
            detection = Detector2D()
            # boxes : xmin, ymin, xmax, ymax
            detection.header.stamp = rospy.Time.now()
            detection.header.frame_id = "camera"  # change accordingly
            detection.results.id = clss[i]
            detection.results.score = confs[i]
            detection.bbox.center.x = boxes[i][0] + (boxes[i][2] - boxes[i][0])/2
            detection.bbox.center.y = boxes[i][1] + (boxes[i][3] - boxes[i][1])/2
            detection.bbox.center.theta = 0.0  # change if required
            detection.bbox.size_x = abs(boxes[i][0] - boxes[i][2])
            detection.bbox.size_y = abs(boxes[i][1] - boxes[i][3])
            detection2d.detections.append(detection)
        self.detection_pub.publish(detection2d)
def main():
    """Entry point: initialize the ROS node, construct the detector, spin."""
    # BUG FIX: init_node must run before the yolov4 constructor, which
    # registers subscribers and publishers; the original created them first.
    rospy.init_node('yolov4_trt_ros', anonymous=True)
    yolo = yolov4()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        # BUG FIX: pass the cleanup callable itself; the original invoked
        # yolo.clean_up() immediately and registered its None return value.
        rospy.on_shutdown(yolo.clean_up)
        print("Shutting down")


if __name__ == '__main__':
    main()
|
{"hexsha": "4c0439fdfb595b2edea247c0e03847d0968f88e4", "size": 5969, "ext": "py", "lang": "Python", "max_stars_repo_path": "trt_yolo_v4.py", "max_stars_repo_name": "privvyledge/yolov4_trt_ros", "max_stars_repo_head_hexsha": "502a8b6cc61bf18e04033496eca11f39c242f439", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trt_yolo_v4.py", "max_issues_repo_name": "privvyledge/yolov4_trt_ros", "max_issues_repo_head_hexsha": "502a8b6cc61bf18e04033496eca11f39c242f439", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trt_yolo_v4.py", "max_forks_repo_name": "privvyledge/yolov4_trt_ros", "max_forks_repo_head_hexsha": "502a8b6cc61bf18e04033496eca11f39c242f439", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5028901734, "max_line_length": 92, "alphanum_fraction": 0.6059641481, "include": true, "reason": "import pycuda", "num_tokens": 1428}
|
import numpy as np
import pandas as pd
import pickle
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
import matplotlib
import matplotlib.pyplot as plt

# Fix the global NumPy RNG so parameter sampling and added noise below are
# reproducible across runs.
np.random.seed(10)
#Function to compute equilibrium constant
def compute_K(vi, Ai, Bi, Ci, Di, Gi, Hi, T_K):
    """Evaluate the temperature-dependent equilibrium constant of a reaction.

    Inputs:
    - vi: stoichiometric vector of the given reaction
    - Ai, Bi, Ci, Di: empirical coefficient vectors per species
    - Gi: Gibbs free energies, kJ/mol
    - Hi: enthalpies, kJ/mol
    - T_K: temperature, K
    Output:
    - K: value of the equilibrium constant at T_K
    """
    T0_K = 298.15
    ratio = T_K / T0_K
    # Stoichiometry-weighted reaction sums over the species vectors.
    A, B, C = np.dot(vi, Ai), np.dot(vi, Bi), np.dot(vi, Ci)
    D, G, H = np.dot(vi, Di), np.dot(vi, Gi), np.dot(vi, Hi)
    # K0: equilibrium constant at the reference temperature T0.
    K0 = np.exp(-G * 1000 / (8.314 * T0_K))
    # K1: van 't Hoff correction for T != T0.
    K1 = np.exp((H * 1000 / (8.314 * T0_K)) * (1 - T0_K / T_K))
    # K2: heat-capacity correction terms.
    K2 = np.exp(A * (np.log(ratio) - (ratio - 1) / ratio)
                + 0.5 * B * T0_K * (ratio - 1) ** 2 / ratio
                + (1/6) * C * T0_K ** 2 * (ratio - 1) ** 2 * (ratio + 2) / ratio
                + 0.5 * D * (ratio - 1) ** 2 / (T0_K * ratio) ** 2)
    return K0 * K1 * K2
#Conservation equation
def conservation_eq(F_v, tau, k_v, T_C, FN2, model):
#Inputs:
# - tau: Space time, gcat min molNaphtha-1
# - F_v: Vector of flow rates, Dimensionless, Fi/FNaphtha0
# - k_v: Vector of kinetics, c.u.
# - T_C: Temperature, C
# - FN2: Flow rate of N2, Dimensionless, FN2/FNaphtha0
#Outputs:
# - Solved mass balances of dimensionless flow rates
T_K = T_C + 273.15;
# WGS reaction (CO + H2O <=> H2 + CO2)
v_WGS = np.array([-1, -1, 1, 1])
A_WGS = np.array([3.376, 3.47, 3.249, 5.457])
B_WGS = np.array([.557, 1.45, .422, 1.045])*1e-3;
C_WGS = np.array([0, 0, 0, 0])*1e-6;
D_WGS = np.array([-.031, .121, .083, -1.157])*1e5;
G_WGS = np.array([-137.4, -228.8, 0, -394.6]); #KJ/mol
H_WGS = np.array([-110.525, -241.818, 0, -393.509]); #KJ/mol
# SRM Reaction (CH4 + H2O <=> 3H2 + CO)
v_SRM = np.array([-1, -1, 3, 1]);
A_SRM = np.array([1.702, 3.47, 3.249, 3.376]);
B_SRM = np.array([9.081, 1.45, .422, .557])*1e-3;
C_SRM = np.array([-2.164, 0, 0, 0])*1e-6;
D_SRM = np.array([0, .121, .083, -.031])*1e5;
G_SRM = np.array([-50.46, -228.8, 0, -137.4]); #KJ/mol
H_SRM = np.array([-74.52, -241.818, 0, -110.525]); #KJ/mol
#Compute equilibrium constants for WGS, SRM and DRM
K_WGS = compute_K(v_WGS, A_WGS, B_WGS, C_WGS, D_WGS, G_WGS, H_WGS, T_K);
K_SRM = compute_K(v_SRM, A_SRM, B_SRM, C_SRM, D_SRM, G_SRM, H_SRM, T_K);
FT = np.sum(F_v) + FN2; p = F_v / FT;
#Rate constants from Arrhenius
k_SRN = k_v[0] * np.exp((-k_v[1] / 8.31446) *(1 / T_K));
k_WGS = k_v[2] * np.exp((-k_v[3] / 8.31446) *(1 / T_K));
k_SRM = k_v[4] * np.exp((-k_v[5] / 8.31446) *(1 / T_K));
#Adsorption constants
K_N = k_v[6] * np.exp(k_v[7] / (8.31446 * T_K));
K_H2O = k_v[8] * np.exp(k_v[9] / (8.31446 * T_K));
#Experimental power coefficients
a = k_v[10]
b = k_v[11]
#Reaction rates
if model == 'LH, molecular adsorption, different site':
r_SRN = k_SRN * K_N * K_H2O * p[0] * p[1] / (1 + K_N * p[0]) / (1 + K_H2O * p[1]);
elif model == 'LH, molecular adsorption, same site':
r_SRN = k_SRN * K_N * K_H2O * p[0] * p[1] / ((1 + K_N * p[0] + K_H2O * p[1]) ** 2);
elif model == 'LH, dissociative adsorption, different site':
r_SRN = k_SRN * K_N * K_H2O * p[0] * p[1] / ((1 + K_N * p[0] * p[5] / p[1] + K_H2O * p[1] / p[5]) ** 2);
elif model == 'LH, dissociative adsorption, same site':
r_SRN = k_SRN * K_N * K_H2O * p[0] * p[1] / ((1 + np.sqrt(np.maximum((K_N * p[0]), 0)) + np.sqrt(np.maximum((K_H2O * p[1]), 0))) ** 2);
elif model == 'ER, associative':
r_SRN = k_SRN * K_N * p[0] * p[1] / (1 + K_N * p[0]);
elif model == 'ER, dissociative':
r_SRN = k_SRN * K_N * p[0] * p[1] / (1 + np.sqrt(np.maximum((K_N * p[0]), 0)));
elif model == 'LH, dissociative (HC) and molecular (H2O), same site':
r_SRN = k_SRN * K_N * K_H2O * p[0] * p[1] / ((1 + np.sqrt(np.maximum((K_N * p[0]), 0)) + K_H2O * p[1]) ** 2);
elif model == 'Power Law':
r_SRN = k_SRN * (np.maximum((p[0]), 0) ** a) * (np.maximum((p[1]), 0) ** b);
r_WGS = k_WGS * (p[3] * p[1] - p[5] * p[2] / K_WGS);
r_SRM = k_SRM * (p[4] * p[1] - (p[5] ** 3) * p[3] / K_SRM);
#ODEs
s_m = np.array([[-1, 0, 0], [-6.7, -1, -1], [0, 1, 0], [6.7, -1, 1], [0, 0, -1], [6.7 + 7.7, 1, 3]])
r_m = np.array([r_SRN, r_WGS, r_SRM]);
r_i = np.dot(s_m, r_m);
return r_i
#Functon to run a single set of ODEs, for a given tauspan, initial conditions, temperature and set of k values
def run_ODE(tauspan, F0_v, T_K, k_v, model, ivp = False):
F0N2 = F0_v[-1]
F0_v = F0_v[:-1]
T_C = T_K - 273.15
if ivp == True:
res = solve_ivp(lambda tau, F_v: conservation_eq(F_v, tau, k_v, T_C, F0N2, model),
[tauspan[0], tauspan[-1]], F0_v, t_eval = tauspan, method = 'RK45'); res = res.y.T;
else:
args = (k_v, T_C, F0N2, model)
res = odeint(conservation_eq, F0_v, tauspan, args, mxstep = 50000)
return tauspan, res
# NOTE: `global` at module level is a no-op; kept only to document the names
# shared with multiple_ODEs below.
global ne, nl, df_exp, df0_exp
#Read data from experimental excel
df_exp = pd.read_excel('Raw_Data.xlsx')
#Extract initial conditions that need simulating
# (rows where the first column -- presumably the space time tau, given its
# use as tauspan in multiple_ODEs -- equals 0)
df0_exp = df_exp[df_exp[df_exp.columns[0]] == 0]
#Calculate number of experiments to perform simulation
ne = len(df_exp[df_exp[df_exp.columns[0]] == 0])
#Calculate number of points at which ODEs should be solved
nl = len(df_exp[df_exp.columns[0]].unique())
def multiple_ODEs(k_v, model, ivp=False):
    """Solve the ODE system for every experiment in the global dataset and
    return all solutions flattened into a single 1-D array.

    Relies on the module-level globals df_exp, df0_exp, ne and nl defined
    above.
    """
    solutions = []
    tau_col = df_exp.columns[0]
    for j in range(ne):
        # Kept for parity with the original (both values are computed there
        # even though only some are consumed downstream).
        index_no = df0_exp.iloc[j].name
        F0N2 = df0_exp[df0_exp.columns[8]].iloc[j]
        T_K = df0_exp[df0_exp.columns[1]].iloc[j]
        F0_v = df0_exp[df0_exp.columns[2:9]].iloc[j].values
        block = df_exp.iloc[j * nl: nl * (j + 1), :]
        # Restrict to points whose third column lies in (-0.1, 1.1).
        mask = (block[df_exp.columns[2]] > -0.1) & (block[df_exp.columns[2]] < 1.1)
        tauspan = block[mask][tau_col].values
        _, F_sol = run_ODE(tauspan, F0_v, T_K, k_v, model, ivp)
        solutions.append(F_sol)
    return np.concatenate(solutions).ravel()
def pick_params(params_dict, instances_per_model, distribution=False):
    """Sample uniform random parameter sets and plot their histograms.

    params_dict maps parameter names to (low, high) bound pairs.  Returns an
    array of shape (n_params, instances_per_model); also writes
    Params_distribution.png and shows the figure when distribution is True.
    """
    samples = np.empty((len(params_dict), instances_per_model))
    # Draw each parameter row from its uniform bounds (dict order preserved).
    for row, bounds in enumerate(params_dict.values()):
        samples[row, :] = np.random.uniform(bounds[0], bounds[1], instances_per_model)

    plt.figure(figsize=(14, 8))
    for row, name in enumerate(params_dict):
        plt.subplot(3, 4, row + 1)
        count, bins, _ = plt.hist(samples[row, :], 10)
        # Horizontal reference line at the mean bin count.
        plt.plot(bins, np.ones_like(bins) * np.mean(count), linewidth=2, color='r')
        plt.title('{}'.format(name))
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
        plt.locator_params(axis="x", nbins=6)
        plt.tight_layout()
    plt.savefig('Params_distribution.png')
    if distribution:
        plt.show()
    plt.close()
    return samples
def perform_model(model, params, sigmar, sigmac, instances_per_model):
    """Simulate `instances_per_model` parameter sets for one kinetic model and
    return a DataFrame of (noisy) flattened solutions labeled with the model.

    sigmar and sigmac are relative and constant noise scales; with both zero
    the added noise vector is exactly zero.  If the default odeint solve
    fails, the run is retried with solve_ivp.
    """
    sol = []
    print('\nFollowing model: {}'.format(model))
    for i in range(instances_per_model):
        print('Attempting {} instance...'.format(i + 1))
        k_v = params[:, i]
        try:
            F = multiple_ODEs(k_v, model)
            epsilon = np.random.multivariate_normal(np.zeros(F.shape[0]), (np.identity(F.shape[0]) * (sigmar ** 2 * F / 100 + sigmac ** 2)))
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch ordinary errors only, then fall back to
        # the solve_ivp integration path.
        except Exception:
            print('Exception ivp!\n')
            F = multiple_ODEs(k_v, model, ivp=True)
            epsilon = np.random.multivariate_normal(np.zeros(F.shape[0]), (np.identity(F.shape[0]) * (sigmar ** 2 * F / 100 + sigmac ** 2)))
        sol.append(F + epsilon)
    sol = np.asarray(sol)
    df = pd.DataFrame(sol)
    df['Label'] = model
    return df
def in_silico(models, params_dict, instances_per_model, sigmar, sigmac, distribution):
    """Generate the full in-silico dataset: sample one set of parameters,
    simulate every model with it, and return the stacked labeled DataFrame."""
    frames = []
    print('Parameters sampled!')
    params = pick_params(params_dict, instances_per_model, distribution)
    # Persist the sampled parameter table for later inspection.
    (pd.DataFrame(params.T, columns=list(params_dict.keys()))
       .rename_axis('Samples')
       .round(3)
       .to_excel('Params_sampled.xlsx'))
    for model in models:
        frames.append(perform_model(model, params, sigmar, sigmac, instances_per_model))
    print('\nDone!')
    combined = pd.concat(frames, axis=0).reset_index()
    return combined.drop(combined.columns[0], axis=1)
# ---- Experiment configuration ----------------------------------------------
# Uniform sampling bounds for each kinetic parameter.
params_dict = {'k0_SNR' : np.array([1.0E+07, 1.0E+08]),
               'Ea_SNR' : np.array([6.0E+04, 9.0E+04]),
               'k0_WGS' : np.array([2.0E+05, 3.0E+05]),
               'Ea_WGS' : np.array([4.0E+04, 7.0E+04]),
               'k0_SMR' : np.array([2.1E+11, 2.2E+11]),
               'Ea_SMR' : np.array([1.2E+05, 1.5E+05]),
               'K0_A' : np.array([1.0E-02, 5.0E-02]),
               'AH_A' : np.array([1.0E+04, 3.0E+04]),
               'K0_B' : np.array([1.0E-03, 1.0E-02]),
               'AH_B' : np.array([3.0E+04, 5.0E+04]),
               'a' : np.array([0.25, 3]),
               'b' : np.array([0.25, 3])}
# Number of sampled parameter sets simulated per kinetic model.
instances_per_model = 500
models = ['LH, molecular adsorption, different site',
          'LH, molecular adsorption, same site',
          'LH, dissociative adsorption, different site',
          'LH, dissociative adsorption, same site',
          'ER, associative',
          'ER, dissociative',
          'LH, dissociative (HC) and molecular (H2O), same site',
          'Power Law']
# Noise scales (relative / constant); both zero -> noiseless dataset.
sigmar = 0.0
sigmac = 0.0
distribution = False

# Write a human-readable summary of this run's configuration.
# BUG FIX: the original additionally opened README_In_Silico.txt in append
# mode into an unused `text_file` handle that was never closed; the `with`
# block below is sufficient on its own.
with open('README_In_Silico.txt', 'w') as file:
    file.write('Kinetic parameters: \n')
    file.write('\n{\n')
    for k in sorted(params_dict.keys()):
        file.write("'%s':'%s', \n" % (k, params_dict[k]))
    file.write('}\n')
    file.write('\nInstances per model = %s\n' % instances_per_model)
    file.write('\nModels: \n')
    file.write('\n')
    for k in models:
        file.write("'%s'\n" % (k))
    file.write('\nNoise parameters: \n')
    file.write('\nSigmaR = %s\n' % sigmar)
    file.write('SigmaC = %s\n' % sigmac)

Data = in_silico(models, params_dict, instances_per_model, sigmar, sigmac, distribution)

# Save Data for Naphtha Reforming in csv and xlsx format
Data.to_csv('Data_in_silico' + '_' + str(instances_per_model) + '.csv')
Data.to_excel('Data_in_silico' + '_' + str(instances_per_model) + '.xlsx')

# Save the list with models tested (context manager guarantees the handle is
# closed even on error).
filename = 'model_list' + '_' + str(instances_per_model) + '.sav'
with open(filename, 'wb') as outfile:
    pickle.dump(models, outfile)
|
{"hexsha": "96b84de971c70f52a12f2fe9523cfb9addd8eda2", "size": 10854, "ext": "py", "lang": "Python", "max_stars_repo_path": "In_Silico.py", "max_stars_repo_name": "MuRE-group/ANN_Reforming_Modeling", "max_stars_repo_head_hexsha": "88aa586a4003fff1ff7116bc33611c4c69e8655e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "In_Silico.py", "max_issues_repo_name": "MuRE-group/ANN_Reforming_Modeling", "max_issues_repo_head_hexsha": "88aa586a4003fff1ff7116bc33611c4c69e8655e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "In_Silico.py", "max_forks_repo_name": "MuRE-group/ANN_Reforming_Modeling", "max_forks_repo_head_hexsha": "88aa586a4003fff1ff7116bc33611c4c69e8655e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0598006645, "max_line_length": 143, "alphanum_fraction": 0.5656900682, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3878}
|
module Sampling

export ListSampler, RejectionSampler, UniformSampler
export create_sampler

using Random
using DataStructures

using QXContexts.Contexts

# Module containing sampler objects which provide different levels of sampling features.
# Each sampler has a constructor which takes a context to perform sampling in and a set
# of keyword arguments that control the sampling behavior.
#
# Sampler(ctx; kwargs...): Initialise the sampler
#
# Each sampler is also callable with arguments that control it's execution
#
# (s::Sampler)(kwargs...): Perform sampling and return sampling results
#

"""Abstract type for samplers"""
abstract type AbstractSampler end

"""Functions to generate random bitstrings"""
random_bitstring(rng, num_qubits) = prod(rand(rng, ["0", "1"], num_qubits))
random_bitstrings(rng, num_qubits, num_samples) = [random_bitstring(rng, num_qubits) for _ in 1:num_samples]

###############################################################################
# ListSampler
###############################################################################

"""
A Sampler struct to compute the amplitudes for a list of bitstrings.
"""
struct ListSampler <: AbstractSampler
    ctx::AbstractContext
    list::Vector{String}
end

"""
    ListSampler(ctx
                ;bitstrings::Vector{String}=String[],
                rank::Integer=0,
                comm_size::Integer=1,
                kwargs...)

Constructor for a ListSampler to produce a portion of the given `bitstrings` determined by
the given `rank` and `comm_size`.
"""
function ListSampler(ctx
                     ;bitstrings::Vector{String}=String[],
                     kwargs...)
    # Optionally truncate the list to at most `num_samples` bitstrings.
    if haskey(kwargs, :num_samples)
        n = kwargs[:num_samples]
        n = min(n, length(bitstrings))
    else
        n = length(bitstrings)
    end
    ListSampler(ctx, bitstrings[1:n])
end

"""
    (s::ListSampler)(max_amplitudes=nothing, kwargs...)

Callable for ListSampler struct. Calculates amplitudes for each bitstring in the list
"""
function (s::ListSampler)(;max_amplitudes=nothing, kwargs...)
    bs = if max_amplitudes === nothing
        s.list
    else s.list[1:min(max_amplitudes, length(s.list))] end
    amps = ctxmap(x -> compute_amplitude!(s.ctx, x; kwargs...), s.ctx, bs)
    amps = ctxgather(s.ctx, amps)
    # NOTE: ctxgather returns a value only where results are collected, so
    # other ranks implicitly return `nothing` from this callable.
    if amps !== nothing return (bs, amps) end
end

# Instantiate the sampler named by sampler_params[:method] with its params.
# NOTE(review): get_constructor looks the type up in `Main`, so the
# `<method>Sampler` name must be visible there -- confirm for library use.
create_sampler(ctx, sampler_params) = get_constructor(sampler_params[:method])(ctx ;sampler_params[:params]...)
get_constructor(func_name::String) = getfield(Main, Symbol(func_name*"Sampler"))

###############################################################################
# RejectionSampler
###############################################################################

"""
A Sampler struct to use rejection sampling to produce output.
"""
mutable struct RejectionSampler <: AbstractSampler
    ctx::AbstractContext
    num_qubits::Integer
    num_samples::Integer
    M::Real
    fix_M::Bool
    rng::MersenneTwister
end

"""
    function RejectionSampler(;num_qubits::Integer,
                              num_samples::Integer,
                              M::Real=0.0001,
                              fix_M::Bool=false,
                              seed::Integer=42,
                              kwargs...)

Constructor for a RejectionSampler to produce and accept a number of bitstrings.
"""
function RejectionSampler(ctx::AbstractContext;
                          num_qubits::Integer,
                          num_samples::Integer,
                          M::Real=0.0001,
                          fix_M::Bool=false,
                          seed::Integer=42,
                          kwargs...)
    # Evenly divide the number of bitstrings to be sampled amongst the subgroups of ranks.
    # num_samples = get_rank_size(num_samples, comm_size, rank)
    rng = MersenneTwister(seed) # TODO: should somehow add the rank to the seed, maybe with get_rank(ctx)?
    RejectionSampler(ctx, num_qubits, num_samples, M, fix_M, rng)
end

"""
    (s::RejectionSampler)(max_amplitudes=nothing, kwargs...)

Callable for RejectionSampler struct. Computes amplitudes for uniformly distributed bitstrings and corrects the distribution
using a rejection step.
"""
function (s::RejectionSampler)(;max_amplitudes=nothing, kwargs...)
    num_samples = max_amplitudes === nothing ? s.num_samples : max_amplitudes
    N = 2^s.num_qubits
    M = s.M
    # Samples is defined at the bottom of this module; Julia resolves the
    # reference when this method is first called, so the forward use is fine.
    samples = Samples()
    accepted = 0
    while accepted < num_samples
        # produce candidate bitstrings
        bitstrings = random_bitstrings(s.rng, s.num_qubits, num_samples-accepted)

        # compute amplitudes for the bitstrings
        amps = [compute_amplitude!(s.ctx, bs; kwargs...) for bs in bitstrings]
        # bs_amp_pairs = [bs => compute_amplitude!(s.ctx, bs; kwargs...) for bs in bitstrings if !(bs in keys(samples.amplitudes))]

        # Record the computed amplitudes and update M if required
        for (bs, amp) in zip(bitstrings, amps)
            samples.amplitudes[bs] = amp
            Np = N * abs(amp)^2
            s.fix_M || (M = max(Np, M))
        end
        s.fix_M || (M = ctxreduce(max, s.ctx, M))

        # Conduct a rejection step for each bitstring to correct the distribution of samples.
        for (bs, amp) in zip(bitstrings, amps)
            Np = N * abs(amp)^2 # This is computed twice
            if rand(s.rng) < Np / M
                accepted += 1
                samples.bitstrings_counts[bs] += 1
            end
        end
    end
    # NOTE(review): the gathered result is discarded and each rank returns
    # its local `samples` -- confirm this is the intended behavior.
    ctxgather(s.ctx, samples)
    samples
end

###############################################################################
# UniformSampler
###############################################################################

"""
A Sampler struct to uniformly sample bitstrings and compute their amplitudes.
"""
mutable struct UniformSampler <: AbstractSampler
    ctx::AbstractContext
    num_qubits::Integer
    num_samples::Integer
    rng::MersenneTwister
end

"""
    UniformSampler(ctx::AbstractContext;
                   num_qubits::Integer,
                   num_samples::Integer,
                   seed::Integer=42,
                   kwargs...)

Constructor for a UniformSampler to uniformly sample bitstrings.
"""
function UniformSampler(ctx::AbstractContext;
                        num_qubits::Integer,
                        num_samples::Integer,
                        seed::Integer=42,
                        kwargs...)
    # Evenly divide the number of bitstrings to be sampled amongst the subgroups of ranks.
    # num_samples = (num_samples ÷ comm_size) + (rank < num_samples % comm_size)
    rng = MersenneTwister(seed)
    UniformSampler(ctx, num_qubits, num_samples, rng)
end

"""
    (s::UniformSampler)(max_amplitudes=nothing, kwargs...)

Callable for UniformSampler struct. Computes amplitudes for uniformly distributed bitstrings.
"""
function (s::UniformSampler)(;max_amplitudes=nothing, kwargs...)
    num_samples = max_amplitudes === nothing ? s.num_samples : max_amplitudes
    bs = random_bitstrings(s.rng, s.num_qubits, num_samples)
    amps = ctxmap(x -> compute_amplitude!(s.ctx, x; kwargs...), s.ctx, bs)
    amps = ctxgather(s.ctx, amps)
    (bs, amps)
end

###############################################################################
# Sampler Struct
###############################################################################

"""
Struct to hold the results of a simulation.
"""
struct Samples{T}
    # How many times each bitstring was accepted/observed.
    bitstrings_counts::DefaultDict{String, <:Integer}
    # Cached amplitude for each bitstring seen.
    amplitudes::Dict{String, T}
end
Samples() = Samples(DefaultDict{String, Int}(0), Dict{String, ComplexF32}())

Base.length(s::Samples) = sum(values(s.bitstrings_counts))
Base.unique(s::Samples) = keys(s.bitstrings_counts)

end
|
{"hexsha": "28fdedf0354b2287d2ca154b2d6a0857291778b3", "size": 7735, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sampling.jl", "max_stars_repo_name": "JuliaQX/QXRun.jl", "max_stars_repo_head_hexsha": "9d1a271a56372d79a78fdeccd00d6efb45078702", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-31T15:42:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T11:05:25.000Z", "max_issues_repo_path": "src/sampling.jl", "max_issues_repo_name": "JuliaQX/QXContexts.jl", "max_issues_repo_head_hexsha": "e2ba704640d3ffa2852b482e52424e404f6c2448", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2021-03-31T11:12:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T09:33:39.000Z", "max_forks_repo_path": "src/sampling.jl", "max_forks_repo_name": "JuliaQX/QXRun.jl", "max_forks_repo_head_hexsha": "9d1a271a56372d79a78fdeccd00d6efb45078702", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-23T14:42:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-23T14:42:34.000Z", "avg_line_length": 33.3405172414, "max_line_length": 131, "alphanum_fraction": 0.596509373, "num_tokens": 1667}
|
import numpy as np
import pygame
import sys
import math
def main():
    """Run a two-player Connect 4 game in a pygame window.

    Players alternate clicking a column to drop a red (player 1) or yellow
    (player 2) disc; the first four-in-a-row wins.

    Bug fixed vs. the original: the turn only passes to the other player when
    a disc was actually dropped — clicking a full column no longer costs the
    player their turn.
    """
    ROW_COUNT = 6
    COLUMN_COUNT = 7
    BLUE = (0, 0 ,230)
    BLACK = (0,0,0)
    RED = (255, 0, 0)
    YELLOW = (255, 255, 0)

    def create_board():
        """Return an empty ROW_COUNT x COLUMN_COUNT board (0 = empty cell)."""
        return np.zeros((ROW_COUNT, COLUMN_COUNT))

    def drop_piece(col, board, row, piece):
        """Place `piece` (1 or 2) at (row, col)."""
        board[row][col] = piece

    def is_valid_location(col, board):
        """A column is playable while its top cell is still empty."""
        return board[ROW_COUNT-1][col] == 0

    def get_next_open_row(col, board):
        """Return the lowest empty row in `col` (call only on valid columns)."""
        for r in range(ROW_COUNT):
            if board[r][col] == 0:
                return r

    def print_board(board):
        """Print the board flipped so row 0 appears at the bottom."""
        print(np.flipud(board))

    def winning_move(board, piece):
        """Return True if `piece` has four in a row in any direction."""
        # horizontal locations for win
        for c in range(COLUMN_COUNT-3):
            for r in range(ROW_COUNT):
                if all(board[r][c+i] == piece for i in range(4)):
                    return True
        # vertical locations for win
        for c in range(COLUMN_COUNT):
            for r in range(ROW_COUNT-3):
                if all(board[r+i][c] == piece for i in range(4)):
                    return True
        # positive slope diagonals
        for c in range(COLUMN_COUNT-3):
            for r in range(ROW_COUNT-3):
                if all(board[r+i][c+i] == piece for i in range(4)):
                    return True
        # negative slope diagonals
        for c in range(COLUMN_COUNT-3):
            for r in range(3, ROW_COUNT):
                if all(board[r-i][c+i] == piece for i in range(4)):
                    return True
        return False

    def draw_board(board):
        """Draw the board grid and all placed discs, then refresh the display."""
        for c in range(COLUMN_COUNT):
            for r in range(ROW_COUNT):
                pygame.draw.rect(screen, BLUE, (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))
                pygame.draw.circle(screen, BLACK, (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)
        for c in range(COLUMN_COUNT):
            for r in range(ROW_COUNT):
                # discs are drawn bottom-up, hence the height - ... flip
                if board[r][c] == 1:
                    pygame.draw.circle(screen, RED, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
                elif board[r][c] == 2:
                    pygame.draw.circle(screen, YELLOW, (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
        pygame.display.update()

    board = create_board()
    game_over = False
    playerturn = 0

    pygame.init()
    SQUARESIZE = 100
    RADIUS = int(SQUARESIZE/2 - 5)
    width = COLUMN_COUNT * SQUARESIZE
    height = (ROW_COUNT+1) * SQUARESIZE
    size = (width, height)
    screen = pygame.display.set_mode(size)
    draw_board(board)
    pygame.display.update()
    font = pygame.font.SysFont("monospace", 75)

    while not game_over:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEMOTION:
                # preview disc follows the mouse in the top strip
                pygame.draw.rect(screen, BLACK, (0,0,width,SQUARESIZE))
                posx = event.pos[0]
                hover_color = RED if playerturn == 0 else YELLOW
                pygame.draw.circle(screen, hover_color, (posx, int(SQUARESIZE/2)), RADIUS)
                pygame.display.update()
            if event.type == pygame.MOUSEBUTTONDOWN:
                pygame.draw.rect(screen, BLACK, (0,0,width,SQUARESIZE))
                piece = 1 if playerturn == 0 else 2
                piece_color = RED if piece == 1 else YELLOW
                posx = event.pos[0]
                col = int(math.floor(posx/SQUARESIZE))
                if is_valid_location(col, board):
                    row = get_next_open_row(col, board)
                    drop_piece(col, board, row, piece)
                    if winning_move(board, piece):
                        print("Player {} Wins!".format(piece))
                        label = font.render("Player {} Wins!!!".format(piece), 1, piece_color)
                        screen.blit(label, (40,10))
                        game_over = True
                    # BUGFIX: flip the turn only after a successful drop; the
                    # original flipped it even when a full column was clicked.
                    playerturn = (playerturn + 1) % 2
                print_board(board)
                draw_board(board)
                if game_over:
                    # let the winner banner stay visible for a moment
                    pygame.time.wait(2000)
# Hand control to the menu screen; `menu` is a project-local module expected
# to call back into main() when the player starts a game.
import menu
menu.main()
|
{"hexsha": "3c19fb5b97957f5ae9cc71172ab433cc97a531f3", "size": 4110, "ext": "py", "lang": "Python", "max_stars_repo_path": "connect4 copy.py", "max_stars_repo_name": "yijiehuang0/connect4AI", "max_stars_repo_head_hexsha": "5a134ded8009fd51210a96ba2169920bf1b19aa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "connect4 copy.py", "max_issues_repo_name": "yijiehuang0/connect4AI", "max_issues_repo_head_hexsha": "5a134ded8009fd51210a96ba2169920bf1b19aa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "connect4 copy.py", "max_forks_repo_name": "yijiehuang0/connect4AI", "max_forks_repo_head_hexsha": "5a134ded8009fd51210a96ba2169920bf1b19aa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6883116883, "max_line_length": 122, "alphanum_fraction": 0.6501216545, "include": true, "reason": "import numpy", "num_tokens": 1277}
|
#/usr/bin/env python
import math
import numpy as np
import time
import torch
import torch.nn as nn
NEG_INF = -float("inf")
def logsumexp(*args):
    """Return log(sum(exp(args))) computed in a numerically stable way.

    Shifts by the maximum before exponentiating; returns -inf when every
    argument is -inf (avoids a log(0) / nan result).
    """
    neg_inf = -float("inf")
    if not any(a != neg_inf for a in args):
        return neg_inf
    peak = max(args)
    total = 0.0
    for a in args:
        total += math.exp(a - peak)
    return peak + math.log(total)
def log_softmax(acts, axis):
    """Return the log-softmax of `acts` along `axis`, numerically stabilized
    by subtracting the per-slice maximum before exponentiating."""
    shifted = acts - np.max(acts, axis=axis, keepdims=True)
    log_norm = np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
    return shifted - log_norm
def forward(log_probs, elabels, blank, enable_bypass=True):
    """CTC forward (alpha) recursion.

    Args:
        log_probs: (T, V) array of per-frame log-probabilities.
        elabels: extended label sequence (labels interleaved with blanks,
            beginning and ending with blank), so L = 2*num_labels + 1.
        blank: vocabulary index of the blank symbol.
        enable_bypass: when True, skip lattice cells with
            l < (L-2) - 2*(T-1-t) — presumably states that can no longer
            complete the alignment in the remaining frames (pruning only;
            `test` below checks the loss against pytorch's CTC).

    Returns:
        (alphas, loss): the (T, L) alpha lattice and -log p(labels | input).
    """
    T, V = log_probs.shape
    L = elabels.shape[0]
    #print("T: {}, V:{}, L:{}".format(T, V, L))
    alphas = np.full((T, L), NEG_INF)
    # Initialization: a path must start at the leading blank or the first
    # real label (assumes L >= 2, i.e. at least one label).
    alphas[0, 0] = log_probs[0, blank]
    alphas[0, 1] = log_probs[0, elabels[1]]
    bypass = 0
    for t in range(1, T):
        for l in range(L):
            if enable_bypass and l < (L-2) - 2*(T-1-t):
                bypass += 1
                continue # alphas[t, l] = NEG_IN
            else:
                # stay in the same state...
                alphas[t, l] = alphas[t-1, l]
                if l >= 1: # blank or duplicate
                    alphas[t, l] = logsumexp(alphas[t, l], alphas[t-1, l-1])
                if l >= 2 and elabels[l] != elabels[l-2]: # different label
                    alphas[t, l] = logsumexp(alphas[t, l], alphas[t-1, l-2])
                alphas[t, l] = alphas[t, l] + log_probs[t, elabels[l]]
    #print('forward bypass rate: {}'.format(float(bypass)/(L*(T-1))))
    # Total probability marginalizes the two valid final states: the trailing
    # blank (L-1) and the last label (L-2).
    return alphas, -logsumexp(alphas[T-1, L-1], alphas[T-1, L-2])
def backward(log_probs, elabels, blank, enable_bypass=True):
    """CTC backward (beta) recursion; mirror image of `forward`.

    Args:
        log_probs: (T, V) array of per-frame log-probabilities.
        elabels: extended label sequence (labels interleaved with blanks).
        blank: vocabulary index of the blank symbol.
        enable_bypass: when True, skip lattice cells with l > 2*(t+1) + 1 —
            presumably states unreachable from the start within t frames
            (pruning only; `test` calls this with enable_bypass=False).

    Returns:
        (betas, loss): the (T, L) beta lattice and -log p(labels | input),
        which must match the loss returned by `forward`.
    """
    T, V = log_probs.shape
    L = elabels.shape[0]
    #print("T: {}, V:{}, L:{}".format(T, V, L))
    betas = np.full((T, L), NEG_INF)
    # Initialization at the last frame: only the last label or the trailing
    # blank may end a valid path.
    betas[T-1, L-2] = log_probs[T-1, elabels[L-2]]
    betas[T-1, L-1] = log_probs[T-1, blank] # elabels[L-1]
    bypass = 0
    for t in reversed(range(T-1)):
        for l in reversed(range(L)):
            if enable_bypass and l > 2*(t+1) + 1:
                bypass += 1
                continue # betas[t, l] = NEG_INF
            else:
                # stay in the same state...
                betas[t, l] = betas[t+1, l]
                if l < L-1: # blank or duplicate
                    betas[t, l] = logsumexp(betas[t, l], betas[t+1, l+1])
                if l < L-2 and elabels[l] != elabels[l+2]: # different label
                    betas[t, l] = logsumexp(betas[t, l], betas[t+1, l+2])
                betas[t, l] = betas[t, l] + log_probs[t, elabels[l]]
    #print('backward bypass rate: {}'.format(float(bypass)/(L*(T-2))))
    # Paths start at the leading blank or the first label.
    return betas, -logsumexp(betas[0, 0], betas[0, 1])
def find_label_index(elabels, k):
    """Return every position in `elabels` whose value equals `k`, in order."""
    return [pos for pos, label in enumerate(elabels) if label == k]
'''
Gradient computation on softmax output.
'''
def compute_gradient(log_probs, alphas, betas, elabels, blank):
    """Compute gradients from the alpha/beta lattices.

    For each frame t and symbol k, sums alpha*beta over every extended-label
    position carrying symbol k, normalizes by the total path probability, and
    exponentiates. Symbols absent from `elabels` get an empty sum, which
    `logsumexp` maps to -inf and `np.exp` to a zero gradient.

    NOTE(review): the `- 2*log_probs[t, k]` term suggests this is a gradient
    w.r.t. the (log-)softmax outputs rather than the logits — confirm against
    the intended use before relying on it; `test` only calls this, it does not
    check the values.
    """
    T, V = log_probs.shape
    L = elabels.shape[0]
    gradients = np.full(log_probs.shape, NEG_INF)
    # total log path probability, from the backward initial states
    log_p = logsumexp(betas[0, 0], betas[0, 1])
    B = []
    for t in range(T):
        for k in range(V):
            # positions in the extended label sequence that emit symbol k
            B = find_label_index(elabels, k)
            albetas = []
            for l in B:
                albetas.append(alphas[t, l] + betas[t, l])
            res = logsumexp(*albetas)
            gradients[t, k] = res - log_p - 2*log_probs[t, k]
    gradients = np.exp(gradients)
    gradients = -gradients
    return gradients
def test_logsumexp():
    """Cross-check our logsumexp against torch.logsumexp on random inputs."""
    samples = np.random.rand(10)
    ours = logsumexp(*samples)
    theirs = torch.logsumexp(torch.from_numpy(samples), dim=0).detach().item()
    print(ours)
    print(theirs)
    assert np.allclose(ours, theirs, atol=1e-18, rtol=1e-18), \
        "logsumexp impl mismatch!"
def test_log_softmax():
    """Cross-check our log_softmax against torch's implementation."""
    data = np.random.rand(2, 10)
    ours = log_softmax(data, 1)
    print(ours)
    # rows of exp(log_softmax) should each sum to one
    print(np.sum(np.exp(ours), axis=1, keepdims=True))
    theirs = torch.nn.functional.log_softmax(torch.from_numpy(data), dim=1)
    assert np.allclose(ours, theirs, atol=1e-8, rtol=1e-8), \
        "log_softmax impl mismatch!"
def test(T, V, L):
    """End-to-end check of the CTC implementation on random data.

    Verifies that (1) forward and backward recursions give the same loss,
    (2) alpha*beta marginalized over all states at a random frame reproduces
    the loss, and (3) the loss matches torch.nn.CTCLoss; finally computes
    gradients (values not checked). T = frames, V = vocab size, L = labels.
    """
    inputs = np.random.rand(T, V)
    # labels drawn from 1..V-1 so they never collide with blank = 0
    labels = np.random.randint(1, V, L)
    #print('softmax input: {}'.format(inputs))
    #print('output labels: {}'.format(labels))
    log_probs = log_softmax(inputs, axis=1)
    #print('log_probs: {}'.format(log_probs))
    blank = 0
    # build the extended label sequence: blank, l1, blank, l2, ..., blank
    elabels = [0]
    for l in labels:
        elabels.append(l)
        elabels.append(blank)
    elabels = np.array(elabels)
    start = time.time()
    alphas, loss = forward(log_probs, elabels, blank)
    print('loss: {}, used time: {}'.format(loss, time.time() - start))
    start = time.time()
    betas, loss_b = backward(log_probs, elabels, blank, enable_bypass=False)
    print('loss(backward): {}, used time: {}'.format(loss_b, time.time() - start))
    assert np.allclose(loss, loss_b, atol=1e-8, rtol=1e-8), \
        "forward and backward loss impl mismatch!"
    # check loss by prob sum
    for _ in range(10):
        t = np.random.randint(1, T)
        log_prob_sum = NEG_INF
        for l in range(2*L+1):
            log_prob_sum = logsumexp(log_prob_sum,
                alphas[t, l] + betas[t, l] - log_probs[t, elabels[l]])
        assert np.allclose(loss, -log_prob_sum, atol=1e-8, rtol=1e-8), \
            "forward and backward loss impl mismatch!"
    # compare loss against pytorch ctc loss
    ctc_loss = nn.CTCLoss(reduction='none')
    # CTCLoss wants (T, batch, V); add and move the batch dimension
    ptinput = torch.from_numpy(log_probs)
    ptinput = ptinput.unsqueeze(0)
    ptinput = ptinput.transpose(0,1)
    target = torch.from_numpy(labels)
    input_lengths = torch.full(size=(1,), fill_value=T, dtype=torch.long)
    target_lengths = torch.full(size=(1,), fill_value=L, dtype=torch.long)
    start = time.time()
    pt_loss = ctc_loss(ptinput, target, input_lengths, target_lengths).item()
    print('pt_loss: {}, used time: {}'.format(pt_loss, time.time() - start))
    assert np.allclose(loss, pt_loss, atol=1e-8, rtol=1e-8), \
        "ctc loss is mismatched against pytorch impl"
    # check gradients
    grads = compute_gradient(log_probs, alphas, betas, elabels, blank)
# Smoke-test the CTC implementation on a moderately sized random problem.
if __name__ == '__main__':
    #test_logsumexp()
    #test_log_softmax()
    T = 200 # frames
    V = 50 # vocab size
    L = 20 # labels
    test(T, V, L)
|
{"hexsha": "fd12a8fe18a68b47d91919cc89e962495b5ee98a", "size": 6346, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctc.py", "max_stars_repo_name": "robinn37/my_ctc", "max_stars_repo_head_hexsha": "616c0fa3963a4307907480e387af85f48428933a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-07T07:14:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T07:14:16.000Z", "max_issues_repo_path": "ctc.py", "max_issues_repo_name": "robinn37/my_ctc", "max_issues_repo_head_hexsha": "616c0fa3963a4307907480e387af85f48428933a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ctc.py", "max_forks_repo_name": "robinn37/my_ctc", "max_forks_repo_head_hexsha": "616c0fa3963a4307907480e387af85f48428933a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.935828877, "max_line_length": 82, "alphanum_fraction": 0.5836747558, "include": true, "reason": "import numpy", "num_tokens": 1950}
|
# Seed the RNG so the sampled data — and therefore the rendered reference
# plots below — are identical on every test run.
seed!(1337)
x = randn(10000)
@testset "default params" begin
p = @inferred histogram(x)
@test_reference(
"references/histogram/default.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
@test_reference(
"references/histogram/default_nocolor.txt",
@io2str(print(IOContext(::IO, :color=>false), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x, closed = :left)
@test_reference(
"references/histogram/default.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
hist = fit(Histogram, x, closed = :left)
p = @inferred histogram(hist)
@test_reference(
"references/histogram/default.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x*100)
@test_reference(
"references/histogram/default_1e2.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x*0.01)
@test_reference(
"references/histogram/default_1e-2.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x, xscale = log10)
@test_reference(
"references/histogram/log10.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x, xlabel = "custom label", xscale = log10)
@test_reference(
"references/histogram/log10_label.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram([0.1f0, 0.1f0, 0f0])
@test_reference(
"references/histogram/float32.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
end
@testset "hist params" begin
hist = fit(Histogram, x, nbins = 5, closed = :right)
p = @inferred histogram(hist)
@test_reference(
"references/histogram/hist_params.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(x, nbins = 5, closed = :right)
@test_reference(
"references/histogram/hist_params.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = nothing
p = @test_logs (:warn, r"`bins`.+deprecated") @inferred histogram(x, bins = 5, closed = :right)
@test_reference(
"references/histogram/hist_params.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = nothing
p = @test_logs (:warn, r"deprecated") @inferred histogram(x, 5, closed = :right)
@test_reference(
"references/histogram/hist_params.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
end
@testset "keyword arguments" begin
p = @inferred histogram(
x,
title = "My Histogram",
xlabel = "Absolute Frequency",
color = :blue,
margin = 7,
padding = 3,
)
@test_reference(
"references/histogram/parameters1.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(
x,
title = "My Histogram",
xlabel = "Absolute Frequency",
color = :blue,
margin = 7,
padding = 3,
labels = false,
)
@test_reference(
"references/histogram/parameters1_nolabels.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
p = @inferred histogram(
x,
title = "My Histogram",
xlabel = "Absolute Frequency",
color = :yellow,
border = :solid,
symb = "=",
width = 50
)
@test_reference(
"references/histogram/parameters2.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
# same but with Char as symb
p = @inferred histogram(
x,
title = "My Histogram",
xlabel = "Absolute Frequency",
color = :yellow,
border = :solid,
symb = '=',
width = 50
)
@test_reference(
"references/histogram/parameters2.txt",
@io2str(print(IOContext(::IO, :color=>true), p)),
render = BeforeAfterFull()
)
end
|
{"hexsha": "c6faab209645d9b248123fcddd673fd3ae0e49a7", "size": 4584, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/tst_histogram.jl", "max_stars_repo_name": "simonbyrne/UnicodePlots.jl", "max_stars_repo_head_hexsha": "625ba36b2dc402839a1401a6e90a650c7668eb06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/tst_histogram.jl", "max_issues_repo_name": "simonbyrne/UnicodePlots.jl", "max_issues_repo_head_hexsha": "625ba36b2dc402839a1401a6e90a650c7668eb06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/tst_histogram.jl", "max_forks_repo_name": "simonbyrne/UnicodePlots.jl", "max_forks_repo_head_hexsha": "625ba36b2dc402839a1401a6e90a650c7668eb06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.56, "max_line_length": 99, "alphanum_fraction": 0.5772251309, "num_tokens": 1214}
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import io
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from flask import Flask, request, jsonify
# Load the trained Keras model once at import time so every request reuses it.
# NOTE(review): "<your-model.h5>" is a placeholder path — point it at the real
# .h5 file before running the server.
model = keras.models.load_model("<your-model.h5>")
# Convert image to array
def transform_image(img):
    """Convert a PIL image into a model-ready float32 batch.

    The image is resized to 150x150, scaled to [0, 1], and given a leading
    batch dimension, yielding shape (1, 150, 150, channels).

    Fixes vs. the original: the throwaway `imgs` list (appended to and then
    immediately overwritten) and the redundant second resize via
    tf.image.resize to the same 150x150 size have been removed; the returned
    values are unchanged.
    """
    img = img.resize((150, 150))
    arr = img_to_array(img)
    arr = arr.astype(np.float32) / 255
    return np.expand_dims(arr, axis=0)
# Prediction function
def predict(x):
    """Run the loaded model on batch `x` and return the argmax class indices."""
    scores = model(x)
    return np.argmax(scores, axis=1)
# Initialize Flask server with error handling
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
file = request.files.get('file')
if file is None or file.filename == "":
return jsonify({"error": "no file"})
try:
image_bytes = file.read()
pillow_img = Image.open(io.BytesIO(image_bytes))
prediction = predict(transform_image(pillow_img))
data = {"prediction": int(prediction)}
return jsonify(data)
except Exception as e:
return jsonify({"error": str(e)})
return "OK"
if __name__ == "__main__":
app.run(debug=True)
|
{"hexsha": "b94302ca5181d94d729be9c2305647cbea931bc7", "size": 1589, "ext": "py", "lang": "Python", "max_stars_repo_path": "Cloud Computing Development/ml-flask-deployment/main.py", "max_stars_repo_name": "emnopal/bangkit-final-project", "max_stars_repo_head_hexsha": "b0642cb64d916a01101e35844fbf244bafdea4a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-03T14:07:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T08:00:36.000Z", "max_issues_repo_path": "Cloud Computing Development/ml-flask-deployment/main.py", "max_issues_repo_name": "emnopal/bangkit-final-project", "max_issues_repo_head_hexsha": "b0642cb64d916a01101e35844fbf244bafdea4a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cloud Computing Development/ml-flask-deployment/main.py", "max_forks_repo_name": "emnopal/bangkit-final-project", "max_forks_repo_head_hexsha": "b0642cb64d916a01101e35844fbf244bafdea4a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-06-03T12:50:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T07:08:27.000Z", "avg_line_length": 26.0491803279, "max_line_length": 92, "alphanum_fraction": 0.6173694147, "include": true, "reason": "import numpy", "num_tokens": 353}
|
[STATEMENT]
lemma set_child_nodes_pointers_preserved:
assumes "w \<in> set_child_nodes_locs object_ptr"
assumes "h \<turnstile> w \<rightarrow>\<^sub>h h'"
shows "object_ptr_kinds h = object_ptr_kinds h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. object_ptr_kinds h = object_ptr_kinds h'
[PROOF STEP]
using assms(1) object_ptr_kinds_preserved[OF writes_singleton2 assms(2)]
[PROOF STATE]
proof (prove)
using this:
w \<in> set_child_nodes_locs object_ptr
(\<And>h h' wa object_ptr. \<lbrakk>wa \<in> {w}; h \<turnstile> wa \<rightarrow>\<^sub>h h'\<rbrakk> \<Longrightarrow> preserved (get_M\<^sub>O\<^sub>b\<^sub>j\<^sub>e\<^sub>c\<^sub>t object_ptr RObject.nothing) h h') \<Longrightarrow> object_ptr_kinds h = object_ptr_kinds h'
goal (1 subgoal):
1. object_ptr_kinds h = object_ptr_kinds h'
[PROOF STEP]
by(auto simp add: set_child_nodes_locs_impl all_args_def a_set_child_nodes_locs_def
split: if_splits)
|
{"llama_tokens": 373, "file": "Core_SC_DOM_common_Core_DOM_Functions", "length": 2}
|
import unittest
from theano import theano, tensor as tt
import numpy as np
import pymc3 as pm
from pymc3.distributions import HalfCauchy, Normal
from pymc3 import Potential, Deterministic
from pymc3.theanof import generator
class NewModel(pm.Model):
    """Model subclass used by the tests below: defines two free variables, a
    Deterministic and a Potential in __init__ so the tests can verify they are
    registered (with the proper name prefix) on the enclosing/parent model."""
    def __init__(self, name='', model=None):
        super(NewModel, self).__init__(name, model)
        # entering __init__ must make this instance the active model context
        assert pm.modelcontext(None) is self
        # 1) init variables with Var method
        self.Var('v1', pm.Normal.dist())
        self.v2 = pm.Normal('v2', mu=0, sd=1)
        # 2) Potentials and Deterministic variables with method too
        # be sure that names will not overlap with other same models
        pm.Deterministic('d', tt.constant(1))
        pm.Potential('p', tt.constant(1))
class DocstringModel(pm.Model):
    """Model used by test_docstring_example*: three Normal variables (one with
    a HalfCauchy sd), a squared Deterministic and a constant Potential."""
    def __init__(self, mean=0, sd=1, name='', model=None):
        super(DocstringModel, self).__init__(name, model)
        self.Var('v1', Normal.dist(mu=mean, sd=sd))
        Normal('v2', mu=mean, sd=sd)
        Normal('v3', mu=mean, sd=HalfCauchy('sd', beta=10, testval=1.))
        Deterministic('v3_sq', self.v3 ** 2)
        Potential('p1', tt.constant(1))
class TestBaseModel(unittest.TestCase):
    """Tests for variable registration and name prefixing on (sub)models."""
    def test_setattr_properly_works(self):
        """Variables land on both the submodel and its parent model."""
        with pm.Model() as model:
            pm.Normal('v1')
            self.assertEqual(len(model.vars), 1)
            with pm.Model('sub') as submodel:
                submodel.Var('v1', pm.Normal.dist())
                self.assertTrue(hasattr(submodel, 'v1'))
                self.assertEqual(len(submodel.vars), 1)
            self.assertEqual(len(model.vars), 2)
            with submodel:
                submodel.Var('v2', pm.Normal.dist())
                self.assertTrue(hasattr(submodel, 'v2'))
                self.assertEqual(len(submodel.vars), 2)
            self.assertEqual(len(model.vars), 3)
    def test_context_passes_vars_to_parent_model(self):
        """Variables created inside a named submodel context are visible on
        the parent with the submodel's name prefix."""
        with pm.Model() as model:
            # a set of variables is created
            NewModel()
            # another set of variables are created but with prefix 'another'
            usermodel2 = NewModel(name='another')
            # you can enter in a context with submodel
            with usermodel2:
                usermodel2.Var('v3', pm.Normal.dist())
                pm.Normal('v4')
                # this variable is created in parent model too
        self.assertIn('another_v2', model.named_vars)
        self.assertIn('another_v3', model.named_vars)
        self.assertIn('another_v3', usermodel2.named_vars)
        self.assertIn('another_v4', model.named_vars)
        self.assertIn('another_v4', usermodel2.named_vars)
        self.assertTrue(hasattr(usermodel2, 'v3'))
        self.assertTrue(hasattr(usermodel2, 'v2'))
        self.assertTrue(hasattr(usermodel2, 'v4'))
        # When you create a class based model you should follow some rules
        with model:
            m = NewModel('one_more')
        # the same variable is reachable by attribute, short name and full name
        self.assertTrue(m.d is model['one_more_d'])
        self.assertTrue(m['d'] is model['one_more_d'])
        self.assertTrue(m['one_more_d'] is model['one_more_d'])
class TestNested(unittest.TestCase):
    """Tests for nested model contexts and class-based model usage.

    Fixes vs. the original: the two `assertTrue(len(...), 1)` calls passed the
    expected count as assertTrue's *msg* argument, so they never actually
    asserted the length — they are now `assertEqual(len(...), 1)`.
    """
    def test_nest_context_works(self):
        """Inside a nested model, that model is the active context; variables
        still propagate to the outer model."""
        with pm.Model() as m:
            new = NewModel()
            with new:
                self.assertTrue(
                    pm.modelcontext(None) is new
                )
            self.assertTrue(
                pm.modelcontext(None) is m
            )
        self.assertIn('v1', m.named_vars)
        self.assertIn('v2', m.named_vars)
    def test_named_context(self):
        """A named submodel prefixes its variables on the parent."""
        with pm.Model() as m:
            NewModel(name='new')
        self.assertIn('new_v1', m.named_vars)
        self.assertIn('new_v2', m.named_vars)
    def test_docstring_example1(self):
        usage1 = DocstringModel()
        self.assertIn('v1', usage1.named_vars)
        self.assertIn('v2', usage1.named_vars)
        self.assertIn('v3', usage1.named_vars)
        self.assertIn('v3_sq', usage1.named_vars)
        # DocstringModel registers exactly one Potential ('p1')
        self.assertEqual(len(usage1.potentials), 1)
    def test_docstring_example2(self):
        with pm.Model() as model:
            DocstringModel(name='prefix')
        self.assertIn('prefix_v1', model.named_vars)
        self.assertIn('prefix_v2', model.named_vars)
        self.assertIn('prefix_v3', model.named_vars)
        self.assertIn('prefix_v3_sq', model.named_vars)
        # the submodel's single Potential is registered on the parent
        self.assertEqual(len(model.potentials), 1)
    def test_duplicates_detection(self):
        """Registering two submodels under the same name must fail."""
        with pm.Model():
            DocstringModel(name='prefix')
            self.assertRaises(ValueError, DocstringModel, name='prefix')
    def test_model_root(self):
        """root always points at the outermost model."""
        with pm.Model() as model:
            self.assertTrue(model is model.root)
            with pm.Model() as sub:
                self.assertTrue(model is sub.root)
class TestScaling(unittest.TestCase):
    """Tests that `total_size` rescales observed log-probability to the full
    dataset size (minibatch density scaling)."""
    def test_density_scaling(self):
        # one observed row with total_size=2 should produce exactly twice the
        # logp of total_size=1
        with pm.Model() as model1:
            Normal('n', observed=[[1]], total_size=1)
            p1 = theano.function([], model1.logpt)
        with pm.Model() as model2:
            Normal('n', observed=[[1]], total_size=2)
            p2 = theano.function([], model2.logpt)
        self.assertEqual(p1() * 2, p2())
    def test_density_scaling_with_genarator(self):
        # We have different size generators
        def gen1():
            i = 0
            while True:
                yield np.ones((10, 100)) * i
                i += 1
        def gen2():
            i = 0
            while True:
                yield np.ones((20, 100)) * i
                i += 1
        # We have same size models
        with pm.Model() as model1:
            Normal('n', observed=gen1(), total_size=100)
            p1 = theano.function([], model1.logpt)
        with pm.Model() as model2:
            gen_var = generator(gen2())
            Normal('n', observed=gen_var, total_size=100)
            p2 = theano.function([], model2.logpt)
        # We want densities to be equal
        for _ in range(10):
            np.testing.assert_almost_equal(p1(), p2())
# Done
|
{"hexsha": "232d1a8090a2273791e9cdeb2d4f61e824501f8b", "size": 6059, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc3/tests/test_model.py", "max_stars_repo_name": "vpolisky/pymc3", "max_stars_repo_head_hexsha": "87cdd712c86321121c2ed3150764f3d847f5083c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pymc3/tests/test_model.py", "max_issues_repo_name": "vpolisky/pymc3", "max_issues_repo_head_hexsha": "87cdd712c86321121c2ed3150764f3d847f5083c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymc3/tests/test_model.py", "max_forks_repo_name": "vpolisky/pymc3", "max_forks_repo_head_hexsha": "87cdd712c86321121c2ed3150764f3d847f5083c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-13T10:31:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-13T10:31:26.000Z", "avg_line_length": 36.9451219512, "max_line_length": 76, "alphanum_fraction": 0.5913517082, "include": true, "reason": "import numpy,import theano,from theano,import pymc3,from pymc3", "num_tokens": 1427}
|
/*************************************************************************
* Copyright (C) 2017-2019 Barcelona Supercomputing Center *
* Centro Nacional de Supercomputacion *
* All rights reserved. *
* *
* This file is part of NORNS, a service that allows other programs to *
* start, track and manage asynchronous transfers of data resources *
* between different storage backends. *
* *
* See AUTHORS file in the top level directory for information regarding *
* developers and contributors. *
* *
* This software was developed as part of the EC H2020 funded project *
* NEXTGenIO (Project ID: 671951). *
* www.nextgenio.eu *
* *
* Permission is hereby granted, free of charge, to any person obtaining *
* a copy of this software and associated documentation files (the *
* "Software"), to deal in the Software without restriction, including *
* without limitation the rights to use, copy, modify, merge, publish, *
* distribute, sublicense, and/or sell copies of the Software, and to *
* permit persons to whom the Software is furnished to do so, subject to *
* the following conditions: *
* *
* The above copyright notice and this permission notice shall be *
* included in all copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, *
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF *
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND *
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS *
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN *
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN *
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*************************************************************************/
#include <boost/version.hpp>
#include <cmath>
#include <limits>
#include "task-stats.hpp"
#include "task-info.hpp"
#include "logger.hpp"
namespace norns {
namespace io {
// Construct the record for a single I/O task: immutable description fields
// (id, type, credentials, endpoints, context) plus mutable progress/status
// fields initialized to a pending, error-free state.
task_info::task_info(const iotask_id tid,
const iotask_type type,
const bool is_remote,
const auth::credentials& auth,
const backend_ptr src_backend,
const resource_info_ptr src_rinfo,
const backend_ptr dst_backend,
const resource_info_ptr dst_rinfo,
const boost::any& ctx) :
m_id(tid),
m_type(type),
m_is_remote(is_remote),
m_auth(auth),
m_src_backend(src_backend),
m_src_rinfo(src_rinfo),
m_dst_backend(dst_backend),
m_dst_rinfo(dst_rinfo),
m_ctx(ctx),
m_status(task_status::pending),
m_task_error(urd_error::success),
m_sys_error(),
// NaN marks "bandwidth not yet measured" (see bandwidth()).
m_bandwidth(std::numeric_limits<double>::quiet_NaN()),
m_sent_bytes(),
m_total_bytes() {
// without source resource info there is nothing to size
if(!src_rinfo) {
return;
}
std::error_code ec;
m_total_bytes = src_backend->get_size(src_rinfo, ec);
// make sure that m_total_bytes is 0 if an error occurred
if(ec) {
m_total_bytes = 0;
}
}
// Destructor: members release their own resources.
task_info::~task_info() { }

// -- identity and configuration accessors --------------------------------
// In this translation unit these fields are only written at construction
// time (except m_ctx, see set_context()), hence no locking here.

iotask_id task_info::id() const { return m_id; }

iotask_type task_info::type() const { return m_type; }

bool task_info::is_remote() const { return m_is_remote; }

auth::credentials task_info::auth() const { return m_auth; }

task_info::backend_ptr task_info::src_backend() const { return m_src_backend; }

task_info::resource_info_ptr task_info::src_rinfo() const { return m_src_rinfo; }

task_info::backend_ptr task_info::dst_backend() const { return m_dst_backend; }

task_info::resource_info_ptr task_info::dst_rinfo() const { return m_dst_rinfo; }

// Returns a copy of the opaque per-task context object.
boost::any task_info::context() const { return m_ctx; }
// Replace the opaque per-task context object.
// NOTE(review): m_ctx is read and written without taking m_mutex --
// presumably callers serialize context access externally; confirm.
void
task_info::set_context(const boost::any& ctx) {
m_ctx = ctx;
}
// Reset the context to the empty (valueless) state.
void
task_info::clear_context() {
// very old Boost releases lack boost::any::clear(); fall back to
// assigning a default-constructed (empty) any there
#if BOOST_VERSION <= 105500
m_ctx = boost::any();
#else
m_ctx.clear();
#endif
}
// Return the current task status (thread-safe: shared/reader lock).
task_status
task_info::status() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return m_status;
}
// Atomically replace the task status (exclusive/writer lock).
void
task_info::update_status(const task_status st) {
boost::unique_lock<boost::shared_mutex> lock(m_mutex);
m_status = st;
}
// Atomically replace the status together with the high-level task error
// and the underlying OS error code, so that readers never observe a
// partially-updated combination of the three fields.
void
task_info::update_status(const task_status st, const urd_error ec,
const std::error_code& sc) {
boost::unique_lock<boost::shared_mutex> lock(m_mutex);
m_status = st;
m_task_error = ec;
m_sys_error = sc;
}
urd_error
task_info::task_error() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return m_task_error;
}
std::error_code
task_info::sys_error() const {
return m_sys_error;
}
std::size_t
task_info::sent_bytes() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return m_sent_bytes;
}
std::size_t
task_info::total_bytes() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return m_total_bytes;
}
// Build a consistent snapshot of the task's externally visible statistics
// under a single shared lock; the last argument is the number of bytes
// still pending (total - sent).
// NOTE(review): if m_sent_bytes ever exceeded m_total_bytes (e.g. total
// unknown and reported as 0), the unsigned subtraction would wrap around;
// confirm that callers guarantee sent <= total.
task_stats
task_info::stats() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return task_stats(m_status, m_task_error, m_sys_error,
m_total_bytes, m_total_bytes - m_sent_bytes);
}
// Last measured bandwidth in MiB/s; NaN until the first sample is recorded.
double
task_info::bandwidth() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return m_bandwidth;
}
// Record a new bandwidth sample: 'bytes' transferred in 'usecs' microseconds.
// The stored value is MiB per second: (bytes / 2^20) / (usecs / 1e6).
// NOTE(review): usecs == 0 would produce +inf (or NaN for 0/0); presumably
// callers always pass a positive duration -- confirm.
void
task_info::update_bandwidth(std::size_t bytes, double usecs) {
boost::unique_lock<boost::shared_mutex> lock(m_mutex);
m_bandwidth = (static_cast<double>(bytes)/(1024*1024) / (usecs/1e6));
LOGGER_DEBUG("[{}] {}({}, {}) => {}", m_id, __FUNCTION__, bytes, usecs, m_bandwidth);
}
// Like update_bandwidth(), but additionally accumulates 'bytes' into the
// running sent-bytes counter under the same exclusive lock.
void
task_info::record_transfer(std::size_t bytes, double usecs) {
boost::unique_lock<boost::shared_mutex> lock(m_mutex);
m_sent_bytes += bytes;
m_bandwidth = (static_cast<double>(bytes)/(1024*1024) / (usecs/1e6));
LOGGER_DEBUG("[{}] {}({}, {}) => {}", m_id, __FUNCTION__, bytes, usecs, m_bandwidth);
}
// Hand out a shared (reader) lock on the task's mutex so that callers can
// perform several consistent reads; the lock is returned by move and is
// released when the caller's copy goes out of scope.
boost::shared_lock<boost::shared_mutex>
task_info::lock_shared() const {
boost::shared_lock<boost::shared_mutex> lock(m_mutex);
return lock;
}
// Hand out an exclusive (writer) lock on the task's mutex; see lock_shared().
boost::unique_lock<boost::shared_mutex>
task_info::lock_unique() const {
boost::unique_lock<boost::shared_mutex> lock(m_mutex);
return lock;
}
} // namespace io
} // namespace norns
|
{"hexsha": "8ff386624badf8711bc8f23ed918bbd1a494fc00", "size": 7143, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/io/task-info.cpp", "max_stars_repo_name": "bsc-ssrg/NORNS", "max_stars_repo_head_hexsha": "4fd2d181019eceadb8b1b04a94e3756476326239", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-11-11T11:34:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-08T02:13:48.000Z", "max_issues_repo_path": "src/io/task-info.cpp", "max_issues_repo_name": "bsc-ssrg/NORNS", "max_issues_repo_head_hexsha": "4fd2d181019eceadb8b1b04a94e3756476326239", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/io/task-info.cpp", "max_forks_repo_name": "bsc-ssrg/NORNS", "max_forks_repo_head_hexsha": "4fd2d181019eceadb8b1b04a94e3756476326239", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2669491525, "max_line_length": 89, "alphanum_fraction": 0.5893882122, "num_tokens": 1523}
|
theory HOL_Specific
imports Base "~~/src/HOL/Library/Old_Datatype" "~~/src/HOL/Library/Old_Recdef"
"~~/src/Tools/Adhoc_Overloading"
begin
chapter \<open>Higher-Order Logic\<close>
text \<open>Isabelle/HOL is based on Higher-Order Logic, a polymorphic
version of Church's Simple Theory of Types. HOL can be best
understood as a simply-typed version of classical set theory. The
logic was first implemented in Gordon's HOL system
@{cite "mgordon-hol"}. It extends Church's original logic
@{cite "church40"} by explicit type variables (naive polymorphism) and
a sound axiomatization scheme for new types based on subsets of
existing types.
Andrews's book @{cite andrews86} is a full description of the
original Church-style higher-order logic, with proofs of correctness
and completeness wrt.\ certain set-theoretic interpretations. The
particular extensions of Gordon-style HOL are explained semantically
in two chapters of the 1993 HOL book @{cite pitts93}.
Experience with HOL over decades has demonstrated that higher-order
logic is widely applicable in many areas of mathematics and computer
science. In a sense, Higher-Order Logic is simpler than First-Order
Logic, because there are fewer restrictions and special cases. Note
that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
which is traditionally considered the standard foundation of regular
mathematics, but for most applications this does not matter. If you
prefer ML to Lisp, you will probably prefer HOL to ZF.
\medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
functional programming. Function application is curried. To apply
the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
arguments @{text a} and @{text b} in HOL, you simply write @{text "f
a b"} (as in ML or Haskell). There is no ``apply'' operator; the
existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
the pair @{text "(a, b)"}'' (which is notation for @{text "Pair a
b"}). The latter typically introduces extra formal efforts that can
be avoided by currying functions by default. Explicit tuples are as
infrequent in HOL formalizations as in good ML or Haskell programs.
\medskip Isabelle/HOL has a distinct feel, compared to other
object-logics like Isabelle/ZF. It identifies object-level types
with meta-level types, taking advantage of the default
type-inference mechanism of Isabelle/Pure. HOL fully identifies
object-level functions with meta-level functions, with native
abstraction and application.
These identifications allow Isabelle to support HOL particularly
nicely, but they also mean that HOL requires some sophistication
from the user. In particular, an understanding of Hindley-Milner
type-inference with type-classes, which are both used extensively in
the standard libraries and applications. Beginners can set
@{attribute show_types} or even @{attribute show_sorts} to get more
explicit information about the result of type-inference.\<close>
chapter \<open>Derived specification elements\<close>
section \<open>Inductive and coinductive definitions \label{sec:hol-inductive}\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def "print_inductives"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{attribute_def (HOL) mono} & : & @{text attribute} \\
\end{matharray}
An \emph{inductive definition} specifies the least predicate or set
@{text R} closed under given rules: applying a rule to elements of
@{text R} yields a result within @{text R}. For example, a
structural operational semantics is an inductive definition of an
evaluation relation.
Dually, a \emph{coinductive definition} specifies the greatest
predicate or set @{text R} that is consistent with given rules:
every element of @{text R} can be seen as arising by applying a rule
to elements of @{text R}. An important example is using
bisimulation relations to formalise equivalence of processes and
infinite data structures.
Both inductive and coinductive definitions are based on the
Knaster-Tarski fixed-point theorem for complete lattices. The
collection of introduction rules given by the user determines a
functor on subsets of set-theoretic relations. The required
monotonicity of the recursion scheme is proven as a prerequisite to
the fixed-point definition and the resulting consequences. This
works by pushing inclusion through logical connectives and any other
operator that might be wrapped around recursive occurrences of the
defined relation: there must be a monotonicity theorem of the form
@{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
introduction rule. The default rule declarations of Isabelle/HOL
already take care of most common situations.
@{rail \<open>
(@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
@@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
@{syntax target}? \<newline>
@{syntax "fixes"} (@'for' @{syntax "fixes"})? (@'where' clauses)? \<newline>
(@'monos' @{syntax thmrefs})?
;
clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
;
@@{attribute (HOL) mono} (() | 'add' | 'del')
\<close>}
\begin{description}
\item @{command (HOL) "inductive"} and @{command (HOL)
"coinductive"} define (co)inductive predicates from the introduction
rules.
The propositions given as @{text "clauses"} in the @{keyword
"where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
(with arbitrary nesting), or equalities using @{text "\<equiv>"}. The
latter specifies extra-logical abbreviations in the sense of
@{command_ref abbreviation}. Introducing abstract syntax
simultaneously with the actual introduction rules is occasionally
useful for complex specifications.
The optional @{keyword "for"} part contains a list of parameters of
the (co)inductive predicates that remain fixed throughout the
definition, in contrast to arguments of the relation that may vary
in each occurrence within the given @{text "clauses"}.
The optional @{keyword "monos"} declaration contains additional
\emph{monotonicity theorems}, which are required for each operator
applied to a recursive set in the introduction rules.
\item @{command (HOL) "inductive_set"} and @{command (HOL)
"coinductive_set"} are wrappers for the previous commands for
native HOL predicates. This allows defining (co)inductive sets,
where multiple arguments are simulated via tuples.
\item @{command "print_inductives"} prints (co)inductive definitions and
monotonicity rules.
\item @{attribute (HOL) mono} declares monotonicity rules in the
context. These rules are involved in the automated monotonicity
proof of the above inductive and coinductive definitions.
\end{description}
\<close>
subsection \<open>Derived rules\<close>
text \<open>A (co)inductive definition of @{text R} provides the following
main theorems:
\begin{description}
\item @{text R.intros} is the list of introduction rules as proven
theorems, for the recursive predicates (or sets). The rules are
also available individually, using the names given them in the
theory file;
\item @{text R.cases} is the case analysis (or elimination) rule;
\item @{text R.induct} or @{text R.coinduct} is the (co)induction
rule;
\item @{text R.simps} is the equation unrolling the fixpoint of the
predicate one step.
\end{description}
When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
defined simultaneously, the list of introduction rules is called
@{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
of mutual induction rules is called @{text
"R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
\<close>
subsection \<open>Monotonicity theorems\<close>
text \<open>The context maintains a default set of theorems that are used
in monotonicity proofs. New rules can be declared via the
@{attribute (HOL) mono} attribute. See the main Isabelle/HOL
sources for some examples. The general format of such monotonicity
theorems is as follows:
\begin{itemize}
\item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
monotonicity of inductive definitions whose introduction rules have
premises involving terms such as @{text "\<M> R t"}.
\item Monotonicity theorems for logical operators, which are of the
general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}. For example, in
the case of the operator @{text "\<or>"}, the corresponding theorem is
\[
\infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
\]
\item De Morgan style equations for reasoning about the ``polarity''
of expressions, e.g.
\[
@{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
@{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
\]
\item Equations for reducing complex operators to more primitive
ones whose monotonicity can easily be proved, e.g.
\[
@{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
@{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
\]
\end{itemize}
\<close>
subsubsection \<open>Examples\<close>
text \<open>The finite powerset operator can be defined inductively like this:\<close>
(* Fin A: the set of all finite subsets of A, generated inductively from
   the empty set by insertion of elements of A. *)
inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
where
empty: "{} \<in> Fin A"
| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
text \<open>The accessible part of a relation is defined as follows:\<close>
(* acc r x: x lies in the accessible (well-founded) part of relation r;
   an element is accessible iff all its \<prec>-predecessors are. *)
inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<prec>" 50)
where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
text \<open>Common logical connectives can be easily characterized as
non-recursive inductive definitions with parameters, but without
arguments.\<close>
(* Non-recursive inductive characterizations of conjunction, disjunction
   and existential quantification via their introduction rules. *)
inductive AND for A B :: bool
where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
inductive OR for A B :: bool
where "A \<Longrightarrow> OR A B"
| "B \<Longrightarrow> OR A B"
inductive EXISTS for B :: "'a \<Rightarrow> bool"
where "B a \<Longrightarrow> EXISTS B"
text \<open>Here the @{text "cases"} or @{text "induct"} rules produced by
the @{command inductive} package coincide with the expected
elimination rules for Natural Deduction. Already in the original
article by Gerhard Gentzen @{cite "Gentzen:1935"} there is a hint that
each connective can be characterized by its introductions, and the
elimination can be constructed systematically.\<close>
section \<open>Recursive functions \label{sec:recursion}\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
@{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
@{command_def (HOL) "fun_cases"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) primrec} @{syntax target}? @{syntax "fixes"} @'where' equations
;
(@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
@{syntax "fixes"} \<newline> @'where' equations
;
equations: (@{syntax thmdecl}? @{syntax prop} + '|')
;
functionopts: '(' (('sequential' | 'domintros') + ',') ')'
;
@@{command (HOL) termination} @{syntax term}?
;
@@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
\<close>}
\begin{description}
\item @{command (HOL) "primrec"} defines primitive recursive
functions over datatypes (see also @{command_ref (HOL) datatype}).
The given @{text equations} specify reduction rules that are produced
by instantiating the generic combinator for primitive recursion that
is available for each datatype.
Each equation needs to be of the form:
@{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
such that @{text C} is a datatype constructor, @{text rhs} contains
only the free variables on the left-hand side (or from the context),
and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}. At most one
reduction rule for each constructor can be given. The order does
not matter. For missing constructors, the function is defined to
return a default value, but this equation is made difficult to
access for users.
The reduction rules are declared as @{attribute simp} by default,
which enables standard proof methods like @{method simp} and
@{method auto} to normalize expressions of @{text "f"} applied to
datatype constructions, by simulating symbolic computation via
rewriting.
\item @{command (HOL) "function"} defines functions by general
wellfounded recursion. A detailed description with examples can be
found in @{cite "isabelle-function"}. The function is specified by a
set of (possibly conditional) recursive equations with arbitrary
pattern matching. The command generates proof obligations for the
completeness and the compatibility of patterns.
The defined function is considered partial, and the resulting
simplification rules (named @{text "f.psimps"}) and induction rule
(named @{text "f.pinduct"}) are guarded by a generated domain
predicate @{text "f_dom"}. The @{command (HOL) "termination"}
command can then be used to establish that the function is total.
\item @{command (HOL) "fun"} is a shorthand notation for ``@{command
(HOL) "function"}~@{text "(sequential)"}'', followed by automated
proof attempts regarding pattern matching and termination. See
@{cite "isabelle-function"} for further details.
\item @{command (HOL) "termination"}~@{text f} commences a
termination proof for the previously defined function @{text f}. If
this is omitted, the command refers to the most recent function
definition. After the proof is closed, the recursive equations and
the induction principle are established.
\item @{command (HOL) "fun_cases"} generates specialized elimination
rules for function equations. It expects one or more function equations
and produces rules that eliminate the given equalities, following the cases
given in the function definition.
\end{description}
Recursive definitions introduced by the @{command (HOL) "function"}
command accommodate reasoning by induction (cf.\ @{method induct}):
rule @{text "f.induct"} refers to a specific induction rule, with
parameters named according to the user-specified equations. Cases
are numbered starting from 1. For @{command (HOL) "primrec"}, the
induction principle coincides with structural recursion on the
datatype where the recursion is carried out.
The equations provided by these packages may be referred later as
theorem list @{text "f.simps"}, where @{text f} is the (collective)
name of the functions defined. Individual equations may be named
explicitly as well.
The @{command (HOL) "function"} command accepts the following
options.
\begin{description}
\item @{text sequential} enables a preprocessor which disambiguates
overlapping patterns by making them mutually disjoint. Earlier
equations take precedence over later ones. This allows to give the
specification in a format very similar to functional programming.
Note that the resulting simplification and induction rules
correspond to the transformed specification, not the one given
originally. This usually means that each equation given by the user
may result in several theorems. Also note that this automatic
transformation only works for ML-style datatype patterns.
\item @{text domintros} enables the automated generation of
introduction rules for the domain predicate. While mostly not
needed, they can be helpful in some proofs about partial functions.
\end{description}
\<close>
subsubsection \<open>Example: evaluation of expressions\<close>
text \<open>Subsequently, we define mutual datatypes for arithmetic and
boolean expressions, and use @{command primrec} for evaluation
functions that follow the same recursive structure.\<close>
(* Mutually recursive syntax of arithmetic expressions (including a
   boolean conditional) and boolean expressions over variables of type 'a. *)
datatype 'a aexp =
IF "'a bexp" "'a aexp" "'a aexp"
| Sum "'a aexp" "'a aexp"
| Diff "'a aexp" "'a aexp"
| Var 'a
| Num nat
and 'a bexp =
Less "'a aexp" "'a aexp"
| And "'a bexp" "'a bexp"
| Neg "'a bexp"
text \<open>\medskip Evaluation of arithmetic and boolean expressions\<close>
(* Mutually recursive evaluators for the two expression types; the first
   argument is an environment mapping variables to nat values. *)
primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
where
"evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
| "evala env (Var v) = env v"
| "evala env (Num n) = n"
| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
| "evalb env (Neg b) = (\<not> evalb env b)"
text \<open>Since the value of an expression depends on the value of its
variables, the functions @{const evala} and @{const evalb} take an
additional parameter, an \emph{environment} that maps variables to
their values.
\medskip Substitution on expressions can be defined similarly. The
mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
parameter is lifted canonically on the types @{typ "'a aexp"} and
@{typ "'a bexp"}, respectively.
\<close>
(* Simultaneous substitution: replace every variable occurrence by the
   expression f assigns to it, lifted homomorphically over both types. *)
primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
where
"substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
| "substa f (Var v) = f v"
| "substa f (Num n) = Num n"
| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
| "substb f (Neg b) = Neg (substb f b)"
text \<open>In textbooks about semantics one often finds substitution
theorems, which express the relationship between substitution and
evaluation. For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
such a theorem by mutual induction, followed by simplification.
\<close>
(* Substitution lemmas: substituting and then evaluating coincides with
   evaluating in a suitably updated environment; both are proved by
   mutual structural induction followed by simplification. *)
lemma subst_one:
"evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
"evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
by (induct a and b) simp_all
lemma subst_all:
"evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
"evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
by (induct a and b) simp_all
subsubsection \<open>Example: a substitution function for terms\<close>
text \<open>Functions on datatypes with nested recursion are also defined
by mutual primitive recursion.\<close>
(* First-order terms with nested recursion through the list type. *)
datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
text \<open>A substitution function on type @{typ "('a, 'b) term"} can be
defined as follows, by working simultaneously on @{typ "('a, 'b)
term list"}:\<close>
(* Substitution on terms, defined by mutual primitive recursion on terms
   and term lists (mirroring the nested datatype structure). *)
primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
where
"subst_term f (Var a) = f a"
| "subst_term f (App b ts) = App b (subst_term_list f ts)"
| "subst_term_list f [] = []"
| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
text \<open>The recursion scheme follows the structure of the unfolded
definition of type @{typ "('a, 'b) term"}. To prove properties of this
substitution function, mutual induction is needed:
\<close>
(* Composition of substitutions, proved by mutual induction over terms
   and term lists using the recursion-induced induction rules. *)
lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
"subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
by (induct t and ts rule: subst_term.induct subst_term_list.induct) simp_all
subsubsection \<open>Example: a map function for infinitely branching trees\<close>
text \<open>Defining functions on infinitely branching datatypes by
primitive recursion is just as easy.
\<close>
(* Infinitely branching trees (a Branch holds a nat-indexed family of
   subtrees), and the corresponding map function by primitive recursion. *)
datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
where
"map_tree f (Atom a) = Atom (f a)"
| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
text \<open>Note that all occurrences of functions such as @{text ts}
above must be applied to an argument. In particular, @{term
"map_tree f \<circ> ts"} is not allowed here.\<close>
text \<open>Here is a simple composition lemma for @{term map_tree}:\<close>
(* map_tree distributes over function composition; structural induction. *)
lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
by (induct t) simp_all
subsection \<open>Proof methods related to recursive definitions\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) pat_completeness} & : & @{text method} \\
@{method_def (HOL) relation} & : & @{text method} \\
@{method_def (HOL) lexicographic_order} & : & @{text method} \\
@{method_def (HOL) size_change} & : & @{text method} \\
@{method_def (HOL) induction_schema} & : & @{text method} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) relation} @{syntax term}
;
@@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
;
@@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
;
@@{method (HOL) induction_schema}
;
orders: ( 'max' | 'min' | 'ms' ) *
\<close>}
\begin{description}
\item @{method (HOL) pat_completeness} is a specialized method to
solve goals regarding the completeness of pattern matching, as
required by the @{command (HOL) "function"} package (cf.\
@{cite "isabelle-function"}).
\item @{method (HOL) relation}~@{text R} introduces a termination
proof using the relation @{text R}. The resulting proof state will
contain goals expressing that @{text R} is wellfounded, and that the
arguments of recursive calls decrease with respect to @{text R}.
Usually, this method is used as the initial proof step of manual
termination proofs.
\item @{method (HOL) "lexicographic_order"} attempts a fully
automated termination proof by searching for a lexicographic
combination of size measures on the arguments of the function. The
method accepts the same arguments as the @{method auto} method,
which it uses internally to prove local descents. The @{syntax
clasimpmod} modifiers are accepted (as for @{method auto}).
In case of failure, extensive information is printed, which can help
to analyse the situation (cf.\ @{cite "isabelle-function"}).
\item @{method (HOL) "size_change"} also works on termination goals,
using a variation of the size-change principle, together with a
graph decomposition technique (see @{cite krauss_phd} for details).
Three kinds of orders are used internally: @{text max}, @{text min},
and @{text ms} (multiset), which is only available when the theory
@{text Multiset} is loaded. When no order kinds are given, they are
tried in order. The search for a termination proof uses SAT solving
internally.
For local descent proofs, the @{syntax clasimpmod} modifiers are
accepted (as for @{method auto}).
\item @{method (HOL) induction_schema} derives user-specified
induction rules from well-founded induction and completeness of
patterns. This factors out some operations that are done internally
by the function package and makes them available separately. See
@{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
\end{description}
\<close>
subsection \<open>Functions with explicit partiality\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) partial_function} @{syntax target}?
'(' @{syntax nameref} ')' @{syntax "fixes"} \<newline>
@'where' @{syntax thmdecl}? @{syntax prop}
\<close>}
\begin{description}
\item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
recursive functions based on fixpoints in complete partial
orders. No termination proof is required from the user or
constructed internally. Instead, the possibility of non-termination
is modelled explicitly in the result type, which contains an
explicit bottom element.
Pattern matching and mutual recursion are currently not supported.
Thus, the specification consists of a single function described by a
single recursive equation.
There are no fixed syntactic restrictions on the body of the
function, but the induced functional must be provably monotonic
wrt.\ the underlying order. The monotonicity proof is performed
internally, and the definition is rejected when it fails. The proof
can be influenced by declaring hints using the
@{attribute (HOL) partial_function_mono} attribute.
The mandatory @{text mode} argument specifies the mode of operation
of the command, which directly corresponds to a complete partial
order on the result type. By default, the following modes are
defined:
\begin{description}
\item @{text option} defines functions that map into the @{type
option} type. Here, the value @{term None} is used to model a
non-terminating computation. Monotonicity requires that if @{term
None} is returned by a recursive call, then the overall result must
also be @{term None}. This is best achieved through the use of the
monadic operator @{const "Option.bind"}.
\item @{text tailrec} defines functions with an arbitrary result
type and uses the slightly degenerated partial order where @{term
"undefined"} is the bottom element. Now, monotonicity requires that
if @{term undefined} is returned by a recursive call, then the
overall result must also be @{term undefined}. In practice, this is
only satisfied when each recursive call is a tail call, whose result
is directly returned. Thus, this mode of operation allows the
definition of arbitrary tail-recursive functions.
\end{description}
Experienced users may define new modes by instantiating the locale
@{const "partial_function_definitions"} appropriately.
\item @{attribute (HOL) partial_function_mono} declares rules for
use in the internal monotonicity proofs of partial function
definitions.
\end{description}
\<close>
subsection \<open>Old-style recursive function definitions (TFL)\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
\end{matharray}
The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
"recdef_tc"} for defining recursive functions are mostly obsolete;
@{command (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
@{rail \<open>
@@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
@{syntax name} @{syntax term} (@{syntax prop} +) hints?
;
recdeftc @{syntax thmdecl}? tc
;
hints: '(' @'hints' ( recdefmod * ) ')'
;
recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
(() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
;
tc: @{syntax nameref} ('(' @{syntax nat} ')')?
\<close>}
\begin{description}
\item @{command (HOL) "recdef"} defines general well-founded
recursive functions (using the TFL package), see also
@{cite "isabelle-HOL"}. The ``@{text "(permissive)"}'' option tells
TFL to recover from failed proof attempts, returning unfinished
results. The @{text recdef_simp}, @{text recdef_cong}, and @{text
recdef_wf} hints refer to auxiliary rules to be used in the internal
automated proof process of TFL. Additional @{syntax clasimpmod}
declarations may be given to tune the context of the Simplifier
(cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
\secref{sec:classical}).
\item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
proof for leftover termination condition number @{text i} (default
1) as generated by a @{command (HOL) "recdef"} definition of
constant @{text c}.
Note that in most cases, @{command (HOL) "recdef"} is able to finish
its internal proofs without manual intervention.
\end{description}
\medskip Hints for @{command (HOL) "recdef"} may be also declared
globally, using the following attributes.
\begin{matharray}{rcl}
@{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
@{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
@{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
\end{matharray}
@{rail \<open>
(@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
@@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
\<close>}
\<close>
section \<open>Old-style datatypes \label{sec:hol-datatype}\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "old_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "old_rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) old_datatype} (spec + @'and')
;
@@{command (HOL) old_rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
;
spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
;
cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
\<close>}
\begin{description}
\item @{command (HOL) "old_datatype"} defines old-style inductive
datatypes in HOL.
\item @{command (HOL) "old_rep_datatype"} represents existing types as
old-style datatypes.
\end{description}
These commands are mostly obsolete; @{command (HOL) "datatype"}
should be used instead.
See @{cite "isabelle-HOL"} for more details on datatypes, but beware of
the old-style theory syntax being used there! Apart from proper
proof methods for case-analysis and induction, there are also
emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
induct_tac} available, see \secref{sec:hol-induct-tac}; these admit
to refer directly to the internal structure of subgoals (including
internally bound parameters).
\<close>
subsubsection \<open>Examples\<close>
text \<open>We define a type of finite sequences, with slightly different
names than the existing @{typ "'a list"} that is already in @{theory
Main}:\<close>
(* A fresh type of finite sequences, isomorphic to @{typ "'a list"}:
   Empty is the empty sequence, Seq prepends an element. *)
datatype 'a seq = Empty | Seq 'a "'a seq"
text \<open>We can now prove some simple lemma by structural induction:\<close>
(* Proof by structural induction on xs; x is generalized so that the
   induction hypothesis applies to an arbitrary head element. *)
lemma "Seq x xs \<noteq> xs"
proof (induct xs arbitrary: x)
case Empty
txt \<open>This case can be proved using the simplifier: the freeness
properties of the datatype are already declared as @{attribute
simp} rules.\<close>
show "Seq x Empty \<noteq> Empty"
by simp
next
case (Seq y ys)
txt \<open>The step case is proved similarly.\<close>
show "Seq x (Seq y ys) \<noteq> Seq y ys"
using \<open>Seq y ys \<noteq> ys\<close> by simp
qed
text \<open>Here is a more succinct version of the same proof:\<close>
(* The same statement again, with both induction cases discharged
   uniformly by simp_all. *)
lemma "Seq x xs \<noteq> xs"
by (induct xs arbitrary: x) simp_all
section \<open>Records \label{sec:hol-record}\<close>
text \<open>
In principle, records merely generalize the concept of tuples, where
components may be addressed by labels instead of just position. The
logical infrastructure of records in Isabelle/HOL is slightly more
advanced, though, supporting truly extensible record schemes. This
admits operations that are polymorphic with respect to record
extension, yielding ``object-oriented'' effects like (single)
inheritance. See also @{cite "NaraschewskiW-TPHOLs98"} for more
details on object-oriented verification and record subtyping in HOL.
\<close>
subsection \<open>Basic concepts\<close>
text \<open>
Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
at the level of terms and types. The notation is as follows:
\begin{center}
\begin{tabular}{l|l|l}
& record terms & record types \\ \hline
fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
@{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
\end{tabular}
\end{center}
\noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
"(| x = a |)"}.
A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
@{text a} and field @{text y} of value @{text b}. The corresponding
type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
and @{text "b :: B"}.
A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
@{text x} and @{text y} as before, but also possibly further fields
as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
of the syntax). The improper field ``@{text "\<dots>"}'' of a record
scheme is called the \emph{more part}. Logically it is just a free
variable, which is occasionally referred to as ``row variable'' in
the literature. The more part of a record scheme may be
instantiated by zero or more further components. For example, the
previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
Fixed records are special instances of record schemes, where
``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
element. In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
\medskip Two key observations make extensible records in a simply
typed language like HOL work out:
\begin{enumerate}
\item the more part is internalized, as a free term or type
variable,
\item field names are externalized, they cannot be accessed within
the logic as first-class values.
\end{enumerate}
\medskip In Isabelle/HOL record types have to be defined explicitly,
fixing their field names and types, and their (optional) parent
record. Afterwards, records may be formed using above syntax, while
obeying the canonical order of fields as given by their declaration.
The record package provides several standard operations like
selectors and updates. The common setup for various generic proof
tools enable succinct reasoning patterns. See also the Isabelle/HOL
tutorial @{cite "isabelle-hol-book"} for further instructions on using
records in practice.
\<close>
subsection \<open>Record specifications\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) record} @{syntax typespec_sorts} '=' \<newline>
(@{syntax type} '+')? (constdecl +)
;
constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
\<close>}
\begin{description}
\item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
\<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
derived from the optional parent record @{text "\<tau>"} by adding new
field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
\<alpha>\<^sub>m"}. Type constructor @{text t} has to be new, while @{text
\<tau>} needs to specify an instance of an existing record type. At
least one new field @{text "c\<^sub>i"} has to be specified.
Basically, field names need to belong to a unique record. This is
not a real restriction in practice, since fields are qualified by
the record name internally.
The parent record specification @{text \<tau>} is optional; if omitted
@{text t} becomes a root record. The hierarchy of all records
declared within a theory context forms a forest structure, i.e.\ a
set of trees starting with a root record each. There is no way to
merge multiple parent records!
For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
\<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
"(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
@{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
\<zeta>\<rparr>"}.
\end{description}
\<close>
subsection \<open>Record operations\<close>
text \<open>
Any record definition of the form presented above produces certain
standard operations. Selectors and updates are provided for any
field, including the improper one ``@{text more}''. There are also
cumulative record constructor functions. To simplify the
presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
\<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
\<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
\medskip \textbf{Selectors} and \textbf{updates} are available for
any field (including ``@{text more}''):
\begin{matharray}{lll}
@{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
@{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
\end{matharray}
There is special syntax for application of updates: @{text "r\<lparr>x :=
a\<rparr>"} abbreviates term @{text "x_update a r"}. Further notation for
repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}. Note that
because of postfix notation the order of fields shown here is
reverse than in the actual term. Since repeated updates are just
function applications, fields may be freely permuted in @{text "\<lparr>x
:= a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
Thus commutativity of independent updates can be proven within the
logic for any two fields, but not as a general theorem.
\medskip The \textbf{make} operation provides a cumulative record
constructor function:
\begin{matharray}{lll}
@{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
\end{matharray}
\medskip We now reconsider the case of non-root records, which are
derived of some parent. In general, the latter may depend on
another parent as well, resulting in a list of \emph{ancestor
records}. Appending the lists of fields of all ancestors results in
a certain field prefix. The record package automatically takes care
of this by lifting operations over this context of ancestor fields.
Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
the above record operations will get the following types:
\medskip
\begin{tabular}{lll}
@{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
@{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
@{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
\end{tabular}
\medskip
\noindent Some further operations address the extension aspect of a
derived record scheme specifically: @{text "t.fields"} produces a
record fragment consisting of exactly the new fields introduced here
(the result may serve as a more part elsewhere); @{text "t.extend"}
takes a fixed record and adds a given more part; @{text
"t.truncate"} restricts a record scheme to a fixed record.
\medskip
\begin{tabular}{lll}
@{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
@{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
\<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
@{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
\end{tabular}
\medskip
\noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
for root records.
\<close>
subsection \<open>Derived rules and proof tools\<close>
text \<open>
The record package proves several results internally, declaring
these facts to appropriate proof tools. This enables users to
reason about record structures quite conveniently. Assume that
@{text t} is a record type as specified above.
\begin{enumerate}
\item Standard conversions for selectors or updates applied to
record constructor terms are made part of the default Simplifier
context; thus proofs by reduction of basic operations merely require
the @{method simp} method without further arguments. These rules
are available as @{text "t.simps"}, too.
\item Selectors applied to updated records are automatically reduced
by an internal simplification procedure, which is also part of the
standard Simplifier setup.
\item Inject equations of a form analogous to @{prop "(x, y) = (x',
y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
Reasoner as @{attribute iff} rules. These rules are available as
@{text "t.iffs"}.
\item The introduction rule for record equality analogous to @{text
"x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
and as the basic rule context as ``@{attribute intro}@{text "?"}''.
The rule is called @{text "t.equality"}.
\item Representations of arbitrary record expressions as canonical
constructor terms are provided both in @{method cases} and @{method
induct} format (cf.\ the generic proof methods of the same name,
\secref{sec:cases-induct}). Several variations are available, for
fixed records, record schemes, more parts etc.
The generic proof methods are sufficiently smart to pick the most
sensible rule according to the type of the indicated record
expression: users just need to apply something like ``@{text "(cases
r)"}'' to a certain proof problem.
\item The derived record operations @{text "t.make"}, @{text
"t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
treated automatically, but usually need to be expanded by hand,
using the collective fact @{text "t.defs"}.
\end{enumerate}
\<close>
subsubsection \<open>Examples\<close>
text \<open>See @{file "~~/src/HOL/ex/Records.thy"}, for example.\<close>
section \<open>Typedef axiomatization \label{sec:hol-typedef}\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
\end{matharray}
A Gordon/HOL-style type definition is a certain axiom scheme that
identifies a new type with a subset of an existing type. More
precisely, the new type is defined by exhibiting an existing type
@{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
@{prop "\<exists>x. x \<in> A"}. Thus @{text A} is a non-empty subset of @{text
\<tau>}, and the new type denotes this subset. New functions are
postulated that establish an isomorphism between the new type and
the subset. In general, the type @{text \<tau>} may involve type
variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
those type arguments.
The axiomatization can be considered a ``definition'' in the sense of the
particular set-theoretic interpretation of HOL @{cite pitts93}, where the
universe of types is required to be downwards-closed wrt.\ arbitrary
non-empty subsets. Thus genuinely new types introduced by @{command
"typedef"} stay within the range of HOL models by construction.
In contrast, the command @{command_ref type_synonym} from Isabelle/Pure
merely introduces syntactic abbreviations, without any logical
significance. Thus it is more faithful to the idea of a genuine type
definition, but less powerful in practice.
@{rail \<open>
@@{command (HOL) typedef} abs_type '=' rep_set
;
abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
;
rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
\<close>}
\begin{description}
\item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"} produces an
axiomatization (\secref{sec:axiomatizations}) for a type definition in the
background theory of the current context, depending on a non-emptiness
result of the set @{text A} that needs to be proven here. The set @{text
A} may contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the
LHS, but no term variables.
Even though a local theory specification, the newly introduced type
constructor cannot depend on parameters or assumptions of the
context: this is structurally impossible in HOL. In contrast, the
non-emptiness proof may use local assumptions in unusual situations,
which could result in different interpretations in target contexts:
the meaning of the bijection between the representing set @{text A}
and the new type @{text t} may then change in different application
contexts.
For @{command (HOL) "typedef"}~@{text "t = A"} the newly introduced
type @{text t} is accompanied by a pair of morphisms to relate it to
the representing set over the old type. By default, the injection
from type to set is called @{text Rep_t} and its inverse @{text
Abs_t}. An explicit @{keyword (HOL) "morphisms"} specification
allows to provide alternative names.
The core axiomatization uses the locale predicate @{const
type_definition} as defined in Isabelle/HOL. Various basic
consequences of that are instantiated accordingly, re-using the
locale facts with names derived from the new type constructor. Thus
the generic @{thm type_definition.Rep} is turned into the specific
@{text "Rep_t"}, for example.
Theorems @{thm type_definition.Rep}, @{thm
type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
provide the most basic characterization as a corresponding
injection/surjection pair (in both directions). The derived rules
@{thm type_definition.Rep_inject} and @{thm
type_definition.Abs_inject} provide a more convenient version of
injectivity, suitable for automated proof tools (e.g.\ in
declarations involving @{attribute simp} or @{attribute iff}).
Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
@{thm type_definition.Abs_induct} provide alternative views on
surjectivity. These rules are already declared as set or type rules
for the generic @{method cases} and @{method induct} methods,
respectively.
\end{description}
\<close>
subsubsection \<open>Examples\<close>
text \<open>Type definitions permit the introduction of abstract data
types in a safe way, namely by providing models based on already
existing types. Given some abstract axiomatic description @{text P}
of a type, this involves two steps:
\begin{enumerate}
\item Find an appropriate type @{text \<tau>} and subset @{text A} which
has the desired properties @{text P}, and make a type definition
based on this representation.
\item Prove that @{text P} holds for @{text \<tau>} by lifting @{text P}
from the representation.
\end{enumerate}
You can later forget about the representation and work solely in
terms of the abstract properties @{text P}.
\medskip The following trivial example pulls a three-element type
into existence within the formal logical environment of HOL.\<close>
(* Representing set: three of the four Boolean pairs; the required
   non-emptiness obligation is discharged by blast. *)
typedef three = "{(True, True), (True, False), (False, True)}"
by blast
(* Abstract names for the three elements, obtained via the abstraction
   morphism Abs_three generated by the typedef above. *)
definition "One = Abs_three (True, True)"
definition "Two = Abs_three (True, False)"
definition "Three = Abs_three (False, True)"
(* Pairwise distinctness follows from injectivity of Abs_three on the
   representing set (rule Abs_three_inject). *)
lemma three_distinct: "One \<noteq> Two" "One \<noteq> Three" "Two \<noteq> Three"
by (simp_all add: One_def Two_def Three_def Abs_three_inject)
(* Exhaustion: every value of type three equals one of the three named
   elements, by case analysis on the representation. *)
lemma three_cases:
fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
text \<open>Note that such trivial constructions are better done with
derived specification mechanisms such as @{command datatype}:\<close>
(* The same three-element type, introduced directly as a datatype. *)
datatype three' = One' | Two' | Three'
text \<open>This avoids re-doing basic definitions and proofs from the
primitive @{command typedef} above.\<close>
section \<open>Functorial structure of types\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
\end{matharray}
@{rail \<open>
@@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
\<close>}
\begin{description}
\item @{command (HOL) "functor"}~@{text "prefix: m"} allows to
prove and register properties about the functorial structure of type
constructors. These properties then can be used by other packages
to deal with those type constructors in certain type constructions.
Characteristic theorems are noted in the current local theory. By
default, they are prefixed with the base name of the type
constructor, an explicit prefix can be given alternatively.
The given term @{text "m"} is considered as \emph{mapper} for the
corresponding type constructor and must conform to the following
type pattern:
\begin{matharray}{lll}
@{text "m"} & @{text "::"} &
@{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>k \<Rightarrow> (\<^vec>\<alpha>\<^sub>n) t \<Rightarrow> (\<^vec>\<beta>\<^sub>n) t"} \\
\end{matharray}
\noindent where @{text t} is the type constructor, @{text
"\<^vec>\<alpha>\<^sub>n"} and @{text "\<^vec>\<beta>\<^sub>n"} are distinct
type variables free in the local theory and @{text "\<sigma>\<^sub>1"},
\ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text "\<alpha>\<^sub>1 \<Rightarrow>
\<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots,
@{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text "\<beta>\<^sub>n \<Rightarrow>
\<alpha>\<^sub>n"}.
\end{description}
\<close>
section \<open>Quotient types\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
@{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
@{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
@{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
@{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
@{method_def (HOL) "lifting"} & : & @{text method} \\
@{method_def (HOL) "lifting_setup"} & : & @{text method} \\
@{method_def (HOL) "descending"} & : & @{text method} \\
@{method_def (HOL) "descending_setup"} & : & @{text method} \\
@{method_def (HOL) "partiality_descending"} & : & @{text method} \\
@{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
@{method_def (HOL) "regularize"} & : & @{text method} \\
@{method_def (HOL) "injection"} & : & @{text method} \\
@{method_def (HOL) "cleaning"} & : & @{text method} \\
@{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
@{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
@{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
@{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
\end{matharray}
The quotient package defines a new quotient type given a raw type
and a partial equivalence relation. The package also historically
includes automation for transporting definitions and theorems.
But most of this automation was superseded by the Lifting and Transfer
packages. The user should consider using these two new packages for
lifting definitions and transporting theorems.
@{rail \<open>
@@{command (HOL) quotient_type} (spec)
;
spec: @{syntax typespec} @{syntax mixfix}? '=' \<newline>
@{syntax type} '/' ('partial' ':')? @{syntax term} \<newline>
(@'morphisms' @{syntax name} @{syntax name})? (@'parametric' @{syntax thmref})?
\<close>}
@{rail \<open>
@@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
@{syntax term} 'is' @{syntax term}
;
constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
\<close>}
@{rail \<open>
@@{method (HOL) lifting} @{syntax thmrefs}?
;
@@{method (HOL) lifting_setup} @{syntax thmrefs}?
\<close>}
\begin{description}
\item @{command (HOL) "quotient_type"} defines a new quotient type @{text \<tau>}. The
injection from a quotient type to a raw type is called @{text
rep_\<tau>}, its inverse @{text abs_\<tau>} unless explicit @{keyword (HOL)
"morphisms"} specification provides alternative names. @{command
(HOL) "quotient_type"} requires the user to prove that the relation
is an equivalence relation (predicate @{text equivp}), unless the
user specifies explicitly @{text partial} in which case the
obligation is @{text part_equivp}. A quotient defined with @{text
partial} is weaker in the sense that fewer things can be proved
automatically.
The command internally proves a Quotient theorem and sets up the Lifting
package by the command @{command (HOL) setup_lifting}. Thus the Lifting
and Transfer packages can be used also with quotient types defined by
@{command (HOL) "quotient_type"} without any extra set-up. The parametricity
theorem for the equivalence relation R can be provided as an extra argument
of the command and is passed to the corresponding internal call of @{command (HOL) setup_lifting}.
This theorem allows the Lifting package to generate a stronger transfer rule for equality.
\end{description}
Most of the rest of the package was superseded by the Lifting and Transfer
packages. The user should consider using these two new packages for
lifting definitions and transporting theorems.
\begin{description}
\item @{command (HOL) "quotient_definition"} defines a constant on
the quotient type.
\item @{command (HOL) "print_quotmapsQ3"} prints quotient map
functions.
\item @{command (HOL) "print_quotientsQ3"} prints quotients.
\item @{command (HOL) "print_quotconsts"} prints quotient constants.
\item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
methods match the current goal with the given raw theorem to be
lifted producing three new subgoals: regularization, injection and
cleaning subgoals. @{method (HOL) "lifting"} tries to apply the
heuristics for automatically solving these three subgoals and
leaves only the subgoals unsolved by the heuristics to the user as
opposed to @{method (HOL) "lifting_setup"} which leaves the three
subgoals unsolved.
\item @{method (HOL) "descending"} and @{method (HOL)
"descending_setup"} try to guess a raw statement that would lift
to the current subgoal. Such statement is assumed as a new subgoal
and @{method (HOL) "descending"} continues in the same way as
@{method (HOL) "lifting"} does. @{method (HOL) "descending"} tries
to solve the arising regularization, injection and cleaning
subgoals with the analogous method @{method (HOL)
"descending_setup"} which leaves the four unsolved subgoals.
\item @{method (HOL) "partiality_descending"} finds the regularized
theorem that would lift to the current subgoal, lifts it and
leaves as a subgoal. This method can be used with partial
equivalence quotients where the non regularized statements would
not be true. @{method (HOL) "partiality_descending_setup"} leaves
the injection and cleaning subgoals unchanged.
\item @{method (HOL) "regularize"} applies the regularization
heuristics to the current subgoal.
\item @{method (HOL) "injection"} applies the injection heuristics
to the current goal using the stored quotient respectfulness
theorems.
\item @{method (HOL) "cleaning"} applies the injection cleaning
heuristics to the current subgoal using the stored quotient
preservation theorems.
\item @{attribute (HOL) quot_lifted} attribute tries to
automatically transport the theorem to the quotient type.
The attribute uses all the defined quotients types and quotient
constants often producing undesired results or theorems that
cannot be lifted.
\item @{attribute (HOL) quot_respect} and @{attribute (HOL)
quot_preserve} attributes declare a theorem as a respectfulness
and preservation theorem respectively. These are stored in the
local theory store and used by the @{method (HOL) "injection"}
and @{method (HOL) "cleaning"} methods respectively.
\item @{attribute (HOL) quot_thm} declares that a certain theorem
is a quotient extension theorem. Quotient extension theorems
allow for quotienting inside container types. Given a polymorphic
type that serves as a container, a map function defined for this
container using @{command (HOL) "functor"} and a relation
map defined for the container type, the quotient extension
theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
(rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
are stored in a database and are used in all the steps of lifting
theorems.
\end{description}
\<close>
section \<open>Definition by specification \label{sec:hol-specification}\<close>
text \<open>
\begin{matharray}{rcl}
@{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) specification} '(' (decl +) ')' \<newline>
(@{syntax thmdecl}? @{syntax prop} +)
;
decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
\<close>}
\begin{description}
\item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
goal stating the existence of terms with the properties specified to
hold for the constants given in @{text decls}. After finishing the
proof, the theory will be augmented with definitions for the given
constants, as well as with theorems stating the properties for these
constants.
@{text decl} declares a constant to be defined by the
specification given. The definition for the constant @{text c} is
bound to the name @{text c_def} unless a theorem name is given in
the declaration. Overloaded constants should be declared as such.
\end{description}
\<close>
section \<open>Adhoc overloading of constants\<close>
text \<open>
\begin{tabular}{rcll}
@{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
@{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
\end{tabular}
\medskip
Adhoc overloading allows to overload a constant depending on
its type. Typically this involves the introduction of an
uninterpreted constant (used for input and output) and the addition
of some variants (used internally). For examples see
@{file "~~/src/HOL/ex/Adhoc_Overloading_Examples.thy"} and
@{file "~~/src/HOL/Library/Monad_Syntax.thy"}.
@{rail \<open>
(@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
(@{syntax nameref} (@{syntax term} + ) + @'and')
\<close>}
\begin{description}
\item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
associates variants with an existing constant.
\item @{command "no_adhoc_overloading"} is similar to
@{command "adhoc_overloading"}, but removes the specified variants
from the present context.
\item @{attribute "show_variants"} controls printing of variants
of overloaded constants. If enabled, the internally used variants
are printed instead of their respective overloaded constants. This
is occasionally useful to check whether the system agrees with a
user's expectations about derived variants.
\end{description}
\<close>
chapter \<open>Proof tools\<close>
section \<open>Adhoc tuples\<close>
text \<open>
\begin{matharray}{rcl}
@{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
\end{matharray}
@{rail \<open>
@@{attribute (HOL) split_format} ('(' 'complete' ')')?
\<close>}
\begin{description}
\item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
arguments in function applications to be represented canonically
according to their tuple type structure.
Note that this operation tends to invent funny names for new local
parameters introduced.
\end{description}
\<close>
section \<open>Transfer package\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) "transfer"} & : & @{text method} \\
@{method_def (HOL) "transfer'"} & : & @{text method} \\
@{method_def (HOL) "transfer_prover"} & : & @{text method} \\
@{attribute_def (HOL) "Transfer.transferred"} & : & @{text attribute} \\
@{attribute_def (HOL) "untransferred"} & : & @{text attribute} \\
@{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
@{attribute_def (HOL) "transfer_domain_rule"} & : & @{text attribute} \\
@{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
@{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
\end{matharray}
\begin{description}
\item @{method (HOL) "transfer"} method replaces the current subgoal
with a logically equivalent one that uses different types and
constants. The replacement of types and constants is guided by the
database of transfer rules. Goals are generalized over all free
variables by default; this is necessary for variables whose types
change, but can be overridden for specific variables with e.g.
@{text "transfer fixing: x y z"}.
\item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
transfer} that allows replacing a subgoal with one that is
logically stronger (rather than equivalent). For example, a
subgoal involving equality on a quotient type could be replaced
with a subgoal involving equality (instead of the corresponding
equivalence relation) on the underlying raw type.
\item @{method (HOL) "transfer_prover"} method assists with proving
a transfer rule for a new constant, provided the constant is
defined in terms of other constants that already have transfer
rules. It should be applied after unfolding the constant
definitions.
\item @{attribute (HOL) "untransferred"} proves the same equivalent theorem
as @{method (HOL) "transfer"} internally does.
\item @{attribute (HOL) Transfer.transferred} works in the opposite
direction from @{method (HOL) "transfer'"}. E.g., given the transfer
relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and the theorem
@{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would prove
@{text "\<forall>n::nat. n < n + 1"}. The attribute is still in experimental
phase of development.
\item @{attribute (HOL) "transfer_rule"} attribute maintains a
collection of transfer rules, which relate constants at two
different types. Typical transfer rules may relate different type
instances of the same polymorphic constant, or they may relate an
operation on a raw type to a corresponding operation on an
abstract type (quotient or subtype). For example:
@{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
@{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
Lemmas involving predicates on relations can also be registered
using the same attribute. For example:
@{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
@{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (rel_prod A B)"}
Preservation of predicates on relations (@{text "bi_unique, bi_total,
right_unique, right_total, left_unique, left_total"}) with respect to a relator
is proved automatically if the involved type is BNF
@{cite "isabelle-datatypes"} without dead variables.
\item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection
of rules, which specify a domain of a transfer relation by a predicate.
E.g., given the transfer relation @{text "ZN x n \<equiv> (x = int n)"},
one can register the following transfer domain rule:
@{text "Domainp ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce
more readable transferred goals, e.g., when quantifiers are transferred.
\item @{attribute (HOL) relator_eq} attribute collects identity laws
for relators of various type constructors, e.g. @{term "rel_set
(op =) = (op =)"}. The @{method (HOL) transfer} method uses these
lemmas to infer transfer rules for non-polymorphic constants on
the fly. For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\item @{attribute_def (HOL) "relator_domain"} attribute collects rules
describing domains of relators by predicators. E.g.,
@{term "Domainp (rel_set T) = (\<lambda>A. Ball A (Domainp T))"}. This allows the package
to lift transfer domain rules through type constructors. For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\end{description}
Theoretical background can be found in @{cite "Huffman-Kuncar:2013:lifting_transfer"}.
\<close>
section \<open>Lifting package\<close>
text \<open>
The Lifting package allows users to lift terms of the raw type to the abstract type, which is
a necessary step in building a library for an abstract type. Lifting defines a new constant
by combining coercion functions (Abs and Rep) with the raw term. It also proves an appropriate
transfer rule for the Transfer package and, if possible, an equation for the code generator.
The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing
the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants.
The Lifting package works with all four kinds of type abstraction: type copies, subtypes,
total quotients and partial quotients.
Theoretical background can be found in @{cite "Huffman-Kuncar:2013:lifting_transfer"}.
\begin{matharray}{rcl}
@{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
@{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
@{command_def (HOL) "lifting_forget"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
@{command_def (HOL) "lifting_update"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
@{command_def (HOL) "print_quot_maps"} & : & @{text "context \<rightarrow>"}\\
@{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
@{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
@{attribute_def (HOL) "relator_eq_onp"} & : & @{text attribute} \\
@{attribute_def (HOL) "relator_mono"} & : & @{text attribute} \\
@{attribute_def (HOL) "relator_distr"} & : & @{text attribute} \\
@{attribute_def (HOL) "quot_del"} & : & @{text attribute} \\
@{attribute_def (HOL) "lifting_restore"} & : & @{text attribute} \\
\end{matharray}
@{rail \<open>
@@{command (HOL) setup_lifting} ('(' 'no_code' ')')? \<newline>
@{syntax thmref} @{syntax thmref}? (@'parametric' @{syntax thmref})?;
\<close>}
@{rail \<open>
@@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type} @{syntax mixfix}? \<newline>
'is' @{syntax term} (@'parametric' (@{syntax thmref}+))?;
\<close>}
@{rail \<open>
@@{command (HOL) lifting_forget} @{syntax nameref};
\<close>}
@{rail \<open>
@@{command (HOL) lifting_update} @{syntax nameref};
\<close>}
@{rail \<open>
@@{attribute (HOL) lifting_restore} @{syntax thmref} (@{syntax thmref} @{syntax thmref})?;
\<close>}
\begin{description}
\item @{command (HOL) "setup_lifting"} Sets up the Lifting package
to work with a user-defined type.
The command supports two modes. The first one is a low-level mode when
the user must provide as a first
argument of @{command (HOL) "setup_lifting"} a
quotient theorem @{term "Quotient R Abs Rep T"}. The
package configures a transfer rule for equality, domain transfer
rules and sets up the @{command_def (HOL) "lift_definition"}
command to work with the abstract type. An optional theorem @{term "reflp R"}, which certifies that
the equivalence relation R is total,
can be provided as a second argument. This allows the package to generate stronger transfer
rules. And finally, the parametricity theorem for R can be provided as a third argument.
This allows the package to generate a stronger transfer rule for equality.
Users generally will not prove the @{text Quotient} theorem manually for
new types, as special commands exist to automate the process.
When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "setup_lifting"}
can be used in its
second mode, where only the type_definition theorem @{text "type_definition Rep Abs A"}
is used as an argument of the command. The command internally proves the corresponding
Quotient theorem and registers it with @{command (HOL) setup_lifting} using its first mode.
For quotients, the command @{command (HOL) quotient_type} can be used. The command defines
a new quotient type and similarly to the previous case, the corresponding Quotient theorem is proved
and registered by @{command (HOL) setup_lifting}.
The command @{command (HOL) "setup_lifting"} also sets up the code generator
for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"},
the Lifting package proves and registers a code equation (if there is one) for the new constant.
If the option @{text "no_code"} is specified, the Lifting package does not set up the code
generator and as a consequence no code equations involving an abstract type are registered
by @{command (HOL) "lift_definition"}.
\item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL) "is"} @{text t}
Defines a new function @{text f} with an abstract type @{text \<tau>}
in terms of a corresponding operation @{text t} on a
representation type. More formally, if @{text "t :: \<sigma>"}, then
the command builds a term @{text "F"} as a corresponding combination of abstraction
and representation functions such that @{text "F :: \<sigma> \<Rightarrow> \<tau>" } and
defines @{text f} as @{text "f \<equiv> F t"}.
The term @{text t} does not have to be necessarily a constant but it can be any term.
The command opens a proof environment and the user must discharge
a respectfulness proof obligation. For a type copy, i.e., a typedef with @{text
UNIV}, the obligation is discharged automatically. The proof goal is
presented in a user-friendly, readable form. A respectfulness
theorem in the standard format @{text f.rsp} and a transfer rule
@{text f.transfer} for the Transfer package are generated by the
package.
The user can specify parametricity theorems for @{text t} after the keyword
@{keyword "parametric"}, which allows the command
to generate parametric transfer rules for @{text f}.
For each constant defined through trivial quotients (type copies or
subtypes) @{text f.rep_eq} is generated. The equation is a code certificate
that defines @{text f} using the representation function.
For each constant @{text f.abs_eq} is generated. The equation is unconditional
for total quotients. The equation defines @{text f} using
the abstraction function.
Integration with [@{attribute code} abstract]: For subtypes (e.g.,
corresponding to a datatype invariant, such as dlist), @{command
(HOL) "lift_definition"} uses a code certificate theorem
@{text f.rep_eq} as a code equation.
Integration with [@{attribute code} equation]: For total quotients, @{command
(HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation.
\item @{command (HOL) lifting_forget} and @{command (HOL) lifting_update}
These two commands serve for storing and deleting the set-up of
the Lifting package and corresponding transfer rules defined by this package.
This is useful for hiding of type construction details of an abstract type
when the construction is finished but it still allows additions to this construction
when this is later necessary.
Whenever the Lifting package is set up with a new abstract type @{text "\<tau>"} by
@{command_def (HOL) "lift_definition"}, the package defines a new bundle
that is called @{text "\<tau>.lifting"}. This bundle already includes set-up for the Lifting package.
The new transfer rules
introduced by @{command (HOL) "lift_definition"} can be stored in the bundle by
the command @{command (HOL) "lifting_update"} @{text "\<tau>.lifting"}.
The command @{command (HOL) "lifting_forget"} @{text "\<tau>.lifting"} deletes set-up of the Lifting
package
for @{text \<tau>} and deletes all the transfer rules that were introduced
by @{command (HOL) "lift_definition"} using @{text \<tau>} as an abstract type.
The stored set-up in a bundle can be reintroduced by the Isar commands for including a bundle
(@{command "include"}, @{keyword "includes"} and @{command "including"}).
\item @{command (HOL) "print_quot_maps"} prints stored quotient map
theorems.
\item @{command (HOL) "print_quotients"} prints stored quotient
theorems.
\item @{attribute (HOL) quot_map} registers a quotient map
theorem, a theorem showing how to "lift" quotients over type constructors.
E.g., @{term "Quotient R Abs Rep T \<Longrightarrow>
Quotient (rel_set R) (image Abs) (image Rep) (rel_set T)"}.
For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\item @{attribute (HOL) relator_eq_onp} registers a theorem that
shows that a relator applied to an equality restricted by a predicate @{term P} (i.e., @{term
"eq_onp P"}) is equal
to a predicator applied to the @{term P}. The combinator @{const eq_onp} is used for
internal encoding of proper subtypes. Such theorems allow the package to hide @{text
eq_onp} from a user in a user-readable form of a
respectfulness theorem. For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\item @{attribute (HOL) "relator_mono"} registers a property describing a monotonicity of a relator.
E.g., @{term "A \<le> B \<Longrightarrow> rel_set A \<le> rel_set B"}.
This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
when a parametricity theorem for the raw term is specified and also for the reflexivity prover.
For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\item @{attribute (HOL) "relator_distr"} registers a property describing a distributivity
of the relation composition and a relator. E.g.,
@{text "rel_set R \<circ>\<circ> rel_set S = rel_set (R \<circ>\<circ> S)"}.
This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
when a parametricity theorem for the raw term is specified.
When this equality does not hold unconditionally (e.g., for the function type), the user can specify
each direction separately and also register multiple theorems with different set of assumptions.
This attribute can be used only after the monotonicity property was already registered by
@{attribute (HOL) "relator_mono"}. For examples see @{file
"~~/src/HOL/Lifting_Set.thy"} or @{file "~~/src/HOL/Lifting.thy"}.
This property is proved automatically if the involved type is BNF without dead variables.
\item @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
from the Lifting infrastructure and thus de-register the corresponding quotient.
This effectively means that @{command (HOL) lift_definition} will not
do any lifting for the corresponding type. This attribute is rather used for low-level
manipulation with set-up of the Lifting package because @{command (HOL) lifting_forget} is
preferred for normal usage.
\item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def pcr_cr_eq_thm"}
registers the Quotient theorem @{text Quotient_thm} in the Lifting infrastructure
and thus sets up lifting for an abstract type @{text \<tau>} (that is defined by @{text Quotient_thm}).
Optional theorems @{text pcr_def} and @{text pcr_cr_eq_thm} can be specified to register
the parametrized
correspondence relation for @{text \<tau>}. E.g., for @{text "'a dlist"}, @{text pcr_def} is
@{text "pcr_dlist A \<equiv> list_all2 A \<circ>\<circ> cr_dlist"} and @{text pcr_cr_eq_thm} is
@{text "pcr_dlist op= = op="}.
This attribute is rather used for low-level
manipulation with set-up of the Lifting package because use of the bundle @{text \<tau>.lifting}
together with the commands @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} is
preferred for normal usage.
\item Integration with the BNF package @{cite "isabelle-datatypes"}:
As already mentioned, the theorems that are registered
by the following attributes are proved and registered automatically if the involved type
is BNF without dead variables: @{attribute (HOL) quot_map}, @{attribute (HOL) relator_eq_onp},
@{attribute (HOL) "relator_mono"}, @{attribute (HOL) "relator_distr"}. Also the definition of a
relator and predicator is provided automatically. Moreover, if the BNF represents a datatype,
simplification rules for a predicator are again proved automatically.
\end{description}
\<close>
section \<open>Coercive subtyping\<close>
text \<open>
\begin{matharray}{rcl}
@{attribute_def (HOL) coercion} & : & @{text attribute} \\
@{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
@{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
\end{matharray}
Coercive subtyping allows the user to omit explicit type
conversions, also called \emph{coercions}. Type inference will add
them as necessary when parsing a term. See
@{cite "traytel-berghofer-nipkow-2011"} for details.
@{rail \<open>
@@{attribute (HOL) coercion} (@{syntax term})?
;
@@{attribute (HOL) coercion_map} (@{syntax term})?
\<close>}
\begin{description}
\item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
@{text "\<sigma>\<^sub>2"} are type constructors without arguments. Coercions are
composed by the inference algorithm if needed. Note that the type
inference algorithm is complete only if the registered coercions
form a lattice.
\item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
new map function to lift coercions through type constructors. The
function @{text "map"} must conform to the following type pattern
\begin{matharray}{lll}
@{text "map"} & @{text "::"} &
@{text "f\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^sub>n \<Rightarrow> (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Rightarrow> (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t"} \\
\end{matharray}
where @{text "t"} is a type constructor and @{text "f\<^sub>i"} is of type
@{text "\<alpha>\<^sub>i \<Rightarrow> \<beta>\<^sub>i"} or @{text "\<beta>\<^sub>i \<Rightarrow> \<alpha>\<^sub>i"}. Registering a map function
overwrites any existing map function for this particular type
constructor.
\item @{attribute (HOL) "coercion_enabled"} enables the coercion
inference algorithm.
\end{description}
\<close>
section \<open>Arithmetic proof support\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) arith} & : & @{text method} \\
@{attribute_def (HOL) arith} & : & @{text attribute} \\
@{attribute_def (HOL) arith_split} & : & @{text attribute} \\
\end{matharray}
\begin{description}
\item @{method (HOL) arith} decides linear arithmetic problems (on
types @{text nat}, @{text int}, @{text real}). Any current facts
are inserted into the goal before running the procedure.
\item @{attribute (HOL) arith} declares facts that are supplied to
the arithmetic provers implicitly.
\item @{attribute (HOL) arith_split} attribute declares case split
rules to be expanded before @{method (HOL) arith} is invoked.
\end{description}
Note that a simpler (but faster) arithmetic prover is already
invoked by the Simplifier.
\<close>
section \<open>Intuitionistic proof search\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) iprover} & : & @{text method} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) iprover} (@{syntax rulemod} *)
\<close>}
\begin{description}
\item @{method (HOL) iprover} performs intuitionistic proof search,
depending on specifically declared rules from the context, or given
as explicit arguments. Chained facts are inserted into the goal
before commencing proof search.
Rules need to be classified as @{attribute (Pure) intro},
@{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
``@{text "!"}'' indicator refers to ``safe'' rules, which may be
applied aggressively (without considering back-tracking later).
Rules declared with ``@{text "?"}'' are ignored in proof search (the
single-step @{method (Pure) rule} method still observes these). An
explicit weight annotation may be given as well; otherwise the
number of rule premises will be taken into account here.
\end{description}
\<close>
section \<open>Model Elimination and Resolution\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) "meson"} & : & @{text method} \\
@{method_def (HOL) "metis"} & : & @{text method} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) meson} @{syntax thmrefs}?
;
@@{method (HOL) metis}
('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
@{syntax thmrefs}?
\<close>}
\begin{description}
\item @{method (HOL) meson} implements Loveland's model elimination
procedure @{cite "loveland-78"}. See @{file
"~~/src/HOL/ex/Meson_Test.thy"} for examples.
\item @{method (HOL) metis} combines ordered resolution and ordered
paramodulation to find first-order (or mildly higher-order) proofs.
The first optional argument specifies a type encoding; see the
Sledgehammer manual @{cite "isabelle-sledgehammer"} for details. The
directory @{file "~~/src/HOL/Metis_Examples"} contains several small
theories developed to a large extent using @{method (HOL) metis}.
\end{description}
\<close>
section \<open>Algebraic reasoning via Gr\"obner bases\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) "algebra"} & : & @{text method} \\
@{attribute_def (HOL) algebra} & : & @{text attribute} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) algebra}
('add' ':' @{syntax thmrefs})?
('del' ':' @{syntax thmrefs})?
;
@@{attribute (HOL) algebra} (() | 'add' | 'del')
\<close>}
\begin{description}
\item @{method (HOL) algebra} performs algebraic reasoning via
Gr\"obner bases, see also @{cite "Chaieb-Wenzel:2007"} and
@{cite \<open>\S3.2\<close> "Chaieb-thesis"}. The method deals with two main
classes of problems:
\begin{enumerate}
\item Universal problems over multivariate polynomials in a
(semi)-ring/field/idom; the capabilities of the method are augmented
according to properties of these structures. For this problem class
the method is only complete for algebraically closed fields, since
the underlying method is based on Hilbert's Nullstellensatz, where
the equivalence only holds for algebraically closed fields.
The problems can contain equations @{text "p = 0"} or inequations
@{text "q \<noteq> 0"} anywhere within a universal problem statement.
\item All-exists problems of the following restricted (but useful)
form:
@{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
(\<exists>y\<^sub>1 \<dots> y\<^sub>k.
p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
\<dots> \<and>
p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
polynomials only in the variables mentioned as arguments.
\end{enumerate}
The proof method is preceded by a simplification step, which may be
modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
This acts like declarations for the Simplifier
(\secref{sec:simplifier}) on a private simpset for this tool.
\item @{attribute algebra} (as attribute) manages the default
collection of pre-simplification rules of the above proof method.
\end{description}
\<close>
subsubsection \<open>Example\<close>
text \<open>The subsequent example is from geometry: collinearity is
invariant by rotation.\<close>
(* A point in the plane, represented by a pair of integer coordinates. *)
type_synonym point = "int \<times> int"
(* Collinearity of three points A, B, C, stated as a cross-multiplied slope
   condition on the segments AB and BC (avoids division, so it works over int). *)
fun collinear :: "point \<Rightarrow> point \<Rightarrow> point \<Rightarrow> bool" where
"collinear (Ax, Ay) (Bx, By) (Cx, Cy) \<longleftrightarrow>
(Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)"
(* Collinearity is preserved by the linear map (x, y) \<mapsto> (x*c - y*s, y*c + x*s);
   the side condition c\<^sup>2 + s\<^sup>2 = 1 makes this map a rotation.  The goal is
   discharged by the Gr\"obner-basis method after unfolding collinear.simps. *)
lemma collinear_inv_rotation:
assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1"
shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s)
(Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
using assms by (algebra add: collinear.simps)
text \<open>
See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
\<close>
section \<open>Coherent Logic\<close>
text \<open>
\begin{matharray}{rcl}
@{method_def (HOL) "coherent"} & : & @{text method} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) coherent} @{syntax thmrefs}?
\<close>}
\begin{description}
\item @{method (HOL) coherent} solves problems of \emph{Coherent
Logic} @{cite "Bezem-Coquand:2005"}, which covers applications in
confluence theory, lattice theory and projective geometry. See
@{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
\end{description}
\<close>
section \<open>Proving propositions\<close>
text \<open>
In addition to the standard proof methods, a number of diagnosis
tools search for proofs and provide an Isar proof snippet on success.
These tools are available via the following commands.
\begin{matharray}{rcl}
@{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
\end{matharray}
@{rail \<open>
@@{command (HOL) try}
;
@@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
@{syntax nat}?
;
@@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
;
@@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
;
args: ( @{syntax name} '=' value + ',' )
;
facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
\<close>} % FIXME check args "value"
\begin{description}
\item @{command (HOL) "solve_direct"} checks whether the current
subgoals can be solved directly by an existing theorem. Duplicate
lemmas can be detected in this way.
\item @{command (HOL) "try0"} attempts to prove a subgoal
using a combination of standard proof methods (@{method auto},
@{method simp}, @{method blast}, etc.). Additional facts supplied
via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
"dest:"} are passed to the appropriate proof methods.
\item @{command (HOL) "try"} attempts to prove or disprove a subgoal
using a combination of provers and disprovers (@{command (HOL)
"solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
"try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
"nitpick"}).
\item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
using external automatic provers (resolution provers and SMT
solvers). See the Sledgehammer manual @{cite "isabelle-sledgehammer"}
for details.
\item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
"sledgehammer"} configuration options persistently.
\end{description}
\<close>
section \<open>Checking and refuting propositions\<close>
text \<open>
Identifying incorrect propositions usually involves evaluation of
particular assignments and systematic counterexample search. This
is supported by the following commands.
\begin{matharray}{rcl}
@{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
@{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
\end{matharray}
@{rail \<open>
@@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
;
@@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
;
(@@{command (HOL) quickcheck} | @@{command (HOL) nitpick})
( '[' args ']' )? @{syntax nat}?
;
(@@{command (HOL) quickcheck_params} |
@@{command (HOL) nitpick_params}) ( '[' args ']' )?
;
@@{command (HOL) quickcheck_generator} @{syntax nameref} \<newline>
'operations:' ( @{syntax term} +)
;
@@{command (HOL) find_unused_assms} @{syntax name}?
;
modes: '(' (@{syntax name} +) ')'
;
args: ( @{syntax name} '=' value + ',' )
\<close>} % FIXME check "value"
\begin{description}
\item @{command (HOL) "value"}~@{text t} evaluates and prints a
term; optionally @{text modes} can be specified, which are appended
to the current print mode; see \secref{sec:print-modes}.
Evaluation is tried first using ML, falling
back to normalization by evaluation if this fails.
Alternatively a specific evaluator can be selected using square
brackets; typical evaluators use the current set of code equations
to normalize and include @{text simp} for fully symbolic evaluation
using the simplifier, @{text nbe} for \emph{normalization by
evaluation} and \emph{code} for code generation in SML.
\item @{command (HOL) "values"}~@{text t} enumerates a set
comprehension by evaluation and prints its values up to the given
number of solutions; optionally @{text modes} can be specified,
which are appended to the current print mode; see
\secref{sec:print-modes}.
\item @{command (HOL) "quickcheck"} tests the current goal for
counterexamples using a series of assignments for its free
variables; by default the first subgoal is tested, another can be
selected explicitly using an optional goal index. Assignments can
be chosen exhausting the search space up to a given size, or using a
fixed number of random assignments in the search space, or exploring
the search space symbolically using narrowing. By default,
quickcheck uses exhaustive testing. A number of configuration
options are supported for @{command (HOL) "quickcheck"}, notably:
\begin{description}
\item[@{text tester}] specifies which testing approach to apply.
There are three testers, @{text exhaustive}, @{text random}, and
@{text narrowing}. An unknown configuration option is treated as
an argument to tester, making @{text "tester ="} optional. When
multiple testers are given, these are applied in parallel. If no
tester is specified, quickcheck uses the testers that are set
active, i.e., configurations @{attribute
quickcheck_exhaustive_active}, @{attribute
quickcheck_random_active}, @{attribute
quickcheck_narrowing_active} are set to true.
\item[@{text size}] specifies the maximum size of the search space
for assignment values.
\item[@{text genuine_only}] sets quickcheck only to return genuine
counterexample, but not potentially spurious counterexamples due
to underspecified functions.
\item[@{text abort_potential}] sets quickcheck to abort once it
found a potentially spurious counterexample and to not continue
to search for a further genuine counterexample.
For this option to be effective, the @{text genuine_only} option
must be set to false.
\item[@{text eval}] takes a term or a list of terms and evaluates
these terms under the variable assignment found by quickcheck.
This option is currently only supported by the default
(exhaustive) tester.
\item[@{text iterations}] sets how many sets of assignments are
generated for each particular size.
\item[@{text no_assms}] specifies whether assumptions in
structured proofs should be ignored.
\item[@{text locale}] specifies how to process conjectures in
a locale context, i.e., they can be interpreted or expanded.
The option is a whitespace-separated list of the two words
@{text interpret} and @{text expand}. The list determines the
order they are employed. The default setting is to first use
interpretations and then test the expanded conjecture.
The option is only provided as attribute declaration, but not
as parameter to the command.
\item[@{text timeout}] sets the time limit in seconds.
\item[@{text default_type}] sets the type(s) generally used to
instantiate type variables.
\item[@{text report}] if set quickcheck reports how many tests
fulfilled the preconditions.
\item[@{text use_subtype}] if set quickcheck automatically lifts
conjectures to registered subtypes if possible, and tests the
lifted conjecture.
\item[@{text quiet}] if set quickcheck does not output anything
while testing.
\item[@{text verbose}] if set quickcheck informs about the current
size and cardinality while testing.
\item[@{text expect}] can be used to check if the user's
expectation was met (@{text no_expectation}, @{text
no_counterexample}, or @{text counterexample}).
\end{description}
These option can be given within square brackets.
Using the following type classes, the testers generate values and convert
them back into Isabelle terms for displaying counterexamples.
\begin{description}
\item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
and @{class full_exhaustive} implement the testing. They take a
testing function as a parameter, which takes a value of type @{typ "'a"}
and optionally produces a counterexample, and a size parameter for the test values.
In @{class full_exhaustive}, the testing function parameter additionally
expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
of the tested value.
The canonical implementation for @{text exhaustive} testers calls the given
testing function on all values up to the given size and stops as soon
as a counterexample is found.
\item[@{text random}] The operation @{const Quickcheck_Random.random}
of the type class @{class random} generates a pseudo-random
value of the given size and a lazy term reconstruction of the value
in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
is defined in theory @{theory Random}.
\item[@{text narrowing}] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"}
using the type classes @{class narrowing} and @{class partial_term_of}.
Variables in the current goal are initially represented as symbolic variables.
If the execution of the goal tries to evaluate one of them, the test engine
replaces it with refinements provided by @{const narrowing}.
Narrowing views every value as a sum-of-products which is expressed using the operations
@{const Quickcheck_Narrowing.cons} (embedding a value),
@{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
The refinement should enable further evaluation of the goal.
For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
can be recursively defined as
@{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
(Quickcheck_Narrowing.apply
(Quickcheck_Narrowing.apply
(Quickcheck_Narrowing.cons (op #))
narrowing)
narrowing)"}.
If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively
refined if needed.
To reconstruct counterexamples, the operation @{const partial_term_of} transforms
@{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
The deep representation models symbolic variables as
@{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
@{const Code_Evaluation.Free}, and refined values as
@{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
denotes the index in the sum of refinements. In the above example for lists,
@{term "0"} corresponds to @{term "[]"} and @{term "1"}
to @{term "op #"}.
The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
but it does not ensure consistency with @{const narrowing}.
\end{description}
\item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
"quickcheck"} configuration options persistently.
\item @{command (HOL) "quickcheck_generator"} creates random and
exhaustive value generators for a given type and operations. It
generates values by using the operations as if they were
constructors of that type.
\item @{command (HOL) "nitpick"} tests the current goal for
counterexamples using a reduction to first-order relational
logic. See the Nitpick manual @{cite "isabelle-nitpick"} for details.
\item @{command (HOL) "nitpick_params"} changes @{command (HOL)
"nitpick"} configuration options persistently.
\item @{command (HOL) "find_unused_assms"} finds potentially superfluous
assumptions in theorems using quickcheck.
It takes the theory name to be checked for superfluous assumptions as
optional argument. If not provided, it checks the current theory.
Options to the internal quickcheck invocations can be changed with
common configuration declarations.
\end{description}
\<close>
section \<open>Unstructured case analysis and induction \label{sec:hol-induct-tac}\<close>
text \<open>
The following tools of Isabelle/HOL support cases analysis and
induction in unstructured tactic scripts; see also
\secref{sec:cases-induct} for proper Isar versions of similar ideas.
\begin{matharray}{rcl}
@{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
@{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
@{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
@{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
\end{matharray}
@{rail \<open>
@@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
;
@@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
;
@@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
;
@@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
;
rule: 'rule' ':' @{syntax thmref}
\<close>}
\begin{description}
\item @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
to reason about inductive types. Rules are selected according to
the declarations by the @{attribute cases} and @{attribute induct}
attributes, cf.\ \secref{sec:cases-induct}. The @{command (HOL)
datatype} package already takes care of this.
These unstructured tactics feature both goal addressing and dynamic
instantiation. Note that named rule cases are \emph{not} provided
as would be by the proper @{method cases} and @{method induct} proof
methods (see \secref{sec:cases-induct}). Unlike the @{method
induct} method, @{method induct_tac} does not handle structured rule
statements, only the compact object-logic conclusion of the subgoal
being addressed.
\item @{method (HOL) ind_cases} and @{command (HOL)
"inductive_cases"} provide an interface to the internal @{ML_text
mk_cases} operation. Rules are simplified in an unrestricted
forward manner.
While @{method (HOL) ind_cases} is a proof method to apply the
result immediately as elimination rules, @{command (HOL)
"inductive_cases"} provides case split theorems at the theory level
for later use. The @{keyword "for"} argument of the @{method (HOL)
ind_cases} method allows to specify a list of variables that should
be generalized before applying the resulting rule.
\end{description}
\<close>
chapter \<open>Executable code\<close>
text \<open>For validation purposes, it is often useful to \emph{execute}
specifications. In principle, execution could be simulated by
Isabelle's inference kernel, i.e. by a combination of resolution and
simplification. Unfortunately, this approach is rather inefficient.
A more efficient way of executing specifications is to translate
them into a functional programming language such as ML.
Isabelle provides a generic framework to support code generation
from executable specifications. Isabelle/HOL instantiates these
mechanisms in a way that is amenable to end-user applications. Code
can be generated for functional programs (including overloading
using type classes) targeting SML @{cite SML}, OCaml @{cite OCaml},
Haskell @{cite "haskell-revised-report"} and Scala
@{cite "scala-overview-tech-report"}. Conceptually, code generation is
split up in three steps: \emph{selection} of code theorems,
\emph{translation} into an abstract executable view and
\emph{serialization} to a specific \emph{target language}.
Inductive specifications can be executed using the predicate
compiler which operates within HOL. See @{cite "isabelle-codegen"} for
an introduction.
\begin{matharray}{rcl}
@{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{attribute_def (HOL) code} & : & @{text attribute} \\
@{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
@{attribute_def (HOL) code_post} & : & @{text attribute} \\
@{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
@{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
@{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "code_printing"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "code_identifier"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
@{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
\end{matharray}
@{rail \<open>
@@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
;
const: @{syntax term}
;
constexpr: ( const | 'name._' | '_' )
;
typeconstructor: @{syntax nameref}
;
class: @{syntax nameref}
;
target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval'
;
@@{attribute (HOL) code} ( 'del' | 'equation' | 'abstype' | 'abstract'
| 'drop:' ( const + ) | 'abort:' ( const + ) )?
;
@@{command (HOL) code_datatype} ( const + )
;
@@{attribute (HOL) code_unfold} ( 'del' ) ?
;
@@{attribute (HOL) code_post} ( 'del' ) ?
;
@@{attribute (HOL) code_abbrev}
;
@@{command (HOL) code_thms} ( constexpr + ) ?
;
@@{command (HOL) code_deps} ( constexpr + ) ?
;
@@{command (HOL) code_reserved} target ( @{syntax string} + )
;
symbol_const: ( @'constant' const )
;
symbol_typeconstructor: ( @'type_constructor' typeconstructor )
;
symbol_class: ( @'type_class' class )
;
symbol_class_relation: ( @'class_relation' class ( '<' | '\<subseteq>' ) class )
;
symbol_class_instance: ( @'class_instance' typeconstructor @'::' class )
;
symbol_module: ( @'code_module' name )
;
syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
;
printing_const: symbol_const ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' syntax ? + @'and' )
;
printing_typeconstructor: symbol_typeconstructor ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' syntax ? + @'and' )
;
printing_class: symbol_class ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' @{syntax string} ? + @'and' )
;
printing_class_relation: symbol_class_relation ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' @{syntax string} ? + @'and' )
;
printing_class_instance: symbol_class_instance ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' '-' ? + @'and' )
;
printing_module: symbol_module ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' ( @{syntax string} ( @'attach' ( const + ) ) ? ) ? + @'and' )
;
@@{command (HOL) code_printing} ( ( printing_const | printing_typeconstructor
| printing_class | printing_class_relation | printing_class_instance
| printing_module ) + '|' )
;
@@{command (HOL) code_identifier} ( ( symbol_const | symbol_typeconstructor
| symbol_class | symbol_class_relation | symbol_class_instance
| symbol_module ) ( '\<rightharpoonup>' | '=>' ) \<newline>
( '(' target ')' @{syntax string} ? + @'and' ) + '|' )
;
@@{command (HOL) code_monad} const const target
;
@@{command (HOL) code_reflect} @{syntax string} \<newline>
( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \<newline>
( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
;
@@{command (HOL) code_pred} \<newline> ('(' @'modes' ':' modedecl ')')? \<newline> const
;
modedecl: (modes | ((const ':' modes) \<newline>
(@'and' ((const ':' modes @'and') +))?))
;
modes: mode @'as' const
\<close>}
\begin{description}
\item @{command (HOL) "export_code"} generates code for a given list
of constants in the specified target language(s). If no
serialization instruction is given, only abstract code is generated
internally.
Constants may be specified by giving them literally, referring to
all executable constants within a certain theory by giving @{text
"name._"}, or referring to \emph{all} executable constants currently
available by giving @{text "_"}.
By default, exported identifiers are minimized per module. This
can be suppressed by prepending @{keyword "open"} before the list
of constants.
By default, for each involved theory one corresponding name space
module is generated. Alternatively, a module name may be specified
after the @{keyword "module_name"} keyword; then \emph{all} code is
placed in this module.
For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
refers to a single file; for \emph{Haskell}, it refers to a whole
directory, where code is generated in multiple files reflecting the
module hierarchy. Omitting the file specification denotes standard
output.
Serializers take an optional list of arguments in parentheses.
For \emph{Haskell} a module name prefix may be given using the
``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
datatype declaration.
\item @{attribute (HOL) code} declares code equations for code
generation. Variant @{text "code equation"} declares a conventional
equation as code equation. Variants @{text "code abstype"} and
@{text "code abstract"} declare abstract datatype certificates or
code equations on abstract datatype representations respectively.
Vanilla @{text "code"} falls back to @{text "code equation"}
or @{text "code abstype"} depending on the syntactic shape
of the underlying equation. Variant @{text "code del"}
deselects a code equation for code generation.
Variants @{text "code drop:"} and @{text "code abort:"} take
a list of constants as arguments and drop all code equations declared
for them.  In the case of @{text abort}, these constants are then
not required to have a definition by means of code equations;
if needed these are implemented by program abort (exception) instead.
Usually packages introducing code equations provide a reasonable
default setup for selection.
\item @{command (HOL) "code_datatype"} specifies a constructor set
for a logical type.
\item @{command (HOL) "print_codesetup"} gives an overview on
selected code equations and code generator datatypes.
\item @{attribute (HOL) code_unfold} declares (or with option
``@{text "del"}'' removes) theorems which during preprocessing
are applied as rewrite rules to any code equation or evaluation
input.
\item @{attribute (HOL) code_post} declares (or with option ``@{text
"del"}'' removes) theorems which are applied as rewrite rules to any
result of an evaluation.
\item @{attribute (HOL) code_abbrev} declares (or with option ``@{text
"del"}'' removes) equations which are
applied as rewrite rules to any result of an evaluation and
symmetrically during preprocessing to any code equation or evaluation
input.
\item @{command (HOL) "print_codeproc"} prints the setup of the code
generator preprocessor.
\item @{command (HOL) "code_thms"} prints a list of theorems
representing the corresponding program containing all given
constants after preprocessing.
\item @{command (HOL) "code_deps"} visualizes dependencies of
theorems representing the corresponding program containing all given
constants after preprocessing.
\item @{command (HOL) "code_reserved"} declares a list of names as
reserved for a given target, preventing them from being shadowed by any
generated code.
\item @{command (HOL) "code_printing"} associates a series of symbols
(constants, type constructors, classes, class relations, instances,
module names) with target-specific serializations; omitting a serialization
deletes an existing serialization.
\item @{command (HOL) "code_monad"} provides an auxiliary mechanism
to generate monadic code for Haskell.
\item @{command (HOL) "code_identifier"} associates a series of symbols
(constants, type constructors, classes, class relations, instances,
module names) with target-specific hints how these symbols shall be named.
These hints gain precedence over names for symbols with no hints at all.
Conflicting hints are subject to name disambiguation.
\emph{Warning:} It is at the discretion
of the user to ensure that name prefixes of identifiers in compound
statements like type classes or datatypes are still the same.
\item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
argument compiles code into the system runtime environment and
modifies the code generator setup that future invocations of system
runtime code generation referring to one of the ``@{text
"datatypes"}'' or ``@{text "functions"}'' entities use these
precompiled entities. With a ``@{text "file"}'' argument, the
corresponding code is generated into that specified file without
modifying the code generator setup.
\item @{command (HOL) "code_pred"} creates code equations for a
predicate given a set of introduction rules. Optional mode
annotations determine which arguments are supposed to be input or
output. If alternative introduction rules are declared, one must
prove a corresponding elimination rule.
\end{description}
\<close>
end
|
{"author": "Josh-Tilles", "repo": "isabelle", "sha": "990accf749b8a6e037d25012258ecae20d59ca62", "save_path": "github-repos/isabelle/Josh-Tilles-isabelle", "path": "github-repos/isabelle/Josh-Tilles-isabelle/isabelle-990accf749b8a6e037d25012258ecae20d59ca62/src/Doc/Isar_Ref/HOL_Specific.thy"}
|
### A Pluto.jl notebook ###
# v0.14.5
using Markdown
using InteractiveUtils
# ╔═╡ f11023e5-8f7b-4f40-86d3-3407b61863d9
begin
using PlutoUI, Viznet, Compose, Plots
function shrink(a, b, da, db)
d = b .- a
r = sqrt(sum(abs2, d))
unitd = d ./ r
a .+ unitd .* da, b .- unitd .* db
end
end;
# ╔═╡ ce44f8bd-692e-4eab-9ba4-055b25e40c81
using ForwardDiff: Dual
# ╔═╡ 9a46597c-b1ee-4e3b-aed1-fd2874b6e77a
using BenchmarkTools
# ╔═╡ ccd38f52-104d-434a-aea3-dd94e571374f
using NiLang
# ╔═╡ f4230251-ba54-434a-b86b-f972c7389217
using MacroTools
# ╔═╡ 69dc2685-b70f-4a81-af30-f02e0054bd52
using NiLang.AD
# ╔═╡ 200f1848-0980-4185-919a-93ab2e7f788f
using SparseArrays
# ╔═╡ 30c191c5-642b-4062-98f3-643d314a054d
using LinearAlgebra
# ╔═╡ 864dbde7-b689-4165-a08e-6bbbd72190de
using Test
# ╔═╡ a1ef579e-4b66-4042-944e-7e27c660095e
md"""
```math
\newcommand{\comment}[1]{{\bf \color{blue}{\text{◂~ #1}}}}
```
"""
# ╔═╡ 100b4293-fd1e-4b9c-a831-5b79bc2a5ebe
# Pluto display helpers: wrap two renderable objects in small HTML tables so
# they can be shown side by side (`leftright`) or stacked (`updown`), plus a
# text highlighter.  NOTE(review): `html(x)` is not defined in this cell — it
# presumably comes from the notebook environment (Pluto/PlutoUI); confirm.
begin
	# left right layout: `a` and `b` in one table row; `width` is the total
	# table width in pixels
	function leftright(a, b; width=600)
		HTML("""
<style>
table.nohover tr:hover td {
	background-color: white !important;
}</style>
<table width=$(width)px class="nohover" style="border:none">
<tr>
	<td>$(html(a))</td>
	<td>$(html(b))</td>
</tr></table>
""")
	end
	# up down layout: `a` above `b`; the width attribute is omitted entirely
	# when `width === nothing`
	function updown(a, b; width=nothing)
		HTML("""<table class="nohover" style="border:none" $(width === nothing ? "" : "width=$(width)px")>
<tr>
	<td>$(html(a))</td>
</tr>
<tr>
	<td>$(html(b))</td>
</tr></table>
""")
	end
	# wrap `str` in a yellow-highlight <span>
	function highlight(str)
		HTML("""<span style="background-color:yellow">$(str)</span>""")
	end
end;
# ╔═╡ 9d11e058-a7d0-11eb-1d78-6592ff7a1b43
md"# An introduction to automatic differentiation
-- GiggleLiu"
# ╔═╡ b73157bf-1a77-47b8-8a06-8d6ec2045023
html"<button onclick='present()'>present</button>"
# ╔═╡ ec13e0a9-64ff-4f66-a5a6-5fef53428fa1
md"""
* What is automatic differentiation (AD)?
* A true history of AD
* Forward mode AD
* Reverse mode AD
* primitives on tensors (including Jax, pytorch et al.)
* primitives on elementary instructions (usually source code transformation based)
* defined on a reversible program
* Some applications in **scientific computing**
* solving the graph embedding problem
* inverse engineering a hamiltonian
* obtaining maximum independent set (MIS) configurations
* towards differentiating `expmv` ``\comment{will be used in our emulator}``
"""
# ╔═╡ f8b0d1ce-99f7-4729-b46e-126da540cbbe
md"""
## The true history of automatic differentiation
"""
# ╔═╡ 435ac19e-1c0c-4ee5-942d-f2a97c8c4d80
md"""
* 1964 ~ Robert Edwin Wengert, A simple automatic derivative evaluation program. ``\comment{first forward mode AD}``
* 1970 ~ Seppo Linnainmaa, Taylor expansion of the accumulated rounding error. ``\comment{first backward mode AD}``
* 1986 ~ Rumelhart, D. E., Hinton, G. E., and Williams, R. J., Learning representations by back-propagating errors.
* 1992 ~ Andreas Griewank, Achieving logarithmic growth of temporal and spatial complexity in reverse automatic differentiation. ``\comment{foundation of source code transformation based AD.}``
* 2000s ~ The boom of tensor based AD frameworks for machine learning.
* 2018 ~ People re-invented AD as differential programming ([wiki](https://en.wikipedia.org/wiki/Differentiable_programming) and this [quora answer](https://www.quora.com/What-is-Differentiable-Programming).)

* 2020 ~ Me, Differentiate everything with a reversible embedded domain-specific language ``\comment{AD based on reversible programming}``.
"""
# ╔═╡ 48ecd619-d01d-43ff-8b52-7c2566c3fa2b
md"## Forward mode automatic differentiation"
# ╔═╡ 4878ce45-40ff-4fae-98e7-1be41e930e4d
md"""
Forward mode AD attaches an infinitesimal number $\epsilon$ to a variable; when applying a function $f$, it does the following transformation
```math
\begin{align}
f(x+g \epsilon) = f(x) + f'(x) g\epsilon + \mathcal{O}(\epsilon^2)
\end{align}
```
The higher order infinitesimal is ignored.
**In the program**, we can define a *dual number* with two fields, just like a complex number
```
f((x, g)) = (f(x), f'(x)*g)
```
"""
# ╔═╡ b2c1936c-2c27-4fbb-8183-e38c5e858483
# Forward-mode AD demo: apply sin to a dual number x = π/4 with seed 2.0
res = sin(Dual(π/4, 2.0))
# ╔═╡ 8be1b812-fcac-404f-98aa-0571cb990f34
# the result equals propagating the chain rule by hand: (sin x, cos(x)·seed)
res === Dual(sin(π/4), cos(π/4)*2.0)
# ╔═╡ 33e0c762-c75e-44aa-bfe2-bff92dd1ace8
md"
We can apply this transformation consecutively, it reflects the chain rule.
```math
\begin{align}
\frac{\partial \vec y_{i+1}}{\partial x} &= \boxed{\frac{\partial \vec y_{i+1}}{\partial \vec y_i}}\frac{\partial \vec y_i}{\partial x}\\
&\text{local Jacobian}
\end{align}
```
"
# ╔═╡ c59c35ee-1907-4736-9893-e22c052150ca
# Diagram for forward-mode AD: the chain x → f_{i-1} → f_i, with the
# local-Jacobian update ∂y_{i+1}/∂x = (∂y_{i+1}/∂y_i)(∂y_i/∂x) shown as a red
# left-to-right arrow.  Pure drawing code (Viznet + Compose), no computation.
let
	# text / node / edge styles
	lb = textstyle(:math, fontsize(8), width=0.5, height=0.5)
	tb = textstyle(:default, fontsize(10), Compose.font("monospace"))
	tb_big = textstyle(:default, fontsize(3.5), fill("white"), Compose.font("monospace"))
	nb = nodestyle(:circle, fill("white"), Compose.stroke("black"); r=0.08)
	tri = nodestyle(:triangle, Compose.stroke("transparent"), fill("black"); r=0.02)
	eb = bondstyle(:default, linewidth(0.5mm))
	ebr = bondstyle(:default, Compose.stroke("red"), linewidth(0.5mm))
	ebd = bondstyle(:default, linewidth(0.5mm), dashed=true)
	eba = bondstyle(:default, linewidth(0.5mm), Compose.arrow(), Compose.stroke("red"), Compose.fill("red"))
	# red edge from x to y with a triangular arrow head at the midpoint
	function arrow(x, y)
		mid = (x .+ y) ./ 2
		t = nodestyle(:triangle, fill("red"), θ=π/2-atan((y .- x)...)-1π/6)
		ebr >> (x, y)
		t >> mid
	end
	Compose.set_default_graphic_size(15cm, 5cm)
	# node coordinates on the unit canvas
	x = (0.1, 0.5)
	fi0 = (0.35, 0.5)
	fi1 = (0.7, 0.5)
	fi2 = (1.0, 0.5)
	img = canvas() do
		nb >> fi0
		nb >> fi1
		lb >> (fi0 .- (0.05, 0.1), "f_{i-1}")
		lb >> (fi1 .- (0.02, 0.1), "f_{i}")
		lb >> (x, "x")
		lb >> ((fi1 .+ fi0) ./ 2 .- (0.02, 0.0), raw"\vec{y}_{i}")
		lb >> ((fi1 .+ fi2) ./ 2 .- (0.05, 0.0), raw"\vec{y}_{i+1}")
		# NOTE(review): next line re-draws the same label (escaped form of the
		# raw string above) at the same position — looks like a leftover duplicate
		lb >> ((fi1 .+ fi2) ./ 2 .- (0.05, 0.0), "\\vec{y}_{i+1}")
		lb >> (x .- (0.00, 0.25), raw"\color{red}{1}")
		lb >> ((fi1 .+ fi0) ./ 2 .- (0.05, 0.45), raw"\color{red}{\frac{\partial \vec{y}_{i}}{\partial x}}")
		lb >> ((fi1 .+ fi2) ./ 2 .- (0.08, 0.45), raw"\color{red}{\frac{\partial \vec{y}_{i+1}}{\partial x}}")
		ebd >> (x, fi0)
		eb >> (fi0, fi1)
		eb >> (fi1, fi2)
		#arrow((fi1 .+ fi0) ./ 2 .+ (0.08, -0.3), (fi1 .+ fi2) ./ 2 .+ (-0.08, -0.3))
		arrow((fi1 .+ fi0) ./ 2 .+ (0.08, -0.3), (fi1 .+ fi2) ./ 2 .+ (-0.08, -0.3))
	end
	img
end
# ╔═╡ 0ae13734-b826-4dbf-93d1-11044ce88bd4
x_ = Dual(π/4, 1.0)  # seed 1.0: derivative of downstream results w.r.t. x_
# ╔═╡ 99187515-c8be-49c2-8d70-9c2998d9993c
# returns Dual(sin(π/4), cos(π/4)): value plus derivative in one call
sin(x_)
# ╔═╡ 78ca6b08-84c4-4e4d-8412-ae6c28bfafce
md"when automatic comes in"
# ╔═╡ f12b25d8-7c78-4686-b46d-00b34e565605
# Iterate x ← sin(x)·z ten times on dual numbers: the derivative with respect
# to the initial x (seed 1.0) propagates automatically through every step.
let
	z = Dual(1.1)
	x = Dual(π/4, 1.0)
	step = 0
	while step < 10
		step += 1
		x = sin(x) * z
	end
	x
end
# ╔═╡ d90c3cc9-084d-4cf7-9db7-42cea043030b
md"""
**Example:** Computing two gradients $\frac{\partial z\sin x}{\partial x}$ and $\frac{\partial \sin^2x}{\partial x}$ at one sweep
"""
# ╔═╡ 93c98cb2-18af-47df-afb3-8c5a34b4723c
# Diagram: one forward sweep with a dual number x+ϵˣ computing both outputs
# z*sin(x) and sin(x)^2 — their derivatives w.r.t. x appear simultaneously as
# the ϵˣ coefficients.  Pure drawing code (Viznet + Compose).
let
	lb = textstyle(:math, fontsize(10), width=1.0, height=0.5)
	tb = textstyle(:default, fontsize(3.5), Compose.font("monospace"))
	tb_big = textstyle(:default, fontsize(4.5), fill("white"), Compose.font("monospace"))
	nb = nodestyle(:circle, fill("black"), Compose.stroke("transparent"); r=0.05)
	tri = nodestyle(:triangle, Compose.stroke("transparent"), fill("black"); r=0.02)
	eb = bondstyle(:default, linewidth(0.5mm))
	# coordinates: inputs (x_x, x_z), op nodes (x_sin, x_mul, x_square),
	# outputs (x_y, x_y2)
	x_x = (0.1, 0.25)
	x_y = (0.9, 0.5)
	x_y2 = (0.9, 0.25)
	x_z = (0.3, 0.5)
	x_sin = (0.3, 0.25)
	x_mul = (0.5, 0.5)
	x_square = (0.5, 0.25)
	# black edge with a triangular arrow head at the midpoint
	function arrow(x, y)
		mid = (x .+ y) ./ 2
		t = nodestyle(:triangle, θ=π/2-atan((y .- x)...)-1π/6)
		eb >> (x, y)
		t >> mid
	end
	img = canvas() do
		nb >> x_sin
		nb >> x_mul
		nb >> x_square
		tb_big >> (x_sin, "sin")
		tb_big >> (x_mul .+ (0, 0.01), "*")
		tb_big >> (x_square, "^2")
		arrow(x_sin, x_mul)
		arrow(x_x, x_sin)
		arrow(x_mul, x_y)
		arrow(x_square, x_y2)
		arrow(x_z, x_mul)
		arrow(x_sin, x_square)
		# edge labels: the dual value carried along each wire
		tb >> ((x_x .+ x_sin) ./ 2 .- (0.02, 0.04), "x+ϵˣ")
		tb >> ((x_sin .+ x_mul) ./ 2 .- (0.08, 0.04), "sin(x)+cos(x)*ϵˣ")
		tb >> ((x_y .+ x_mul) ./ 2 .- (-0.04, 0.055), "z*sin(x)\n+z*cos(x)*ϵˣ")
		tb >> ((x_y2 .+ x_square) ./ 2 .- (-0.04, 0.055), "sin(x)^2\n+2*sin(x)*cos(x)*ϵˣ")
		tb >> ((x_z .+ x_mul) ./ 2 .- (0.05, 0.02), "z")
	end
	Compose.set_default_graphic_size(100mm, 100mm/2)
	Compose.compose(context(0, -0.15, 1, 2), img)
end
# ╔═╡ 2dc74e15-e2ea-4961-b43f-0ada1a73d80a
md"so the gradients are $z\cos x$ and $2\sin x\cos x$"
# ╔═╡ 7ee75a15-eaea-462a-92b6-293813d2d4d7
md"""
**What if we want to compute gradients for multiple inputs?**
The computing time grows **linearly** as the number of variables that we want to differentiate. But does not grow significantly with the number of outputs.
"""
# ╔═╡ 02a25b73-7353-43b1-8738-e7ca472d0cc7
md"""
## Reverse mode automatic differentiation
"""
# ╔═╡ 2afb984f-624e-4381-903f-ccc1d8a66a17
md"On the other side, the back-propagation can differentiate **many inputs** with respect to a **single output** efficiently"
# ╔═╡ 7e5d5e69-90f2-4106-8edf-223c150a8168
md"""
```math
\begin{align}
\frac{\partial \mathcal{L}}{\partial \vec y_i} = \frac{\partial \mathcal{L}}{\partial \vec y_{i+1}}&\boxed{\frac{\partial \vec y_{i+1}}{\partial \vec y_i}}\\
&\text{local jacobian?}
\end{align}
```
"""
# ╔═╡ 92d7a938-9463-4eee-8839-0b8c5f762c79
# Diagram for reverse-mode AD: the adjoint ∂L/∂y_i flows right-to-left
# through the chain (red arrow), starting from ∂L/∂L = 1 at the loss node.
# Pure drawing code (Viznet + Compose), no computation.
let
	lb = textstyle(:math, fontsize(8), width=0.5, height=0.5)
	tb = textstyle(:default, fontsize(10), Compose.font("monospace"))
	tb_big = textstyle(:default, fontsize(3.5), fill("white"), Compose.font("monospace"))
	nb = nodestyle(:circle, fill("white"), Compose.stroke("black"); r=0.08)
	tri = nodestyle(:triangle, Compose.stroke("transparent"), fill("black"); r=0.02)
	eb = bondstyle(:default, linewidth(0.5mm))
	ebr = bondstyle(:default, Compose.stroke("red"), linewidth(0.5mm))
	ebd = bondstyle(:default, linewidth(0.5mm), dashed=true)
	eba = bondstyle(:default, linewidth(0.5mm), Compose.arrow(), Compose.stroke("red"), Compose.fill("red"))
	# red edge with a triangular arrow head at the midpoint (points x → y)
	function arrow(x, y)
		mid = (x .+ y) ./ 2
		t = nodestyle(:triangle, fill("red"), θ=π/2-atan((y .- x)...)-1π/6)
		ebr >> (x, y)
		t >> mid
	end
	Compose.set_default_graphic_size(15cm, 5cm)
	# node coordinates: input x, functions fi0/fi1, loss fi2
	x = (0.1, 0.5)
	fi0 = (0.35, 0.5)
	fi1 = (0.7, 0.5)
	fi2 = (0.9, 0.5)
	img = canvas() do
		nb >> fi0
		nb >> fi1
		lb >> (fi0 .- (0.02, 0.1), "f_{i}")
		lb >> (fi1 .- (0.05, 0.1), "f_{i+1}")
		lb >> (fi2 .- (0.05, 0.0), raw"\mathcal{L}")
		lb >> ((fi0 .+ x) ./ 2 .- (0.05, 0.0), raw"\vec{y}_{i}")
		lb >> ((fi0 .+ fi1) ./ 2 .- (0.05, 0.0), raw"\vec{y}_{i+1}")
		# NOTE(review): next line re-draws the same label (escaped form of the
		# raw string above) at the same position — looks like a leftover duplicate
		lb >> ((fi0 .+ fi1) ./ 2 .- (0.05, 0.0), "\\vec{y}_{i+1}")
		lb >> (fi2 .- (0.05, 0.25), raw"\color{red}{1}")
		lb >> ((fi0 .+ x) ./ 2 .- (0.08, 0.45), raw"\color{red}{\frac{\partial \mathcal{L}}{\partial \vec{y}_{i}}}")
		lb >> ((fi0 .+ fi1) ./ 2 .- (0.08, 0.45), raw"\color{red}{\frac{\partial \mathcal{L}}{\partial \vec{y}_{i+1}}}")
		ebd >> (fi1, fi2)
		eb >> (fi0, fi1)
		eb >> (x, fi0)
		#arrow((fi1 .+ fi0) ./ 2 .+ (0.08, -0.3), (fi1 .+ fi2) ./ 2 .+ (-0.08, -0.3))
		arrow( (fi0 .+ fi1) ./ 2 .+ (-0.08, -0.3), (fi0 .+ x) ./ 2 .+ (0.05, -0.3),)
	end
	img
end
# ╔═╡ 4b1a0b59-ddc6-4b2d-b5f5-d92084c31e46
md"### How to visit local Jacobians in the reversed order? "
# ╔═╡ 81f16b8b-2f0b-4ba3-8c26-6669eabf48aa
md"The naive approach is to store everything."
# ╔═╡ fb6c3a48-550a-4d2e-a00b-a1e40d86b535
md"""
**Example:** Computing the gradient $\frac{\partial z\sin x}{\partial x}$ and $\frac{\partial z\sin x}{\partial z}$ by back propagating cached local information.
"""
# ╔═╡ ab6fa4ac-29ed-4722-88ed-fa1caf2072f3
let
lb = textstyle(:math, fontsize(10), width=1.0, height=0.5)
tb = textstyle(:default, fontsize(3.5), Compose.font("monospace"))
tbc = textstyle(:default, fontsize(3.5), fill("red"), Compose.font("monospace"))
tb_big = textstyle(:default, fontsize(4), fill("white"), Compose.font("monospace"))
nb = nodestyle(:circle, fill("black"), Compose.stroke("transparent"); r=0.05)
tri = nodestyle(:triangle, Compose.stroke("transparent"), fill("black"); r=0.02)
eb = bondstyle(:default, linewidth(0.5mm))
x_x = (0.1, 0.2)
x_y = (0.9, 0.5)
x_z = (0.1, 0.7)
x_sin = (0.3, 0.3)
x_mul = (0.5, 0.5)
function arrow(x, y)
mid = (x .+ y) ./ 2
t = nodestyle(:triangle, θ=π/2-atan((y .- x)...)-1π/6)
eb >> (x, y)
t >> mid
end
img1 = canvas() do
nb >> x_sin
nb >> x_mul
tb_big >> (x_sin, "sin")
tb_big >> (x_mul .+ (0, 0.01), "*")
arrow(x_sin, x_mul)
arrow(x_x, x_sin)
arrow(x_mul, x_y)
arrow(x_z, x_mul)
tb >> ((x_x .+ x_sin) ./ 2 .- (0.0, 0.1), "x \n push(Σ,x)")
tb >> ((x_sin .+ x_mul) ./ 2 .- (-0.15, 0.04), "s = sin(x) \n push(Σ,s)")
tb >> ((x_y .+ x_mul) ./ 2 .- (-0.05, 0.04), "y = z*sin(x)")
tb >> ((x_z .+ x_mul) ./ 2 .- (0.05, 0.07), "z\n push(Σ,z)")
end
img2 = canvas() do
nb >> x_sin
nb >> x_mul
tb_big >> (x_sin, "sin")
tb_big >> (x_mul .+ (0, 0.01), "*")
arrow(x_mul, x_sin)
arrow(x_sin, x_x)
arrow(x_y, x_mul)
arrow(x_mul, x_z)
tb >> ((x_x .+ x_sin) ./ 2 .- (0.0, 0.1), "x = pop(Σ)\nx̄ = cos(x)*s̄")
tb >> ((x_sin .+ x_mul) ./ 2 .- (-0.12, 0.04), "z = pop(Σ)\ns̄ = z*ȳ")
tb >> ((x_y .+ x_mul) ./ 2 .- (-0.05, 0.06), "y\nȳ=1")
tb >> ((x_z .+ x_mul) ./ 2 .- (0.05, 0.07), "s = pop(Σ)\nz̄ = s*ȳ")
end
Compose.set_default_graphic_size(150mm, 75mm/1.4)
Compose.compose(context(),
(context(0, -0.1, 0.5, 1.4), img1),
(context(0.5, -0.1, 0.5, 1.4), img2)
)
end
# ╔═╡ 8e72d934-e307-4505-ac82-c06734415df6
md"Here, we use $\overline y$ for $\frac{\partial \mathcal{L}}{\partial y}$, which is also called the adjoint."
# ╔═╡ e6ff86a9-9f54-474b-8111-a59a25eda506
md"### Primitives on different scales"
# ╔═╡ 9c1d9607-a634-4350-aacd-2d40984d647d
md"We call the leaf nodes defining AD rules \"**primitives**\""
# ╔═╡ 63db2fa2-50b2-4940-b8ee-0dc6e3966a57
md"
**Design Decision**
* A: If we define primitives on **arrays**, we need tons of manually defined backward rules. (Jax, Pytorch, Zygote.jl, ReverseDiff.jl et al.)
* B: If we define primitives on **scalar instructions**, we will have worse tensor performance. (Tapenade, Adept, NiLang et al.)
*Note*: Here, implementing AD on scalars refers specifically to the **optimal checkpointing** approach, rather than packages like Jax, Zygote and ReverseDiff that merely have scalar support.
"
# ╔═╡ 693167e7-e80c-401d-af89-55b5fae30848
let
w, h = 0.22, 0.1
lb = Compose.compose(context(), polygon([(-w, -h), (-w, h), (w, h), (w, -h)]), Compose.stroke("transparent"))
lb2 = Compose.compose(context(), polygon([(-w, -h), (-w, h), (w, h), (w, -h)]), Compose.stroke("transparent"), fill("red"))
tb = Compose.compose(context(), Compose.text(0.0, 0.0, ""), fontsize(3), Compose.font("monospace"))
tb_big = textstyle(:default, fontsize(3), fill("white"), Compose.font("monospace"))
eb = bondstyle(:default, linewidth(0.5mm))
ar = bondstyle(:default, linewidth(0.3mm), Compose.arrow())
xprog = (0.25, 0.15)
xtensors = (0.25, 0.5)
t1 = (0.5, 0.15)
t2 = (0.5, 0.5)
t3 = (0.5, 0.85)
xscalars2 = (0.25, 0.85)
function box(loc, text; color="black")
(color=="black" ? lb : lb2) >> loc
tb_big >> (loc, text)
end
Compose.set_default_graphic_size(10cm, 5cm)
canvas() do
box(xprog, "Program")
ar >> (xprog, xtensors .+ (0, -h-0.03))
#ar >> (xprog, xscalars .+ (-w/2, -h-0.03))
ar >> (xtensors, xscalars2 .+ (0, -h-0.05))
box(xtensors, "Functions on arrays")
#box(xscalars, "Functions on Scalars")
box(xscalars2, "Finite instructions"; color="red")
tb >> (t1, "Neural networks")
tb >> (t2, "matrix multiplication")
tb >> (t3, "+, -, *")
end
end
# ╔═╡ 4cd70901-2142-4868-9a33-c46ca0d064ec
html"""
<table>
<tr>
<th width=200></th>
<th width=300>on tensors</th>
<th width=300>on finite instructions</th>
</tr>
<tr style="vertical-align:top">
<td>meaning</td>
<td>defining backward rules manually for functions on tensors</td>
<td>defining backward rules on a limited set of basic scalar operations, and generate gradient code using source code transformation</td>
</tr>
<tr style="vertical-align:top">
<td>pros and cons</td>
<td>
<ol>
<li style="color:green">Good tensor performance</li>
<li style="color:green">Mature machine learning ecosystem</li>
<li style="color:red">Need to define backward rules manually</li>
</ol>
</td>
<td>
<ol>
<li style="color:green">Reasonable scalar performance</li>
<li style="color:red">hard to utilize GPU kernels (except NiLang.jl) and BLAS</li>
</ol>
</td>
<td>
</td>
</tr>
<tr style="vertical-align:top">
<td>packages</td>
<td>Jax<br>PyTorch</td>
<td><a href="http://tapenade.inria.fr:8080/tapenade/">Tapenade</a><br>
<a href="http://www.met.reading.ac.uk/clouds/adept/">Adept</a><br>
<a href="https://github.com/GiggleLiu/NiLang.jl">NiLang.jl</a>
</td>
</tr>
</table>
"""
# ╔═╡ 89018a35-76f4-4f23-b15a-a600db046d6f
md"## A book"
# ╔═╡ 1d219222-0778-4c37-9182-ed5ccbb3ef32
leftright(html"""
<img src="https://images-na.ssl-images-amazon.com/images/I/51+dn97bfKL._SY344_BO1,204,203,200_.jpg"/>
""", md"**Evaluating derivatives: principles and techniques of algorithmic differentiation**
By: Griewank, Andreas, and Andrea Walther
(2008)")
# ╔═╡ 4ff09f7c-aeac-48bd-9d58-8446137c3acd
md"""
## The AD ecosystem in Julia
Please check JuliaDiff: [https://juliadiff.org/](https://juliadiff.org/)
A short list:
* Forward mode AD: ForwardDiff.jl
* Reverse mode AD (tensor): ReverseDiff.jl/Zygote.jl
* Reverse mode AD (scalar): NiLang.jl
Warnings
* The main authors of `Tracker`, `ReverseDiff` and `Zygote` are not maintaining them anymore.
"""
#=
| | Rules | Favors Tensor? | Type |
| ---- | ---- | --- | --- |
| Zygote | C | ✓ | R |
| ReverseDiff | D | ✓ | R |
| Nabla | D→C | ✓ | R |
| Tracker | D | ✓ | R |
| Yota | C | ✓ | R |
| NiLang | - | × | R |
| Enzyme | - | × | R |
| ForwardDiff | - | × | F |
| Diffractor | ? | ? | ? |
* R: reverse mode
* F: forward mode
* C: ChainRules
* D: DiffRules
"""
=#
# ╔═╡ ea44037b-9359-4fbd-990f-529d88d54351
md"# Quick summary
1. The history of AD is longer than many people think. People are most familiar with *reverse mode AD with primitives implemented on tensors*, which brought the boom of machine learning. There are also AD frameworks that can differentiate a general program directly, without requiring users to define AD rules manually.
2. **Forward mode AD** propagates gradients forward; it has a computational overhead proportional to the number of input parameters.
3. **Backward mode AD** propagates gradients backward; it has a computational overhead proportional to the number of output parameters.
* primitives on **tensors** v.s. **scalars**
* it is very expensive to reverse the program
4. Julia has one of the most active AD community!
#### Forward v.s. Backward
when is forward mode AD more useful?
* It is often combined with backward mode AD for obtaining Hessians (forward over backward).
* Having <20 input parameters.
when is backward mode AD more useful?
* In most variational optimizations, especially when we are training a neural network with ~ 100M parameters.
"
# ╔═╡ e731a8e3-6462-4a60-83e9-6ab7ddfff50e
md"# How do AD libraries work?"
# ╔═╡ 685c2b28-b071-452c-a881-801128dcb6c3
md"`ForwardDiff` is operator overloading based, many of its overheads can be optimized by Julia's JIT compiler."
# ╔═╡ 177ddfc2-2cbe-4dba-9d05-2857633dd1ae
md"# [Tapenade](http://tapenade.inria.fr:8080/tapenade/index.jsp)
"
# ╔═╡ 6c2a3a93-385f-4758-9b6e-4cb594a8e856
md"## Example 1: Bessel Example"
# ╔═╡ fb8168c2-8489-418b-909b-cede57b5ae64
md"bessel.f90"
# ╔═╡ fdb39284-dbb1-49fa-9a1c-f360f9e6b765
md"""
```fortran
subroutine besselj(res, v, z, atol)
implicit none
integer, intent(in) :: v
real*8, intent(in) :: z, atol
real*8, intent(out) :: res
real*8 :: s
integer :: k, i, factv
k = 0
factv = 1
do i = 2,v
factv = factv * i
enddo
s = (z/2.0)**v / factv
res = s
do while(abs(s) > atol)
k = k + 1
s = -s / k / (k+v) * ((z/2) ** 2)
res = res + s
enddo
endsubroutine besselj
```
"""
# ╔═╡ 60214f22-c8bb-4a32-a882-4e6c727b29a9
md"""
besselj_d.f90 (forward mode)
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.15 (master) - 15 Apr 2020 11:54
!
! Differentiation of besselj in forward (tangent) mode:
! variations of useful results: res
! with respect to varying inputs: z
! RW status of diff variables: res:out z:in
SUBROUTINE BESSELJ_D(res, resd, v, z, zd, atol)
IMPLICIT NONE
INTEGER, INTENT(IN) :: v
REAL*8, INTENT(IN) :: z, atol
REAL*8, INTENT(IN) :: zd
REAL*8, INTENT(OUT) :: res
REAL*8, INTENT(OUT) :: resd
REAL*8 :: s
REAL*8 :: sd
INTEGER :: k, i, factv
INTRINSIC ABS
REAL*8 :: abs0
REAL*8 :: pwx1
REAL*8 :: pwx1d
REAL*8 :: pwr1
REAL*8 :: pwr1d
INTEGER :: temp
k = 0
factv = 1
DO i=2,v
factv = factv*i
END DO
pwx1d = zd/2.0
pwx1 = z/2.0
IF (pwx1 .LE. 0.0 .AND. (v .EQ. 0.0 .OR. v .NE. INT(v))) THEN
pwr1d = 0.0_8
ELSE
pwr1d = v*pwx1**(v-1)*pwx1d
END IF
pwr1 = pwx1**v
sd = pwr1d/factv
s = pwr1/factv
resd = sd
res = s
DO WHILE (.true.)
IF (s .GE. 0.) THEN
abs0 = s
ELSE
abs0 = -s
END IF
IF (abs0 .GT. atol) THEN
k = k + 1
temp = k*(k+v)*(2*2)
sd = -((z**2*sd+s*2*z*zd)/temp)
s = -(s*(z*z)/temp)
resd = resd + sd
res = res + s
ELSE
EXIT
END IF
END DO
END SUBROUTINE BESSELJ_D
```
besselj_b.f90 (backward mode)
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.15 (master) - 15 Apr 2020 11:54
!
! Differentiation of besselj in reverse (adjoint) mode:
! gradient of useful results: res z
! with respect to varying inputs: res z
! RW status of diff variables: res:in-zero z:incr
SUBROUTINE BESSELJ_B(res, resb, v, z, zb, atol)
IMPLICIT NONE
INTEGER, INTENT(IN) :: v
REAL*8, INTENT(IN) :: z, atol
REAL*8 :: zb
REAL*8 :: res
REAL*8 :: resb
REAL*8 :: s
REAL*8 :: sb
INTEGER :: k, i, factv
INTRINSIC ABS
REAL*8 :: abs0
REAL*8 :: tempb
INTEGER :: ad_count
INTEGER :: i0
INTEGER :: branch
k = 0
factv = 1
DO i=2,v
factv = factv*i
END DO
s = (z/2.0)**v/factv
ad_count = 1
DO WHILE (.true.)
IF (s .GE. 0.) THEN
abs0 = s
ELSE
abs0 = -s
END IF
IF (abs0 .GT. atol) THEN
CALL PUSHINTEGER4(k)
k = k + 1
CALL PUSHREAL8(s)
s = -(s/k/(k+v)*(z/2)**2)
ad_count = ad_count + 1
ELSE
GOTO 100
END IF
END DO
CALL PUSHCONTROL1B(0)
GOTO 110
100 CALL PUSHCONTROL1B(1)
110 DO i0=1,ad_count
IF (i0 .EQ. 1) THEN
CALL POPCONTROL1B(branch)
IF (branch .EQ. 0) THEN
sb = 0.0_8
ELSE
sb = 0.0_8
END IF
ELSE
sb = sb + resb
CALL POPREAL8(s)
tempb = -(sb/(k*(k+v)*2**2))
sb = z**2*tempb
zb = zb + 2*z*s*tempb
CALL POPINTEGER4(k)
END IF
END DO
sb = sb + resb
IF (.NOT.(z/2.0 .LE. 0.0 .AND. (v .EQ. 0.0 .OR. v .NE. INT(v)))) zb = &
& zb + v*(z/2.0)**(v-1)*sb/(2.0*factv)
resb = 0.0_8
END SUBROUTINE BESSELJ_B
```
"""
# ╔═╡ 7a6dbe09-cb7f-405f-b9b5-b350ca170e5f
md"## Example 2: Matrix multiplication"
# ╔═╡ 5dc4a849-76dd-4c4f-8828-755671839e5e
md"""
matmul_b.f90
```fortran
! Generated by TAPENADE (INRIA, Ecuador team)
! Tapenade 3.16 (develop) - 9 Apr 2021 17:40
!
! Differentiation of mymatmul in reverse (adjoint) mode:
! gradient of useful results: x y z
! with respect to varying inputs: x y z
! RW status of diff variables: x:incr y:incr z:in-out
SUBROUTINE MYMATMUL_B(z, zb, x, xb, y, yb, m, n, o)
IMPLICIT NONE
INTEGER, INTENT(IN) :: m, n, o
REAL*8, DIMENSION(:, :) :: z(m, n)
REAL*8 :: zb(m, n)
REAL*8, DIMENSION(:, :), INTENT(IN) :: x(m, o), y(o, n)
REAL*8 :: xb(m, o), yb(o, n)
REAL*8 :: temp
REAL*8 :: tempb
INTEGER :: i, j, k
DO j=n,1,-1
DO i=m,1,-1
tempb = zb(i, j)
zb(i, j) = 0.0_8
DO k=o,1,-1
xb(i, k) = xb(i, k) + y(k, j)*tempb
yb(k, j) = yb(k, j) + x(i, k)*tempb
END DO
END DO
END DO
END SUBROUTINE MYMATMUL_B
```
"""
# ╔═╡ b053f11b-9ed7-47ff-ab32-0c70b87e71ed
md"## Example 3: Pyramid"
# ╔═╡ 7b1aa6dd-647f-44cb-b580-b58e23e8b5a6
html"""
<img src="https://user-images.githubusercontent.com/6257240/117090732-228e1a00-ad27-11eb-8231-09c462a17dc7.png" width=500/>
"""
# ╔═╡ b96bac75-b4ad-45f7-aeec-cb6a387eebf0
md"You will see a lot of allocations"
# ╔═╡ 5fe022eb-6a17-466e-a6d0-d67e82af23cd
md"pyramid.f90"
# ╔═╡ 92047e95-7eba-4021-9668-9bb4b92261d7
md"""
```fortran
! Differentiation of pyramid in reverse (adjoint) mode:
! gradient of useful results: v x
! with respect to varying inputs: v x
! RW status of diff variables: v:in-out x:incr
SUBROUTINE PYRAMID_B(v, vb, x, xb, n)
IMPLICIT NONE
INTEGER, INTENT(IN) :: n
REAL*8 :: v(n, n)
REAL*8 :: vb(n, n)
REAL*8, INTENT(IN) :: x(n)
REAL*8 :: xb(n)
INTEGER :: i, j
INTRINSIC SIN
INTRINSIC COS
INTEGER :: ad_to
DO j=1,n
v(1, j) = x(j)
END DO
DO i=1,n-1
DO j=1,n-i
CALL PUSHREAL8(v(i+1, j))
v(i+1, j) = SIN(v(i, j))*COS(v(i, j+1))
END DO
CALL PUSHINTEGER4(j - 1)
END DO
DO i=n-1,1,-1
CALL POPINTEGER4(ad_to)
DO j=ad_to,1,-1
CALL POPREAL8(v(i+1, j))
vb(i, j) = vb(i, j) + COS(v(i, j))*COS(v(i, j+1))*vb(i+1, j)
vb(i, j+1) = vb(i, j+1) - SIN(v(i, j+1))*SIN(v(i, j))*vb(i+1, j)
vb(i+1, j) = 0.0_8
END DO
END DO
DO j=n,1,-1
xb(j) = xb(j) + vb(1, j)
vb(1, j) = 0.0_8
END DO
END SUBROUTINE PYRAMID_B
```
"""
# ╔═╡ e2ae1084-8759-4f27-8ad1-43a88e434a3d
md"## How does NiLang avoid too many allocation?"
# ╔═╡ edd3aea8-abdb-4e12-9ef9-12ac0fff835b
# Reversible (NiLang) pyramid benchmark: computes the same recurrence as
# `pyramid0!` but as an @i reversible program, so gradients can be obtained
# by running it backwards instead of caching every intermediate value.
@i function pyramid!(y!, v!, x::AbstractVector{T}) where T
    # v! must be square with side length equal to length(x)
    @safe @assert size(v!,2) == size(v!,1) == length(x)
    # load inputs into the first row (reversible accumulation onto zeros)
    @inbounds for j=1:length(x)
        v![1,j] += x[j]
    end
    @invcheckoff @inbounds for i=1:size(v!,1)-1
        for j=1:size(v!,2)-i
            # compute-uncompute: temporaries c and s are zero-allocated,
            # used once, then reversibly deallocated by ~@routine
            @routine begin
                @zeros T c s
                c += cos(v![i,j+1])
                s += sin(v![i,j])
            end
            v![i+1,j] += c * s
            ~@routine
        end
    end
    # scalar output: the apex of the pyramid
    y! += v![end,1]
end
# ╔═╡ a2904efb-186c-449d-b1aa-caf530f88e91
# Reversible cube: accumulates x^3 into x3 using the compute-copy-uncompute
# pattern, so the temporary x2 = x^2 leaves no garbage behind.
@i function power(x3, x)
    @routine begin
        x2 ← zero(x)   # allocate a zeroed temporary
        x2 += x^2
    end
    x3 += x2 * x
    ~@routine          # reversibly deallocate x2
end
# ╔═╡ 14faaf82-ad3e-4192-8d48-84adfa30442d
ex = NiLangCore.precom_ex(NiLang, :(for j=1:size(v!,2)-i
@routine begin
@zeros T c s
c += cos(v![i,j+1])
s += sin(v![i,j])
end
v![i+1,j] += c * s
~@routine
end)) |> NiLangCore.rmlines
# ╔═╡ 5d141b88-ec07-4a02-8eb3-37405e5c9f5d
NiLangCore.dual_ex(NiLang, ex)
# ╔═╡ 0907e683-f216-4cf6-a210-ae5181fdc487
"""
    pyramid0!(v!, x)

Irreversible reference implementation of the pyramid recurrence: row 1 of
`v!` receives the inputs `x`, and each subsequent row combines neighbouring
entries of the row above via `v[i+1,j] = cos(v[i,j+1]) * sin(v[i,j])`.
`v!` must be square with side length `length(x)`.
"""
function pyramid0!(v!, x::AbstractVector{T}) where T
    n = length(x)
    @assert size(v!,2) == size(v!,1) == n
    # seed the first row with the inputs
    for col = 1:n
        v![1, col] = x[col]
    end
    # each row is one element shorter than the row above
    @inbounds for row = 1:n-1
        for col = 1:n-row
            v![row+1, col] = cos(v![row, col+1]) * sin(v![row, col])
        end
    end
end
# ╔═╡ 0bbfa106-f465-4a7b-80a7-7732ba435822
x = randn(20);
# ╔═╡ 805c7072-98fa-4086-a69d-2e126c55af36
let
@benchmark pyramid0!(v, x) seconds=1 setup=(x=randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ 7e527024-c294-4c16-8626-9953588d9b6a
let
@benchmark pyramid!(0.0, v, x) seconds=1 setup=(x=10*randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ 3e59c65a-ceed-42ed-be64-a6964db016e7
pyramid!(0.0, zeros(20, 20), x)
# ╔═╡ 29f85d05-99fd-4843-9be0-5663e681dad7
html"""<img src="https://github.com/GiggleLiu/NiLang.jl/blob/master/examples/pyramid-benchmark.png?raw=true" width=500/>
"""
# ╔═╡ e7830e55-bd9e-4a8a-9239-4191a5f0b1d1
let
@benchmark NiLang.AD.gradient(Val(1), pyramid!, (0.0, v, x)) seconds=1 setup=(x=randn(1000); v=zeros(1000, 1000))
end
# ╔═╡ de2cd247-ba68-4ba4-9784-27a743478635
md"## NiLang's implementation"
# ╔═╡ dc929c23-7434-4848-847a-9fa696e84776
md"""
```math
\begin{align}
&v_{−1} &= & x_1 &=&1.5000\\
&v_0 &= & x_2 &=&0.5000\\
&v_1 &= & v_{−1}/v_0 &=&1.5000/0.5000 &= 3.0000\\
&v_2 &= & \sin(v_1)&=& \sin(3.0000) &= 0.1411\\
&v_3 &= & \exp(v_0)&=& \exp(0.5000) &= 1.6487\\
&v_4 &= & v_1 − v_3 &=&3.0000 − 1.6487 &= 1.3513\\
&v_5 &= & v_2 + v_4 &=&0.1411 + 1.3513 &= 1.4924\\
&v_6 &= & v_5 ∗ v_4 &=&1.4924 ∗ 1.3513 &= 2.0167\\
&y &= & v_6 &=&2.0167
\end{align}
```
"""
# ╔═╡ 4f1df03f-c315-47b1-b181-749e1231594c
html"""
<img src="https://user-images.githubusercontent.com/6257240/117074233-168f6180-ad01-11eb-8b16-7ae9836cfdcd.png" width=400/>
"""
# ╔═╡ 7eccba6a-3ad5-440b-9c5d-392dc8dc7aba
# Reversible implementation of the textbook linearized-computational-graph
# example (cf. the evaluation trace shown above): computes
# y += (sin(v1) + v1 - exp(x2)) * (v1 - exp(x2)) with v1 = x1/x2.
@i function example_linear(y::T, x1::T, x2::T) where T
    @routine begin
        @zeros T v1 v2 v3 v4 v5   # zero-initialized intermediates
        v1 += x1 / x2
        v2 += sin(v1)
        v3 += exp(x2)
        v4 += v1 - v3
        v5 += v2 + v4
    end
    y += v5 * v4
    ~@routine   # uncompute v1..v5 so only (y, x1, x2) survive
end
# ╔═╡ 4a858a3e-ce28-4642-b061-3975a3ed99ff
md"NOTES:
* a statement changes values in place directly,
* there is no return statement; the input arguments are returned directly
* `@routine <compute>; <copy statements>; ~@routine` is Bennett's compute-copy-uncompute design pattern
"
# ╔═╡ 674bb3bb-637b-44f2-bf6d-d1678da03fbd
PlusEq(identity)(2, 3)
# ╔═╡ 5a59d96f-b2f1-4564-82c7-7f0fe181afb8
prettify(@macroexpand @i function f(y::T, x::T) where T
y.re += x.re
end)
# ╔═╡ 55d2f8ee-4f77-4d44-b704-30643dbbab84
# Reversible field update on a complex number: accumulates x.re into y.re.
@i function f3(y::T, x::T) where T
    y.re += x.re
end
# ╔═╡ 14951168-97c2-43ae-8d5e-5506408a2bb2
f3(1+2im, 2+3im)
# ╔═╡ 4f564581-6032-449c-8b15-3c741f44237a
x5 = GVar(3+4.0im)
# ╔═╡ a36516e8-76c1-4bff-8a12-3e1e621b857d
~example_linear
# ╔═╡ 402b861c-d363-4d23-b9e9-eb088f57b5c4
expre = NiLangCore.precom_ex(@__MODULE__, :(begin
@routine begin
@zeros T v1 v2 v3 v4 v5
v1 += x1 / x2
v2 += sin(v1)
v3 += exp(x2)
v4 += v1 - v3
v5 += v2 + v4
end
y += v5 * v4
~@routine
end), NiLangCore.PreInfo(Symbol[])) |> NiLangCore.rmlines
# ╔═╡ 63975a80-1b41-4f55-91a1-4a316ad7bf26
example_linear(0.0, 1.5, 0.5)
# ╔═╡ 6f688f88-432a-42b2-a2db-19d6bb282e0a
NiLangCore.dual_ex(@__MODULE__, expre)
# ╔═╡ fb46db14-f7e0-4f01-9096-02334c62942d
(~example_linear)(example_linear(0.0, 1.5, 0.5)...)
# ╔═╡ b2c3db3d-c250-4daa-8453-3c9a2734aede
md"**How to get gradients?**"
# ╔═╡ 9a986264-5ba7-4697-a00d-711f8efe29f0
let
y, x1, x2 = 0.0, 1.5, 0.5
# compute
(y_out, x1_out, x2_out) = example_linear(y, x1, x2)
# wrap elements with GVar
y_out_with_g = GVar(y_out, 1.0)
x1_out_with_g = GVar(x1_out, 0.0)
x2_out_with_g = GVar(x2_out, 0.0)
# uncompute
(y_with_g, x1_with_g, x2_with_g) = (~example_linear)(y_out_with_g, x1_out_with_g, x2_out_with_g)
# get gradients
grad(y_with_g), grad(x1_with_g), grad(x2_with_g)
end
# ╔═╡ 560cf3e9-0c14-4497-85b9-f07045eea32a
with_terminal() do
dump(GVar)
end
# ╔═╡ 8ab79efc-e8d0-4c6f-81df-a89008142bb7
gvar1 = GVar(1.5, 0.0)
# ╔═╡ 0eec318c-2c09-4dd6-9187-9c0273d29915
grad(gvar1)
# ╔═╡ 1f0ef29c-0ad5-4d97-aeed-5ff44e86577a
gvar2 = GVar(1.0, 2.0)
# ╔═╡ 603d8fc2-5e7b-4d55-92b6-208b25ea6569
grad(gvar2)
# ╔═╡ 2b3c765e-b505-4f07-9bcb-3c8cc47364ad
md"To differentiate operation `y += exp(x)`, we bind the backward rule on its inverse `y -= exp(x)`, i.e. `MinusEq(exp)` in the program."
# ╔═╡ e0f266da-7e65-4398-bfd4-a6c0b54e626b
MinusEq(exp)(gvar2, gvar1)
# ╔═╡ e1d35886-79d0-40a5-bd33-1c4e5f4a0a9a
md"""
```math
\left(\begin{matrix}\overline y& \overline x\end{matrix}\right) \rightarrow \left(\begin{matrix}\overline y& \overline x\end{matrix}\right)\left(\begin{matrix}
1 & \exp(x) \\
0 & 1
\end{matrix}\right) = \left(\begin{matrix}\overline y& \overline x + \exp(x) \overline y\end{matrix}\right)
```
"""
# ╔═╡ b63a30b0-c75b-4998-a2b2-0b79574cab81
exp(1.5) * 2
# ╔═╡ 139bf020-c4a8-45c8-96fa-aeebc7ddaedc
md"*one line version*"
# ╔═╡ 8967c0f0-89f8-4893-b11b-253333d1a823
NiLang.AD.gradient(example_linear, (0.0, 1.5, 0.5); iloss=1)
# ╔═╡ f2540450-5a07-4fb8-93fb-a6d48dd36a56
md"## Control Flows"
# ╔═╡ 3acb2cfd-fa29-4a2b-8f23-f5aaf474edd0
(@code_julia for i=1:10
x += y
end) |> NiLangCore.rmlines
# ╔═╡ aa1547f2-5edd-4b7e-b93e-bdfc4e4fc6d5
md"""# Memory Management"""
# ╔═╡ 6e76a107-4f51-4e32-b133-7b6e04d7d107
md"The true reverse mode autodiff has to handle the memory wall problem."
# ╔═╡ 999f7a8f-d72e-4ccd-8cbf-b5bbb7db1842
md"""
## Checkpointing
"""
# ╔═╡ 32772c2a-6b80-4779-963c-06974ff0d832
html"""
<img src="https://raw.githubusercontent.com/GiggleLiu/WuLiXueBao/master/paper/tikzimg-1.svg" style="clip-path: inset(0px 300px 40px 0px); margin-left:40px;" width=600/>
"""
# ╔═╡ 41642bd5-1321-490a-95ad-4c1d6363456f
md"
* red arrow: back propagation
* black dot: cached
* white dot: not cached
"
# ╔═╡ 2a553e32-05ef-4c2d-aba7-41185c6035d4
md"Most time efficient (checkpoint every step)"
# ╔═╡ ab8345ce-e038-4d6b-9e1f-57e4f33bb67b
html"""
<img src="https://raw.githubusercontent.com/GiggleLiu/WuLiXueBao/master/paper/tikzimg3-1.svg" style="clip-path: inset(0px 0px 0px 0px); margin-left:40px;" width=300/>
"""
# ╔═╡ bb9c9a4c-601a-4708-9b2d-04d1583938f2
md"Most space efficient (only checkpoint the first step)"
# ╔═╡ b9917e94-c33d-423f-a478-3252bacc2494
html"""
<img src="https://raw.githubusercontent.com/GiggleLiu/WuLiXueBao/master/paper/tikzimg4-1.svg" style="clip-path: inset(0px 0px 0px 0px); margin-left:40px;" width=300/>
"""
# ╔═╡ 4978f404-11ff-41b8-a673-f2d051b1f526
md"Given a restricted number of checkpoints, is an evenly checkpointed program optimal?"
# ╔═╡ 73bd2e3b-902f-461b-860f-246257608ecd
html"""
<img src="https://raw.githubusercontent.com/GiggleLiu/WuLiXueBao/master/paper/tikzimg2-1.svg" style="clip-path: inset(0px 0px 0px 0px); margin-left:40px;" width=500/>
"""
# ╔═╡ 4dd47dc8-6dfa-47a4-a088-689b4b870762
md"## Optimal checkpointing"
# ╔═╡ ecd975d2-9374-4f40-80ac-2cceda11e7fb
md"""
1992 ~ Andreas Griewank, Achieving logarithmic growth of temporal and spatial complexity in reverse automatic differentiation.
Julia implementation: [TreeverseAlgorithm.jl](https://github.com/GiggleLiu/TreeverseAlgorithm.jl)
"""
# ╔═╡ 832cc81d-a49d-46e7-9d2b-d8bde9bb1273
html"""
<img src="https://user-images.githubusercontent.com/6257240/116494309-91263000-a86e-11eb-8054-9b91646be0e5.png" style="clip-path: inset(74px 350px 0px 0px);"/>
"""
# ╔═╡ 2192a1de-1042-4b13-a313-b67de489124c
md"""
1. Divide the program into ``\delta`` segments, each segment having size $\eta(\delta, \tau) = \frac{(\delta+\tau)!}{\delta! \tau!}$, where ``\delta=1,...,d`` and ``\tau=t-1``.
2. Cache the first state of each segment,
3. Compute gradients in the last segment,
4. Deallocate last checkpoint,
5. Divide the second-to-last segment into two parts.
6. Recursively apply treeverse (Step 2-5).
"""
# ╔═╡ 01c709c7-806c-4389-bbb2-4081e64426d9
md"total number of steps ``T = \eta(d, t)``, both ``t`` and ``d`` can be logarithmic"
# ╔═╡ b1e0cf83-4337-4044-a7d1-5fca8ae79268
md"## An example"
# ╔═╡ 71f4b476-027d-4c8f-b561-1ee418bc9e61
html"""
<img src="https://raw.githubusercontent.com/GiggleLiu/WuLiXueBao/master/paper/bennett_treeverse_pebbles.svg" style="clip-path: inset(50px 350px 0px 0px);"/>
"""
# ╔═╡ 042013cf-9cd2-409d-827f-a311a2f8ce62
md"""
* black dot: current step,
* gray dot: checkpointed state,
* empty dot: state deallocated in current step,
* red square: gradient computed.
"""
# ╔═╡ 82593cd0-1403-4597-8370-919c80494479
md"# Program is not always linear!"
# ╔═╡ f58720b5-2bcb-4950-b453-bd59f648c66a
md"You think your program is like"
# ╔═╡ 4576d791-6af7-4ba5-9b80-fe99c0bb2e88
let
Compose.set_default_graphic_size(15cm, 3cm)
nb = nodestyle(:circle, r=0.01)
eb = compose(context(), bondstyle(:default, r=0.1), Compose.arrow(), linewidth(0.2mm))
loc(i) = (i/11, 0.5)
eloc(i) = (loc(i-1) .- (-0.02, 0.0), loc(i) .- (0.025, 0.0))
canvas() do
for i=1:10
nb >> loc(i)
i == 1 || eb >> eloc(i)
end
end
end
# ╔═╡ 6e9d17f1-b17d-4e8d-82a3-921558a20c0f
md"or a DAG (directed acyclic graph)"
# ╔═╡ f18d89f5-1129-43e0-8b4a-5c1fcd618eab
let
Compose.set_default_graphic_size(15cm, 3cm)
nb = nodestyle(:circle, r=0.01)
eb = compose(context(), bondstyle(:default, r=0.1), Compose.arrow(), linewidth(0.2mm))
loc(i) = (i/11, 0.2)
loc2(i) = (i/11, 0.7)
eloc(i, j) = shrink(loc(i), loc(j), 0.02, 0.025)
eloc2(i, j) = shrink(loc2(i), loc2(j), 0.02, 0.025)
eloc12(i, j) = shrink(loc(i), loc2(j), 0.1, 0.15)
eloc21(i, j) = shrink(loc2(i), loc(j), 0.05, 0.1)
canvas() do
for i=1:10
nb >> loc(i)
i == 1 || eb >> eloc(i-1,i)
end
for i=2:5
nb >> loc2(i)
i == 2 || eb >> eloc2(i-1, i)
end
eb >> eloc12(2,2)
eb >> eloc12(4,5)
eb >> eloc21(5,7)
end
end
# ╔═╡ 2912c7ed-75e3-4dfd-9c40-92115cc08194
md"The truth is"
# ╔═╡ 5d1517c0-562b-40db-bec2-32b5494de1b8
let
Compose.set_default_graphic_size(15cm, 3cm)
nb = nodestyle(:circle, r=0.01)
tb = textstyle(:default)
eb = compose(context(), bondstyle(:default, r=0.1), Compose.arrow(), linewidth(0.2mm))
eb2 = compose(context(), bondstyle(:dcurve, r=0.8), Compose.arrow(), linewidth(0.2mm))
loc(i) = (i/11, 0.2)
loc2(i) = (i/11, 0.7)
eloc(i, j) = shrink(loc(i), loc(j), 0.02, 0.025)
eloc2(i, j) = shrink(loc2(j), loc2(i), 0.02, 0.025)
eloc12(i, j) = shrink(loc2(j), loc(i), 0.1, 0.15)
eloc21(i, j) = shrink(loc(j), loc2(i), 0.05, 0.1)
canvas() do
for i=1:10
nb >> loc(i)
i == 1 || eb >> eloc(i-1,i)
end
for i=2:5
nb >> loc2(i)
i == 2 || eb >> eloc2(i-1, i)
end
eb >> eloc12(2,2)
eb >> eloc12(4,5)
tb >> ((0.3, 0.45), "× n")
for i=7:8
nb >> loc2(i)
i == 7 || eb >> eloc2(i-1, i)
end
eb >> eloc12(7,7)
eb >> eloc12(8,8)
tb >> ((0.68, 0.45), "× ∞")
eb2 >> (loc(6) .+ (0.0, 0.1), loc(9) .+ (0, 0.15))
end
end
# ╔═╡ ae096ad2-3ae9-4440-a959-0d7d9a174f1d
md"## Example 3: Sparse matrix multiplication"
# ╔═╡ 8148bc1f-ef99-40a4-a5ce-0a42643f703d
md"original implementation: [https://github.com/JuliaLang/julia/blob/master/stdlib/SparseArrays/src/linalg.jl](https://github.com/JuliaLang/julia/blob/master/stdlib/SparseArrays/src/linalg.jl)
"
# ╔═╡ bd86c5c2-16be-4cfd-ba7a-a0e2544d82d1
# Reversible sparse(CSC) * dense multiply-accumulate: C += α * A * B.
# Mirrors the loop structure of SparseArrays' mul!; the scaled entry αxj is
# compute-uncomputed per column so no temporary garbage accumulates.
@i function mul!(C::StridedVecOrMat{T}, A::SparseMatrixCSC{T}, B::StridedVecOrMat{T}, α::Number) where T
    @safe A.n == size(B, 1) || throw(DimensionMismatch())
    @safe A.m == size(C, 1) || throw(DimensionMismatch())
    @safe size(B, 2) == size(C, 2) || throw(DimensionMismatch())
    @invcheckoff for k = 1:size(C, 2)
        @inbounds for col = 1:A.n
            @routine begin
                αxj ← zero(T)
                αxj += α*B[col,k]   # scale the dense entry once per column
            end
            # scatter into the rows stored for this CSC column
            for j = A.colptr[col]:(A.colptr[col + 1] - 1)
                C[A.rowval[j], k] += A.nzval[j]*αxj
            end
            ~@routine
        end
    end
end
# ╔═╡ 11557d6b-3a1e-416d-874f-b8d217976f76
md"## Example 4: How to differentiate QR"
# ╔═╡ 48a10ea2-5d32-4a55-b8c0-f6a5e82eace9
md"original implementation: [https://github.com/JuliaLang/julia/blob/master/stdlib/LinearAlgebra/src/qr.jl](https://github.com/JuliaLang/julia/blob/master/stdlib/LinearAlgebra/src/qr.jl)
"
# ╔═╡ fafc1b0f-6469-4b6c-a00d-5272a45fc69b
md"See also"
# ╔═╡ ad6cff7b-5cbf-4ab1-94f7-d21cbc171000
leftright(html"<img src='https://images-na.ssl-images-amazon.com/images/I/41JjpllrDrL._SX364_BO1,204,203,200_.jpg' width=150/>", md"**Matrix computations**
Golub, Gene H., and Charles F. Van Loan (2013)")
# ╔═╡ 4d373cf6-9b39-44bc-8f13-220933fc8f5c
"""
    qrfactPivotedUnblocked!(A)

In-place, unblocked Householder QR with column pivoting (a port of the
LinearAlgebra-internal routine of the same name). Returns a
`LinearAlgebra.QRPivoted` wrapping the overwritten `A`, the reflector
coefficients `τ`, and the column-pivot vector `piv`.
NOTE(review): relies on the non-exported `indmaxcolumn` being in scope
(from LinearAlgebra internals) — confirm against the notebook's imports.
"""
function qrfactPivotedUnblocked!(A::AbstractMatrix)
    m, n = size(A)
    piv = Vector(UnitRange{BlasInt}(1,n))
    τ = Vector{eltype(A)}(undef, min(m,n))
    for j = 1:min(m,n)
        # Find column with maximum norm in trailing submatrix
        jm = indmaxcolumn(view(A, j:m, j:n)) + j - 1
        if jm != j
            # Flip elements in pivoting vector
            tmpp = piv[jm]
            piv[jm] = piv[j]
            piv[j] = tmpp
            # Swap columns j and jm of the full matrix
            for i = 1:m
                tmp = A[i,jm]
                A[i,jm] = A[i,j]
                A[i,j] = tmp
            end
        end
        # Compute reflector of columns j
        x = view(A, j:m, j)
        τj = LinearAlgebra.reflector!(x)
        τ[j] = τj
        # Update trailing submatrix with reflector
        LinearAlgebra.reflectorApply!(x, τj, view(A, j:m, j+1:n))
    end
    return LinearAlgebra.QRPivoted{eltype(A), typeof(A)}(A, τ, piv)
end
# ╔═╡ 293a68ca-e02f-47b3-85ed-aeeb8995f3ec
# Workspace/result of one Householder reflection (filled by `reflector!`).
struct Reflector{T,RT,VT<:AbstractVector{T}}
    ξ::T         # updated pivot element ξ1 = x[1] + normu
    normu::RT    # (sign-adjusted) norm of the reflected column
    sqnormu::RT  # squared norm of the reflected column
    r::T         # reflector coefficient τ = ξ1 / normu
    y::VT        # Householder vector: y[1] = -normu, y[i] = x[i]/ξ1
end
# ╔═╡ fa5716f9-8bff-4295-812b-691ccdc12832
# Full output of `qr_pivoted!`: the standard pivoted-QR results plus the
# per-step state (reflectors, v'A caches, pivot indices) that must be kept
# so the factorization can be exactly reversed for AD.
struct QRPivotedRes{T,RT,VT}
    factors::Matrix{T}                      # packed QR factors (as in QRPivoted)
    τ::Vector{T}                            # Householder coefficients
    jpvt::Vector{Int}                       # column permutation
    reflectors::Vector{Reflector{T,RT,VT}}  # one Reflector per factorization step
    vAs::Vector{Vector{T}}                  # cached v'A row vectors per step
    jms::Vector{Int}                        # pivot column chosen at each step
end
# ╔═╡ 8324f365-fd12-4ca3-8ca6-657e5917f946
# Elementary reflection similar to LAPACK. The reflector is not Hermitian but
# ensures that tridiagonalization of Hermitian matrices become real. See lawn72
@i function reflector!(R::Reflector{T,RT}, x::AbstractVector{T}) where {T,RT}
    # Reversible Householder reflector generation (LAPACK-style, see the
    # comment above this cell): fills R with ξ1, ‖u‖, ‖u‖², τ and the
    # reflector vector y computed from column x; x itself is not modified.
    n ← length(x)
    @inbounds @invcheckoff if n != 0
        @zeros T ξ1
        @zeros RT normu sqnormu
        ξ1 += x[1]
        sqnormu += abs2(ξ1)
        for i = 2:n
            sqnormu += abs2(x[i])
        end
        if !iszero(sqnormu)
            normu += sqrt(sqnormu)
            # flip the sign so that adding normu to ξ1 avoids cancellation
            if real(ξ1) < 0
                NEG(normu)
            end
            ξ1 += normu
            R.y[1] -= normu          # y[1] = -normu
            for i = 2:n
                R.y[i] += x[i] / ξ1  # tail of the Householder vector
            end
            R.r += ξ1/normu          # reflector coefficient τ
        end
        # move the temporaries into R so locals end at their zero state
        SWAP(R.ξ, ξ1)
        SWAP(R.normu, normu)
        SWAP(R.sqnormu, sqnormu)
    end
end
# ╔═╡ 70fb10ea-9229-46ef-8ba3-b1d3874b7929
# apply reflector from left
@i function reflectorApply!(vA::AbstractVector{T}, x::AbstractVector, τ::Number, A::StridedMatrix{T}) where T
(m, n) ← size(A)
if length(x) != m || length(vA) != n
@safe throw(DimensionMismatch("reflector has length ($(length(x)), $(length(vA))), which must match the first dimension of matrix A, ($m, $n)"))
end
@inbounds @invcheckoff if m != 0
for j = 1:n
# dot
@zeros T vAj vAj_τ
vAj += A[1, j]
for i = 2:m
vAj += x[i]'*A[i, j]
end
vAj_τ += τ' * vAj
# ger
A[1, j] -= vAj_τ
for i = 2:m
A[i, j] -= x[i]*vAj_τ
end
vAj_τ -= τ' * vAj
SWAP(vA[j], vAj)
end
end
end
# ╔═╡ 51504ba4-4711-48b7-aab9-d4f26c009659
"""
    alloc(::typeof(reflector!), x)

Allocate a zero-initialized `Reflector` workspace matching the element type
of `x`, suitable as the output argument of `reflector!`.
"""
function alloc(::typeof(reflector!), x::AbstractVector{T}) where T
    z = zero(T)
    zr = zero(real(T))
    return Reflector(z, zr, zr, z, zero(x))
end
# ╔═╡ f267e315-3c19-4345-8fba-641bb0ea515b
@i function qr_pivoted!(res::QRPivotedRes, A::StridedMatrix{T}) where T
    # Reversible column-pivoted Householder QR. Follows the same loop
    # structure as `qrfactPivotedUnblocked!`, but every step that would be
    # irreversible (pivot choice, reflector workspace, v'A products) is
    # pushed onto the stacks in `res` so the factorization can be run
    # backwards for automatic differentiation.
    m, n ← size(A)
    @invcheckoff @inbounds for j = 1:min(m,n)
        # Find column with maximum norm in trailing submatrix
        jm ← LinearAlgebra.indmaxcolumn(NiLang.value.(view(A, j:m, j:n))) + j - 1
        if jm != j
            # Flip elements in pivoting vector
            SWAP(res.jpvt[jm], res.jpvt[j])
            # Swap columns j and jm of the full matrix
            for i = 1:m
                SWAP(A[i, jm], A[i, j])
            end
        end
        # Compute reflector of columns j
        R ← alloc(reflector!, A |> subarray(j:m, j))
        vA ← zeros(T, n-j)
        reflector!(R, A |> subarray(j:m, j))
        # Update trailing submatrix with reflector
        reflectorApply!(vA, R.y, R.r, A |> subarray(j:m, j+1:n))
        # pack the Householder vector into the lower triangle of A
        for i=1:length(R.y)
            SWAP(R.y[i], A[j+i-1, j])
        end
        # record the per-step state needed for the reverse pass
        PUSH!(res.reflectors, R)
        PUSH!(res.vAs, vA)
        PUSH!(res.jms, jm)
        # reversibly deallocate the locals (now back in their zero state)
        R → _zero(Reflector{T,real(T),Vector{T}})
        vA → zeros(T, 0)
        jm → 0
    end
    # collect the τ coefficients and the packed factors into res
    @inbounds for i=1:length(res.reflectors)
        res.τ[i] += res.reflectors[i].r
    end
    res.factors += A
end
# ╔═╡ a07b93b1-742b-41d4-bd0f-bc899de55338
"""
    alloc_qr(A)

Pre-allocate an empty `QRPivotedRes` sized for factorizing `A` with
`qr_pivoted!`: zeroed factors and τ, identity column permutation, and
empty per-step stacks.
"""
function alloc_qr(A::AbstractMatrix{T}) where T
    m, n = size(A)
    QRPivotedRes(
        zero(A),                           # factors accumulator
        zeros(T, min(m, n)),               # Householder coefficients τ
        collect(1:n),                      # identity column permutation
        Reflector{T,real(T),Vector{T}}[],  # per-step reflectors
        Vector{T}[],                       # per-step v'A caches
        Int[],                             # per-step pivot indices
    )
end
# ╔═╡ 5f207f59-b9f4-477f-b79f-0aee743bdb8e
A = randn(ComplexF64, 20, 20);
# ╔═╡ f88517d6-b87d-45ba-bf3f-67074fa51fca
@test qr_pivoted!(alloc_qr(A), copy(A))[1].factors ≈ LinearAlgebra.qrfactPivotedUnblocked!(copy(A)).factors
# ╔═╡ 45aef837-9b2c-49b2-b815-e4d60f103f58
# Gradient check: compare NiLang reverse-mode AD against central finite differences.
let
    @testset "qr pivoted gradient" begin
        # rank deficient initial matrix
        n = 50
        U = LinearAlgebra.qr(randn(n, n)).Q
        # Zero half of the spectrum so A = U*Σ*U' has rank n÷2 exactly.
        Σ = Diagonal((x=randn(n); x[n÷2+1:end] .= 0; x))
        A = U*Σ*U'
        res = alloc_qr(A)
        @test rank(A) == n ÷ 2
        # Run the reversible factorization once; the number of nonzero rows
        # of R should equal the numerical rank.
        qrres = qr_pivoted!(deepcopy(res), copy(A))[1]
        @test count(x->(x>1e-12), sum(abs2, QRPivoted(qrres.factors, qrres.τ, qrres.jpvt).R, dims=2)) == n ÷ 2
        # Reversible scalar loss: |first entry of the factor matrix|.
        @i function loss(y, qrres, A)
            qr_pivoted!(qrres, A)
            y += abs(qrres.factors[1])
        end
        # Forward-only wrapper: run `loss` and return the accumulated scalar.
        nrloss(A) = loss(0.0, deepcopy(res), A)[1]
        # Numerical gradient via central differences with step δ.
        ngA = zero(A)
        δ = 1e-5
        for j=1:size(A, 2)
            for i=1:size(A, 1)
                A_ = copy(A)
                A_[i,j] -= δ/2
                l1 = nrloss(copy(A_))
                A_[i,j] += δ
                l2 = nrloss(A_)
                ngA[i,j] = (l2-l1)/δ
            end
        end
        # Reverse-mode gradient; iloss=1 marks the first argument `y` as the
        # loss output, [3] extracts the gradient w.r.t. `A`.
        gA = NiLang.AD.gradient(loss, (0.0, res, A); iloss=1)[3]
        # A is real here, so comparing the real part against the
        # finite-difference gradient is sufficient.
        @test real.(gA) ≈ ngA
    end
end
# ╔═╡ Cell order:
# ╟─a1ef579e-4b66-4042-944e-7e27c660095e
# ╟─100b4293-fd1e-4b9c-a831-5b79bc2a5ebe
# ╟─f11023e5-8f7b-4f40-86d3-3407b61863d9
# ╟─9d11e058-a7d0-11eb-1d78-6592ff7a1b43
# ╟─b73157bf-1a77-47b8-8a06-8d6ec2045023
# ╟─ec13e0a9-64ff-4f66-a5a6-5fef53428fa1
# ╟─f8b0d1ce-99f7-4729-b46e-126da540cbbe
# ╟─435ac19e-1c0c-4ee5-942d-f2a97c8c4d80
# ╟─48ecd619-d01d-43ff-8b52-7c2566c3fa2b
# ╟─4878ce45-40ff-4fae-98e7-1be41e930e4d
# ╠═ce44f8bd-692e-4eab-9ba4-055b25e40c81
# ╠═b2c1936c-2c27-4fbb-8183-e38c5e858483
# ╠═8be1b812-fcac-404f-98aa-0571cb990f34
# ╟─33e0c762-c75e-44aa-bfe2-bff92dd1ace8
# ╟─c59c35ee-1907-4736-9893-e22c052150ca
# ╠═0ae13734-b826-4dbf-93d1-11044ce88bd4
# ╠═99187515-c8be-49c2-8d70-9c2998d9993c
# ╟─78ca6b08-84c4-4e4d-8412-ae6c28bfafce
# ╠═f12b25d8-7c78-4686-b46d-00b34e565605
# ╟─d90c3cc9-084d-4cf7-9db7-42cea043030b
# ╟─93c98cb2-18af-47df-afb3-8c5a34b4723c
# ╟─2dc74e15-e2ea-4961-b43f-0ada1a73d80a
# ╟─7ee75a15-eaea-462a-92b6-293813d2d4d7
# ╟─02a25b73-7353-43b1-8738-e7ca472d0cc7
# ╟─2afb984f-624e-4381-903f-ccc1d8a66a17
# ╟─7e5d5e69-90f2-4106-8edf-223c150a8168
# ╟─92d7a938-9463-4eee-8839-0b8c5f762c79
# ╟─4b1a0b59-ddc6-4b2d-b5f5-d92084c31e46
# ╟─81f16b8b-2f0b-4ba3-8c26-6669eabf48aa
# ╟─fb6c3a48-550a-4d2e-a00b-a1e40d86b535
# ╟─ab6fa4ac-29ed-4722-88ed-fa1caf2072f3
# ╟─8e72d934-e307-4505-ac82-c06734415df6
# ╟─e6ff86a9-9f54-474b-8111-a59a25eda506
# ╟─9c1d9607-a634-4350-aacd-2d40984d647d
# ╟─63db2fa2-50b2-4940-b8ee-0dc6e3966a57
# ╟─693167e7-e80c-401d-af89-55b5fae30848
# ╟─4cd70901-2142-4868-9a33-c46ca0d064ec
# ╟─89018a35-76f4-4f23-b15a-a600db046d6f
# ╟─1d219222-0778-4c37-9182-ed5ccbb3ef32
# ╟─4ff09f7c-aeac-48bd-9d58-8446137c3acd
# ╟─ea44037b-9359-4fbd-990f-529d88d54351
# ╟─e731a8e3-6462-4a60-83e9-6ab7ddfff50e
# ╟─685c2b28-b071-452c-a881-801128dcb6c3
# ╟─177ddfc2-2cbe-4dba-9d05-2857633dd1ae
# ╟─6c2a3a93-385f-4758-9b6e-4cb594a8e856
# ╟─fb8168c2-8489-418b-909b-cede57b5ae64
# ╟─fdb39284-dbb1-49fa-9a1c-f360f9e6b765
# ╟─60214f22-c8bb-4a32-a882-4e6c727b29a9
# ╟─7a6dbe09-cb7f-405f-b9b5-b350ca170e5f
# ╟─5dc4a849-76dd-4c4f-8828-755671839e5e
# ╟─b053f11b-9ed7-47ff-ab32-0c70b87e71ed
# ╟─7b1aa6dd-647f-44cb-b580-b58e23e8b5a6
# ╟─b96bac75-b4ad-45f7-aeec-cb6a387eebf0
# ╟─5fe022eb-6a17-466e-a6d0-d67e82af23cd
# ╟─92047e95-7eba-4021-9668-9bb4b92261d7
# ╟─e2ae1084-8759-4f27-8ad1-43a88e434a3d
# ╠═edd3aea8-abdb-4e12-9ef9-12ac0fff835b
# ╠═a2904efb-186c-449d-b1aa-caf530f88e91
# ╠═14faaf82-ad3e-4192-8d48-84adfa30442d
# ╠═5d141b88-ec07-4a02-8eb3-37405e5c9f5d
# ╠═0907e683-f216-4cf6-a210-ae5181fdc487
# ╠═805c7072-98fa-4086-a69d-2e126c55af36
# ╠═7e527024-c294-4c16-8626-9953588d9b6a
# ╠═0bbfa106-f465-4a7b-80a7-7732ba435822
# ╠═3e59c65a-ceed-42ed-be64-a6964db016e7
# ╟─29f85d05-99fd-4843-9be0-5663e681dad7
# ╠═9a46597c-b1ee-4e3b-aed1-fd2874b6e77a
# ╠═e7830e55-bd9e-4a8a-9239-4191a5f0b1d1
# ╟─de2cd247-ba68-4ba4-9784-27a743478635
# ╟─dc929c23-7434-4848-847a-9fa696e84776
# ╟─4f1df03f-c315-47b1-b181-749e1231594c
# ╠═ccd38f52-104d-434a-aea3-dd94e571374f
# ╠═7eccba6a-3ad5-440b-9c5d-392dc8dc7aba
# ╠═f4230251-ba54-434a-b86b-f972c7389217
# ╟─4a858a3e-ce28-4642-b061-3975a3ed99ff
# ╠═674bb3bb-637b-44f2-bf6d-d1678da03fbd
# ╠═5a59d96f-b2f1-4564-82c7-7f0fe181afb8
# ╠═55d2f8ee-4f77-4d44-b704-30643dbbab84
# ╠═14951168-97c2-43ae-8d5e-5506408a2bb2
# ╠═4f564581-6032-449c-8b15-3c741f44237a
# ╠═a36516e8-76c1-4bff-8a12-3e1e621b857d
# ╠═402b861c-d363-4d23-b9e9-eb088f57b5c4
# ╠═63975a80-1b41-4f55-91a1-4a316ad7bf26
# ╠═6f688f88-432a-42b2-a2db-19d6bb282e0a
# ╠═fb46db14-f7e0-4f01-9096-02334c62942d
# ╟─b2c3db3d-c250-4daa-8453-3c9a2734aede
# ╠═69dc2685-b70f-4a81-af30-f02e0054bd52
# ╠═9a986264-5ba7-4697-a00d-711f8efe29f0
# ╠═560cf3e9-0c14-4497-85b9-f07045eea32a
# ╠═8ab79efc-e8d0-4c6f-81df-a89008142bb7
# ╠═0eec318c-2c09-4dd6-9187-9c0273d29915
# ╠═1f0ef29c-0ad5-4d97-aeed-5ff44e86577a
# ╠═603d8fc2-5e7b-4d55-92b6-208b25ea6569
# ╟─2b3c765e-b505-4f07-9bcb-3c8cc47364ad
# ╠═e0f266da-7e65-4398-bfd4-a6c0b54e626b
# ╟─e1d35886-79d0-40a5-bd33-1c4e5f4a0a9a
# ╠═b63a30b0-c75b-4998-a2b2-0b79574cab81
# ╟─139bf020-c4a8-45c8-96fa-aeebc7ddaedc
# ╠═8967c0f0-89f8-4893-b11b-253333d1a823
# ╟─f2540450-5a07-4fb8-93fb-a6d48dd36a56
# ╠═3acb2cfd-fa29-4a2b-8f23-f5aaf474edd0
# ╟─aa1547f2-5edd-4b7e-b93e-bdfc4e4fc6d5
# ╟─6e76a107-4f51-4e32-b133-7b6e04d7d107
# ╟─999f7a8f-d72e-4ccd-8cbf-b5bbb7db1842
# ╟─32772c2a-6b80-4779-963c-06974ff0d832
# ╟─41642bd5-1321-490a-95ad-4c1d6363456f
# ╟─2a553e32-05ef-4c2d-aba7-41185c6035d4
# ╟─ab8345ce-e038-4d6b-9e1f-57e4f33bb67b
# ╟─bb9c9a4c-601a-4708-9b2d-04d1583938f2
# ╟─b9917e94-c33d-423f-a478-3252bacc2494
# ╟─4978f404-11ff-41b8-a673-f2d051b1f526
# ╟─73bd2e3b-902f-461b-860f-246257608ecd
# ╟─4dd47dc8-6dfa-47a4-a088-689b4b870762
# ╟─ecd975d2-9374-4f40-80ac-2cceda11e7fb
# ╟─832cc81d-a49d-46e7-9d2b-d8bde9bb1273
# ╟─2192a1de-1042-4b13-a313-b67de489124c
# ╟─01c709c7-806c-4389-bbb2-4081e64426d9
# ╟─b1e0cf83-4337-4044-a7d1-5fca8ae79268
# ╟─71f4b476-027d-4c8f-b561-1ee418bc9e61
# ╟─042013cf-9cd2-409d-827f-a311a2f8ce62
# ╟─82593cd0-1403-4597-8370-919c80494479
# ╟─f58720b5-2bcb-4950-b453-bd59f648c66a
# ╟─4576d791-6af7-4ba5-9b80-fe99c0bb2e88
# ╟─6e9d17f1-b17d-4e8d-82a3-921558a20c0f
# ╟─f18d89f5-1129-43e0-8b4a-5c1fcd618eab
# ╟─2912c7ed-75e3-4dfd-9c40-92115cc08194
# ╟─5d1517c0-562b-40db-bec2-32b5494de1b8
# ╟─ae096ad2-3ae9-4440-a959-0d7d9a174f1d
# ╟─8148bc1f-ef99-40a4-a5ce-0a42643f703d
# ╠═200f1848-0980-4185-919a-93ab2e7f788f
# ╠═bd86c5c2-16be-4cfd-ba7a-a0e2544d82d1
# ╟─11557d6b-3a1e-416d-874f-b8d217976f76
# ╟─48a10ea2-5d32-4a55-b8c0-f6a5e82eace9
# ╟─fafc1b0f-6469-4b6c-a00d-5272a45fc69b
# ╟─ad6cff7b-5cbf-4ab1-94f7-d21cbc171000
# ╠═30c191c5-642b-4062-98f3-643d314a054d
# ╠═fa5716f9-8bff-4295-812b-691ccdc12832
# ╠═f267e315-3c19-4345-8fba-641bb0ea515b
# ╠═4d373cf6-9b39-44bc-8f13-220933fc8f5c
# ╠═293a68ca-e02f-47b3-85ed-aeeb8995f3ec
# ╠═8324f365-fd12-4ca3-8ca6-657e5917f946
# ╠═70fb10ea-9229-46ef-8ba3-b1d3874b7929
# ╠═51504ba4-4711-48b7-aab9-d4f26c009659
# ╠═a07b93b1-742b-41d4-bd0f-bc899de55338
# ╠═864dbde7-b689-4165-a08e-6bbbd72190de
# ╠═5f207f59-b9f4-477f-b79f-0aee743bdb8e
# ╠═f88517d6-b87d-45ba-bf3f-67074fa51fca
# ╠═45aef837-9b2c-49b2-b815-e4d60f103f58
|
{"hexsha": "b3b02773fa886d7899ecb38fb4f5b221268b998b", "size": 51355, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "notebooks/autodiff.jl", "max_stars_repo_name": "GiggleLiu/NiLang.jl", "max_stars_repo_head_hexsha": "f24036c478ace467ae71f630e1db2698e38fd64d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 214, "max_stars_repo_stars_event_min_datetime": "2019-12-06T04:23:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:14:39.000Z", "max_issues_repo_path": "notebooks/autodiff.jl", "max_issues_repo_name": "GiggleLiu/NiLang.jl", "max_issues_repo_head_hexsha": "f24036c478ace467ae71f630e1db2698e38fd64d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2020-01-17T13:46:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T05:58:20.000Z", "max_forks_repo_path": "notebooks/autodiff.jl", "max_forks_repo_name": "GiggleLiu/NiLang.jl", "max_forks_repo_head_hexsha": "f24036c478ace467ae71f630e1db2698e38fd64d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2020-01-17T13:38:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:34:04.000Z", "avg_line_length": 30.3516548463, "max_line_length": 326, "alphanum_fraction": 0.6384967384, "num_tokens": 23037}
|
## This script generates a curves for fluid particle trajectories
## This is intended as the script of a 'Programmable Source'
## Author: Kelton Halbert
## Institution: University of Wisconsin - Madison
## Department: Atmospheric and Oceanic Sciences
## Research Group: Cooperative Institute for Meteorological Satellite Studies
## Date: Oct 29, 2019
import numpy as np

# NOTE(review): `self`, `inputs`, and `vtk` are injected by ParaView's
# Programmable Source execution environment -- this is not a standalone script.
pdi = self.GetInput()
pdo = self.GetOutput()
outInfo = self.GetOutputInformation(0)
## get nParcels and nTimes from
## the input dataset
# GetBounds() returns (xmin, xmax, ymin, ymax, zmin, zmax). The producer
# presumably encodes parcel index along the Y extent and time index along
# the X extent, so the max extents give the counts -- TODO confirm against
# the upstream reader.
bounds = pdi.GetBounds()
nParcels = int(bounds[3]+1)
nTimes = int(bounds[1]+1)
timeSteps = range(0, nTimes, 1)
timeRange = [int(timeSteps[0]), int(timeSteps[-1])]
## The names of the arrays containing position
## information about the parcels
poskeys = ["xpos", "ypos", "zpos"]
# NOTE(review): `vars` shadows the `vars` builtin; harmless here, but worth renaming.
vars = inputs[0].PointData.keys()
# Advertise the available time range and steps to the VTK pipeline.
# NOTE(review): under Python 3 `timeSteps` is a range object, not a list --
# confirm that this Set() overload accepts it.
outInfo.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_RANGE(), timeRange, int(2))
outInfo.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS(), timeSteps, len(timeSteps))
# Use the pipeline-requested update time if one was set; otherwise fall back
# to rendering all time steps.
if outInfo.Has(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP()):
    time = outInfo.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP())
else:
    time = nTimes
if (time <= 1): time = 2
print(time)
# NOTE(review): `time` is computed above but never used below -- confirm
# whether per-timestep subsetting of the trajectories was intended.

# Copy every point-data array from the input to the output, one float
# component per array.
varcount = 0
for var in vars:
    #if var not in poskeys:
    newArray = vtk.vtkFloatArray()
    newArray.SetName(var)
    newArray.SetNumberOfComponents(1)
    oldArray = inputs[0].PointData[var]
    array_out = newArray
    ## Copy the values into the output array. Position arrays are divided
    ## by 1000 (m -> km) to match the 3D data, which is stored in km.
    if (var in poskeys):
        for i in range(len(oldArray)):
            array_out.InsertNextValue(oldArray[i] / 1000.)
    else:
        for i in range(len(oldArray)):
            array_out.InsertNextValue(oldArray[i])
    pdo.GetPointData().AddArray(newArray)
    varcount += 1  # NOTE(review): counter is never read afterwards
## Allocate the number of 'cells' that will be added. We are just
## adding one vtkPolyLine 'cell' to the vtkPolyData object.
pdo.Allocate(nParcels, 1)
# Reshape the flat position arrays to (parcel, time) and convert m -> km.
xpos = inputs[0].PointData[poskeys[0]].reshape((nParcels, nTimes)) / 1000.
ypos = inputs[0].PointData[poskeys[1]].reshape((nParcels, nTimes)) / 1000.
zpos = inputs[0].PointData[poskeys[2]].reshape((nParcels, nTimes)) / 1000.
## This will store the points for the parcel trajectory
newPts = vtk.vtkPoints()
## Loop over our parcels
for pcl in range(0, nParcels):
    ## Loop over each time step per parcel
    for i in range(0, int(nTimes)):
        ## Generate the Points along the parcel curve
        x = xpos[pcl, i]
        y = ypos[pcl, i]
        z = zpos[pcl, i]
        ## Insert the Points into the vtkPoints object
        ## The first parameter indicates the reference.
        ## value for the point. The reference value is
        ## offset so that we keep each parcel
        ## seperate from each other.
        newPts.InsertPoint(i+nTimes*pcl, x, y, z)
## Add the points to the vtkPolyData object
## Right now the points are not associated with a line -
## it is just a set of unconnected points. We need to
## create a 'cell' object that ties points together
## to make a curve (in this case). This is done below.
## A 'cell' is just an object that tells how points are
## connected to make a 1D, 2D, or 3D object.
pdo.SetPoints(newPts)
## Now we loop over our times again, this time
## Relating each parcel trace to a line rather
## than unrelated points.
for pcl in range(nParcels):
    ## Make a vtkPolyLine which holds the info necessary
    ## to create a curve composed of line segments. This
    ## really just hold constructor data that will be passed
    ## to vtkPolyData to add a new line.
    aPolyLine = vtk.vtkPolyLine()
    #Indicate the number of points along the line
    aPolyLine.GetPointIds().SetNumberOfIds(nTimes)
    for i in range(0,int(nTimes)):
        ## Add the points to the line. The first value indicates
        ## the order of the point on the line. The second value
        ## is a reference to a point in a vtkPoints object. Depends
        ## on the order that Points were added to vtkPoints object.
        ## Note that this will not be associated with actual points
        ## until it is added to a vtkPolyData object which holds a
        ## vtkPoints object.
        aPolyLine.GetPointIds().SetId(i, i+nTimes*pcl)
    ## Add the poly line 'cell' to the vtkPolyData object.
    pdo.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())
## The trajectories are ready to plot! Click 'Apply'.
|
{"hexsha": "7517cdd54c1a70362b92df06022092d4ce272326", "size": 4379, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pvplugin.py", "max_stars_repo_name": "keltonhalbert/LOFT", "max_stars_repo_head_hexsha": "23a242dd23036a50a932a25ecb85116ce3194177", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-02-29T16:51:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T22:14:19.000Z", "max_issues_repo_path": "scripts/pvplugin.py", "max_issues_repo_name": "keltonhalbert/LOFT", "max_issues_repo_head_hexsha": "23a242dd23036a50a932a25ecb85116ce3194177", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-04-14T13:39:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-11T20:59:20.000Z", "max_forks_repo_path": "scripts/pvplugin.py", "max_forks_repo_name": "keltonhalbert/LOFT", "max_forks_repo_head_hexsha": "23a242dd23036a50a932a25ecb85116ce3194177", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-06T03:50:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-06T03:50:40.000Z", "avg_line_length": 35.893442623, "max_line_length": 89, "alphanum_fraction": 0.7179721398, "include": true, "reason": "import numpy", "num_tokens": 1180}
|
# -*- coding: utf-8 -*-
import numpy as np
import torch as th
def is_indexable(data):
    """Return True if ``data`` supports integer indexing and ``len()``.

    Recognized containers are tuples, lists, NumPy arrays and torch tensors.
    Everything else (including strings) is treated as a scalar leaf.

    Parameters
    ----------
    data : object to classify.

    Returns
    -------
    bool
    """
    # Bug fix: `th._TensorBase` was removed in torch >= 0.4, so the original
    # isinstance check raised AttributeError on any modern torch. Fall back to
    # `th.Tensor` so the function works on both old and new releases.
    tensor_base = getattr(th, "_TensorBase", th.Tensor)
    return isinstance(data, (tuple, list, np.ndarray, tensor_base))
def deep_size(indexable, depth=0):
    """Return the nested lengths of ``indexable``, descending along index 0.

    E.g. a list of 3 lists of length 4 yields ``[3, 4]``. Recursion stops at
    the first non-indexable value or after 10 levels of nesting.
    """
    if depth >= 10 or not is_indexable(indexable):
        return []
    return [len(indexable)] + deep_size(indexable[0], depth + 1)
|
{"hexsha": "ab8b064feaec20c0eceb9698677d4cadb85e4842", "size": 532, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/flare/util/iter.py", "max_stars_repo_name": "mountain/planetarium", "max_stars_repo_head_hexsha": "14c5a75f9ac0be36f28d059c7bf7a77635d617da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-03T18:58:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-03T18:58:01.000Z", "max_issues_repo_path": "src/flare/util/iter.py", "max_issues_repo_name": "mountain/planetarium", "max_issues_repo_head_hexsha": "14c5a75f9ac0be36f28d059c7bf7a77635d617da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/flare/util/iter.py", "max_forks_repo_name": "mountain/planetarium", "max_forks_repo_head_hexsha": "14c5a75f9ac0be36f28d059c7bf7a77635d617da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.28, "max_line_length": 72, "alphanum_fraction": 0.6259398496, "include": true, "reason": "import numpy", "num_tokens": 130}
|
"""
This script is organized like so:
+ `if __name__ == "__main__" sets up the Streamlit UI elements
+ `generate_image` houses interactions between UI and the CLIP image
generation models
+ Core model code is abstracted in `logic.py` and imported in `generate_image`
"""
import streamlit as st
from pathlib import Path
import sys
import datetime
import shutil
import json
import os
import base64
sys.path.append("./taming-transformers")
from PIL import Image
from typing import Optional, List
from omegaconf import OmegaConf
import imageio
import numpy as np
from logic import VQGANCLIPRun
def generate_image(
    text_input: str = "the first day of the waters",
    vqgan_ckpt: str = "vqgan_imagenet_f16_16384",
    num_steps: int = 300,
    image_x: int = 300,
    image_y: int = 300,
    init_image: Optional[Image.Image] = None,
    image_prompts: Optional[List[Image.Image]] = None,
    continue_prev_run: bool = False,
    seed: Optional[int] = None,
    mse_weight: float = 0,
    mse_weight_decay: float = 0,
    mse_weight_decay_steps: int = 0,
    tv_loss_weight: float = 1e-3,
) -> None:
    """Run a VQGAN-CLIP generation loop and stream progress to the UI.

    Parameters
    ----------
    text_input : text prompt conditioning the generation.
    vqgan_ckpt : stem of the VQGAN checkpoint in `assets/` to load.
    num_steps : number of optimization steps; -1 runs indefinitely until
        Streamlit's stop button raises a StopException.
    image_x, image_y : output width and height in pixels.
    init_image : optional starting image.
    image_prompts : optional image prompts; ``None`` means no prompts
        (replaces the original mutable-default ``[]`` argument).
    continue_prev_run : reuse the cached model/perceptor and previous image.
    seed : RNG seed, or ``None`` for a random one.
    mse_weight, mse_weight_decay, mse_weight_decay_steps : MSE
        regularization schedule, forwarded to VQGANCLIPRun.
    tv_loss_weight : total-variation regularization weight.

    Side effects: updates the module-level Streamlit widgets (`status_text`,
    `step_progress_bar`, `im_display_slot`), writes `temp.mp4`, and saves all
    run artifacts under the module-level `outputdir` (created in __main__).
    """
    # Avoid the shared mutable-default pitfall: normalize None to a fresh list.
    if image_prompts is None:
        image_prompts = []

    ### Init -------------------------------------------------------------------
    run = VQGANCLIPRun(
        text_input=text_input,
        vqgan_ckpt=vqgan_ckpt,
        num_steps=num_steps,
        image_x=image_x,
        image_y=image_y,
        seed=seed,
        init_image=init_image,
        image_prompts=image_prompts,
        continue_prev_run=continue_prev_run,
        mse_weight=mse_weight,
        mse_weight_decay=mse_weight_decay,
        mse_weight_decay_steps=mse_weight_decay_steps,
        tv_loss_weight=tv_loss_weight,
    )

    ### Load model -------------------------------------------------------------
    if continue_prev_run is True:
        run.load_model(
            prev_model=st.session_state["model"],
            prev_perceptor=st.session_state["perceptor"],
        )
        prev_run_id = st.session_state["run_id"]
    else:
        # Remove the cache first! CUDA out of memory
        if "model" in st.session_state:
            del st.session_state["model"]
        if "perceptor" in st.session_state:
            del st.session_state["perceptor"]
        st.session_state["model"], st.session_state["perceptor"] = run.load_model()
        prev_run_id = None

    # Generate random run ID
    # Used to link runs linked w/ continue_prev_run
    # ref: https://stackoverflow.com/a/42703382/13095028
    # Use URL and filesystem safe version since we're using this as a folder name
    run_id = st.session_state["run_id"] = base64.urlsafe_b64encode(
        os.urandom(6)
    ).decode("ascii")

    run_start_dt = datetime.datetime.now()

    ### Model init -------------------------------------------------------------
    if continue_prev_run is True:
        run.model_init(init_image=st.session_state["prev_im"])
    elif init_image is not None:
        run.model_init(init_image=init_image)
    else:
        run.model_init()

    ### Iterate ----------------------------------------------------------------
    step_counter = 0
    frames = []

    def _dump_outputs() -> None:
        """Stitch the animation, then save artifacts and metadata for this run.

        Deduplicates the save logic that was previously copy-pasted in both
        the normal-completion and StopException paths (resolves the old
        'TODO: Make the following DRY'). Reads `im`, `frames` and
        `step_counter` from the enclosing scope at call time.
        """
        # Stitch into video using imageio
        writer = imageio.get_writer("temp.mp4", fps=24)
        for frame in frames:
            writer.append_data(frame)
        writer.close()

        # One folder per run, named by start timestamp and run ID
        runoutputdir = outputdir / (
            run_start_dt.strftime("%Y%m%dT%H%M%S") + "-" + run_id
        )
        runoutputdir.mkdir()

        # Save final image
        im.save(runoutputdir / "output.PNG", format="PNG")

        # Save init image
        if init_image is not None:
            init_image.save(runoutputdir / "init-image.JPEG", format="JPEG")

        # Save image prompts
        for count, image_prompt in enumerate(image_prompts):
            image_prompt.save(
                runoutputdir / f"image-prompt-{count}.JPEG", format="JPEG"
            )

        # Save animation
        shutil.copy("temp.mp4", runoutputdir / "anim.mp4")

        # Save metadata
        with open(runoutputdir / "details.json", "w") as f:
            json.dump(
                {
                    "run_id": run_id,
                    "num_steps": step_counter,
                    "planned_num_steps": num_steps,
                    "text_input": text_input,
                    "init_image": init_image is not None,
                    "image_prompts": len(image_prompts) != 0,
                    "continue_prev_run": continue_prev_run,
                    "prev_run_id": prev_run_id,
                    "seed": run.seed,
                    "Xdim": image_x,
                    "ydim": image_y,
                    "vqgan_ckpt": vqgan_ckpt,
                    "start_time": run_start_dt.strftime("%Y%m%dT%H%M%S"),
                    "end_time": datetime.datetime.now().strftime("%Y%m%dT%H%M%S"),
                    "mse_weight": mse_weight,
                    "mse_weight_decay": mse_weight_decay,
                    "mse_weight_decay_steps": mse_weight_decay_steps,
                    "tv_loss_weight": tv_loss_weight,
                },
                f,
                indent=4,
            )

        status_text.text("Done!")  # End of run

    try:
        # Try block catches st.script_runner.StopExecution, no need of a dedicated stop button
        # Reason is st.form is meant to be self-contained either within sidebar, or in main body
        # The way the form is implemented in this app splits the form across both regions
        # This is intended to prevent the model settings from crowding the main body
        # However, touching any button resets the app state, making it impossible to
        # implement a stop button that can still dump output
        # Thankfully there's a built-in stop button :)
        while True:
            # While loop to accomodate running predetermined steps or running indefinitely
            status_text.text(f"Running step {step_counter}")
            _, im = run.iterate()

            if num_steps > 0:  # skip when num_steps = -1
                step_progress_bar.progress((step_counter + 1) / num_steps)
            else:
                step_progress_bar.progress(100)

            # At every step, display and save image
            im_display_slot.image(im, caption="Output image", output_format="PNG")
            st.session_state["prev_im"] = im

            # ref: https://stackoverflow.com/a/33117447/13095028
            frames.append(np.asarray(im))

            step_counter += 1
            if (step_counter == num_steps) and num_steps > 0:
                break

        _dump_outputs()
    except st.script_runner.StopException:
        # Dump whatever was produced so far to the dashboard and disk
        print("Received Streamlit StopException")
        status_text.text("Execution interrupted, dumping outputs ...")
        # NOTE(review): if the stop arrives before the first iterate() finishes,
        # `im` is unbound and _dump_outputs raises NameError (preexisting behavior).
        _dump_outputs()
if __name__ == "__main__":
    # UI defaults (step counts, image sizes, regularization weights) come
    # from the repo's defaults.yaml.
    defaults = OmegaConf.load("defaults.yaml")
    outputdir = Path("output")
    if not outputdir.exists():
        outputdir.mkdir()
    st.set_page_config(page_title="VQGAN-CLIP playground")
    st.title("VQGAN-CLIP playground")
    # Determine what weights are available in `assets/`
    # A checkpoint is usable only when both its .ckpt and a matching .yaml
    # config are present, so keep just the stems that appear in both lists.
    weights_dir = Path("assets").resolve()
    available_weight_ckpts = list(weights_dir.glob("*.ckpt"))
    available_weight_configs = list(weights_dir.glob("*.yaml"))
    available_weights = [
        i.stem
        for i in available_weight_ckpts
        if i.stem in [j.stem for j in available_weight_configs]
    ]
    # i.e. no weights found, ask user to download weights
    if len(available_weights) == 0:
        st.warning("No weights found in `assets/`, refer to `download-weights.sh`")
        st.stop()
    # Set vqgan_imagenet_f16_1024 as default if possible
    if "vqgan_imagenet_f16_1024" in available_weights:
        default_weight_index = available_weights.index("vqgan_imagenet_f16_1024")
    else:
        default_weight_index = 0
    # Start of input form
    # The form deliberately spans both the main body (text prompt) and the
    # sidebar (all other settings); see the comment in generate_image's try
    # block for why.
    with st.form("form-inputs"):
        # Only element not in the sidebar, but in the form
        text_input = st.text_input(
            "Text prompt",
            help="VQGAN-CLIP will generate an image that best fits the prompt",
        )
        # Checkpoint selector; choices were discovered from assets/ above.
        radio = st.sidebar.radio(
            "Model weights",
            available_weights,
            index=default_weight_index,
            help="Choose which weights to load, trained on different datasets. Make sure the weights and configs are downloaded to `assets/` as per the README!",
        )
        num_steps = st.sidebar.number_input(
            "Num steps",
            value=defaults["num_steps"],
            min_value=-1,
            max_value=None,
            step=1,
            help="Specify -1 to run indefinitely. Use Streamlit's stop button in the top right corner to terminate execution. The exception is caught so the most recent output will be dumped to dashboard",
        )
        image_x = st.sidebar.number_input(
            "Xdim", value=defaults["Xdim"], help="Width of output image, in pixels"
        )
        image_y = st.sidebar.number_input(
            "ydim", value=defaults["ydim"], help="Height of output image, in pixels"
        )
        set_seed = st.sidebar.checkbox(
            "Set seed",
            value=defaults["set_seed"],
            help="Check to set random seed for reproducibility. Will add option to specify seed",
        )
        # Placeholder so the seed input only renders when the box is ticked.
        seed_widget = st.sidebar.empty()
        if set_seed is True:
            # Use text_input as number_input relies on JS
            # which can't natively handle large numbers
            # torch.seed() generates int w/ 19 or 20 chars!
            seed_str = seed_widget.text_input(
                "Seed", value=str(defaults["seed"]), help="Random seed to use"
            )
            try:
                seed = int(seed_str)
            except ValueError as e:
                # NOTE(review): on invalid input `seed` stays unbound, so a
                # subsequent submit raises NameError -- confirm intended handling.
                st.error("seed input needs to be int")
        else:
            seed = None
        use_custom_starting_image = st.sidebar.checkbox(
            "Use starting image",
            value=defaults["use_starting_image"],
            help="Check to add a starting image to the network",
        )
        starting_image_widget = st.sidebar.empty()
        if use_custom_starting_image is True:
            init_image = starting_image_widget.file_uploader(
                "Upload starting image",
                type=["png", "jpeg", "jpg"],
                accept_multiple_files=False,
                help="Starting image for the network, will be resized to fit specified dimensions",
            )
            # Convert from UploadedFile object to PIL Image
            if init_image is not None:
                init_image: Image.Image = Image.open(init_image).convert(
                    "RGB"
                )  # just to be sure
        else:
            init_image = None
        use_image_prompts = st.sidebar.checkbox(
            "Add image prompt(s)",
            value=defaults["use_image_prompts"],
            help="Check to add image prompt(s), conditions the network similar to the text prompt",
        )
        image_prompts_widget = st.sidebar.empty()
        if use_image_prompts is True:
            image_prompts = image_prompts_widget.file_uploader(
                "Upload image prompts(s)",
                type=["png", "jpeg", "jpg"],
                accept_multiple_files=True,
                help="Image prompt(s) for the network, will be resized to fit specified dimensions",
            )
            # Convert from UploadedFile object to PIL Image
            if len(image_prompts) != 0:
                image_prompts = [Image.open(i).convert("RGB") for i in image_prompts]
        else:
            image_prompts = []
        continue_prev_run = st.sidebar.checkbox(
            "Continue previous run",
            value=defaults["continue_prev_run"],
            help="Use existing image and existing weights for the next run. If yes, ignores 'Use starting image'",
        )
        use_mse_reg = st.sidebar.checkbox(
            "Use MSE regularization",
            value=defaults["use_mse_regularization"],
            help="Check to add MSE regularization",
        )
        # Placeholders so the MSE widgets only render when the box is ticked.
        mse_weight_widget = st.sidebar.empty()
        mse_weight_decay_widget = st.sidebar.empty()
        mse_weight_decay_steps = st.sidebar.empty()
        if use_mse_reg is True:
            mse_weight = mse_weight_widget.number_input(
                "MSE weight",
                value=defaults["mse_weight"],
                # min_value=0.0, # leave this out to allow creativity
                step=0.05,
                help="Set weights for MSE regularization",
            )
            mse_weight_decay = mse_weight_decay_widget.number_input(
                "Decay MSE weight by ...",
                value=defaults["mse_weight_decay"],
                # min_value=0.0, # leave this out to allow creativity
                step=0.05,
                help="Subtracts MSE weight by this amount at every step change. MSE weight change stops at zero",
            )
            # NOTE(review): this rebinds the placeholder variable to its own
            # numeric value -- works, but a separate name would be clearer.
            mse_weight_decay_steps = mse_weight_decay_steps.number_input(
                "... every N steps",
                value=defaults["mse_weight_decay_steps"],
                min_value=0,
                step=1,
                help="Number of steps to subtract MSE weight. Leave zero for no weight decay",
            )
        else:
            mse_weight = 0
            mse_weight_decay = 0
            mse_weight_decay_steps = 0
        use_tv_loss = st.sidebar.checkbox(
            "Use TV loss regularization",
            value=defaults["use_tv_loss_regularization"],
            # NOTE(review): help text below looks copy-pasted from the MSE
            # checkbox -- presumably should say "TV loss regularization".
            help="Check to add MSE regularization",
        )
        tv_loss_weight_widget = st.sidebar.empty()
        if use_tv_loss is True:
            tv_loss_weight = tv_loss_weight_widget.number_input(
                "TV loss weight",
                value=defaults["tv_loss_weight"],
                min_value=0.0,
                step=1e-4,
                help="Set weights for TV loss regularization, which encourages spatial smoothness. Ref: https://github.com/jcjohnson/neural-style/issues/302",
                format="%.1e",
            )
        else:
            tv_loss_weight = 0
        submitted = st.form_submit_button("Run!")
    # End of form
    # Shared output widgets: generate_image() writes into these slots.
    status_text = st.empty()
    status_text.text("Pending input prompt")
    step_progress_bar = st.progress(0)
    im_display_slot = st.empty()
    vid_display_slot = st.empty()
    debug_slot = st.empty()
    # Re-show the last generated image across Streamlit reruns.
    if "prev_im" in st.session_state:
        im_display_slot.image(
            st.session_state["prev_im"], caption="Output image", output_format="PNG"
        )
    with st.expander("Expand for README"):
        with open("README.md", "r") as f:
            # Preprocess links to redirect to github
            # Thank you https://discuss.streamlit.io/u/asehmi, works like a charm!
            # ref: https://discuss.streamlit.io/t/image-in-markdown/13274/8
            markdown_links = [str(i) for i in Path("docs/").glob("*.md")]
            images = [str(i) for i in Path("docs/images/").glob("*")]
            readme_lines = f.readlines()
            readme_buffer = []
            for line in readme_lines:
                # Rewrite relative doc links to absolute GitHub URLs.
                for md_link in markdown_links:
                    if md_link in line:
                        line = line.replace(
                            md_link,
                            "https://github.com/tnwei/vqgan-clip-app/tree/main/"
                            + md_link,
                        )
                readme_buffer.append(line)
                # When a local image reference is seen, flush the buffered
                # text and render the image from the GitHub raw URL instead.
                for image in images:
                    if image in line:
                        st.markdown(" ".join(readme_buffer[:-1]))
                        st.image(
                            f"https://raw.githubusercontent.com/tnwei/vqgan-clip-app/main/{image}"
                        )
                        readme_buffer.clear()
            st.markdown(" ".join(readme_buffer))
    with st.expander("Expand for CHANGELOG"):
        with open("CHANGELOG.md", "r") as f:
            st.markdown(f.read())
if submitted:
# debug_slot.write(st.session_state) # DEBUG
status_text.text("Loading weights ...")
generate_image(
# Inputs
text_input=text_input,
vqgan_ckpt=radio,
num_steps=num_steps,
image_x=int(image_x),
image_y=int(image_y),
seed=int(seed) if set_seed is True else None,
init_image=init_image,
image_prompts=image_prompts,
continue_prev_run=continue_prev_run,
mse_weight=mse_weight,
mse_weight_decay=mse_weight_decay,
mse_weight_decay_steps=mse_weight_decay_steps,
)
vid_display_slot.video("temp.mp4")
# debug_slot.write(st.session_state) # DEBUG
|
{"hexsha": "d5fdac1838719736239d9b0bc5363ffd34a4045e", "size": 19174, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "marklr/vqgan-clip-app", "max_stars_repo_head_hexsha": "23edb7ae6234ab177a91865c02be160151fcf566", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "marklr/vqgan-clip-app", "max_issues_repo_head_hexsha": "23edb7ae6234ab177a91865c02be160151fcf566", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "marklr/vqgan-clip-app", "max_forks_repo_head_hexsha": "23edb7ae6234ab177a91865c02be160151fcf566", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0436507937, "max_line_length": 205, "alphanum_fraction": 0.5693647648, "include": true, "reason": "import numpy", "num_tokens": 4101}
|
import torch
import numpy as np
def squash(tensor):
    """
    Squash non-linearity for CapsNets, defined in [1].

    Rescales each capsule vector so that its norm lies in [0, 1) while its
    direction is preserved. Input tensor is of format (bs, units, C, H, W)
    or (bs, units, C); the norm is taken over dim=1, the units axis.

    Parameters
    ----------
    tensor : torch Variable containing n-dimensional tensor

    Returns
    -------
    (||tensor||^2 / (1 + ||tensor||^2)) * tensor / ||tensor||
    """
    magnitude = torch.norm(tensor, p=2, dim=1, keepdim=True)
    squared = magnitude ** 2  # reuse instead of computing the norm twice
    scale = squared / (1 + squared)
    return scale * tensor.div(magnitude)
def split_indices(num_samples, validation_split):
    """
    Randomly partition ``range(num_samples)`` into train/validation indices.

    Parameters
    ----------
    num_samples : int, Total number of samples in list.
    validation_split : float, Fraction of the set to be reserved for validation.

    Returns
    -------
    train_idx, val_idx : arrays of integer indices.
    """
    n_val = int(validation_split * num_samples)
    order = np.arange(num_samples)
    np.random.shuffle(order)  # uses the legacy global NumPy RNG
    return order[n_val:], order[:n_val]
|
{"hexsha": "f36202abd36716cf3e5b3f4142628b339ffbcd01", "size": 1245, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "apsvieira/Capsule-Networks", "max_stars_repo_head_hexsha": "18bb4429bbcec0508f7760a14c312eb9fdcdd117", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "apsvieira/Capsule-Networks", "max_issues_repo_head_hexsha": "18bb4429bbcec0508f7760a14c312eb9fdcdd117", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "apsvieira/Capsule-Networks", "max_forks_repo_head_hexsha": "18bb4429bbcec0508f7760a14c312eb9fdcdd117", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2954545455, "max_line_length": 84, "alphanum_fraction": 0.6385542169, "include": true, "reason": "import numpy", "num_tokens": 296}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 00:39:18 2020
@author: nikbakht
"""
import tensorflow as tf
from tensorflow.keras.layers import Layer
import numpy as np
class Data(Layer):
    # Synthesises random uplink cellular-network batches: candidate users and
    # access points (APs) are dropped uniformly in an EX x EY area with
    # wrap-around (toroidal) distances, each AP is matched to one nearby user,
    # and the layer returns linear channel gains plus open-loop power-control
    # coefficients.
    def __init__(self,Nuser, **kwargs):
        super(Data, self).__init__(**kwargs)
        self.EX=100               # deployment-area width
        self.EY=100               # deployment-area height
        self.exponent=3.8         # path-loss exponent
        self.shadowing_sigma=0;   # shadowing std-dev in dB (0 disables shadowing)
        self.Zuser=0;             # user height (z coordinate)
        self.Zap=1;               # AP height (z coordinate)
        self.Nuser_drop=10*Nuser  # candidate users dropped per batch (10x oversampled)
        self.Nap=Nuser            # one AP per scheduled user
        self.Nuser=Nuser
    def call(self,batch_num,beta_open_loop=1):
        # Generate twice the requested number of batches: batches in which some
        # AP ends up with no assigned user are discarded inside Assign_AP, and
        # the result is truncated back down to batch_num there.
        self.batch_num= batch_num
        batch_num = batch_num*2
        # Xin = tf.zeros([batch_num,2*(self.Nuser+self.Nap)],dtype='float32')
        G = tf.zeros([batch_num,self.Nap,self.Nuser],dtype='float32')
        power_propotional = tf.zeros([batch_num,self.Nap,self.Nuser],dtype='float32')
        # Drop candidate user positions uniformly in the EX x EY area.
        x0 = tf.random.uniform([batch_num,self.Nuser_drop,1],0,self.EX)
        y0 = tf.random.uniform([batch_num,self.Nuser_drop,1],0,self.EY)
        z0 = self.Zuser+tf.zeros([batch_num,self.Nuser_drop,1],dtype='float32')
        Xuser = tf.concat([x0,y0,z0],axis=2)
        # Drop AP positions uniformly in the same area.
        x = tf.random.uniform([batch_num,self.Nap,1],0,self.EX)
        y = tf.random.uniform([batch_num,self.Nap,1],0,self.EY)
        z = self.Zap+tf.zeros([batch_num,self.Nap,1],dtype='float32')
        Xap = tf.concat([x,y,z],axis=2)
        d = self.Dist(Xap,Xuser,self.EX,self.EY)
        D_assign =self.Assign_AP(d)
        # Path loss in dB: -46 dB offset minus 10*exponent*log10(distance).
        g = -46-10*self.exponent*tf.math.log(D_assign)/tf.math.log(10.0)
        # Optional log-normal shadowing (no-op while shadowing_sigma == 0).
        g= g+self.shadowing_sigma*tf.random.normal([D_assign.shape[0],self.Nap,self.Nuser],0,1)
        g_linear=tf.pow(10.0,g/10)
        G = g_linear
        # Open-loop fractional power control: proportional to 1/g_ii^beta,
        # where g_ii is each user's gain to its own (serving) AP.
        power_propotional=1/tf.pow(tf.linalg.diag_part(g_linear),beta_open_loop)
        # else:
        #     print('Not enough valid batches created')
        return G, power_propotional
    def Assign_AP(self,D):
        # Match each AP with one of the dropped users closest to it, keeping
        # only those batches in which every AP gets at least one user.
        # D: [batch, Nap, Nuser_drop] pairwise AP-user distances.
        D_assign=tf.zeros([D.shape[0],self.Nap,1],dtype='float32')
        # Index of the closest AP for every dropped user.
        d_sort=tf.math.argmin(D,axis=1)
        # d_sort =tf.squeeze(d_sort)
        # Status=1
        # Make sure mask does not have zero value!!!!!!
        mask = tf.expand_dims(tf.range(1.0,self.Nuser_drop+1),axis=0)
        mask = tf.tile(mask,[D.shape[0],1])
        for i in range(self.Nap):
            # ind_i=np.argwhere(d_sort==i)
            # id of user assigned to AP_i
            #----how many users assigned to AP_i
            ind_ap_i =d_sort ==i
            # compute valid batch (AP_i has at least one user assigned)
            valid_batch = ind_ap_i
            valid_batch = tf.reduce_sum(tf.cast(valid_batch,'float32'),axis=1)
            valid_batch = tf.squeeze(tf.where(valid_batch>0))
            #-----------Keep valid batch
            # ind_i_val = tf.gather(ind_i_val,valid_batch)
            ind_ap_i = tf.gather(ind_ap_i,valid_batch,axis=0)
            d_sort = tf.gather(d_sort,valid_batch,axis=0)
            D = tf.gather(D,valid_batch,axis=0)
            D_assign = tf.gather(D_assign,valid_batch,axis=0)
            mask = tf.gather(mask,valid_batch,axis=0)
            #---------------------------------------------------
            # Select exactly one of AP_i's assigned users: the sharp softmax
            # (scale 100) rounded to 0/1 approximates a one-hot on the user
            # with the largest mask value among those assigned to AP_i.
            mask_i =mask*tf.cast(ind_ap_i,'float32')
            mask_i = tf.math.round(tf.nn.softmax(100*mask_i,axis=1))
            mask_i = tf.tile(tf.expand_dims(mask_i,axis=1),[1,self.Nap,1])
            dist_selected_user = tf.reduce_sum(mask_i*D,axis=2, keepdims=True)
            # if tf.reduce_sum(tf.cast(dist_selected_user==0.0,'float32')):
            #     print('User assign error')
            D_assign = tf.concat([D_assign,dist_selected_user],axis= 2)
        # Drop the all-zero seed column and truncate to the requested batch count.
        D_assign = D_assign[0:self.batch_num,:,1:]
        return D_assign
    def Dist(self,X1,X2,EX,EY):
        # Pairwise distances between the N1 points of X1 and the N2 points of
        # X2 ([batch, N, 3] position tensors).  x and y use wrap-around
        # (toroidal) distances over the EX x EY area; z is a plain difference.
        N1 = X1.shape[1]
        N2 = X2.shape[1]
        #----------The pair distances
        xvec1 = tf.expand_dims(X1[:,:,0],axis=2)
        xvec2 = tf.expand_dims(X2[:,:,0],axis=2)
        xmat1 = tf.tile(xvec1,[1,1,N2])
        xmat2 = tf.tile(tf.transpose(xvec2,perm=[0,2,1]),[1,N1,1])
        xdiff = xmat1-xmat2
        # Wrap-around: take the shorter way around the torus along x.
        xdist2 = tf.pow(tf.math.minimum(tf.math.abs(xdiff),EX-tf.math.abs(xdiff)),2)
        yvec1 = tf.expand_dims(X1[:,:,1],axis=2)
        yvec2 = tf.expand_dims(X2[:,:,1],axis=2)
        ymat1 = tf.tile(yvec1,[1,1,N2])
        ymat2 = tf.tile(tf.transpose(yvec2,perm=[0,2,1]),[1,N1,1])
        ydiff = ymat1-ymat2
        # Same wrap-around treatment along y.
        ydist2 = tf.pow(tf.minimum(tf.math.abs(ydiff),EY-tf.math.abs(ydiff)),2)
        zvec1 = tf.expand_dims(X1[:,:,2],axis=2)
        zvec2 = tf.expand_dims(X2[:,:,2],axis=2)
        zmat1 = tf.tile(zvec1,[1,1,N2]);
        zmat2 = tf.tile(tf.transpose(zvec2,perm=[0,2,1]),[1,N1,1]);
        zdiff = zmat1-zmat2
        zdist2=tf.pow(zdiff,2)
        D=tf.math.sqrt(xdist2+ydist2+zdist2)
        return D
|
{"hexsha": "65c875d98cbc12f75d6a7b98658d78d7f27c454a", "size": 4818, "ext": "py", "lang": "Python", "max_stars_repo_path": "Cellular/Uplink/lib/Data0.py", "max_stars_repo_name": "FerdinandHannequart/Nikbakht", "max_stars_repo_head_hexsha": "85e1b3ec400338de0dea6ad37ce773024d4cd571", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-16T11:02:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T02:50:06.000Z", "max_issues_repo_path": "Cellular/Uplink/lib/Data0.py", "max_issues_repo_name": "FerdinandHannequart/Nikbakht", "max_issues_repo_head_hexsha": "85e1b3ec400338de0dea6ad37ce773024d4cd571", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cellular/Uplink/lib/Data0.py", "max_forks_repo_name": "FerdinandHannequart/Nikbakht", "max_forks_repo_head_hexsha": "85e1b3ec400338de0dea6ad37ce773024d4cd571", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1794871795, "max_line_length": 95, "alphanum_fraction": 0.5776255708, "include": true, "reason": "import numpy", "num_tokens": 1406}
|
from pytest import raises
from numpy import arange, prod, array, full
from hypothesis import given, example
from hypothesis.strategies import integers, one_of
from ..ndindex import ndindex
from ..tuple import Tuple
from ..integer import Integer
from .helpers import ndindices, check_same, short_shapes
@example(..., 0)
@example((True,), ())
@example(([[True, False], [True, False]], [True, True], slice(0, 2)), ((2, 2, 2, 3, 3)))
@example((array([], dtype=bool),), (0, 0))
@example((False, False), ())
@example(array([], dtype=bool), 0)
@example((array([], dtype=bool),), 0)
@example(array([[[True], [False]]]), (1, 1, 2))
@example(full((1, 9), False), (3, 3))
@example(([0, 1], 0), (2, 2))
@example(([0, 0, 0], [0, 0]), (2, 2))
@example((0, None, 0, ..., 0, None, 0), (2, 2, 2, 2, 2, 2, 2))
@example((0, slice(None), ..., slice(None), 3), (2, 3, 4, 5, 6, 7))
@given(ndindices, one_of(short_shapes, integers(0, 10)))
def test_newshape_hypothesis(idx, shape):
    """Check that index.newshape(shape) agrees with a[idx].shape."""
    arr = arange(shape) if isinstance(shape, int) else arange(prod(shape)).reshape(shape)

    try:
        index = ndindex(idx)
    except IndexError:
        pass
    else:
        # Passing an ndindex object where a shape is expected must raise.
        raises(TypeError, lambda: index.newshape(Tuple(2, 1)))
        raises(TypeError, lambda: index.newshape(Integer(2)))

    def shape_via_numpy(a, idx):
        return a[idx].shape

    def shape_via_newshape(a, index):
        return index.newshape(shape)

    def shapes_match(raw_shape, newshape):
        assert raw_shape == newshape

    check_same(arr, idx, raw_func=shape_via_numpy, ndindex_func=shape_via_newshape,
               assert_equal=shapes_match, same_exception=False)
|
{"hexsha": "bd6022a64c659c112d5499ba77092d7ca3811c36", "size": 1676, "ext": "py", "lang": "Python", "max_stars_repo_path": "ndindex/tests/test_newshape.py", "max_stars_repo_name": "Quansight/ndindex", "max_stars_repo_head_hexsha": "5957c70d1ab5fab66f7c87aba8030a45f858085c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 67, "max_stars_repo_stars_event_min_datetime": "2020-03-10T12:37:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-02T00:25:36.000Z", "max_issues_repo_path": "ndindex/tests/test_newshape.py", "max_issues_repo_name": "Quansight/ndindex", "max_issues_repo_head_hexsha": "5957c70d1ab5fab66f7c87aba8030a45f858085c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2020-03-25T22:08:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-16T21:39:14.000Z", "max_forks_repo_path": "ndindex/tests/test_newshape.py", "max_forks_repo_name": "Quansight/ndindex", "max_forks_repo_head_hexsha": "5957c70d1ab5fab66f7c87aba8030a45f858085c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-09T20:01:05.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-11T17:18:36.000Z", "avg_line_length": 31.6226415094, "max_line_length": 88, "alphanum_fraction": 0.6270883055, "include": true, "reason": "from numpy", "num_tokens": 500}
|
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule Cubature_jll
using Base
using Base: UUID
import JLLWrappers
# Generate the standard JLL wrapper API (artifact path helpers, library
# handles, `__init__`, etc.) for the "Cubature" binary artifact, identified
# in the registry by the UUID below.
JLLWrappers.@generate_main_file_header("Cubature")
JLLWrappers.@generate_main_file("Cubature", UUID("7bc98958-0e37-5d67-a6ac-a3a19030071a"))
end  # module Cubature_jll
|
{"hexsha": "14f9e0436600d5f688230d28dc637903a42b685f", "size": 310, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Cubature_jll.jl", "max_stars_repo_name": "JuliaBinaryWrappers/Cubature_jll.jl", "max_stars_repo_head_hexsha": "b8ebd00e185c66d6f20613dbb30ad469f8d144e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Cubature_jll.jl", "max_issues_repo_name": "JuliaBinaryWrappers/Cubature_jll.jl", "max_issues_repo_head_hexsha": "b8ebd00e185c66d6f20613dbb30ad469f8d144e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Cubature_jll.jl", "max_forks_repo_name": "JuliaBinaryWrappers/Cubature_jll.jl", "max_forks_repo_head_hexsha": "b8ebd00e185c66d6f20613dbb30ad469f8d144e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 89, "alphanum_fraction": 0.8096774194, "num_tokens": 100}
|
[STATEMENT]
lemma has_derivative_power[simp, derivative_intros]:
fixes f :: "'a :: real_normed_vector \<Rightarrow> 'b :: real_normed_field"
assumes f: "(f has_derivative f') (at x within S)"
shows "((\<lambda>x. f x^n) has_derivative (\<lambda>y. of_nat n * f' y * f x^(n - 1))) (at x within S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. f x ^ n) has_derivative (\<lambda>y. of_nat n * f' y * f x ^ (n - 1))) (at x within S)
[PROOF STEP]
using has_derivative_prod[OF f, of "{..< n}"]
[PROOF STATE]
proof (prove)
using this:
((\<lambda>x. \<Prod>i<n. f x) has_derivative (\<lambda>y. \<Sum>i<n. f' y * (\<Prod>j\<in>{..<n} - {i}. f x))) (at x within S)
goal (1 subgoal):
1. ((\<lambda>x. f x ^ n) has_derivative (\<lambda>y. of_nat n * f' y * f x ^ (n - 1))) (at x within S)
[PROOF STEP]
by (simp add: prod_constant ac_simps)
|
{"llama_tokens": 376, "file": null, "length": 2}
|
import os.path as osp
import Image
from scipy.misc import fromimage
import numpy as np
from ImageProcessing import thresholdNDArray
from DefinitionsAndUtils import *
from GraphAndHistogramUtilities import countQuantiles
from CurrentLM import applyCurrentLM, iles, ileNames
def applyPredThresh(pixels):
    """Threshold *pixels* in place, using per-channel thresholds predicted
    by the current linear model from the channel quantiles."""
    # Exclude all-zero pixels so they don't skew the quantile estimates.
    nonzero = pixels[np.any(pixels, 1)]
    channel_counts = {c: np.bincount(nonzero[:, c], minlength=256) for c in colors}
    quantiles = {c: countQuantiles(channel_counts[c], iles) for c in colors}
    # Flatten the per-channel quantiles into the feature names the LM expects.
    features = {"R8D": quantiles[R][0], "R9D": quantiles[R][1],
                "G8D": quantiles[G][0], "G9D": quantiles[G][1],
                "B8D": quantiles[B][0], "B9D": quantiles[B][1]}
    predicted = {c: applyCurrentLM(features, c) for c in colorNames}
    thresholdNDArray(pixels, predicted, dropSaturated=True)
# Script body: threshold one endosome image with both the manually chosen
# (experimental) thresholds and the LM-predicted ones, then write both
# results out as TIFF files for comparison.
origFilename = osp.join(imageDataPath,
                        "Endosomes/10minend/ser3/10m60xendser31.TIF")
# Manually determined per-channel thresholds for this image.
expThreshes = {"R":17, "G":41, "B":34}
currentImage = Image.open(origFilename)
# get base arrays: one flattened (pixels x RGB) copy per thresholding method
# NOTE(review): numImagePoints comes from a star import; presumably 1024*1024
# to match the reshape at the bottom -- confirm in DefinitionsAndUtils.
asArray = {}
asArray['exp'] = fromimage(currentImage).reshape((numImagePoints,3))
asArray['pred'] = asArray['exp'].copy()
# apply thresholds (both mutate their array in place)
thresholdNDArray(asArray['exp'], expThreshes, dropSaturated=True)
applyPredThresh(asArray['pred'])
# reconstruct images and write out
outputPath = "/home/mfenner/scipy_prep/final-images"
for name, arr in asArray.items():
    # outImage = Image.merge(currentImage.mode, (arr[R], arr[G], arr[B]))
    outImage = Image.fromarray(arr.astype(np.uint8).reshape((1024,1024,3)))
    outImage.save(osp.join(outputPath,
                           "10minEndosomeSeries3Slice1-"+name+".tif"))
|
{"hexsha": "1e06435b59cdbeef586efd393b88f7746919e2c7", "size": 1782, "ext": "py", "lang": "Python", "max_stars_repo_path": "thresholdAnImage.py", "max_stars_repo_name": "mfenner1/py_coloc_utils", "max_stars_repo_head_hexsha": "1d98c8e9934928ced9d92f8dcef64471aa4b9dbc", "max_stars_repo_licenses": ["Unlicense", "BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2016-02-13T05:41:25.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-08T16:48:42.000Z", "max_issues_repo_path": "thresholdAnImage.py", "max_issues_repo_name": "mfenner1/py_coloc_utils", "max_issues_repo_head_hexsha": "1d98c8e9934928ced9d92f8dcef64471aa4b9dbc", "max_issues_repo_licenses": ["Unlicense", "BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-26T22:55:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-26T22:55:30.000Z", "max_forks_repo_path": "thresholdAnImage.py", "max_forks_repo_name": "mfenner1/py_coloc_utils", "max_forks_repo_head_hexsha": "1d98c8e9934928ced9d92f8dcef64471aa4b9dbc", "max_forks_repo_licenses": ["Unlicense", "BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-08-07T04:13:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-15T18:36:30.000Z", "avg_line_length": 33.6226415094, "max_line_length": 77, "alphanum_fraction": 0.6655443322, "include": true, "reason": "import numpy,from scipy", "num_tokens": 509}
|
# -*- coding: utf-8 -*-
from scipy.constants import Avogadro
from pymatgen.core.structure import Structure as Structure_PMG
# from pymatgen.analysis.prototypes import AflowPrototypeMatcher
from simmate.database.base_data_types import (
DatabaseTable,
table_column,
Spacegroup,
)
# TODO:
# Explore polymorphic relations instead of a JSON dictionary.
# Making relationships to different tables makes things difficult to use, so
# these columns are just standalone.
#
# This will be very important for "source" and "parent_nested_calculations"
# fields because I have no way to efficiently convert these fields to the objects
# that they refer to. There's also no good way to access a structure's "children"
# (i.e. structure where they are the source).
#
# I should investigate generic relations in django though:
# https://docs.djangoproject.com/en/3.2/ref/contrib/contenttypes/#generic-relations
#
# Another option is using django-polymorphic.
# https://django-polymorphic.readthedocs.io/en/latest/
# This thread is really helpful on the subject:
# https://stackoverflow.com/questions/30343212/
# TODO:
# Consider adding some methods to track the history of a structure. This
# would be useful for things like evolutionary algorithms.
# get_source_parent:
# this would iterate through sources until we find one in the same table
# as this one. Parent sources are often the most recent transformation
# or mutation applied to a structure, such as a MirrorMutation.
# get_source_seed:
# this would iterate through sources until we hit a dead-end. So the seed
# source would be something like a third-party database, a method that
# randomly create structures, or a prototype.
# Both of these get more complex when we consider transformation that have
# multiple parents (and therefore multiple seeds too). An example of this
# is the HereditaryMutation.
class Structure(DatabaseTable):
    """Base Info"""

    # The structure which is written to a string and in a compressed format
    # using the .from_pymatgen() method. To get back to our pymatgen structure
    # object, use the .to_pymatgen() method!
    structure_string = table_column.TextField()

    # EXPERIMENTAL
    # Where the structure came from. This could be a number of things, including
    # a third party id, a transformation of another structure, a creation method,
    # or just a custom submission by the user.
    #
    # Source can be the name of another table or a python transformation.
    # Source id can be thought of as the "parent structure id", which can be a
    # string (mp-123), an integer (123 of same table), a list of these ([123,321]),
    # or even be nothing. We make it a JSON field to account for all scenarios.
    # EXAMPLES: (source --> source_id)
    #   MaterialsProject --> mp-123
    #   PyXtalStructure --> null
    #   AtomicPurmutation --> 123
    #   HereditaryMutation --> [123,124]
    #   user_submission --> null
    source = table_column.JSONField(blank=True, null=True)

    # EXPERIMENTAL
    # Where this calculation plays a role within a "nested" workflow calculation.
    # Because this structure can be reused by multiple workflows, we make this
    # a list of source-like objects. For example, a relaxation could be part of
    # a series of relaxations (like in StagedRelaxation) or it can be an initial
    # step of a BandStructure calculation.
    # parent_nested_calculations = table_column.JSONField(blank=True, null=True)

    """ Query-helper Info """

    # total number of sites in the unitcell
    nsites = table_column.IntegerField()

    # total number of unique elements
    nelements = table_column.IntegerField()

    # List of elements in the structure (ex: ["Y", "C", "F"])
    elements = table_column.JSONField()

    # the base chemical system (ex: "Y-C-F")
    chemical_system = table_column.CharField(max_length=25)
    # Note: be careful when searching for elements! Running chemical_system__includes="C"
    # on this field won't do what you expect -- because it will return structures
    # containing Ca, Cs, Ce, Cl, and so on. If you want to search for structures
    # that contain a specific element, use elements__contains="C" instead.

    # Density of the crystal (g/cm^3)
    density = table_column.FloatField()

    # Density of atoms in the crystal (atoms/Angstom^3)
    density_atomic = table_column.FloatField()

    # Volume of the unitcell.
    # Note: in most cases, volume_molar should be used instead!
    volume = table_column.FloatField()

    # Molar volume of the crystal (cm^3/mol)
    # Note we prefer this over a "volume" field because volume is highly dependent
    # on the symmetry and the arbitrary unitcell. If you are truly after small volumes
    # of the unitcell, it is likely you really just want to search by spacegroup.
    volume_molar = table_column.FloatField()

    # The composition of the structure formatted in various ways
    # BUG: The max length here is overkill because there are many structures
    # with 8+ elements and disordered formula (e.g. "Ca2.103 N0.98")
    formula_full = table_column.CharField(max_length=50)  # more
    formula_reduced = table_column.CharField(max_length=50)
    formula_anonymous = table_column.CharField(max_length=50)

    # NOTE: extra fields for the Lattice and Sites are intentionally left out
    # in order to save on overall database size. Things such as...
    #   Lattice: matrix and then... a, b, c, alpha, beta, gamma, volume
    #   Sites: abc, xyz, properties, species/element, occupancy
    # shouldn't be queried directly. If you'd like to sort structures by these
    # criteria, you can still do this in python and pandas! Just not at the
    # SQL level

    """ Relationships """
    # For the majority of Structures, you'll want to have a "source" relation that
    # indicates where the structure came from. I don't include this in the abstract
    # model but there are many ways to define it. For example it may relate to another
    # Structure table or even a Calculation. In another case, the entire Structure
    # table may have the same exact source, in which case you'd make a property!
    # Each structure can have many Calculation(s)

    # symmetry info
    spacegroup = table_column.ForeignKey(Spacegroup, on_delete=table_column.PROTECT)

    # The AFLOW prototype that this structure maps to.
    # TODO: this will be a relationship in the future
    # prototype = table_column.CharField(max_length=50, blank=True, null=True)

    """ Properties """
    # none

    """ Model Methods """

    @classmethod
    def from_pymatgen(cls, structure, as_dict=False, **kwargs):
        """Build a database row (or a plain dict when as_dict=True) from a
        pymatgen Structure.  Does NOT save the row to the database."""
        # --------------------------------------
        # FIND A BETTER SPOT FOR THIS CODE. See _from_dynamic method below for more.
        structure = cls._from_dynamic(structure)
        # --------------------------------------

        # OPTIMIZE: I currently store files as poscar strings for ordered structures
        # and as CIFs for disordered structures. Both of these include excess information
        # that slightly inflates file size, so I will be making a new string format in
        # the future. This will largely be based off the POSCAR format, but will
        # account for disordered structures and will limit repeated data (such as the
        # header line, "direct", listing each element/composition, etc.).
        storage_format = "POSCAR" if structure.is_ordered else "CIF"

        # OPTIMIZE
        # This attempts to match the structure to an AFLOW prototype and it is
        # by far the slowest step of loading structures to the database. Try
        # to optimize this in the future.
        # prototype = AflowPrototypeMatcher().get_prototypes(structure)
        # prototype_name = prototype[0]["tags"]["mineral"] if prototype else None

        # Given a pymatgen structure object, this will return a database structure
        # object, but will NOT save it to the database yet. The kwargs input
        # is only if you inherit from this class and add extra fields.
        structure_dict = dict(
            structure_string=structure.to(fmt=storage_format),
            nsites=structure.num_sites,
            nelements=len(structure.composition),
            elements=[str(e) for e in structure.composition.elements],
            chemical_system=structure.composition.chemical_system,
            density=structure.density,
            density_atomic=structure.num_sites / structure.volume,
            volume=structure.volume,
            # 1e-27 is to convert from cubic angstroms to Liter and then 1e3 to
            # mL. Therefore this value is in mL/mol
            # OPTIMIZE: move this to a class method
            volume_molar=(structure.volume / structure.num_sites)
            * Avogadro
            * 1e-27
            * 1e3,
            spacegroup_id=structure.get_space_group_info(0.1)[1],  # OPTIMIZE
            formula_full=structure.composition.formula,
            formula_reduced=structure.composition.reduced_formula,
            formula_anonymous=structure.composition.anonymized_formula,
            # prototype=prototype_name,
            **kwargs,  # this allows subclasses to add fields with ease
        )
        # If as_dict is false, we build this into an Object. Otherwise, just
        # return the dictionary
        return structure_dict if as_dict else cls(**structure_dict)

    def to_pymatgen(self):
        """Convert this database row back to a pymatgen Structure object."""
        # NOTE: if you know this is what you're going to do from a query, then
        # it is more efficient to only grab the structure_string column because
        # that's all you need! You'd do that like this:
        #   structure_db = Structure.objects.only("structure_string").get(id="example-id")
        # This grabs the proper Structure entry and only the structure column.

        # convert the stored string to python dictionary.
        # OPTIMIZE: see my comment on storing strings in the from_pymatgen method above.
        # For now, I need to figure out if I used "CIF" or "POSCAR" and read the structure
        # accordingly. In the future, I can just assume my new format.
        # If the string starts with "#", then I know that I stored it as a "CIF".
        storage_format = "CIF" if (self.structure_string[0] == "#") else "POSCAR"

        # convert the string to pymatgen Structure object
        structure = Structure_PMG.from_str(
            self.structure_string,
            fmt=storage_format,
        )
        return structure

    @staticmethod
    def _from_dynamic(structure):
        """Normalize a flexible "structure" input (pymatgen object, pymatgen
        dict, or a dict pointing at a previous Simmate calculation) into a
        pymatgen Structure object."""
        # FIND A BETTER SPOT FOR THIS CODE (likely attached to base Structure class)
        # For an almost identical implementation see...
        #   from simmate.workflows.common_tasks.all import load_input
        # I should combine/condense these.

        # I allow the structure input to be a number of inputs
        # (see workflows.common_tasks.load_input and workflow_engine.workflow for why)
        # I therefore convert to pymatgen structure object here first.

        # if the input is already a pymatgen structure, just return it back
        if isinstance(structure, Structure_PMG):
            return structure

        # otherwise we have a dictionary object

        # if the "@module" key is in the dictionary, then we have a pymatgen structure
        # dict which we convert to a pymatgen object and return
        if "@module" in structure.keys():
            return Structure.from_dict(structure)

        # otherwise we now know we have a dictionary pointing to the simmate database
        from simmate.website.local_calculations import models as all_datatables
        from django.utils.module_loading import import_string

        # first start by loading the database table, which is given as a module path
        datatable_str = structure["calculation_table"]

        # Import the datatable class -- how this is done depends on if it is from
        # a simmate supplied class or if the user supplied a full path to the class
        # OPTIMIZE: is there a better way to do this?
        if hasattr(all_datatables, datatable_str):
            datatable = getattr(all_datatables, datatable_str)
        else:
            datatable = import_string(datatable_str)

        # These attributes tell us which structure to grab from our datatable. The
        # user should have only provided one -- if they gave more, we just use whichever
        # one comes first.
        prefect_flow_run_id = structure.get("prefect_flow_run_id")
        calculation_id = structure.get("calculation_id")
        directory_old = structure.get("directory")

        # we must have either a prefect_flow_run_id or calculation_id
        if not prefect_flow_run_id and not calculation_id and not directory_old:
            raise Exception(
                "You must have either a prefect_flow_run_id, calculation_id, or directory"
                " provided if you want to load a structure from a previous calculation."
            )

        # now query the datatable with whichever was provided. Each of these
        # are unique so all three should return a single calculation.
        if calculation_id:
            calculation = datatable.objects.get(id=calculation_id)
        elif prefect_flow_run_id:
            calculation = datatable.objects.get(prefect_flow_run_id=prefect_flow_run_id)
        elif directory_old:
            calculation = datatable.objects.get(directory=directory_old)

        # In some cases, the structure we want is not within the calculation table.
        # For example, in relaxations the final structure is attached via table.structure_final
        structure_field = structure.get("structure_field")
        if structure_field:
            structure = getattr(calculation, structure_field).to_pymatgen()
        # if there's no structure field, that means we already have the correct entry
        else:
            structure = calculation.to_pymatgen()

        # structure should now be a pymatgen structure object
        return structure

    """ For website compatibility """

    class Meta:
        abstract = True
        # Any time you inherit from this class, you'll need to indicate which
        # django app it is associated with. For example...
        #   app_label = "third_parties"
|
{"hexsha": "0da110f43f4e8c419cf5f32ced055a04a438d10c", "size": 14381, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/simmate/database/base_data_types/structure.py", "max_stars_repo_name": "sionab/simmate", "max_stars_repo_head_hexsha": "6dedea7310829aae425bf3393e7923e454a0129f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/simmate/database/base_data_types/structure.py", "max_issues_repo_name": "sionab/simmate", "max_issues_repo_head_hexsha": "6dedea7310829aae425bf3393e7923e454a0129f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/simmate/database/base_data_types/structure.py", "max_forks_repo_name": "sionab/simmate", "max_forks_repo_head_hexsha": "6dedea7310829aae425bf3393e7923e454a0129f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.6915584416, "max_line_length": 95, "alphanum_fraction": 0.6929281691, "include": true, "reason": "from scipy", "num_tokens": 3156}
|
import numpy as np
import heapq
import os
import time
import random
import csv
import scipy as sp
import scipy.stats
# Global Variables for easier use in the simulation.
# ----------------------------------- Parameters -----------------------------------
pm = 0                     # Number of parallel simulations
k = 0                      # Number of patient types
l_arr = []                 # Array containing arrival rate for each simulation
l_aban = []                # Array containing abandonment rate for each simulation
w_mu = []                  # Array containing mean service time
w_std = []                 # Array containing STD of service time
h_cost = []                # Array containing holding cost of each ward queue
n_free = []                # Array containing number of nurses free
preempt = []               # Array determining whether or not simulation is running preemption
# ----------------------------------- Simulation Variables -----------------------------------
t = []                     # Array to hold current time for each simulation
r_time_arr = []            # Array holding next time of rebalance for each simulation
queue = []                 # Array containing the patients in the queues of each ward for each simulation
q_capac = []               # Array containing the capacity for each queue
Events = []                # List containing Events where patients leave the ward
A_Events = []              # List containing Events of arrival for each patient type
a_queue_count = []         # Counter for number of preloaded arrivals
queue_length = []          # tracks queue length for each ward
ward_alloc = []            # tracks current allocation of nurses in each ward
ward_capac = []            # tracks current capacity available for each ward
ward_nurse_def = []        # tracks number of nurses ward still needs to receive after rebalance
ward_assignment = []       # tracks wards that still need to receive nurses after rebalance
last_arrival = []          # tracks last patient that arrived in a ward
# ---------------------- Counters for the simulation ----------------------
Arrival_Count = []         # Counts total number of arrivals
Abandonment_Count = []     # Counts total number of abandonments
Balk_Count = []            # Counter for patients who balk
Treated = []               # Counter for number of patients treated
holding_cost = []          # Counter for holding cost
time_server_occupied = []  # Counter for time servers are occupied
weighted_queue = []        # Counter to calculate average queue length
weighted_ward = []         # Counter to calculate average headcount in ward
Counter_Record = []        # Rows of counter snapshots, one appended per trial by resetvar()
class Patient:
    """A patient event record used in the hospital simulation.

    Attributes:
        t: time of this patient's next scheduled event.
        t_arr: arrival time.
        pt: patient type index.
        location: current location identifier.
        ward: ward index (0 until assigned).
        aban: abandonment time sample -- TODO confirm semantics vs rate.
        serv: service time sample.
    """

    def __init__(self, t, t_arr, pt, location, aban, serv):
        self.t = t
        self.location = location
        self.ward = 0
        self.t_arr = t_arr
        self.pt = pt
        self.aban = aban
        self.serv = serv

    # BUG FIX: the original class defined only __cmp__, which Python 3
    # ignores (and the builtin cmp() it calls was removed).  Patients are
    # stored in heapq priority queues ordered by event time, so without a
    # rich comparison a heap push comparing two Patients raises TypeError
    # under Python 3.  __lt__ restores the time ordering.
    def __lt__(self, other):
        return self.t < other.t

    def __cmp__(self, other):  # kept for Python 2 compatibility only
        return cmp(self.t, other.t)

    def get_time(self):
        return self.t

    def set_time(self, t):
        self.t = t

    def get_location(self):
        return self.location

    def set_location(self, location):
        self.location = location

    def get_pt(self):
        return self.pt

    def set_pt(self, pt):
        self.pt = pt

    def get_aban(self):
        return self.aban

    def get_serv(self):
        return self.serv

    def set_serv(self, serv1):
        self.serv = serv1
def mean_confidence_interval(data, confidence=0.95):
    """Return the sample mean of *data* and its two-sided confidence interval.

    Uses the Student-t distribution with the standard error of the mean.

    Parameters
    ----------
    data : sequence of numbers (needs len(data) >= 2 for a finite interval)
    confidence : float in (0, 1), confidence level (default 0.95)

    Returns
    -------
    (m, m - h, m + h) : mean, lower bound, upper bound, where h is the
        half-width of the interval.
    """
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    # FIX: use the public ppf() instead of the private _ppf(), which skips
    # argument validation and is not a stable scipy API.
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, m - h, m + h
def vs_exp(l, period, a, t, vary):
    """Sample an inter-arrival time starting from time *t*.

    With vary=False this is a plain exponential with rate l.  With
    vary=True, draws from a non-homogeneous Poisson process whose rate is
    l*a + a*l*sin(2*pi*t/period), using thinning against the constant
    envelope rate 2*a*l.  Returns the elapsed time until the accepted
    arrival.
    """
    if not vary:
        return np.random.exponential(1/l)
    start_time = t
    # Seed the acceptance test so the loop body runs at least once
    # (U = 1.1 > lv / envelope = 1 on entry).
    U = 1.1
    lv = 2*a*float(l)
    while U > lv/(2*a*float(l)):
        # Propose the next candidate event from the envelope process, then
        # accept it with probability lv / envelope (thinning).
        U = np.random.uniform()
        t = t + np.random.exponential(1/float(l*a*2))
        lv = l*a + a*l*np.sin(2*np.pi*t/period)
    return t - start_time
# Used to reset simulation parameters for new trial
def resetvar():
    """Archive this trial's counters into Counter_Record, then re-initialize
    the global simulation state for the next trial.

    Rebinding (not clearing) the globals below means the references stored
    in `new_row` keep pointing at the finished trial's lists, so the archive
    stays intact while fresh lists are created for the next run.

    NOTE(review): `last_arrival` is not reset here (nor listed in the globals)
    -- presumably harmless because its use in the event handlers is commented
    out; confirm before re-enabling those lines.  Counter_Record is
    deliberately NOT cleared: it accumulates one row per trial.
    """
    global pm, k, l_arr, l_aban, w_mu, w_std, h_cost, n_free, preempt, t, r_time_arr, queue, q_capac, Events
    global a_queue_count, queue_length, ward_alloc, ward_capac, ward_nurse_def, ward_assignment, Arrival_Count
    global Balk_Count, Treated, holding_cost, time_server_occupied, Abandonment_Count, A_Events, weighted_ward, weighted_queue
    global Counter_Record
    # Snapshot of this trial: [arrivals, holding cost, busy time,
    # time-weighted queue lengths, time-weighted ward headcounts, end times].
    new_row = []
    new_row.append(Arrival_Count)
    new_row.append(holding_cost)
    new_row.append(time_server_occupied)
    new_row.append(weighted_queue)
    new_row.append(weighted_ward)
    new_row.append(t)
    Counter_Record.append(new_row)
    # ----------------------------------- Parameters -----------------------------------
    pm = 0 # Number of parallel simulations
    k = 0 # Number of patient types
    l_arr = [] # Array containig Arrival rate for each simulation
    l_aban = [] # Array containing abandonment rate for each simulation
    w_mu = [] # Array containing mean service time
    w_std = [] # Array containing STD of service time
    h_cost = [] # Array containing holding cost of each ward queue
    n_free = [] # Array containing number of nurses free
    preempt = [] # Array determining whether or not simulation is running preemption
    # ----------------------------------- Simulation Variables -----------------------------------
    t = [] # Array to hold current time for each simulation
    r_time_arr = [] # Array holding next time of rebalance for each simulation
    queue = [] # Array containing the patients in the queues of each ward for each simulation
    q_capac = [] # Array containing the capacity for each queue
    Events = [] # List containing Events where patients leave the ward
    A_Events = [] # List containing Events of arrival for each patient type
    a_queue_count = [] # Counter for number of preloaded arrivals
    queue_length = [] # tracks queue length for each ward
    ward_alloc = [] # tracks current allocation of nurses in each ward
    ward_capac = [] # tracks current capacity available for each ward
    ward_nurse_def = [] # tracks number of nurses ward still needs to receive after rebalance
    ward_assignment = [] # tracks wards that still need to receive nurses after rebalance
    # ---------------------- Counters for the simulation ----------------------
    Arrival_Count = [] # Counts total number of arrivals
    Abandonment_Count = [] # Counts total number of abandoments
    Balk_Count = [] # Counter for patients who balk
    Treated = [] # Counter for number of patients treated
    holding_cost = [] # Counter for holding cost
    time_server_occupied = [] # Counter for time servers are occupied
    weighted_queue = [] # Counter to calculate average queue length
    weighted_ward = [] # Counter to calculate average headcount in ward
def rebalance(N, sim):
    """Reallocate the N nurses across wards for simulation `sim`.

    Each ward gets nurses proportional to its current queue length (plus one
    guaranteed nurse per ward); the last ward absorbs the rounding remainder.
    Under preemption every in-service patient is pushed back to its queue
    with its remaining service time and the wards are refilled greedily.
    Wards left with spare capacity but no free nurses are recorded as
    deficits to be filled as nurses free up (see departure_event).

    NOTE(review): the proportional shares use `queue_length*(N-k)/total`,
    which is integer floor division under Python 2 -- under Python 3 this
    would silently become float allocations; confirm interpreter version.
    """
    global queue_length
    global ward_capac
    global ward_alloc
    global ward_assignment
    global ward_nurse_def
    global Events
    global A_Events
    global n_free
    global pm
    global k
    global t
    global preempt
    # print queue_length[sim]
    # print ward_capac[sim]
    # print n_free
    # Set variable to hold old allocation
    old_alloc = [0]*k
    # -------------------- START: Procedure for setting new allocation --------------------
    # `total` is floored at k so the proportional shares are well-defined
    # even when all queues are empty.
    total = max(sum(queue_length[sim]), k)
    s = 0
    for i in range(k-1):
        old_alloc[i] = ward_alloc[sim][i]
        ward_alloc[sim][i] = queue_length[sim][i]*(N-k)/total + 1
        ward_capac[sim][i] = ward_capac[sim][i] + ward_alloc[sim][i] - old_alloc[i]
        s += queue_length[sim][i]*(N-k)/total
    # -------------------- END: Procedure for setting new allocation --------------------
    # Setting new allocation and calculating new capacities
    # The last ward receives whatever of the N-k proportional nurses is left.
    old_alloc[k-1] = ward_alloc[sim][k-1]
    ward_alloc[sim][k-1] = (N - k) - s + 1
    ward_capac[sim][k-1] = ward_capac[sim][k-1] + ward_alloc[sim][k-1] - old_alloc[k-1]
    # Procedure to output information to check for consistency
    # print 'phase i'
    # print ward_capac[sim]
    # print ward_alloc[sim]
    # print n_free[sim]
    # Preemption procedure
    requeue = []
    if preempt[sim] == 1:
        for i in range(k):
            requeue.append([])
        # Changes the ward patients back to queue patients after readjusting service time
        # to remaining service time.
        for pat in Events[sim]:
            remaining_service = pat.get_time() - t[sim]
            pat.set_serv(remaining_service)
            pat.set_location('abandonment')
            requeue[pat.get_pt()].append(pat)
        # Reset the wards so they are empty and nurses are all freed
        Events[sim] = []
        ward_capac[sim] = [x for x in ward_alloc[sim]]
        n_free[sim] = N
        # Procecure pushes all patients in wards back to their respective queues
        # We then redistribute the patients later in the next block of code
        for i in range(k):
            incoming_length = len(requeue[i])
            queue[sim][i] = requeue[i] + queue[sim][i]
            queue_length[sim][i] += incoming_length
    # For each type of class of patients we will check to see if we can refill the wards
    for i in range(k):
        # If there are nurses free and the ward has capacity and has a queue, we assign patients
        while(n_free[sim] > 0 and ward_capac[sim][i] > 0 and queue_length[sim][i] > 0):
            # Get next patient in the queue
            next_patient = queue[sim][i].pop(0)
            queue_length[sim][i] -= 1
            # Change patient type to ward patient from abandoner
            next_patient.set_time(t[sim] + next_patient.get_serv())
            next_patient.set_location('ward')
            # Add patient to Events list
            heapq.heappush(Events[sim], next_patient)
            # Update counters
            n_free[sim] -= 1
            ward_capac[sim][i] -= 1
    # Determines deficits in wards by checking for excess capacity when no nurses are free
    for i in range(k):
        if n_free[sim] == 0 and ward_capac[sim][i] > 0:
            ward_nurse_def[sim][i] = -ward_capac[sim][i]
            ward_assignment[sim].append(i)
    # Procedure to output information to check for consistency
    # print 'phase ii'
    # print ward_capac[sim]
    # print ward_alloc[sim]
    # print n_free[sim]
# --------------------------------- Arrival Event for continuous policy ---------------------------------
def arrival_event_cont(event, sim):
    """Handle a patient arrival under the continuous (work-conserving) policy.

    If any nurse is free the arrival starts service immediately (per-ward
    allocations are not binding here).  Otherwise, under preemption, the
    arrival may bump the latest-departing patient of a cheaper-holding-cost
    occupied ward back to its queue; failing that it joins its own queue, or
    balks if the queue is at capacity.  Finally the consumed arrival event is
    replaced so all parallel simulations share the same arrival stream
    (common random numbers).

    NOTE(review): the if/elif nesting below is reconstructed from semantics
    (SOURCE indentation was lost); with preempt==0, min_cost stays h_cost[pt]
    so the swap branch is skipped and the arrival queues/balks normally.
    """
    global Arrival_Count
    global Balk_Count
    global ward_capac
    global w_mu
    global w_std
    global t
    global l_aban
    global Events
    global A_Events
    global l_arr
    global queue
    global n_free
    global q_capac
    global a_queue_count
    global pm
    global queue_length
    global preempt
    global last_arrival
    global h_cost
    Arrival_Count[sim] += 1
    pt = event.get_pt()
    if n_free[sim] > 0:
        # Get service and abandonment times for the patient
        new_serv_time = event.get_serv()
        new_aban = event.get_aban()
        # Create new patient with updated event time for ward
        new_ward_arr = Patient(new_serv_time + t[sim], t[sim], pt, 'ward',
                               new_aban, new_serv_time)
        # Push new patient to Event
        heapq.heappush(Events[sim], new_ward_arr)
        # Adjust the number of nurses free
        ward_capac[sim][pt] = ward_capac[sim][pt] - 1
        n_free[sim] = n_free[sim] - 1
        # Set last ward arrival
        # last_arrival[sim][pt] = new_ward_arr
    else:
        # No nurse free: look for the cheapest occupied ward to preempt.
        min_cost = h_cost[pt]
        min_pt = pt
        if preempt[sim] == 1:
            for h in range(k):
                if h_cost[h] < min_cost and (ward_alloc[sim][h]-ward_capac[sim][h]) > 0:
                    min_cost = h_cost[h]
                    min_pt = h
        if h_cost[pt] > min_cost:
            # Bump the last-scheduled departure of the cheaper ward back to
            # its queue (scan from the heap tail to find one of type min_pt).
            for pat_ind, pat_var in reversed(list(enumerate(Events[sim]))):
                if pat_var.get_pt() == min_pt:
                    pat1 = pat_var
                    del Events[sim][pat_ind]
                    heapq.heapify(Events[sim])
                    # print n_free[sim]
                    break
            # Remove last patient that arrived from the ward with lowest cost
            # pat1 = last_arrival[sim][min_pt]
            # NOTE(review): pat1 is unbound if no min_pt patient is in service;
            # presumably guaranteed by the occupancy check above -- confirm.
            pat1.set_serv(pat1.get_time()-t[sim])
            pat1.set_location('abandonment')
            queue[sim][min_pt].insert(0,pat1)
            queue_length[sim][min_pt] += 1
            heapq.heapify(Events[sim])
            # Get service and abandonment times for the patient
            new_serv_time = event.get_serv()
            new_aban = event.get_aban()
            # Create new patient with updated event time for ward
            new_ward_arr = Patient(new_serv_time + t[sim], t[sim], pt, 'ward',
                                   new_aban, new_serv_time)
            # Push new patient to Event
            heapq.heappush(Events[sim], new_ward_arr)
            # Adjust the number of nurses free
            ward_capac[sim][min_pt] = ward_capac[sim][min_pt] + 1
            ward_capac[sim][pt] = ward_capac[sim][pt] - 1
            # Set last ward arrival
            # last_arrival[sim][min_pt] = new_ward_arr
        elif len(queue[sim][pt]) < q_capac[pt]:
            # Get service and abandonment times for the patient
            new_arrival_time = event.get_serv()
            new_aban = event.get_aban()
            # Create new patient with updated event time for queue
            new_queue_arr = Patient(new_aban + t[sim], t[sim], pt, 'abandonment',
                                    new_aban, new_arrival_time)
            # Push new patient to Event
            queue[sim][pt].append(new_queue_arr)
            queue_length[sim][pt] += 1
        #Case where queue is full and patient leaves system
        else:
            Balk_Count[sim] += 1
    # Remove Patient Arrival Event and reduce count
    # (at t==0 the preloaded arrivals live in Events, afterwards in A_Events)
    if t[sim] == 0:
        heapq.heappop(Events[sim])
    else:
        heapq.heappop(A_Events[sim])
        a_queue_count[sim][pt] -= 1
    #Create new arrival and replace old arrival
    if a_queue_count[sim][pt] == 0 and t[sim] != 0:
        # Generate new arrival time for patient and create new patient
        new_arrival_time = vs_exp(1/l_arr[pt], 1, 1, t[sim], False)
        new_arr = Patient(new_arrival_time + t[sim], t[sim], pt, 'arrival',
                          np.random.exponential(l_aban[pt]), np.random.exponential(w_mu[pt]))
        # Add new arrival to all queues: the SAME object is shared by every
        # parallel simulation (common random numbers).
        for i in range(pm):
            heapq.heappush(A_Events[i], new_arr)
            a_queue_count[i][pt] += 1
# --------------------------------- Departure Event for continuous policy ---------------------------------
def departure_event_cont(event, sim):
    """Handle a service completion under the continuous policy.

    The freed nurse immediately serves the head of the queue with the
    highest holding cost among non-empty queues (a c-mu style priority);
    if every queue is empty the nurse becomes idle.
    """
    global Events
    global A_Events
    global queue
    global w_mu
    global w_std
    global ward_capac
    global t
    global n_free
    global Treated
    global queue_length
    global ward_assignment
    global ward_nurse_def
    global k
    global h_cost
    global last_arrival
    pt = event.get_pt()
    # Find the most expensive non-empty queue (ties go to the later index).
    max_cost = 0
    max_pt = pt
    for h in range(k):
        if h_cost[h] >= max_cost and queue_length[sim][h] > 0:
            max_cost = h_cost[h]
            max_pt = h
    if queue_length[sim][max_pt] > 0:
        # Get next patient and remove from Event list (abandoner) to change to a ward patient
        next_patient = queue[sim][max_pt].pop(0)
        # Events[sim].remove(next_patient)
        queue_length[sim][max_pt] -= 1
        ward_capac[sim][pt] = ward_capac[sim][pt] + 1
        ward_capac[sim][max_pt] = ward_capac[sim][max_pt] - 1
        # Change patient type to ward patient from abandoner
        next_patient.set_time(t[sim] + next_patient.get_serv())
        next_patient.set_location('ward')
        # Heapify the event list and replace the last departure with the new patient
        heapq.heappop(Events[sim])
        heapq.heappush(Events[sim], next_patient)
    else:
        # All queues empty: the nurse goes idle.
        heapq.heappop(Events[sim])
        n_free[sim] += 1
        ward_capac[sim][pt] += 1
    # if event == last_arrival[sim][pt]:
    #     print 'check'
    #     print event
    Treated[sim] += 1
# --------------------------------- Arrival Event for non-continuous policies ---------------------------------
def arrival_event(event, sim):
    """Handle a patient arrival under the shift-based (non-continuous) policy.

    The arrival enters service only if its OWN ward has capacity and a nurse
    is free; otherwise it queues (its event time becomes its abandonment
    time), or balks if the queue is full.  The consumed arrival event is then
    replaced for all parallel simulations (common random numbers).
    """
    global Arrival_Count
    global Balk_Count
    global ward_capac
    global w_mu
    global w_std
    global t
    global l_aban
    global Events
    global A_Events
    global l_arr
    global queue
    global n_free
    global q_capac
    global a_queue_count
    global pm
    global queue_length
    Arrival_Count[sim] += 1
    pt = event.get_pt()
    #Case where designated ward is open
    if ward_capac[sim][pt] > 0 and n_free[sim] > 0:
        # Get service and abandonment times for the patient
        new_serv_time = event.get_serv()
        new_aban = event.get_aban()
        # Create new patient with updated event time for ward
        new_ward_arr = Patient(new_serv_time + t[sim], t[sim], pt, 'ward',
                               new_aban, new_serv_time)
        # Push new patient to Event
        heapq.heappush(Events[sim], new_ward_arr)
        # Adjust the capacities of the wards and nurses free
        ward_capac[sim][pt] = ward_capac[sim][pt] - 1
        n_free[sim] = n_free[sim] - 1
    #Case where ward is full and patient is sent into queue
    elif len(queue[sim][pt]) < q_capac[pt]:
        # Get service and abandonment times for the patient
        new_arrival_time = event.get_serv()
        new_aban = event.get_aban()
        # Create new patient with updated event time for queue
        new_queue_arr = Patient(new_aban + t[sim], t[sim], pt, 'abandonment',
                               new_aban, new_arrival_time)
        # Push new patient to Event
        queue[sim][pt].append(new_queue_arr)
        queue_length[sim][pt] += 1
    #Case where queue is full and patient leaves system
    else:
        Balk_Count[sim] += 1
    # Remove Patient Arrival Event and reduce count
    # (at t==0 the preloaded arrivals live in Events, afterwards in A_Events)
    if t[sim] == 0:
        heapq.heappop(Events[sim])
    else:
        heapq.heappop(A_Events[sim])
        a_queue_count[sim][pt] -= 1
    #Create new arrival and replace old arrival
    if a_queue_count[sim][pt] == 0 and t[sim] != 0:
        # Generate new arrival time for patient and create new patient
        new_arrival_time = vs_exp(1/l_arr[pt], 1, 1, t[sim], False)
        new_arr = Patient(new_arrival_time + t[sim], t[sim], pt, 'arrival',
                          np.random.exponential(l_aban[pt]), np.random.exponential(w_mu[pt]))
        # Add new arrival to all queues: the SAME object is shared by every
        # parallel simulation (common random numbers).
        for i in range(pm):
            heapq.heappush(A_Events[i], new_arr)
            a_queue_count[i][pt] += 1
# --------------------------------- Departure Event for non-continuous policies ---------------------------------
def departure_event(event, sim):
    """Handle a service completion under the shift-based policy.

    The freed nurse serves the next patient of the SAME ward if one is
    queued.  If the ward is over its post-rebalance allocation
    (ward_capac < 0) the nurse instead moves to a randomly chosen ward that
    is still owed nurses (ward_assignment) and starts serving there.
    Otherwise the nurse simply goes idle.
    """
    global Events
    global A_Events
    global queue
    global w_mu
    global w_std
    global ward_capac
    global t
    global n_free
    global Treated
    global queue_length
    global ward_assignment
    global ward_nurse_def
    global k
    global h_cost
    pt = event.get_pt()
    # Check if patients are in queue, checks if ward capacity allows for another patient
    if queue[sim][pt] and ward_capac[sim][pt] >= 0 and n_free[sim] >= 0:
        max_pt = pt
        # Get next patient and remove from Event list (abandoner) to change to a ward patient
        next_patient = queue[sim][max_pt].pop(0)
        # Events[sim].remove(next_patient)
        queue_length[sim][max_pt] -= 1
        # Change patient type to ward patient from abandoner
        next_patient.set_time(t[sim] + next_patient.get_serv())
        next_patient.set_location('ward')
        # Heapify the event list and replace the last departure with the new patient
        heapq.heappop(Events[sim])
        heapq.heappush(Events[sim], next_patient)
        # Net zero here since max_pt == pt: the nurse stays in this ward.
        ward_capac[sim][pt] += 1
        ward_capac[sim][max_pt] -= 1
    # Case when nurse finishes treating a patient and moves to her newly assigned ward
    elif ward_capac[sim][pt] < 0 and n_free[sim] >= 0 and ward_assignment[sim]:
        heapq.heappop(Events[sim])
        n_free[sim] += 1
        ward_capac[sim][pt] += 1
        # Pick one of the wards still owed nurses and shrink its deficit.
        w = random.choice(ward_assignment[sim])
        ward_nurse_def[sim][w] += 1
        if ward_nurse_def[sim][w] == 0:
            ward_assignment[sim].remove(w)
        if queue_length[sim][w] > 0:
            next_patient = queue[sim][w].pop(0)
            queue_length[sim][w] -= 1
            # Change patient type to ward patient from abandoner
            next_patient.set_time(t[sim] + next_patient.get_serv())
            next_patient.set_location('ward')
            # Heapify the event list and replace the last departure with the new patient
            heapq.heappush(Events[sim], next_patient)
            n_free[sim] -= 1
            ward_capac[sim][w] -= 1
    else:
        # Nothing to serve: the nurse goes idle.
        heapq.heappop(Events[sim])
        n_free[sim] += 1
        ward_capac[sim][pt] += 1
    Treated[sim] += 1
# --------------------------------- Abandonment Event for non-continuous policies ---------------------------------
def aban_event(event, sim):
    """Handle an abandonment event: pop it from the event heap and, if the
    patient is still waiting in its queue, remove it and count the
    abandonment.  A patient already moved into a ward (so no longer in the
    queue) is popped silently with no counter changes."""
    global Abandonment_Count
    global Events
    global A_Events
    global queue
    global queue_length
    patient_type = event.get_pt()
    leaver = heapq.heappop(Events[sim])
    # Membership is identity-based (Patient has no __eq__), so this only
    # matches the exact queued object.
    if leaver in queue[sim][patient_type]:
        queue[sim][patient_type].remove(leaver)
        Abandonment_Count[sim] += 1
        queue_length[sim][patient_type] -= 1
def simulation(T, N, lbda, mu, std, theta, tau, classes, hcost, q_cap, s_alloc, par_sim, rb, cont, preemption, s_t, p_t, a_t):
    """Run `par_sim` parallel event-driven ward simulations until time T.

    Args:
        T: simulation time horizon.
        N: total number of nurses.
        lbda, mu, std, theta: per-class arrival / service / service-STD /
            abandonment parameters.  They are fed straight to
            np.random.exponential, which treats its argument as a *mean*
            (scale) -- TODO confirm units with the caller, which labels
            them as rates.
        tau: shift length between rebalances.
        classes: number of patient classes k.
        hcost: per-class holding cost.
        q_cap: per-class queue capacity.
        s_alloc: initial nurse allocation per simulation per ward.
        par_sim: number of parallel simulations (they share arrival events,
            i.e. common random numbers).
        rb / cont / preemption: per-simulation policy flags (rebalance,
            continuous policy, preemption).
        s_t, p_t, a_t: service times, types and patiences of patients
            already present at t=0.

    Returns:
        statistics: one table per simulation -- a header row, then one row
        per processed event: [time, per-ward headcounts..., queue lengths...].
    """
    global t, k, pm, r_time_arr, queue, l_arr, l_aban, w_mu, w_std, n_free, ward_alloc, ward_capac, last_arrival
    global Events, A_Events, capac, q_capac, a_queue_count, queue_length, ward_assignment, ward_nurse_def, h_cost, preempt
    # ----------------- Environment Variables -----------------
    l_arr = lbda # Arrival Rates
    l_aban = theta # Abandonment Rates
    w_mu = mu # Service Rate
    w_std = std # STD of Service Rate
    q_capac = q_cap # Capacity of each queue
    r_time = tau # Shift Length
    Time = T # Total Simulation Run-Time
    pm = par_sim # Total parallel simulations
    k = classes
    h_cost = hcost
    preempt = preemption
    # ----------------- Simulation Variables -----------------
    # Variables Keeping track of states, queues, etc.
    # Initiate arrivals for first set of patients, same for all simulations
    # NOTE(review): the second argument passes the global list `t` (still
    # empty here) as t_arr, not a scalar time -- presumably unused; confirm.
    e = [Patient(np.random.exponential(lbda[i]), t, i, 'arrival',
                 np.random.exponential(l_aban[i]), np.random.exponential(w_mu[i])) for i in range(0,k)]
    # Creating existing patients for the system
    existing = []
    for ind, e_p in enumerate(p_t):
        existing.append(Patient(0, 0, e_p, 'arrival', a_t[ind], s_t[ind]))
    # Sort patients in order of arrivals
    heapq.heapify(e)
    statistics = []
    for i in range(par_sim):
        t.append(0) # Initiate Current time for each simulation, initial is 0
        queue.append([]) # Initiate Queues for each simulation, initial is empty
        Events.append([]) # Initiate Event List for each simulation
        A_Events.append([]) # Initiate Arrival List
        n_free.append(N) # Initiate Nurses free for each simulation, initial is N
        ward_alloc.append([]) # Initiate arrays for the allocation of nurses to each ward
        ward_capac.append([]) # Current Capacity of Each ward
        ward_nurse_def.append([]) # Keeps deficit of nurses during rebalance
        ward_assignment.append([]) # Keeps track of wards that still need nurses assigned during rebalance
        last_arrival.append([]) # Initiate arrays for each sim
        Balk_Count.append(0) # Initiate Balk count for each sim
        Arrival_Count.append(0) # Initiate Arrival count for each sim
        Abandonment_Count.append(0) # Initiate Abandonment count for each sim
        Treated.append(0) # Initiate Treated patient count for each sim
        holding_cost.append(0) # Initiate holding cost
        time_server_occupied.append(0) # Initiate time occupied
        weighted_ward.append([])
        weighted_queue.append([])
        a_queue_count.append([]) # Initiate array for arrival queue count
        queue_length.append([]) # Initiate array for queue length
        r_time_arr.append(r_time) # Initiate first rebalance time
        statistics.append([]) # Initiate array recording arrivals
        headerX = []
        headerQ = []
        #Populating arrays
        for j in range(k):
            a_queue_count[i].append(1) # Counts number of arrivals for a type of patient in the simulation
            queue_length[i].append(0)
            ward_alloc[i].append(s_alloc[i][j]) # Assigning the initial allocation for each ward
            ward_capac[i].append(s_alloc[i][j]) # Assigning the initial capacity free for each ward
            ward_nurse_def[i].append(0) # Initiates nurse deficits in the ward to 0 at start
            last_arrival[i].append(0) # Place holder for last arrival
            A_Events[i].append(e[j]) # Assigning the initial patients to the events of each simulation
            queue[i].append([])
            weighted_queue[i].append(0)
            weighted_ward[i].append(0)
            headerX.append('Ward_Count_' + str(j))
            headerQ.append('Queue_Count_' + str(j))
        # Pre-existing patients start in Events with location 'arrival';
        # they are absorbed at t == 0 (see the t[sim] == 0 pop in the
        # arrival handlers).
        for e_pat in existing:
            Events[i].append(e_pat)
        header = ['Time']
        statistics[i].append(header + headerX + headerQ)
    # ----------------- Simulation Start -----------------
    while(min(t) < T):
        for curr_sim in range(pm):
            #Check to see if we should continue running the current simulation
            if t[curr_sim] > T:
                continue
            row = []
            hc = 0
            servers_occupied = N - n_free[curr_sim]
            t_prev = 0
            # Calculating holding cost for next time period
            for wt in range(k):
                hc += queue_length[curr_sim][wt]*hcost[wt]
            # Check what the next event is (earliest of departures/abandonments
            # in Events vs pending arrivals in A_Events)
            if Events[curr_sim]:
                curr_event_E = Events[curr_sim][0]
                curr_event_A = A_Events[curr_sim][0]
                if curr_event_E.get_time() < curr_event_A.get_time():
                    curr_event = curr_event_E
                else:
                    curr_event = curr_event_A
            else:
                curr_event = A_Events[curr_sim][0]
            # Main body of simulation
            if cont[curr_sim] == 1:
                t_prev = t[curr_sim]
                t[curr_sim] = curr_event.get_time()
                if curr_event.get_location() == 'arrival':
                    # print 'arrival:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + 'Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                    arrival_event_cont(curr_event, curr_sim)
                elif curr_event.get_location() == 'ward':
                    # print 'departure:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + 'Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                    departure_event_cont(curr_event, curr_sim)
                elif curr_event.get_location() == 'abandonment':
                    # print 'arrival:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + 'Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                    aban_event(curr_event, curr_sim)
            else:
                # Rebalance case: a shift boundary arrives before the next event
                if r_time_arr[curr_sim] < curr_event.get_time() and rb[curr_sim] == 1:
                    t_prev = t[curr_sim]
                    t[curr_sim] = r_time_arr[curr_sim]
                    rebalance(N, curr_sim)
                    # print 'rebalance ' + str(ward_nurse_def[curr_sim])
                    r_time_arr[curr_sim] = r_time_arr[curr_sim] + r_time
                # No Rebalance case
                elif r_time_arr[curr_sim] < curr_event.get_time() and rb[curr_sim] == 0:
                    t_prev = t[curr_sim]
                    t[curr_sim] = r_time_arr[curr_sim]
                    r_time_arr[curr_sim] = r_time_arr[curr_sim] + r_time
                # Continue
                else:
                    #print 'else'
                    t_prev = t[curr_sim]
                    t[curr_sim] = curr_event.get_time()
                    if curr_event.get_location() == 'arrival':
                        # print 'arrival:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + ' Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                        arrival_event(curr_event, curr_sim)
                    elif curr_event.get_location() == 'ward':
                        # print 'arrival:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + ' Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                        departure_event(curr_event, curr_sim)
                    elif curr_event.get_location() == 'abandonment':
                        # print 'arrival:: Nurses: ' + str(n_free) + ' Queue Length:' + str(queue_length) + ' ' + 'Ward capac:' + str(ward_capac) + ' Ward alloc:' + str(ward_alloc) + ' ' + str(curr_sim) + ' ' + str(t[curr_sim])
                        aban_event(curr_event, curr_sim)
            # Time-weighted accumulators over the elapsed interval
            holding_cost[curr_sim] += (t[curr_sim] - t_prev)*hc
            time_server_occupied[curr_sim] += (t[curr_sim] - t_prev)*servers_occupied
            row.append(t[curr_sim])
            w_val = 0
            for x in range(k):
                # Ward headcount = allocated - free capacity + queued
                w_val = ward_alloc[curr_sim][x]-ward_capac[curr_sim][x]+queue_length[curr_sim][x]
                row.append(w_val)
                weighted_ward[curr_sim][x] += (t[curr_sim] - t_prev)*w_val
            for x in range(k):
                w_val = queue_length[curr_sim][x]
                row.append(w_val)
                weighted_queue[curr_sim][x] += (t[curr_sim] - t_prev)*w_val
            statistics[curr_sim].append(row)
    # print len(statistics[0])
    return statistics
# Function to write table results to a file, used in the main function to write the return values to a csv file
def writeLog(fil, table):
    """Write every row of `table` to the already-open file handle `fil`
    in CSV format (used to dump the per-simulation statistics tables)."""
    writer = csv.writer(fil)
    for row in table:
        # print val
        writer.writerow(row)
# ================ Driver script (Python 2: uses print statements) ================
Trials = 10
Results = []
# ================ Input Variables ================
Total_Time = 400
lbda_out = [1.0/15.0, 1.0/15.0]
mu_out = [1.0/8.0, 1.0/8.0]
std_out = [1, 1]
theta_out = [10000, 10000]
tau_out = .5
k_out = 2
Nurses = 4
hcost_out = [1,2]
q_cap_out = [float('inf'), float('inf')]
# Parallel simulation variables
s_alloc_out = [[2,2], [4,4]]
rebalance1 = [1, 1]
cont_out = [0, 1]
preemption_out = [0, 1]
stats = []
# ---------------- Pre-existing Patients in ward ----------------
service_times = []
patient_type = []
abandonment_times = []
# Length 2 array used to randomly generate pre-existing patients
patient_type_count = [5,5]
for ind, cnt in enumerate(patient_type_count):
    service_times = service_times + [np.random.exponential(mu_out[ind]) for x in range(cnt)]
    patient_type = patient_type + [ind for x in range(cnt)]
    abandonment_times += [10000 for x in range(cnt)]
# print service_times
# print patient_type
# print abandonment_times
# service_times = [.15, .05, .21, .04]
# patient_type = [1, 0, 1, 0]
# abandonment_times = [10000, 10000, 10000, 10000]
# Run the trials; resetvar() archives each trial's counters into
# Counter_Record and clears the globals.  `stats` keeps only the LAST
# trial's tables, which is what gets written to disk below.
for tests in range(Trials):
    stats = simulation(Total_Time, Nurses, lbda_out, mu_out, std_out, theta_out, tau_out, k_out, hcost_out, q_cap_out, s_alloc_out, 2, rebalance1, cont_out, preemption_out, service_times, patient_type, abandonment_times)
    resetvar()
    # print Treated
    # print Arrival_Count
    # print Abandonment_Count
    # print Balk_Count
    # print holding_cost
    # print time_server_occupied
    # print Total_Time*Nurses
    print "Done with Trial " + str(tests)
# ================ File Writing ================
# NOTE(review): "wb" is the Python 2 csv idiom and the handles are never
# closed/flushed explicitly.
fil0 = open(os.getcwd() + "/Sim_Rebalance_20.csv","wb")
fil1 = open(os.getcwd() + "/Sim_No_Rebalance_20.csv","wb")
writeLog(fil0, stats[0])
writeLog(fil1, stats[1])
# print Counter_Record
print '\n'
# Report confidence intervals per simulation.  NOTE(review): this loop
# variable shadows the global `k` (number of classes) used by the event
# handlers -- harmless only because no simulation runs after this point.
# Counter_Record row layout (see resetvar): [Arrival_Count, holding_cost,
# time_server_occupied, weighted_queue, weighted_ward, t] per trial.
for k in range(0,2):
    dataset_arr = []
    dataset_hc = []
    dataset_st = []
    dataset_wq = []
    dataset_ww = []
    for i in range(k_out):
        dataset_wq.append([])
        dataset_ww.append([])
    print "Simulation " + str(k)
    for i in range(Trials):
        dataset_arr.append(Counter_Record[i][0][k])
        dataset_hc.append(Counter_Record[i][1][k])
        dataset_st.append(Counter_Record[i][2][k])
        for j in range(k_out):
            # Divide the time-weighted sums by the trial's end time to get
            # time averages.
            dataset_wq[j].append(Counter_Record[i][3][k][j]/Counter_Record[i][5][k])
            dataset_ww[j].append(Counter_Record[i][4][k][j]/Counter_Record[i][5][k])
    print "Arrivals CI: " + str(mean_confidence_interval(dataset_arr))
    print "Holding Cost CI: " + str(mean_confidence_interval(dataset_hc))
    print "Server Time CI: " + str(mean_confidence_interval(dataset_st))
    for i in range(k_out):
        print "Queue length for ward CI " + str(i) + ": " + str(mean_confidence_interval(dataset_wq[i]))
        print "Headcount for ward CI" + str(i) + ": " + str(mean_confidence_interval(dataset_ww[i]))
    print '\n'
|
{"hexsha": "d7db0c2e5199d9d2fb56960ce1d32f4c90281879", "size": 29974, "ext": "py", "lang": "Python", "max_stars_repo_path": "5_server_queue.py", "max_stars_repo_name": "LaughMachine/Summer-Internship-Project", "max_stars_repo_head_hexsha": "9f4a0560853de2b988d08716479b3e62f092f85b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-05-15T05:59:03.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-15T05:59:03.000Z", "max_issues_repo_path": "5_server_queue.py", "max_issues_repo_name": "LaughMachine/Dynamic-Nurse-Allocation", "max_issues_repo_head_hexsha": "9f4a0560853de2b988d08716479b3e62f092f85b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5_server_queue.py", "max_forks_repo_name": "LaughMachine/Dynamic-Nurse-Allocation", "max_forks_repo_head_hexsha": "9f4a0560853de2b988d08716479b3e62f092f85b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-21T08:08:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-21T08:08:13.000Z", "avg_line_length": 31.2229166667, "max_line_length": 217, "alphanum_fraction": 0.6752185227, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8642}
|
from wtpy import BaseCtaStrategy
from wtpy import CtaContext
import numpy as np
import statsmodels.tsa.stattools as ts
# 我们首先创建一个函数用来协整检验
def cointegration_check(series01, series02):
    """Engle-Granger style two-step cointegration test.

    Steps: (1) ADF unit-root tests on the levels of both series; proceed only
    when both are non-stationary or both stationary at the 10% level;
    (2) ADF tests on the first differences of both series; (3) if both
    differences are stationary, OLS of series01 on series02 and an ADF test
    on the residual.

    Returns:
        (beta, c, resid, result): OLS slope, intercept, residual series and a
        bool flagging cointegration; (0.0, 0.0, 0.0, False) when the
        unit-root preconditions fail.
    """
    # ADF p-values on the levels (maxlag=1).
    urt_1 = ts.adfuller(np.array(series01), 1)[1]
    urt_2 = ts.adfuller(np.array(series02), 1)[1]
    # If both are (non-)stationary together, difference and test again.
    if (urt_1 > 0.1 and urt_2 > 0.1) or (urt_1 < 0.1 and urt_2 < 0.1):
        urt_diff_1 = ts.adfuller(np.diff(np.array(series01)), 1)[1]
        # BUG FIX: previously `1` was passed to np.diff (as the diff order)
        # instead of to adfuller (as maxlag), making this test inconsistent
        # with urt_diff_1 above.
        urt_diff_2 = ts.adfuller(np.diff(np.array(series02)), 1)[1]
        # Both differences stationary: run OLS and test residual stationarity.
        if urt_diff_1 < 0.1 and urt_diff_2 < 0.1:
            matrix = np.vstack([series02, np.ones(len(series02))]).T
            beta, c = np.linalg.lstsq(matrix, series01, rcond=None)[0]
            resid = series01 - beta * series02 - c
            if ts.adfuller(np.array(resid), 1)[1] > 0.1:
                result = False
            else:
                result = True
            return beta, c, resid, result
        else:
            result = False
            return 0.0, 0.0, 0.0, result
    else:
        result = False
        return 0.0, 0.0, 0.0, result
class StraT1(BaseCtaStrategy):
    """Pairs-trading CTA strategy for wtpy.

    At the 09:05 bar each day it re-runs the cointegration test over the
    last N bars; when the pair is cointegrated it trades the OLS residual
    (spread) against +/- threshold standard-deviation bands, and flattens
    both legs when the residual reverts inside the bands.
    """
    def __init__(self, name:str, code1:str, code2:str, bar_cnt:int, period:str, N:int, threshold:float=1):
        BaseCtaStrategy.__init__(self, name)
        self.__n__ = N                      # lookback window for the cointegration test
        self.__threshold__ = threshold      # band width in residual std-devs
        self.__period__ = period            # bar period, e.g. 'm5'
        self.__bar_cnt__ = bar_cnt          # number of bars to pull
        self.__code_1__ = code1             # leg A contract code
        self.__code_2__ = code2             # leg B contract code
        self.__tradable__ = True            # last cointegration test outcome

    def on_init(self, context:CtaContext):
        # Subscribe both legs; code1 drives on_calculate (isMain=True).
        context.stra_get_bars(self.__code_1__, self.__period__, self.__bar_cnt__, isMain = True)
        context.stra_get_bars(self.__code_2__, self.__period__, self.__bar_cnt__, isMain = False)
        context.stra_log_text("T1 inited")

    def on_calculate(self, context:CtaContext):
        # Read current positions of both legs (curPos2 is currently unused).
        curPos1 = context.stra_get_position(self.__code_1__)
        curPos2 = context.stra_get_position(self.__code_2__)
        df_bars_1 = context.stra_get_bars(self.__code_1__, self.__period__, self.__bar_cnt__, isMain = True)
        df_bars_2 = context.stra_get_bars(self.__code_2__, self.__period__, self.__bar_cnt__, isMain = False)
        # Copy strategy parameters to locals for convenience
        # (`threshold` and `maxlen` below are currently unused).
        days = self.__n__
        threshold = self.__threshold__
        close_ay1 = df_bars_1.closes
        close_ay2 = df_bars_2.closes
        maxlen = min(len(close_ay1), len(close_ay2))
        curDate = context.stra_get_date()
        curTime = context.stra_get_time()
        # Refit once per day on the 09:05 bar.
        # NOTE(review): self.beta/self.c/self.up/self.down only exist after
        # the first 09:05 bar; an earlier bar with __tradable__ == True would
        # raise AttributeError below -- confirm the backtest starts at 09:05.
        if curTime == 905:
            self.beta, self.c, resid, result = cointegration_check(close_ay1[-days-1:-1], close_ay2[-days-1:-1])
            self.__tradable__ = result
            if not result:
                # Not cointegrated today: flatten any open spread position.
                if curPos1 != 0:
                    context.stra_log_text("[%d.%04d]协整检验不通过,清掉头寸" % (curDate, curTime))
                    context.stra_set_position(self.__code_1__, 0, 'CutA')
                    context.stra_set_position(self.__code_2__, 0, 'CutB')
                return
            # Residual bands: mean +/- threshold * std
            mean = np.mean(resid)
            std = np.std(resid)
            self.up = mean + self.__threshold__ * std
            self.down = mean - self.__threshold__ * std
        if not self.__tradable__:
            return
        # Current residual of the spread
        resid_new = close_ay1[-1] - self.beta * close_ay2[-1] - self.c
        if resid_new > self.up and curPos1 != -1:
            # Residual above the upper band: short the spread (short A, long B)
            context.stra_log_text("[%d.%04d]残差正向扩大,做空价差" % (curDate, curTime))
            context.stra_enter_short(self.__code_1__, 1, 'OpenSA')
            context.stra_enter_long(self.__code_2__, 1, 'OpenLB')
        elif resid_new < self.down and curPos1 != 1:
            # Residual below the lower band: long the spread (long A, short B)
            context.stra_log_text("[%d.%04d]残差反向扩大,做多价差" % (curDate, curTime))
            context.stra_enter_long(self.__code_1__, 1, 'OpenLA')
            context.stra_enter_short(self.__code_2__, 1, 'OpenSB')
        elif curPos1 != 0 and self.down <= resid_new and resid_new <= self.up:
            # Residual back inside the bands: close both legs
            context.stra_log_text("[%d.%04d]残差回归,清掉头寸" % (curDate, curTime))
            context.stra_set_position(self.__code_1__, 0, 'ExitA')
            context.stra_set_position(self.__code_2__, 0, 'ExitB')

    def on_tick(self, context:CtaContext, stdCode:str, newTick:dict):
        #context.stra_log_text ("on tick fired")
        return
|
{"hexsha": "e0def8c442ba4bd30c1e801bb4c8867caa3c5a59", "size": 4543, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/cta_arbitrage_bt/Strategies/T1.py", "max_stars_repo_name": "systemtrader/wtpy", "max_stars_repo_head_hexsha": "5654662618b7281d12eedc4a782251838e7a9048", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-17T00:44:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T00:44:50.000Z", "max_issues_repo_path": "demos/cta_arbitrage_bt/Strategies/T1.py", "max_issues_repo_name": "sunnyruin/wtpy", "max_issues_repo_head_hexsha": "f9cc1a6248fd33e1f42f0aa4a1c96cf5357bf3b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demos/cta_arbitrage_bt/Strategies/T1.py", "max_forks_repo_name": "sunnyruin/wtpy", "max_forks_repo_head_hexsha": "f9cc1a6248fd33e1f42f0aa4a1c96cf5357bf3b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1637931034, "max_line_length": 113, "alphanum_fraction": 0.5936605767, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1464}
|
#=~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~=#
# Problem set 5 solutions
# Written by Tyler Ransom
# Commented by Giuseppe Grasso
# Recording available in Notability
#=~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~=#
# Package dependencies for the dynamic discrete-choice problem set.
using Random
using LinearAlgebra
using Statistics
using Optim
using DataFrames
using DataFramesMeta
using CSV
using HTTP
using GLM
# NOTE(review): machine-specific absolute path -- this will fail anywhere
# but the original author's machine; consider @__DIR__ instead.
cd("/Users/peppegrass/Documents/GitHub/fall-2020/ProblemSets/PS5-ddc/") # GG: sets working directory
pwd() ## GG: prints working directory
readdir() # GG: equivalent to -ls- to see elements of working directory
# read in function to create state transitions for dynamic model
include("create_grids.jl")
function allwrap()
    # Entry point for the whole problem set: loads the bus data, estimates a
    # static binary logit (Q2), then estimates the dynamic engine-replacement
    # model (Q3) by backward recursion + maximum likelihood.
    #=
    GG: In this problem set, we will explore a simplified version of the Rust (1987, Econometrica) bus engine
    replacement model. Let’s start by reading in the data.
    =#
    # GG: I - PRELIMINARIES: Loading data and reshaping them
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 1: Reshaping the data
    # Reshape the data into “long” panel format, calling your long dataset df_long.
    # I have included code on how to do this in the PS5starter.jl file that accompanies this problem set.
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # load in the data
    url = "https://raw.githubusercontent.com/OU-PhD-Econometrics/fall-2020/master/ProblemSets/PS5-ddc/busdataBeta0.csv"
    df = CSV.read(HTTP.get(url).body)
    # create bus id variable
    df = @transform(df, bus_id = 1:size(df,1))
    #---------------------------------------------------
    # reshape from wide to long (must do this twice be-
    # cause DataFrames.stack() requires doing it one
    # variable at a time)
    #---------------------------------------------------
    # first reshape the decision variable
    dfy = @select(df, :bus_id,:Y1,:Y2,:Y3,:Y4,:Y5,:Y6,:Y7,:Y8,:Y9,:Y10,:Y11,:Y12,:Y13,:Y14,:Y15,:Y16,:Y17,:Y18,:Y19,:Y20,:RouteUsage,:Branded)
    dfy_long = DataFrames.stack(dfy, Not([:bus_id,:RouteUsage,:Branded]))
    rename!(dfy_long, :value => :Y)
    dfy_long = @transform(dfy_long, time = kron(collect([1:20]...),ones(size(df,1))))
    select!(dfy_long, Not(:variable))
    # next reshape the odometer variable
    dfx = @select(df, :bus_id,:Odo1,:Odo2,:Odo3,:Odo4,:Odo5,:Odo6,:Odo7,:Odo8,:Odo9,:Odo10,:Odo11,:Odo12,:Odo13,:Odo14,:Odo15,:Odo16,:Odo17,:Odo18,:Odo19,:Odo20)
    dfx_long = DataFrames.stack(dfx, Not([:bus_id]))
    rename!(dfx_long, :value => :Odometer)
    dfx_long = @transform(dfx_long, time = kron(collect([1:20]...),ones(size(df,1))))
    select!(dfx_long, Not(:variable))
    # join reshaped df's back together on bus id and time period
    df_long = leftjoin(dfy_long, dfx_long, on = [:bus_id,:time])
    sort!(df_long,[:bus_id,:time])
    # GG: II - STATIC ESTIMATION
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 2: Estimate a STATIC version of the model
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    #=
    GG: The model we would like to estimate is Harold Zurcher’s decision to run buses in his fleet.
    Zurcher’s FLOW UTILITY OF RUNNING (i.e. not replacing) a bus is
    (1): $u_{1}(x_{1t},b)= θ_{0}+ θ_{1}x_{1t} + θ_{2}b$ [see assignment text]
    where x_{1t} is the mileage on the bus’s odometer (in 10,000s of miles) and b is a dummy variable indicating
    whether the bus is branded (meaning its manufacturer is high-end). The choice set is {0,1} where 0 denotes
    replacing the engine.
    Estimate the θ parameters assuming Zurcher is completely myopic.
    This amounts to estimating a simple binary logit model.
    (Note: you may estimate this any way you wish. I would recommend using the GLM package, but you may also
    use Optim with your own log likelihood function.)
    =#
    θ̂_glm = glm(@formula(Y ~ Odometer + Branded), df_long, Binomial(), LogitLink()) # GG: estimating using GLM package. Pooled binary logit using reshaped long data
    println(θ̂_glm)
    #= GG: III - DYNAMIC ESTIMATION
    Now I will walk you through how to estimate the DYNAMIC version of this model using BACKWARDS RECURSION.
    With discount factor β, the DIFFERENTIATED CONDITIONAL VALUE FUNCTION for running the bus (relative to replacing it) is
    (2): $ v_{1t}(x_{t},b) - v_{0t}(x_{t},b) =
        θ_{0} + θ_{1}x_{1t} + θ_{2}b [GG: same as static so far, flow utility]
        + β ∫ V_{t+1}(x_{t+1}, b) dF(x_{t+1} | x_{t}) [GG: recursive component]
    $
    where V_{t+1} is the VALUE FUNCTION and the integral is over TRANSITIONS in the mileage states x_{t}.
    We will APPROXIMATE the INTEGRAL with a SUMMATION, which means that we will specify a DISCRETE MASS FUNCTION for f(x_{t+1} |x_{t}).
    This probability mass function depends on the current odometer reading (x_{1t}),
    whether the engine is newly replaced (i.e. d_{t−1}=0),
    and on the value of another state variable x_2 which measures the usage intensity of the bus’s route
    (i.e. high values of x_2 imply a low usage intensity and vice versa).
    We discretize the mileage transitions into 1,250-mile bins (i.e. 0.125 units of x-{1t}).
    We specify x_2 as a discrete uniform distribution ranging from 0.25 to 1.25 with 0.01 unit increments.
    Formally, we are DISCRETELY (but not discreetly!) APPROXIMATING an EXPONENTIAL DISTRIBUTION:
    (3): [see assignment text]
    You will not need to program (3); I will provide code for this part. Under this formulation, (2) can be written as:
    (4): [see assignment text]
    Finally, we can simplify (4) since we know that V_{t+1} = log( ∑_{k} exp(v_{k,t+1}) ) when we assume
    that unobserved utility is drawn from a T1EV distribution (as we do here):
    (5): [see assignment text]
    Estimation of our dynamic model now requires two steps:
    #1 SOLVING THE MODEL
    First, we need to solve the value functions for a given value of our parameters θ.
    The way we do this is by BACKWARDS RECURSION. We know that V_{t+1} = 0 in our final period (i.e. when t = T).
    Then we work backwards to obtain the future value at every possible state in our model.
    This will include many states that do not actually show up in our data.
    # 2 ESTIMATING THE MODEL
    Second, once we’ve solved the value functions, we use maximum likelihood to estimate the parameters θ.
    The log likelihood function in this case is simply:
    (6)-(7): [see assignment text]
    Now estimate the θ’s assuming that Zurcher discounts the future with discount factor β = 0.9.
    I will walk you through specific steps for how to do this
    =#
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 3a: Read in the data for the dynamic model.
    # This can be found at the same URL as listed at the top of p. 2, but remove the "Beta0" from the CSV filename.
    # Rather than reshaping the data to “long” format as in question 1, we want to keep the data in “wide” format.
    # Thus, columns :Y1 through :Y20 should be converted to an array labeled Y which has
    # dimension 1000 × 20 where N = 1000 and T = 20. And similarly for columns starting with :Odo and :Xst.
    # Variables :Xst* and :Zst keep track of which discrete bin of the f j ’s the given observation falls into.
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # [ Load in the data ]
    url = "https://raw.githubusercontent.com/OU-PhD-Econometrics/fall-2020/master/ProblemSets/PS5-ddc/busdata.csv"
    df = CSV.read(HTTP.get(url).body)
    Y = Matrix(df[:,[:Y1,:Y2,:Y3,:Y4,:Y5,:Y6,:Y7,:Y8,:Y9,:Y10,:Y11,:Y12,:Y13,:Y14,:Y15,:Y16,:Y17,:Y18,:Y19,:Y20]])
    X = Matrix(df[:,[:Odo1,:Odo2,:Odo3,:Odo4,:Odo5,:Odo6,:Odo7,:Odo8,:Odo9,:Odo10,:Odo11,:Odo12,:Odo13,:Odo14,:Odo15,:Odo16,:Odo17,:Odo18,:Odo19,:Odo20]])
    Z = Vector(df[:,:RouteUsage])
    B = Vector(df[:,:Branded])
    N = size(Y,1)     # number of buses
    T = size(Y,2)     # number of time periods
    Xstate = Matrix(df[:,[:Xst1,:Xst2,:Xst3,:Xst4,:Xst5,:Xst6,:Xst7,:Xst8,:Xst9,:Xst10,:Xst11,:Xst12,:Xst13,:Xst14,:Xst15,:Xst16,:Xst17,:Xst18,:Xst19,:Xst20]])
    Zstate = Vector(df[:,:Zst])
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 3b: Generate state transition matrices
    # Construct the state transition matrices, which are the f_j’s in (3). To do so, simply run the following code:
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    zval,zbin,xval,xbin,xtran = create_grids() # GG: creates grid points based on points given. See .jl file of function
    # GG: returns bin numbers and associated values for z (route usage) and x (odometer reading); and transition values for the X
    #=
    GG: zval and xval are the grids defined at the bottom of p. 2, which respectively correspond to the route usage
    and odometer reading. zbin and xbin are the number of bins in zval and xval, respectively.
    xtran is a (zbin*xbin)×xbin Markov transition matrix that gives the probability of falling into each x_{1,t+1} bin
    given values of x_{1,t} and x_2 , according to the formula in (3).
    Note: A Markov transition matrix is a matrix where each row sums to 1 and moving from e.g. column 1 to column 4
    within a row gives the probability of moving from state 1 to state 4. Check out the Wikipedia page for more information
    =#
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 3c: Compute the future value terms for all possible states of the model
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # • First, initialize the future value array, which should be a 3-dimensional array of zeros.
    #   The size of the first dimension should be the total number of grid points (i.e. the number of rows of xtran).
    #   The second dimension should be 2, which is the possible outcomes of :Branded.
    #   The third dimension should be T+1.
    # • Now write four nested for loops over each of the possible states:
    #   - Loop backwards over t from T+1 to 1
    #   – Loop over the two possible brand states {0,1}
    #   – Loop over the possible permanent route usage states (i.e. from 1 to zbin)
    #   – Loop over the possible odometer states (i.e. from 1 to xbin)
    # • Inside all of the for loops, make the following calculations
    #   – Create an object that marks the row of the transition matrix that we need to be looking at
    #     (based on the loop values of the two gridded state variables).
    #     This will be x + (z-1)*xbin (where x indexes the mileage bin and z indexes the route usage bin),
    #     given how the xtran matrix was constructed in the create grids() function.
    #   – Create the conditional value function for driving the bus (v_{1t}) based on the values of the state
    #     variables in the loop (not the values observed in the data). For example, for the mileage (x_{1t}),
    #     you should plug in xval[x] rather than :Odo. The difficult part of the conditional value function is
    #     the discrete summation over the state transitions. For this, you need to grab the appropriate row
    #     (and all columns) of the xtran matrix, and then take the dot product with that and the all possible x_{1t}
    #     rows of the FV matrix. You should end up with something like xtran[row,:]'*FV[(z-1)*xbin+1:z*xbin,b+1,t+1]
    #     where b indexes the branded dummy and t indexes time periods.
    #   – Now create the conditional value function for replacing the engine (v_{0t}). For this, we repeat the same
    #     process as with v_{1t} except the θ’s are NORMALIZED to be 0. The code for the expected future value is
    #     the same as for v_{1t} with the exception that mileage resets to 0 after replacement, so instead of grabbing
    #     xtran[row,:] we want xtran[1+(z-1)*xbin,:].
    #   – Finally, update the future value array in period t by storing β log( exp(v_{0t}) + exp(v—1t}) ) in the t_th
    #     slice of the 3rd dimension of the array. This will be the new future value term for period t−1.
    #     Remember to set β = 0.9
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # likebus: negative log likelihood of the dynamic model.
    # α = flow-utility parameters (θ in the assignment); β = discount factor;
    # remaining arguments are the data arrays and state grids built above.
    @views @inbounds function likebus(α,β,Y,B,N,T,X,Z,Zstate,Xstate,xtran,zbin,xbin,xval)
        # FV in period T+1 = 0 since this is a finite horizon problem
        FV=zeros(zbin*xbin,2,T+1) # GG: initialize future values; initialize the future value array, which should be a 3-dimensional array of zeros.
        # GG: 2 columns is Branded=1 vs Branded=0
        # First loop: solve the backward recursion problem given the values of α
        # This will give the future value for *all* possible states that we might visit
        # This is why the FV array does not have an individual dimension
        for t=T:-1:1 # GG: Loop backwards over t (FV at T+1 already holds the terminal zeros); going backwards over time every period
            for b=0:1 # GG: Loop over the two possible brand states {0,1}; based on branded or not
                for z=1:zbin # GG: Loop over the possible permanent route usage states (i.e. from 1 to zbin); which bin am I in the x2 state variable
                    for x=1:xbin # GG: Loop over the possible odometer states (i.e. from 1 to xbin); which bin am I in the X1t state variable
                        # inputs to FV
                        # GG: object that marks the row of the transition matrix; to figure out the transition probabilities
                        row = x + (z-1)*xbin # which row of xtran should we look at? depends on mileage bin x and route usage z
                        # GG: conditional value function for driving the bus (v_{1t})
                        v1 = α[1] + α[2]*xval[x] + α[3]*b + xtran[ row,:]⋅FV[(z-1)*xbin+1:z*xbin,b+1,t+1] # mileage bin is x, route usage z is permanent
                        # GG: flow utility component + summation over state transitions
                        # GG: set of probs xtran[row,:] DOT PRODUCT Future Values (FV)
                        # GG: conditional value function for replacing the engine (v_{0t})
                        v0 = xtran[1+(z-1)*xbin,:]⋅FV[(z-1)*xbin+1:z*xbin,b+1,t+1] # the engine got replaced => mileage is 0, so first bin
                        # FV is discounted log sum of the exponentiated conditional value functions #GG: Updating Future Value terms!!!
                        FV[row,b+1,t] = β*log(exp(v1) + exp(v0))
                        #GG: that's where the recursion comes in!
                    end
                end
            end
        end
        #:::::::::::::::::::::::::::::::::::::::::::::::::::
        # Question 3d: Construct the log likelihood using the future value terms from the previous step and only
        # using the observed states in the data. This will entail a for loop over buses and time periods.
        #:::::::::::::::::::::::::::::::::::::::::::::::::::
        # • Initialize the log likelihood value to be 0. (We will iteratively add to it as we loop over observations in the data)
        # • Create a variable that indexes the state transition matrix rows for the case where the bus has been replaced.
        #   This will be the same 1+(z-1)*xbin as in the conditional value function v 0t above. However, we need to plug in :Zst from the data rather than a hypothetical value z.
        # • Create a variable that indexes the state transition matrix rows for the case where the bus has not been replaced.
        #   This will be the same x + (z-1)*xbin as in v1t above, except we substitute :Xst and :Zst for x and z.
        # • Now create the flow utility component of v_{1t} − v_{0t} using the actual observed data on mileage and branding.
        # • Next, we need to add the appropriate discounted future value to round out our calculation of v_{1t} − v_{0t}.
        #   Here, we can difference the f_j’s as in (5). You should get something like (xtran[row1,:].-xtran[row0,:])'*FV[row0:row0+xbin-1,B[i]+1,t+1]
        # • Finally, create the choice probabilities for choosing each option as written in (7) and then create the
        #   log likelihood according to the summation in (6).
        # Second loop: form the likelihood given the future values implied by the previous α guess
        # Here, we will take the state-specifc FV's calculated in the first loop
        # But we will only use in the likelihood those state values that are actually visited
        like = 0 # GG: Initialize the (negative) log likelihood value to be 0.
        for i=1:N
            # GG: row index in xtran for this bus when the engine was just replaced (mileage bin 1)
            row0 = (Zstate[i]-1)*xbin+1 # this is the same argument as the index of xtran in v0 above, but we use the actual Z
            #z2 = (Zstate[i]-1)*xbin+1 # this is the same argument as the 1st part of the 1st index of the FV array above, but using actual Z
            #z3 = z2+xbin-1 # this is the same argument as the 2nd part of the 1st index of the FV array above, but using actual Z
            for t=1:T
                # GG: row index in xtran at the observed mileage/route-usage state (no replacement)
                row1 = Xstate[i,t] + (Zstate[i]-1)*xbin # this is the same as row in the first loop, except we use the actual X and Z
                v1 = α[1] + α[2]*X[i,t] + α[3]*B[i] + (xtran[row1,:].-xtran[row0,:])⋅FV[row0:row0+xbin-1,B[i]+1,t+1] # using the same formula as v1 above, except we use observed values of X and B, and we difference the transitions
                dem = 1 + exp(v1)
                like -= ( (Y[i,t]==1)*v1 ) - log(dem) # negative of the binary logit likelihood
            end
        end
        return like
    end
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # Question 3e: Wrap all of the code you wrote in (c) and (d) into a function and set up the function so that it can be
    # passed to Optim. For example, you will need to return the negative of the log likelihood and you will need to have
    # the first argument be the θ vector that we are trying to estimate
    #:::::::::::::::::::::::::::::::::::::::::::::::::::
    # GG: See row 213
    θ_start = rand(3)
    θ_true = [2; -.15; 1]
    # how long to evaluate likelihood function once?
    # (run twice so the second timing excludes Julia's JIT compilation cost)
    println("Timing (twice) evaluation of the likelihood function")
    @time likebus(θ_start,0.9,Y,B,N,T,X,Z,Zstate,Xstate,xtran,zbin,xbin,xval)
    @time likebus(θ_start,0.9,Y,B,N,T,X,Z,Zstate,Xstate,xtran,zbin,xbin,xval)
    # estimate likelihood function
    # NOTE(review): the optimizer is started from θ_true rather than the static
    # estimates mentioned in the Q3h comment below — confirm which is intended.
    θ̂_optim = optimize(a -> likebus(a,0.9,Y,B,N,T,X,Z,Zstate,Xstate,xtran,zbin,xbin,xval), θ_true, LBFGS(), Optim.Options(g_tol = 1e-5, iterations=100_000, show_trace=true))
    θ̂_ddc = θ̂_optim.minimizer
    println(θ̂_ddc)
    return nothing
end
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# Question 3f: On the same line as the function, prepend the function declaration with the macros so that your code
# says @views @inbounds function myfun() rather than function myfun(). This will give you more performant code.
# On my machine, it cut the computation time in half.
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# GG: See row 213
# @inbounds turns off dimension (bounds) checking by Julia; it consumes time, so if you're sure conformability is right you can turn it off to save time
# @views treats slices as views rather than copies, which makes things more efficient
# GG: You gotta make sure your functions work first. Then you use these options to make things faster
# GG: in Matlab it takes about 25mins; in Julia only 5mins; if we used the TwiceDifferentiable object, it'd be even faster
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# Question 3g: Wrap all of your code in an empty function as you’ve done with other problem sets
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# Run the entire pipeline defined above (data load, static logit, dynamic MLE).
allwrap()
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# Question 3h: Try executing your script to estimate the likelihood function. This took about 4 minutes
# on my machine when I started from the estimates of the static model in Question 2.
#:::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::
# Question 3i: Pat yourself on the back and grab a beverage of your choice, because that was a lot of work!
#:::::::::::::::::::::::::::::::::::::::::::::::::::
|
{"hexsha": "66898fa3781458f6f7d842c082c4bdedec263a1c", "size": 20103, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "ProblemSets/PS5-ddc/PS5solutions_GG.jl", "max_stars_repo_name": "peppegrass/fall-2020", "max_stars_repo_head_hexsha": "03f90548ca4d800146bbeaf9dceca917a21c1195", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-01T08:58:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-01T08:58:14.000Z", "max_issues_repo_path": "ProblemSets/PS5-ddc/PS5solutions_GG.jl", "max_issues_repo_name": "peppegrass/fall-2020", "max_issues_repo_head_hexsha": "03f90548ca4d800146bbeaf9dceca917a21c1195", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ProblemSets/PS5-ddc/PS5solutions_GG.jl", "max_forks_repo_name": "peppegrass/fall-2020", "max_forks_repo_head_hexsha": "03f90548ca4d800146bbeaf9dceca917a21c1195", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-12T17:24:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-12T17:24:05.000Z", "avg_line_length": 60.5512048193, "max_line_length": 233, "alphanum_fraction": 0.6263244292, "num_tokens": 5531}
|
import gc
import numba
from numba import jit
import numpy as np
import sklearn
import tqdm
import warnings
@jit(nopython=True, nogil=True, fastmath=True)
def _update_wgrad_clipped(learning_rate, loss, w1, w2):
    """Take one SGD step on ``w1`` (gradient = loss * w2), then clamp each
    updated coordinate of ``w1`` into [-1, 1]. Mutates ``w1`` in place."""
    for idx in range(w1.size):
        updated = w1[idx] - learning_rate * (loss * w2[idx])
        if updated > 1.:
            updated = 1.
        elif updated < -1.:
            updated = -1.
        w1[idx] = updated
@jit(nopython=True, nogil=True, fastmath=True, parallel=True)
def _ggvec_edges_update(data, dst, indptr, w, b,
                        learning_rate=0.01,
                        exponent=0.5,
                        max_loss=10.):
    """Positive ("contraction") pass: one parallel SGD sweep over every edge.

    Arguments appear to be a CSR-format graph: `indptr` is the row-pointer
    array, `dst` the column indices, `data` the edge weights (TODO confirm
    against caller). `w` (n_nodes x n_components embeddings) and `b`
    (n_nodes biases) are updated in place.

    Returns the mean absolute clipped per-edge loss (used for early stopping).

    This implementation is UNSAFE.
    We concurrently write to weights and gradients in separate threads.
    This is only saved by the fact that edges >>> threads,
    so pr(race condition) is very low.
    Couple of issues:
        - Only one weight matrix
        - unvectorized
        - unsafe
        - Assumes symmetric edges (would need two w matrices for directed graphs)
    Implementation inspired from https://github.com/maciejkula/glove-python/blob/master/glove/glove_cython.pyx
    """
    n_edges = dst.size
    total_loss = 0.
    for edge in numba.prange(n_edges):
        node1 = dst[edge]
        # Recover the source node: searchsorted finds the first row whose
        # start offset is >= edge; step back one row when edge falls strictly
        # inside the previous row's [indptr[r], indptr[r+1]) range.
        node2 = np.searchsorted(indptr, edge)
        if edge < indptr[node2]:
            node2 = node2 - 1
        # Loss is dot product b/w two connected nodes
        pred = np.dot(w[node1], w[node2]) + b[node1] + b[node2]
        # Target is data**exponent, damping the influence of very large edge
        # weights. (NOTE(review): an earlier comment mentioned arcsinh(weight);
        # the code actually uses the power transform.)
        loss = (pred - data[edge] ** exponent)
        # Clip the loss for numerical stability.
        if loss < -max_loss: loss = -max_loss
        elif loss > max_loss: loss = max_loss
        # Update both endpoints symmetrically (weights clamped to [-1, 1]).
        _update_wgrad_clipped(learning_rate, loss, w[node1], w[node2])
        _update_wgrad_clipped(learning_rate, loss, w[node2], w[node1])
        # Update biases
        b[node1] -= learning_rate * loss
        b[node2] -= learning_rate * loss
        # track losses for early stopping
        total_loss = total_loss + np.abs(loss)
    return total_loss / n_edges
###########################
# #
# /\ Contraction pass #
# || #
# \/ Relaxation pass #
# #
###########################
@jit(nopython=True, nogil=True, fastmath=True, parallel=True)
def _ggvec_reverse(n_edges, w, b,
                   learning_rate=0.01,
                   max_loss=10.):
    """Negative-sampling ("relaxation") GGVec pass.

    Draws `n_edges` uniformly random node pairs, treats each pair as
    unconnected (target weight 0), and pushes their embeddings and biases
    apart by one clipped SGD step. Mutates `w` and `b` in place; returns None.

    NOTE(review): the previous docstring described a `negative_edges` array
    parameter that does not exist — sampling happens inside this function.
    RNG calls occur inside numba.prange, so results are thread-dependent and
    not reproducible run-to-run.
    """
    nnodes = w.shape[0]
    for _ in numba.prange(n_edges):
        # TODO: this thrashes the cache. Find a clever soln
        node1 = np.random.randint(0, nnodes)
        node2 = np.random.randint(0, nnodes)
        # We assume no edge (weight = 0) between nodes on negative sampling pass,
        # so the (clipped) prediction itself is the loss.
        loss = np.dot(w[node1], w[node2]) + b[node1] + b[node2]
        if loss < -max_loss: loss = -max_loss
        elif loss > max_loss: loss = max_loss
        _update_wgrad_clipped(learning_rate, loss, w[node1], w[node2])
        _update_wgrad_clipped(learning_rate, loss, w[node2], w[node1])
        b[node1] -= learning_rate * loss
        b[node2] -= learning_rate * loss
##########################
# #
# Main method #
# #
##########################
def ggvec_main(src, dst, data, n_components=2,
               learning_rate=0.05,
               tol=0.03, tol_samples=75,
               negative_ratio=0.15,
               negative_decay=0.,
               exponent=0.5,
               max_loss=30.,
               max_epoch=500, verbose=True):
    """
    GGVec: Fast global first (and higher) order local embeddings.

    This algorithm directly minimizes related nodes' distances.
    It uses a relaxation pass (negative sample) + contraction pass (loss minimization)
    to find stable embeddings based on the minimal dot product of edge weights.

    Parameters:
    -------------
    src : CSR row-pointer (indptr) array; node count is inferred as src.size - 1
        (presumably CSR format — confirm against caller).
    dst : CSR column-index array (edge destinations).
    data : edge-weight array aligned with dst.
    n_components (int):
        Number of individual embedding dimensions.
    negative_ratio : float in [0, 1]
        Negative sampling ratio.
        Setting this higher will do more negative sampling.
        This is slower, but can lead to higher quality embeddings.
    exponent : float
        Weighing exponent in loss function.
        Having this lower reduces effect of large edge weights.
    tol : float in [0, 1]
        Optimization early stopping criterion.
        Stops when the loss varies by less than tol (relatively)
        over the last tol_samples epochs.
    tol_samples : int
        Optimization early stopping criterion.
        This is the number of epochs to sample for loss stability.
        Once loss is stable over this number of epochs we stop early.
    negative_decay : float in [0, 1]
        Decay on negative ratio.
        If >0 then negative ratio will decay by (1-negative_decay) ** epoch
        You should usually leave this to 0.
    max_epoch : int
        Stopping criterion.
    learning_rate : float in [0, 1]
        Optimization learning rate.
    max_loss : float
        Loss value ceiling for numerical stability.

    Returns:
    -------------
    (nnodes, n_components) ndarray of node embeddings.
    """
    nnodes = src.size - 1
    # Embeddings start uniform in [-0.5, 0.5).
    # NOTE(review): w is float64 while b is float32 — intentional? confirm.
    w = (np.random.rand(nnodes, n_components) - 0.5)
    # wc = (np.random.rand(nnodes, n_components) - 0.5)
    b = np.zeros(nnodes, dtype=np.float32)
    # Rolling window of the most recent losses, used for the stability test.
    latest_loss = [np.inf] * tol_samples
    if verbose:
        epoch_range = tqdm.trange(0, max_epoch)
    else:
        epoch_range = range(0, max_epoch)
    for epoch in epoch_range:
        # Relaxation pass.
        # Number of negative edges, decayed geometrically per epoch.
        neg_edges = int(
            dst.size
            * negative_ratio
            * ((1 - negative_decay) ** epoch)
        )
        _ggvec_reverse(
            neg_edges, w, b,
            learning_rate=learning_rate,
            max_loss=max_loss)
        # Positive "contraction" pass
        # TODO: return only loss max
        loss = _ggvec_edges_update(
            data, dst, src, w, b,
            learning_rate=learning_rate,
            exponent=exponent,
            max_loss=max_loss)
        # Pct change in loss over the window (computed BEFORE appending this
        # epoch's loss, so it reflects the previous tol_samples epochs).
        max_latest = np.max(latest_loss)
        min_latest = np.min(latest_loss)
        if ((epoch > tol_samples)
            and (np.abs((max_latest - min_latest) / max_latest) < tol)
            ):
            if loss < max_loss:
                if verbose:
                    print(f"Converged! Loss: {loss:.4f}")
                return w
            else:
                # Loss plateaued at the clipping ceiling: learning diverged.
                err_str = (f"Could not learn: loss {loss} = max loss {max_loss}\n"
                    + "This is often due to too large learning rates.")
                if verbose:
                    print(err_str)
                warnings.warn(err_str)
                break
        elif not np.isfinite(loss).all():
            raise ValueError(
                f"non finite loss: {latest_loss} on epoch {epoch}\n"
                + f"Losses: {loss}\n"
                + f"Previous losses: {[x for x in latest_loss if np.isfinite(x)]}"
                + f"Try reducing the learning rate")
        else:
            # Slide the loss window forward by one epoch.
            latest_loss.append(loss)
            latest_loss = latest_loss[1:]
            if verbose:
                epoch_range.set_description(f"Loss: {loss:.4f}\t")
    # Reached max_epoch (or broke out at the loss ceiling) without converging.
    warnings.warn(f"GVec has not converged. Losses : {latest_loss}")
    return w
|
{"hexsha": "461d48046f6c4e9b6e5625441ff1a85c3ac324f4", "size": 7732, "ext": "py", "lang": "Python", "max_stars_repo_path": "csrgraph/ggvec.py", "max_stars_repo_name": "netrias/CSRGraph", "max_stars_repo_head_hexsha": "b35460c8d84906d203f66b511b8eb553a97a622b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2020-03-03T20:55:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:54:54.000Z", "max_issues_repo_path": "csrgraph/ggvec.py", "max_issues_repo_name": "netrias/CSRGraph", "max_issues_repo_head_hexsha": "b35460c8d84906d203f66b511b8eb553a97a622b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-07-15T01:53:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T03:48:16.000Z", "max_forks_repo_path": "csrgraph/ggvec.py", "max_forks_repo_name": "netrias/CSRGraph", "max_forks_repo_head_hexsha": "b35460c8d84906d203f66b511b8eb553a97a622b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-04-07T10:47:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T21:59:54.000Z", "avg_line_length": 37.1730769231, "max_line_length": 110, "alphanum_fraction": 0.5768235903, "include": true, "reason": "import numpy,import numba,from numba", "num_tokens": 1853}
|
"""librarian: Librarian and Settings classes.
Contains the Librarian class and the Settings class. The Librarian class is the
main Librarian class sets up and runs the librarian program. It reads settings
from the librarian.yaml file.
Copyright (c) 2017 by Jeff Bass.
License: MIT, see LICENSE for more details.
"""
import os
import cv2
import sys
import yaml
import pprint
import signal
import imutils
import logging
import imagezmq
import itertools
import threading
import numpy as np
from time import sleep
from pathlib import Path
from ast import literal_eval
from imutils.video import VideoStream
from helpers.schedules import Schedule
from helpers.data_tools import HubData
from helpers.utils import YamlOptionsError
from helpers.nodehealth import HealthMonitor
from helpers.comms.communications import CommChannel
from helpers.comms.chatbot import ChatBot, Conversation
log = logging.getLogger(__name__)
class Librarian:
    """Top-level orchestrator for the librarian program.

    Exactly one Librarian is built during startup of librarian.py from the
    YAML-derived Settings object. It wires up the health monitor, the
    communication channels, hub data access, the chatbot, and the task
    schedule, and exposes helpers to compose replies and to shut everything
    down cleanly.

    Parameters:
        settings (Settings object): settings object created from YAML file
    """

    def __init__(self, settings):
        self.log = logging.getLogger()
        # Smoke-test that numpy and OpenCV are functional by JPEG-encoding
        # a tiny blank image; any breakage raises here, at startup.
        self.tiny_image = np.zeros((3,3), dtype="uint8")
        ok, encoded = cv2.imencode(
            ".jpg", self.tiny_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
        self.health = HealthMonitor(settings)  # platform health check (RPi vs Mac etc.)
        # At least one comm channel must be configured in the YAML file.
        if not settings.comm_channels:
            raise YamlOptionsError('No comm channels specified in YAML file.')
        self.setup_comm_channels(settings)
        self.hub_data = HubData(settings)           # imagehub data access
        self.chatbot = ChatBot(data=self.hub_data)  # conversation methods
        # If any channel is a Gmail channel, hand its gmail object to the
        # scheduler (the last matching channel wins).
        gmail = None
        for chan in self.comm_channels:
            if chan.name == 'Gmail':
                gmail = chan.gmail
        self.schedule = Schedule(settings, gmail)  # start doing scheduled tasks
        if settings.print:
            self.print_details(settings)

    def setup_comm_channels(self, settings):
        """Build self.comm_channels from the comm_channels section of the YAML file.

        settings.comm_channels maps each channel type to its detail dict
        (port, messaging protocol, thread vs subprocess, etc.); one
        CommChannel is constructed per entry.

        Parameters:
            settings (Settings object): settings object created from YAML file
        """
        self.comm_channels = [
            CommChannel(settings, channel_type, details)
            for channel_type, details in settings.comm_channels.items()
        ]

    def print_details(self, settings):
        """Print a short human-readable summary of this librarian."""
        print('Librarian details:')
        print(' Librarian name:', settings.librarian_name)
        print(' System Type:', self.health.sys_type)
        print()

    def compose_reply(self, request):
        """Return the chatbot's reply to a user's request."""
        return self.chatbot.respond_to(request)

    def closeall(self, settings):
        """Close all resources, files and communications channels.

        Parameters:
            settings (Settings object): settings object created from YAML file
        """
        for chan in self.comm_channels:
            chan.close()
class Settings:
    """Load librarian settings from the user's librarian.yaml file.

    There is deliberately almost no error checking for the YAML settings
    file: when a required setting is missing or misspelled, an exception is
    raised whose traceback points at the line below that read it.  Fix the
    YAML file and rerun the program until it loads cleanly.

    The "print_settings" option can be set to TRUE to print the dictionary
    that results from reading the YAML file.  Note that the order of items
    in the dictionary will not necessarily match the order of items in the
    YAML file (this is a property of Python dictionaries).
    """

    def __init__(self):
        userdir = os.path.expanduser("~")
        with open(os.path.join(userdir, "librarian.yaml")) as f:
            self.config = yaml.safe_load(f)
        self.print_node = False
        if 'librarian' not in self.config:
            self.print_settings('"librarian" is a required settings section but not present.')
            raise KeyboardInterrupt  # original behavior: abort startup immediately
        librarian = self.config['librarian']
        # Default to False when 'print_settings' is absent; the original code
        # left self.print unset in that case, breaking later reads of it.
        self.print = bool(librarian.get('print_settings', False))
        if self.print:
            self.print_settings()
        self.schedules = self.config.get('schedules', None)
        if 'name' not in librarian:
            self.print_settings('"name" is a required setting in the "librarian" section but not present.')
            raise YamlOptionsError('No "name" section in yaml file.')
        self.librarian_name = librarian['name']
        # Optional settings with defaults.
        self.patience = librarian.get('patience', 10)  # default: wait 10 seconds
        self.queuemax = librarian.get('queuemax', 50)
        self.heartbeat = librarian.get('heartbeat', 0)
        self.stall_watcher = librarian.get('stall_watcher', False)
        self.send_threading = librarian.get('send_threading', False)
        # Librarian data directory holds, e.g., gmail credentials & contacts.txt.
        self.data_directory = librarian.get('data_directory', 'librarian_data')
        lib_dir = Path.home() / Path(self.data_directory)
        if not lib_dir.exists():
            raise YamlOptionsError('Data directory in YAML file does not exist.')
        elif not lib_dir.is_dir():
            raise YamlOptionsError('Data directory in YAML file is not a directory.')
        self.lib_dir = lib_dir
        if 'log_directory' in librarian:
            self.log_directory = librarian['log_directory']
        else:
            raise YamlOptionsError('No log directory specified in YAML file.')
        if 'log_file' in librarian:
            self.log_file = librarian['log_file']
        else:
            raise YamlOptionsError('No log file specified in YAML file.')
        if 'comm_channels' in self.config:
            self.comm_channels = self.config['comm_channels']
        else:
            raise YamlOptionsError('No comm channels specified in YAML file.')

    def print_settings(self, title=None):
        """Pretty-print the parsed YAML settings, optionally preceded by *title*."""
        if title:
            print(title)
        # Bug fix: the original message said "imagenode.yaml", but this class
        # reads librarian.yaml (see __init__) -- a copy-paste leftover.
        print('Contents of librarian.yaml:')
        pprint.pprint(self.config)
        print()
|
{"hexsha": "b45d7a6f1e3403d1d60af3d7afba31a4d9dd5090", "size": 8342, "ext": "py", "lang": "Python", "max_stars_repo_path": "librarian-prototype/librarian/helpers/library.py", "max_stars_repo_name": "jeffbass/yin-yang-ranch", "max_stars_repo_head_hexsha": "234a3120d4f134a65ab8f064fb5805a436874498", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2018-06-24T07:31:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T01:10:15.000Z", "max_issues_repo_path": "librarian-prototype/helpers/library.py", "max_issues_repo_name": "jeffbass/yin-yang-ranch", "max_issues_repo_head_hexsha": "234a3120d4f134a65ab8f064fb5805a436874498", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-08-28T09:47:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T20:18:24.000Z", "max_forks_repo_path": "librarian-prototype/librarian/helpers/library.py", "max_forks_repo_name": "jeffbass/yin-yang-ranch", "max_forks_repo_head_hexsha": "234a3120d4f134a65ab8f064fb5805a436874498", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-06-28T19:10:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T06:37:36.000Z", "avg_line_length": 41.0935960591, "max_line_length": 107, "alphanum_fraction": 0.6637497003, "include": true, "reason": "import numpy", "num_tokens": 1745}
|
from src import data_generator
from io import StringIO
import numpy as np
import pytest
class TestFetchDataset:
    """Tests for data_generator.fetch_dataset."""

    def test_fetch_dataset_from_right_formatted_data(self):
        header = "species,culmen_length_mm,culmen_depth_mm,flipper_length_mm,body_mass_g"
        row = "0,0.2545454545454545,0.6666666666666666,0.15254237288135594,0.2916666666666667"
        dataset = data_generator.fetch_dataset(StringIO(header + "\n" + row))
        expected_fields = (
            "species",
            "culmen_length_mm",
            "culmen_depth_mm",
            "flipper_length_mm",
            "body_mass_g",
        )
        assert dataset.dtype.names == expected_fields
        assert dataset["species"].dtype == np.int64
        assert dataset["culmen_length_mm"].dtype == np.float64

    def test_fetch_dataset_fails_file_without_header(self):
        headerless = StringIO("0,1\n0,1")
        with pytest.raises(IndexError):
            data_generator.fetch_dataset(headerless)
|
{"hexsha": "645ee5d60e02040ee8d88fe23132e882eb201b66", "size": 979, "ext": "py", "lang": "Python", "max_stars_repo_path": "kfp/components/data_generator/tests/test_main.py", "max_stars_repo_name": "hotchpotch/lab_sample_pipelines", "max_stars_repo_head_hexsha": "7c266d807eff861140d4b51c267b0bfba8c50263", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2021-06-04T09:52:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T05:10:44.000Z", "max_issues_repo_path": "kfp/components/data_generator/tests/test_main.py", "max_issues_repo_name": "hotchpotch/lab_sample_pipelines", "max_issues_repo_head_hexsha": "7c266d807eff861140d4b51c267b0bfba8c50263", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-24T08:08:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-24T08:09:14.000Z", "max_forks_repo_path": "kfp/components/data_generator/tests/test_main.py", "max_forks_repo_name": "hotchpotch/lab_sample_pipelines", "max_forks_repo_head_hexsha": "7c266d807eff861140d4b51c267b0bfba8c50263", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-06-13T16:01:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T16:28:44.000Z", "avg_line_length": 30.59375, "max_line_length": 92, "alphanum_fraction": 0.6394279877, "include": true, "reason": "import numpy", "num_tokens": 227}
|
import numpy as np
import random

# Occupancy grid: 0 = free cell, non-zero = obstacle.  The first 10 lines of
# the file are header lines and are skipped.
data = np.loadtxt('env_sorter.cfg', skiprows=10)
print(data)
print(np.shape(data), type(data))
print(data[3][3], type(data[3][3]))

# Sample random start/goal cell pairs and keep only those where both cells
# are free; each accepted pair becomes one scenario line.
# Bug fix: the output file was never closed -- use a context manager.
count = 0
with open("addverb2.scen", "a") as scen_file:  # 'a': keep earlier scenarios
    for _ in range(1000):
        x_i = random.randint(1, 87)
        y_i = random.randint(1, 121)
        x_g = random.randint(1, 87)
        y_g = random.randint(1, 121)
        if data[x_i][y_i] == 0 and data[x_g][y_g] == 0:
            count = count + 1
            # Scenario line: id, map name, width, height, start (col row), goal (col row)
            scen_file.writelines([
                str(count), " ", "Addverb.map ", "88 ", "122 ",
                str(y_i), " ", str(x_i), " ", str(y_g), " ", str(x_g), "\n",
            ])
print(count)
|
{"hexsha": "df6b6ab2bb58c979685d6b85f3258753ebbe1141", "size": 659, "ext": "py", "lang": "Python", "max_stars_repo_path": "addverb_random_benchmarks.py", "max_stars_repo_name": "Aakriti05/ORCA_Astar-warehouse-navigation", "max_stars_repo_head_hexsha": "8699b30c25cacf1a7be1f56dc34db90d5a757b36", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "addverb_random_benchmarks.py", "max_issues_repo_name": "Aakriti05/ORCA_Astar-warehouse-navigation", "max_issues_repo_head_hexsha": "8699b30c25cacf1a7be1f56dc34db90d5a757b36", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "addverb_random_benchmarks.py", "max_forks_repo_name": "Aakriti05/ORCA_Astar-warehouse-navigation", "max_forks_repo_head_hexsha": "8699b30c25cacf1a7be1f56dc34db90d5a757b36", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4583333333, "max_line_length": 142, "alphanum_fraction": 0.564491654, "include": true, "reason": "import numpy", "num_tokens": 227}
|
#Author Lucas Saraiva
import re
import networkx as nx
import sys
def compute_triangle_and_balance(G):
    # Classify every triangle of the signed digraph G as balanced or
    # unbalanced and print both counts.  Python 2 source (print statements).
    #
    # Each edge must carry a "sign" attribute; a sign of -1 causes the edge
    # to be treated with its direction reversed when orienting the triad.
    # NOTE(review): the balance criterion encoded below (the three
    # re-oriented edges forming a directed 3-cycle => unbalanced) is read
    # from the code; confirm against the definition the author intended.
    triangles = {}  # NOTE(review): never used
    ballanced = 0
    unballanced = 0
    print len(G.edges())
    print "Calculating triangles status, it may take a while"
    triadsVisited = set()  # triads already classified, stored in all rotations
    i = 0  # NOTE(review): never used
    for u in G.nodes():
        viz_u = set(G.neighbors(u))
        for v in viz_u:
            # Nodes that are successors of both u and v close a triad (u, v, t).
            inter = set(viz_u).intersection(G.neighbors(v))
            for t in inter:
                if (u, v, t) in triadsVisited:
                    continue
                if G.has_edge(t,u):
                    # Triad closed by edge t->u.  Record all three rotations so
                    # the same triangle is not counted again.
                    triadsVisited.add((u,v,t))
                    triadsVisited.add((v,t,u))
                    triadsVisited.add((t,u,v))
                    # Re-orient each edge: negative-signed edges are flipped.
                    if G[u][v]["sign"] == -1:
                        edge1 = (v, u)
                    else:
                        edge1 = (u, v)
                    if G[v][t]["sign"] == -1:
                        edge2 = (t, v)
                    else:
                        edge2 = (v, t)
                    if G[t][u]["sign"] == -1:
                        edge3 = (u, t)
                    else:
                        edge3 = (t, u)
                    # A directed cycle among the re-oriented edges (either
                    # rotation order) marks the triangle as unbalanced.
                    if edge1[1] == edge2[0] and edge2[1] == edge3[0] and edge3[1] == edge1[0]:
                        unballanced = unballanced + 1
                    elif edge3[1] == edge2[0] and edge2[1] == edge1[0] and edge1[1] == edge3[0]:
                        unballanced = unballanced + 1
                    else:
                        ballanced = ballanced + 1
                elif G.has_edge(u, t):
                    # Triad closed by edge u->t: same classification with the
                    # third edge taken in the opposite direction.
                    triadsVisited.add((u,v,t))
                    triadsVisited.add((v,t,u))
                    triadsVisited.add((u,t,v))
                    if G[u][v]["sign"] == -1:
                        edge1 = (v, u)
                    else:
                        edge1 = (u, v)
                    if G[v][t]["sign"] == -1:
                        edge2 = (t, v)
                    else:
                        edge2 = (v, t)
                    if G[u][t]["sign"] == -1:
                        edge3 = (t, u)
                    else:
                        edge3 = (u, t)
                    if edge1[1] == edge2[0] and edge2[1] == edge3[0] and edge3[1] == edge1[0]:
                        unballanced = unballanced + 1
                    elif edge3[1] == edge2[0] and edge2[1] == edge1[0] and edge1[1] == edge3[0]:
                        unballanced = unballanced + 1
                    else:
                        ballanced = ballanced + 1
    print ballanced
    print unballanced
# Read the signed Epinions trust network.  read_edgelist builds the digraph
# itself, so the original's separate `G = nx.DiGraph()` construction was a
# dead store and has been removed.
G = nx.read_edgelist('soc-sign-epinions.txt', create_using=nx.DiGraph(),
                     nodetype=int, data=(('sign', int),))
compute_triangle_and_balance(G)
|
{"hexsha": "056b39be683944c473089ec3d1809a2737f648b9", "size": 2801, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Triangles/triangles_python27.py", "max_stars_repo_name": "saraiva3/Shape-of-War", "max_stars_repo_head_hexsha": "454f7d77f919742420dfa4cdc44820f0c88f91ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-05-03T21:37:28.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-08T21:36:39.000Z", "max_issues_repo_path": "src/Triangles/triangles_python27.py", "max_issues_repo_name": "saraiva3/Shape-of-War", "max_issues_repo_head_hexsha": "454f7d77f919742420dfa4cdc44820f0c88f91ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Triangles/triangles_python27.py", "max_forks_repo_name": "saraiva3/Shape-of-War", "max_forks_repo_head_hexsha": "454f7d77f919742420dfa4cdc44820f0c88f91ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1222222222, "max_line_length": 108, "alphanum_fraction": 0.3873616566, "include": true, "reason": "import networkx", "num_tokens": 703}
|
! $UWHPSC/codes/fortran/ifelse1.f90
! Demonstrates Fortran if / else if / else constructs.

program ifelse1
    implicit none
    real(kind=8) :: x   ! declared but unused in this example
    integer :: i

    i = 3

    ! Simple two-way branch.
    if (i < 2) then
        print *, "i is less than 2"
    else
        print *, "i is not less than 2"
    end if

    ! Three-way branch: the final else is reached only when i == 5.
    if (i <= 2) then
        print *, "i is less or equal to 2"
    else if (i /= 5) then
        print *, "i is greater than 2 but not equal to 5"
    else
        print *, "i is equal to 5"
    end if

end program ifelse1
|
{"hexsha": "1720c37ebf970a5e151401c681304f788afe54f4", "size": 456, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "uwhpsc/codes/fortran/ifelse1.f90", "max_stars_repo_name": "philipwangdk/HPC", "max_stars_repo_head_hexsha": "e2937016821701adb80ece5bf65d43d1860640c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "uwhpsc/codes/fortran/ifelse1.f90", "max_issues_repo_name": "philipwangdk/HPC", "max_issues_repo_head_hexsha": "e2937016821701adb80ece5bf65d43d1860640c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "uwhpsc/codes/fortran/ifelse1.f90", "max_forks_repo_name": "philipwangdk/HPC", "max_forks_repo_head_hexsha": "e2937016821701adb80ece5bf65d43d1860640c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.24, "max_line_length": 57, "alphanum_fraction": 0.5394736842, "num_tokens": 156}
|
\section{Padding}
\label{sec:padding}
The \fw{Padding} module provides a wrapper widget type, \fw{Padded},
which wraps another widget with a specified amount of padding on any
or all four of its sides.
We create padded widgets with the \fw{padded} function, which takes a
child of type \fw{Widget a} and a padding value. In the following
example we create a \fw{FormattedText} widget and pad it on all sides
by two rows (or columns, where appropriate):
\begin{haskellcode}
w <- plainText "foobar"
w2 <- padded w (padAll 2)
\end{haskellcode}
The padding itself is expressed with the \fw{Padding} type, whose
values store padding settings for the top, bottom, left, and right
sides of an object in question. \fw{Padding} values are created with
one of the following functions:
\begin{itemize}
\item \fw{padNone} -- creates a \fw{Padding} value with no padding.
\item \fw{padAll} -- takes a single parameter, \fw{p}, and creates a
\fw{Padding} value with \fw{p} rows or columns of padding on all
four sides.
\item \fw{padLeft}, \fw{padRight}, \fw{padTop}, \fw{padBottom} -- each
takes a single parameter and creates a \fw{Padding} value with the
specified amount of padding on the specified side indicated by the
function name.
\item \fw{padLeftRight}, \fw{padTopBottom} -- each takes a single
parameter and creates a \fw{Pad\-ding} value with the specified
amount of padding on both sides indicated by the function name.
\end{itemize}
With these basic \fw{Padding} constructors we can construct more
interesting \fw{Padding} values with the \fw{pad} function:
\begin{haskellcode}
let p = padNone `pad` (padAll 5) `pad` (padLeft 2)
\end{haskellcode}
The \fw{Padding} type is an instance of the \fw{Paddable} type class,
of which \fw{pad} is the only method. The \fw{Padding} instance of
\fw{Paddable} just adds the padding values together.
In addition to the \fw{padded} function, the \fw{Padding} module
provides the \fw{withPadding} combinator to create a \fw{Padded}
widget in the following way:
\begin{haskellcode}
w <- plainText "foobar" >>= withPadding (padAll 2)
\end{haskellcode}
\subsubsection{Growth Policy}
\fw{Padded} widgets always defer to their children for both horizontal
and vertical growth policy.
|
{"hexsha": "441a0659ca9ce71b657420892351b2745efabdc0", "size": 2248, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/ch4/Padded.tex", "max_stars_repo_name": "erikd/vty-ui", "max_stars_repo_head_hexsha": "250474a8d9dc5e22b8dc80cfa871d9ac4c12ce04", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2015-01-05T08:22:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T18:11:11.000Z", "max_issues_repo_path": "doc/ch4/Padded.tex", "max_issues_repo_name": "erikd/vty-ui", "max_issues_repo_head_hexsha": "250474a8d9dc5e22b8dc80cfa871d9ac4c12ce04", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2015-01-04T02:31:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-25T14:34:04.000Z", "max_forks_repo_path": "doc/ch4/Padded.tex", "max_forks_repo_name": "erikd/vty-ui", "max_forks_repo_head_hexsha": "250474a8d9dc5e22b8dc80cfa871d9ac4c12ce04", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-01-31T14:08:43.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-25T00:07:42.000Z", "avg_line_length": 37.4666666667, "max_line_length": 70, "alphanum_fraction": 0.7504448399, "num_tokens": 630}
|
import pickle
import numpy as np
import re

# Load the (word-counts, price) pair produced by each of the nine worker runs.
counts_list = []
price_list = []
for i in range(9):
    with open('info/info-{}.pkl'.format(i+1), 'rb') as f:
        temp = pickle.load(f)  # (counts, price) -- counts appears to be a pandas Series (has .add/.loc); TODO confirm
        counts_list.append(temp[0])
        price_list.append(temp[1])

# Aggregate information: sum per-run word counts, average per-run prices.
counts = counts_list[0]
for i in range(1, 9):
    counts = counts.add(counts_list[i], fill_value=0)
price = np.mean(price_list)

# Disregard useless words and special characters from `counts`
useless_words = ['', '[불명]']
# NOTE(review): the bracketed parts are character CLASSES, so this matches
# any single character of the listed words (plus '|'), not the whole words.
# Possibly '(상품|상세|...)' was intended -- confirm before changing, since
# fixing it changes the selected vocabulary.
regex = '[상품|상세|설명|기타|없음|참조]|[^가-힣a-zA-Z]'
usable_words = set(counts.index) - set(useless_words)
usable_words_counts = counts.loc[usable_words]
# Keep only words with no regex match, ordered by descending frequency.
usable_words_counts = usable_words_counts.loc[[idx for idx in usable_words_counts.index if not re.findall(regex, idx)]].sort_values(ascending=False)

# Select top frequency words with thresh
thresh = 10000
usable_words = list(usable_words_counts.index[:thresh])
print(len(usable_words))
print(usable_words[:100])
print(price)

# Persist the vocabulary and the average price for downstream steps.
with open('info.pkl', 'wb') as handle:
    pickle.dump((usable_words, price), handle)
|
{"hexsha": "1f8a90f8bd00cec4cb11ff06ba7e2afb6a7f7651", "size": 1065, "ext": "py", "lang": "Python", "max_stars_repo_path": "info.py", "max_stars_repo_name": "HarangDev/ka-1", "max_stars_repo_head_hexsha": "065f4fa7966fbb0a5e97a696e9890ee2d291826b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "info.py", "max_issues_repo_name": "HarangDev/ka-1", "max_issues_repo_head_hexsha": "065f4fa7966fbb0a5e97a696e9890ee2d291826b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "info.py", "max_forks_repo_name": "HarangDev/ka-1", "max_forks_repo_head_hexsha": "065f4fa7966fbb0a5e97a696e9890ee2d291826b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3571428571, "max_line_length": 148, "alphanum_fraction": 0.7136150235, "include": true, "reason": "import numpy", "num_tokens": 299}
|
import numpy as np
import scipy as sp
#Given a data matrix X, this picks
|
{"hexsha": "902c20b9996c6009179a55121303fe7e88966cd3", "size": 74, "ext": "py", "lang": "Python", "max_stars_repo_path": "bandwidth_selection.py", "max_stars_repo_name": "bubble-07/FETISH3", "max_stars_repo_head_hexsha": "243429acd16d55b30a0de4f1f5d72b0cd7dc84b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bandwidth_selection.py", "max_issues_repo_name": "bubble-07/FETISH3", "max_issues_repo_head_hexsha": "243429acd16d55b30a0de4f1f5d72b0cd7dc84b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bandwidth_selection.py", "max_forks_repo_name": "bubble-07/FETISH3", "max_forks_repo_head_hexsha": "243429acd16d55b30a0de4f1f5d72b0cd7dc84b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.8, "max_line_length": 34, "alphanum_fraction": 0.7567567568, "include": true, "reason": "import numpy,import scipy", "num_tokens": 20}
|
// (C) Copyright 2015 - 2018 Christopher Beck
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef SPIRIT_PO_EXCEPTIONS_HPP_INCLUDED
#define SPIRIT_PO_EXCEPTIONS_HPP_INCLUDED
#include <boost/spirit/include/support_line_pos_iterator.hpp>
#include <string>
namespace spirit_po {
// Show the next 80 characters from some iterator position.
// Intended to be used for parser error messages
// Render up to 80 characters of parser input starting at `it`, prefixed with
// the source line number when the iterator type can report one.
// Note: `it` is taken by reference and is advanced past the reported text.
template <typename Iterator>
std::string iterator_context(Iterator & it, Iterator & end) {
  std::string result;
  const std::size_t line_no = boost::spirit::get_line(it);
  if (line_no != static_cast<std::size_t>(-1)) {
    result = "Line " + std::to_string(line_no) + ":\n";
  }
  for (unsigned int remaining = 80; remaining && it != end; --remaining, ++it) {
    result += *it;
  }
  return result;
}
// When the thing being parsed is a short string, we can give
// a better context report
// Produce a two-line context report for a short string: the string itself,
// then a caret positioned under the character that `it` points at.
inline std::string string_iterator_context(const std::string & str,
                                           std::string::const_iterator it) {
  std::string caret_line(static_cast<std::size_t>(it - str.begin()), ' ');
  caret_line += "^\n";
  return str + "\n" + caret_line;
}
} // end namespace spirit_po
// SPIRIT_PO_CATALOG_FAIL(Message) aborts catalog construction with Message.
// Its definition depends on whether the library is built with exceptions.
#ifdef SPIRIT_PO_NO_EXCEPTIONS
// Exception-free build: store the message in the catalog's error slot and
// return from the calling (void) member function.
#define SPIRIT_PO_CATALOG_FAIL(Message) \
  do { \
    error_message_ = (Message); \
    return ; \
  } while(0)
#else // SPIRIT_PO_NO_EXCEPTIONS
#include <stdexcept>
namespace spirit_po {
// Exception type thrown for any catalog parsing/construction failure.
struct catalog_exception : std::runtime_error {
  explicit catalog_exception(const char * what) : runtime_error(what) {}
  explicit catalog_exception(const std::string & what) : runtime_error(what) {}
};
} // end namespace spirit_po
// Exception build: throw catalog_exception carrying the message.
#define SPIRIT_PO_CATALOG_FAIL(Message) \
  do { \
    throw spirit_po::catalog_exception(( Message )); \
  } while(0)
#endif // SPIRIT_PO_NO_EXCEPTIONS

#endif // SPIRIT_PO_EXCEPTIONS_HPP_INCLUDED
|
{"hexsha": "603ae6fcfc2de9067571482382aea9319c35e97c", "size": 2212, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/spirit_po/exceptions.hpp", "max_stars_repo_name": "fcojavmc/spirit-po", "max_stars_repo_head_hexsha": "5f88675757a44f86eae845a703c697f93d9cae39", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/spirit_po/exceptions.hpp", "max_issues_repo_name": "fcojavmc/spirit-po", "max_issues_repo_head_hexsha": "5f88675757a44f86eae845a703c697f93d9cae39", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/spirit_po/exceptions.hpp", "max_forks_repo_name": "fcojavmc/spirit-po", "max_forks_repo_head_hexsha": "5f88675757a44f86eae845a703c697f93d9cae39", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.65, "max_line_length": 80, "alphanum_fraction": 0.6148282098, "num_tokens": 506}
|
'''
Created on Jul 3, 2014
@author: roj-idl71
'''
import os
import datetime
import numpy
try:
from gevent import sleep
except:
from time import sleep
from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader
from schainpy.model.data.jrodata import Voltage
from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
try:
import digital_rf_hdf5
except:
pass
class USRPReader(ProcessingUnit):
'''
classdocs
'''
def __init__(self, **kwargs):
'''
Constructor
'''
ProcessingUnit.__init__(self, **kwargs)
self.dataOut = Voltage()
self.__printInfo = True
self.__flagDiscontinuousBlock = False
self.__bufferIndex = 9999999
self.__ippKm = None
self.__codeType = 0
self.__nCode = None
self.__nBaud = None
self.__code = None
    def __getCurrentSecond(self):
        # Current read position expressed in Unix seconds
        # (sample index divided by the sample rate).
        return self.__thisUnixSample/self.__sample_rate

    # Read-only view of the current read position in seconds.
    thisSecond = property(__getCurrentSecond, "I'm the 'thisSecond' property.")
def __setFileHeader(self):
'''
In this method will be initialized every parameter of dataOut object (header, no data)
'''
ippSeconds = 1.0*self.__nSamples/self.__sample_rate
nProfiles = 1.0/ippSeconds #Number of profiles in one second
self.dataOut.radarControllerHeaderObj = RadarControllerHeader(ipp=self.__ippKm,
txA=0,
txB=0,
nWindows=1,
nHeights=self.__nSamples,
firstHeight=self.__firstHeigth,
deltaHeight=self.__deltaHeigth,
codeType=self.__codeType,
nCode=self.__nCode, nBaud=self.__nBaud,
code = self.__code)
self.dataOut.systemHeaderObj = SystemHeader(nSamples=self.__nSamples,
nProfiles=nProfiles,
nChannels=len(self.__channelList),
adcResolution=14)
self.dataOut.type = "Voltage"
self.dataOut.data = None
self.dataOut.dtype = numpy.dtype([('real','<i8'),('imag','<i8')])
# self.dataOut.nChannels = 0
# self.dataOut.nHeights = 0
self.dataOut.nProfiles = nProfiles
self.dataOut.heightList = self.__firstHeigth + numpy.arange(self.__nSamples, dtype = numpy.float)*self.__deltaHeigth
self.dataOut.channelList = self.__channelList
self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights
# self.dataOut.channelIndexList = None
self.dataOut.flagNoData = True
#Set to TRUE if the data is discontinuous
self.dataOut.flagDiscontinuousBlock = False
self.dataOut.utctime = None
self.dataOut.timeZone = self.__timezone/60 #timezone like jroheader, difference in minutes between UTC and localtime
self.dataOut.dstFlag = 0
self.dataOut.errorCount = 0
self.dataOut.nCohInt = 1
self.dataOut.flagDecodeData = False #asumo que la data esta decodificada
self.dataOut.flagDeflipData = False #asumo que la data esta sin flip
self.dataOut.flagShiftFFT = False
self.dataOut.ippSeconds = ippSeconds
#Time interval between profiles
#self.dataOut.timeInterval = self.dataOut.ippSeconds * self.dataOut.nCohInt
self.dataOut.frequency = self.__frequency
self.dataOut.realtime = self.__online
def findDatafiles(self, path, startDate=None, endDate=None):
if not os.path.isdir(path):
return []
try:
digitalReadObj = digital_rf_hdf5.read_hdf5(path, load_all_metadata=True)
except:
digitalReadObj = digital_rf_hdf5.read_hdf5(path)
channelNameList = digitalReadObj.get_channels()
if not channelNameList:
return []
metadata_dict = digitalReadObj.get_rf_file_metadata(channelNameList[0])
sample_rate = metadata_dict['sample_rate'][0]
this_metadata_file = digitalReadObj.get_metadata(channelNameList[0])
try:
timezone = this_metadata_file['timezone'].value
except:
timezone = 0
startUTCSecond, endUTCSecond = digitalReadObj.get_bounds(channelNameList[0])/sample_rate - timezone
startDatetime = datetime.datetime.utcfromtimestamp(startUTCSecond)
endDatatime = datetime.datetime.utcfromtimestamp(endUTCSecond)
if not startDate:
startDate = startDatetime.date()
if not endDate:
endDate = endDatatime.date()
dateList = []
thisDatetime = startDatetime
while(thisDatetime<=endDatatime):
thisDate = thisDatetime.date()
if thisDate < startDate:
continue
if thisDate > endDate:
break
dateList.append(thisDate)
thisDatetime += datetime.timedelta(1)
return dateList
def setup(self, path = None,
startDate = None,
endDate = None,
startTime = datetime.time(0,0,0),
endTime = datetime.time(23,59,59),
channelList = None,
nSamples = None,
ippKm = 60,
online = False,
delay = 60,
buffer_size = 1024,
**kwargs):
'''
In this method we should set all initial parameters.
Inputs:
path
startDate
endDate
startTime
endTime
set
expLabel
ext
online
delay
'''
if not os.path.isdir(path):
raise ValueError("[Reading] Directory %s does not exist" %path)
try:
self.digitalReadObj = digital_rf_hdf5.read_hdf5(path, load_all_metadata=True)
except:
self.digitalReadObj = digital_rf_hdf5.read_hdf5(path)
channelNameList = self.digitalReadObj.get_channels()
if not channelNameList:
raise ValueError("[Reading] Directory %s does not have any files" %path)
if not channelList:
channelList = list(range(len(channelNameList)))
########## Reading metadata ######################
metadata_dict = self.digitalReadObj.get_rf_file_metadata(channelNameList[channelList[0]])
self.__sample_rate = metadata_dict['sample_rate'][0]
# self.__samples_per_file = metadata_dict['samples_per_file'][0]
self.__deltaHeigth = 1e6*0.15/self.__sample_rate
this_metadata_file = self.digitalReadObj.get_metadata(channelNameList[channelList[0]])
self.__frequency = None
try:
self.__frequency = this_metadata_file['center_frequencies'].value
except:
self.__frequency = this_metadata_file['fc'].value
if not self.__frequency:
raise ValueError("Center Frequency is not defined in metadata file")
try:
self.__timezone = this_metadata_file['timezone'].value
except:
self.__timezone = 0
self.__firstHeigth = 0
try:
codeType = this_metadata_file['codeType'].value
except:
codeType = 0
nCode = 1
nBaud = 1
code = numpy.ones((nCode, nBaud), dtype=numpy.int)
if codeType:
nCode = this_metadata_file['nCode'].value
nBaud = this_metadata_file['nBaud'].value
code = this_metadata_file['code'].value
if not ippKm:
try:
#seconds to km
ippKm = 1e6*0.15*this_metadata_file['ipp'].value
except:
ippKm = None
####################################################
startUTCSecond = None
endUTCSecond = None
if startDate:
startDatetime = datetime.datetime.combine(startDate, startTime)
startUTCSecond = (startDatetime-datetime.datetime(1970,1,1)).total_seconds() + self.__timezone
if endDate:
endDatetime = datetime.datetime.combine(endDate, endTime)
endUTCSecond = (endDatetime-datetime.datetime(1970,1,1)).total_seconds() + self.__timezone
start_index, end_index = self.digitalReadObj.get_bounds(channelNameList[channelList[0]])
if not startUTCSecond:
startUTCSecond = start_index/self.__sample_rate
if start_index > startUTCSecond*self.__sample_rate:
startUTCSecond = start_index/self.__sample_rate
if not endUTCSecond:
endUTCSecond = end_index/self.__sample_rate
if end_index < endUTCSecond*self.__sample_rate:
endUTCSecond = end_index/self.__sample_rate
if not nSamples:
if not ippKm:
raise ValueError("[Reading] nSamples or ippKm should be defined")
nSamples = int(ippKm / (1e6*0.15/self.__sample_rate))
channelBoundList = []
channelNameListFiltered = []
for thisIndexChannel in channelList:
thisChannelName = channelNameList[thisIndexChannel]
start_index, end_index = self.digitalReadObj.get_bounds(thisChannelName)
channelBoundList.append((start_index, end_index))
channelNameListFiltered.append(thisChannelName)
self.profileIndex = 0
self.__delay = delay
self.__ippKm = ippKm
self.__codeType = codeType
self.__nCode = nCode
self.__nBaud = nBaud
self.__code = code
self.__datapath = path
self.__online = online
self.__channelList = channelList
self.__channelNameList = channelNameListFiltered
self.__channelBoundList = channelBoundList
self.__nSamples = nSamples
self.__samples_to_read = int(buffer_size*nSamples)
self.__nChannels = len(self.__channelList)
self.__startUTCSecond = startUTCSecond
self.__endUTCSecond = endUTCSecond
self.__timeInterval = 1.0 * self.__samples_to_read/self.__sample_rate #Time interval
if online:
# self.__thisUnixSample = int(endUTCSecond*self.__sample_rate - 4*self.__samples_to_read)
startUTCSecond = numpy.floor(endUTCSecond)
self.__thisUnixSample = int(startUTCSecond*self.__sample_rate) - self.__samples_to_read
self.__data_buffer = numpy.zeros((self.__nChannels, self.__samples_to_read), dtype = numpy.complex)
self.__setFileHeader()
self.isConfig = True
print("[Reading] USRP Data was found from %s to %s " %(
datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
))
print("[Reading] Starting process from %s to %s" %(datetime.datetime.utcfromtimestamp(startUTCSecond - self.__timezone),
datetime.datetime.utcfromtimestamp(endUTCSecond - self.__timezone)
))
    def __reload(self):
        '''
        Refresh the Digital RF metadata while reading online and extend the
        known time range if new samples have appeared on disk.
        Returns:
            True if the end bound of the first selected channel moved forward
            (new data available); False otherwise. Returns None immediately
            when not in online mode.
        '''
        # Offline readers work on a fixed time range; nothing to refresh.
        if not self.__online:
            return
        # print
        # print "%s not in range [%s, %s]" %(
        #     datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
        #     datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
        #     datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
        # )
        print("[Reading] reloading metadata ...")
        try:
            self.digitalReadObj.reload(complete_update=True)
        except:
            # Older reader versions may not accept complete_update; retry plain reload.
            self.digitalReadObj.reload()
        # Bounds are re-queried only for the first selected channel; presumably all
        # channels share the same recording span -- TODO confirm.
        start_index, end_index = self.digitalReadObj.get_bounds(self.__channelNameList[self.__channelList[0]])
        if start_index > self.__startUTCSecond*self.__sample_rate:
            self.__startUTCSecond = 1.0*start_index/self.__sample_rate
        if end_index > self.__endUTCSecond*self.__sample_rate:
            # The recording grew: advance the end bound and report the new range.
            self.__endUTCSecond = 1.0*end_index/self.__sample_rate
            print()
            print("[Reading] New timerange found [%s, %s] " %(
                datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
                datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
            ))
            return True
        return False
    def __readNextBlock(self, seconds=30, volt_scale = 218776):
        '''
        Advance the unix-sample cursor one buffer forward and read the next block
        of samples for every selected channel into self.__data_buffer.
        Arguments:
            seconds    : not used in this method (kept for signature compatibility) -- TODO confirm
            volt_scale : factor applied to the raw complex samples before buffering
        Returns:
            True when a full block was read for all channels; False when the end of
            the selected time range was reached or a read failed / was incomplete.
        '''
        #Set the next data
        self.__flagDiscontinuousBlock = False
        self.__thisUnixSample += self.__samples_to_read
        # Keep a two-buffer safety margin from the end of the available data;
        # try a metadata reload (online mode) before giving up.
        if self.__thisUnixSample + 2*self.__samples_to_read > self.__endUTCSecond*self.__sample_rate:
            print("[Reading] There are no more data into selected time-range")
            self.__reload()
            if self.__thisUnixSample + 2*self.__samples_to_read > self.__endUTCSecond*self.__sample_rate:
                # Still no data: roll the cursor back so the next call retries this block.
                self.__thisUnixSample -= self.__samples_to_read
                return False
        indexChannel = 0
        dataOk = False
        for thisChannelName in self.__channelNameList:
            try:
                result = self.digitalReadObj.read_vector_c81d(self.__thisUnixSample,
                                                              self.__samples_to_read,
                                                              thisChannelName)
            except IOError as e:
                #read next profile
                self.__flagDiscontinuousBlock = True
                print("[Reading] %s" %datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone), e)
                break
            if result.shape[0] != self.__samples_to_read:
                # Short read: mark the block discontinuous and abandon it.
                self.__flagDiscontinuousBlock = True
                print("[Reading] %s: Too few samples were found, just %d/%d samples" %(datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                                                       result.shape[0],
                                                                                       self.__samples_to_read))
                break
            self.__data_buffer[indexChannel,:] = result*volt_scale
            indexChannel += 1
            dataOk = True
        # Block timestamp in UTC seconds, updated even on failed reads.
        self.__utctime = self.__thisUnixSample/self.__sample_rate
        if not dataOk:
            return False
        print("[Reading] %s: %d samples <> %f sec" %(datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                     self.__samples_to_read,
                                                     self.__timeInterval))
        # Reset the per-profile cursor into the freshly filled buffer.
        self.__bufferIndex = 0
        return True
def __isBufferEmpty(self):
if self.__bufferIndex <= self.__samples_to_read - self.__nSamples:
return False
return True
    def getData(self, seconds=30, nTries=5):
        '''
        This method gets the data from files and puts it into the dataOut object.
        In addition, it increases the buffer counter by one.
        Arguments:
            seconds : seconds to wait between retries when reading online
            nTries  : maximum number of offline retry attempts before giving up
        Return:
            data : a profile of voltages (heights * channels) copied from the
                buffer. Returns False if there are no more blocks to read.
        Affected:
            self.dataOut
            self.profileIndex
            self.flagDiscontinuousBlock
            self.flagIsNewBlock
        '''
        err_counter = 0
        self.dataOut.flagNoData = True
        # Refill the buffer only when all profiles of the current block were consumed.
        if self.__isBufferEmpty():
            self.__flagDiscontinuousBlock = False
            while True:
                if self.__readNextBlock():
                    break
                # Past the end of the selected range: nothing more to read.
                if self.__thisUnixSample > self.__endUTCSecond*self.__sample_rate:
                    return False
                if self.__flagDiscontinuousBlock:
                    print('[Reading] discontinuous block found ... continue with the next block')
                    continue
                # Offline readers do not wait for new data.
                if not self.__online:
                    return False
                err_counter += 1
                if err_counter > nTries:
                    return False
                print('[Reading] waiting %d seconds to read a new block' %seconds)
                sleep(seconds)
        # Slice one profile (all channels x nSamples) out of the block buffer.
        self.dataOut.data = self.__data_buffer[:,self.__bufferIndex:self.__bufferIndex+self.__nSamples]
        self.dataOut.utctime = (self.__thisUnixSample + self.__bufferIndex)/self.__sample_rate
        self.dataOut.flagNoData = False
        self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
        self.dataOut.profileIndex = self.profileIndex
        self.__bufferIndex += self.__nSamples
        self.profileIndex += 1
        # Wrap the profile counter at the end of each complete set of profiles.
        if self.profileIndex == self.dataOut.nProfiles:
            self.profileIndex = 0
        return True
def printInfo(self):
'''
'''
if self.__printInfo == False:
return
# self.systemHeaderObj.printInfo()
# self.radarControllerHeaderObj.printInfo()
self.__printInfo = False
def printNumberOfBlock(self):
'''
'''
print(self.profileIndex)
def run(self, **kwargs):
'''
This method will be called many times so here you should put all your code
'''
if not self.isConfig:
self.setup(**kwargs)
self.getData(seconds=self.__delay)
return
@MPDecorator
class USRPWriter(Operation):
    '''
    Writer operation for USRP data. Currently a pass-through: the input data
    object is bound as the output and no file is written.
    '''
    def __init__(self, **kwargs):
        '''
        Initialize the underlying Operation; the output object is assigned
        later, in setup().
        '''
        Operation.__init__(self, **kwargs)
        self.dataOut = None
    def setup(self, dataIn, path, blocksPerFile, set=0, ext=None):
        '''
        Bind the input object (which doubles as the output) and mark the
        operation as configured.
        Input:
            dataIn : Input data; it will also be the output data
        '''
        # NOTE(review): path, blocksPerFile, set and ext are currently unused.
        self.dataOut = dataIn
        self.isConfig = True
        return
    def run(self, dataIn, **kwargs):
        '''
        Called once per data block; performs one-time setup on the first call.
        Inputs:
            dataIn : object with the data
        '''
        if not self.isConfig:
            self.setup(dataIn, **kwargs)
if __name__ == '__main__':
    # Manual smoke test: read blocks in an endless loop from a local recording
    # and report the profile counter after every read.
    readObj = USRPReader()
    while True:
        readObj.run(path='/Volumes/DATA/haystack/passive_radar/')
        # readObj.printInfo()
        readObj.printNumberOfBlock()
|
{"hexsha": "1ac362b7ed779c7ddd1f7f913f9af6d1c368946c", "size": 19528, "ext": "py", "lang": "Python", "max_stars_repo_path": "schainpy/model/io/jroIO_usrp.py", "max_stars_repo_name": "LuisRondoCuevas/schainpy", "max_stars_repo_head_hexsha": "ef41efe03993a6ae56e587334a1bfc529fccc2df", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "schainpy/model/io/jroIO_usrp.py", "max_issues_repo_name": "LuisRondoCuevas/schainpy", "max_issues_repo_head_hexsha": "ef41efe03993a6ae56e587334a1bfc529fccc2df", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "schainpy/model/io/jroIO_usrp.py", "max_forks_repo_name": "LuisRondoCuevas/schainpy", "max_forks_repo_head_hexsha": "ef41efe03993a6ae56e587334a1bfc529fccc2df", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.438538206, "max_line_length": 158, "alphanum_fraction": 0.564369111, "include": true, "reason": "import numpy", "num_tokens": 3985}
|
#
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ******************************************* TRANSFER FUNCTIONS *****************************************************
"""
* `Identity`
* `Linear`
* `Exponential`
* `Logistic`
* `Tanh`
* `ReLU`
* `Angle`
* `Gaussian`
* `GaussianDistort`
* `SoftMax`
* `LinearMatrix`
* `TransferWithCosts`
Overview
--------
Functions that transform their variable but maintain its shape.
All TransferFunctions have the following attributes:
* **bounds**: specifies the lower and upper limits of the result; if there are none, the attribute is set to
`None`; if it has at least one bound, the attribute is set to a tuple specifying the lower and upper bounds,
respectively, with `None` as the entry for no bound.
..
* **multiplicative_param** and **additive_param**:
each of these is assigned the name of one of the function's
parameters and used by `ModulatoryProjections <ModulatoryProjection>` to modulate the output of the
TransferFunction's function (see `Function_Modulatory_Params`).
"""
import numbers
import types
import warnings
from enum import IntFlag
from math import e, pi, sqrt
import numpy as np
import typecheck as tc
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import parameter_keywords
from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination
from psyneulink.core.components.functions.function import (
DEFAULT_SEED, Function, Function_Base, FunctionError, _random_state_getter, _seed_setter, function_keywords,
get_matrix, is_function_type,
)
from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot
from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator
from psyneulink.core.components.shellclasses import Projection
from psyneulink.core.globals.context import ContextFlags, handle_external_context
from psyneulink.core.globals.keywords import \
ADDITIVE_PARAM, ALL, ANGLE_FUNCTION, BIAS, EXPONENTIAL_FUNCTION, \
GAIN, GAUSSIAN_DISTORT_FUNCTION, GAUSSIAN_FUNCTION, HAS_INITIALIZERS, HOLLOW_MATRIX, \
IDENTITY_FUNCTION, IDENTITY_MATRIX, INTERCEPT, LEAK, LINEAR_FUNCTION, LINEAR_MATRIX_FUNCTION, LOGISTIC_FUNCTION, \
TANH_FUNCTION, MATRIX_KEYWORD_NAMES, MATRIX, MATRIX_KEYWORD_VALUES, MAX_INDICATOR, MAX_VAL, MULTIPLICATIVE_PARAM, \
OFF, OFFSET, ON, PER_ITEM, PROB, PRODUCT, OUTPUT_TYPE, PROB_INDICATOR, \
RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, \
TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME
from psyneulink.core.globals.parameters import \
FunctionParameter, Parameter, get_validator_by_function
from psyneulink.core.globals.preferences.basepreferenceset import \
REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, is_pref_set
from psyneulink.core.globals.utilities import parameter_spec, safe_len
__all__ = ['Angle', 'Exponential', 'Gaussian', 'GaussianDistort', 'Identity', 'Linear', 'LinearMatrix',
'Logistic', 'ReLU', 'SoftMax', 'Tanh', 'TransferFunction', 'TransferWithCosts'
]
class TransferFunction(Function_Base):
    """Function that transforms variable but maintains its shape.
    All TransferFunctions MUST have the following attributes:
    `bounds` -- specifies the lower and upper limits of the result; if there are none, the attribute is set to
    `None`; if it has at least one bound, the attribute is set to a tuple specifying the lower and upper bounds,
    respectively, with `None` as the entry for no bound.
    `multiplicative_param <Function_Modulatory_Params>` and `additive_param <Function_Modulatory_Params>` -- each
    of these is assigned the name of one of the function's parameters and used by `ModulatoryProjections
    <ModulatoryProjection>` to modulate the output of the TransferFunction's `function <TransferFunction._function>`
    (see `Function_Modulatory_Params`).
    """
    componentType = TRANSFER_FUNCTION_TYPE
    class Parameters(Function_Base.Parameters):
        """
            Attributes
            ----------
                bounds
                    see `bounds <TransferFunction.bounds>`
                    :default value: None
                    :type:
        """
        bounds = None
    def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
        # Pretend we have one huge array to work on
        # TODO: should this be invoked in parts?
        assert isinstance(arg_in.type.pointee, pnlvm.ir.ArrayType)
        # Elementwise transfer: input and output buffers must have identical types.
        # (This assertion appeared twice in the original; the duplicate was removed.)
        assert arg_in.type == arg_out.type
        is_2d = isinstance(arg_in.type.pointee.element, pnlvm.ir.ArrayType)
        with pnlvm.helpers.array_ptr_loop(builder, arg_in, "transfer_loop") as (b, idx):
            if is_2d:
                # 2d input: iterate rows, then delegate the per-element transform
                # to the subclass's _gen_llvm_transfer for each inner array.
                vi = b.gep(arg_in, [ctx.int32_ty(0), idx])
                vo = b.gep(arg_out, [ctx.int32_ty(0), idx])
                with pnlvm.helpers.array_ptr_loop(b, vi, "nested_transfer_loop") as args:
                    self._gen_llvm_transfer(ctx=ctx, vi=vi, vo=vo,
                                            params=params, state=state, *args, tags=tags)
            else:
                self._gen_llvm_transfer(b, idx, ctx=ctx, vi=arg_in, vo=arg_out,
                                        params=params, state=state, tags=tags)
        return builder
# **********************************************************************************************************************
# Identity
# **********************************************************************************************************************
class Identity(TransferFunction):  # -----------------------------------------------------------------------------------
    """
    Identity(              \
         default_variable, \
         params=None,      \
         owner=None,       \
         name=None,        \
         prefs=None        \
        )
    .. _Identity:
    Returns `variable <Identity.variable>` unchanged.
    Arguments
    ---------
    variable : number or np.array : default class_defaults.variable
        specifies a template for the value to be returned.
    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function. Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.
    owner : Component
        `component <Component>` to which to assign the Function.
    name : str : default see `name <Function.name>`
        specifies the name of the Function.
    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
    Attributes
    ----------
    variable : number or np.array
        contains value to be returned.
    owner : Component
        `component <Component>` to which the Function has been assigned.
    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """
    componentName = IDENTITY_FUNCTION
    classPreferences = {
        PREFERENCE_SET_NAME: 'IdentityClassPreferences',
        REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
    }
    @tc.typecheck
    def __init__(self, default_variable=None, params=None, owner=None, prefs: tc.optional(is_pref_set) = None):
        # Nothing Identity-specific to configure; defer entirely to the base class.
        super().__init__(default_variable=default_variable,
                         params=params,
                         owner=owner,
                         prefs=prefs,
                         )
        # self.functionOutputType = None
    def _function(self, variable=None, context=None, params=None):
        """
        Return `variable <Identity.variable>` unmodified.
        Arguments
        ---------
        variable : number or np.array : default class_defaults.variable
            a single value or array to be returned.
        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function. Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.
        Returns
        -------
        variable : number or np.array
        """
        # outputType = self.functionOutputType
        return variable
    def _gen_llvm_function_body(self, ctx, builder, _1, _2, arg_in, arg_out, *, tags:frozenset):
        # Compiled form: a plain copy of the input buffer to the output buffer.
        builder.store(builder.load(arg_in), arg_out)
        return builder
# **********************************************************************************************************************
# Linear
# **********************************************************************************************************************
class Linear(TransferFunction):  # -------------------------------------------------------------------------------------
    """
    Linear( \
         default_variable, \
         slope=1.0, \
         intercept=0.0, \
         params=None, \
         owner=None, \
         name=None, \
         prefs=None \
         )
    .. _Linear:
    `function <Linear._function>` returns linear transform of `variable <Linear.variable>`:
    .. math::
        slope * variable + intercept
    Note: default values for `slope <Linear.slope>` and `intercept <Linear.intercept>` implement the
    *IDENTITY_FUNCTION*.
    `derivative <Linear.derivative>` returns `slope <Linear.slope>`.
    Arguments
    ---------
    default_variable : number or array : default class_defaults.variable
        specifies a template for the value to be transformed.
    slope : float : default 1.0
        specifies a value by which to multiply `variable <Linear.variable>`.
    intercept : float : default 0.0
        specifies a value to add to each element of `variable <Linear.variable>` after applying `slope <Linear.slope>`.
    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function. Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.
    owner : Component
        `component <Component>` to which to assign the Function.
    name : str : default see `name <Function.name>`
        specifies the name of the Function.
    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
    Attributes
    ----------
    variable : number or array
        contains value to be transformed.
    slope : float
        value by which each element of `variable <Linear.variable>` is multiplied before applying the
        `intercept <Linear.intercept>` (if it is specified).
    intercept : float
        value added to each element of `variable <Linear.variable>` after applying the `slope <Linear.slope>`
        (if it is specified).
    bounds : Tuple or None
        determines the lower and upper limits of the result; if at least one bound is specified, the attribute is
        a tuple specifying the lower and upper bounds, respectively, with `None` as the entry for no bound.
    owner : Component
        `component <Component>` to which the Function has been assigned.
    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """
    componentName = LINEAR_FUNCTION
    classPreferences = {
        PREFERENCE_SET_NAME: 'LinearClassPreferences',
        REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
    }
    _model_spec_class_name_is_generic = True
    class Parameters(TransferFunction.Parameters):
        """
            Attributes
            ----------
                intercept
                    see `intercept <Linear.intercept>`
                    :default value: 0.0
                    :type: ``float``
                slope
                    see `slope <Linear.slope>`
                    :default value: 1.0
                    :type: ``float``
        """
        slope = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
        intercept = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
    @tc.typecheck
    def __init__(self,
                 default_variable=None,
                 # single tc.optional wrapper (was redundantly doubled as
                 # tc.optional(tc.optional(...))); accepts the same values and
                 # matches the style of the other TransferFunctions
                 slope: tc.optional(parameter_spec) = None,
                 intercept: tc.optional(parameter_spec) = None,
                 params=None,
                 owner=None,
                 prefs: tc.optional(is_pref_set) = None):
        super().__init__(
            default_variable=default_variable,
            slope=slope,
            intercept=intercept,
            params=params,
            owner=owner,
            prefs=prefs,
        )
    def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
        # Emit LLVM IR computing slope*x + intercept (or the derivative, slope)
        # for the single element at `index`.
        ptri = builder.gep(vi, [ctx.int32_ty(0), index])
        ptro = builder.gep(vo, [ctx.int32_ty(0), index])
        slope_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SLOPE)
        intercept_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, INTERCEPT)
        slope = pnlvm.helpers.load_extract_scalar_array_one(builder, slope_ptr)
        intercept = pnlvm.helpers.load_extract_scalar_array_one(builder, intercept_ptr)
        if "derivative" in tags:
            # f'(x) = m
            val = slope
        else:
            # f(x) = mx + b
            val = builder.load(ptri)
            val = builder.fmul(val, slope)
            val = builder.fadd(val, intercept)
        builder.store(val, ptro)
    def _function(self,
                  variable=None,
                  context=None,
                  params=None,
                  ):
        """
        Arguments
        ---------
        variable : number or array : default class_defaults.variable
            a single value or array to be transformed.
        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function. Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.
        Returns
        -------
        linear transformation of variable : number or array
        """
        slope = self._get_current_parameter_value(SLOPE, context)
        intercept = self._get_current_parameter_value(INTERCEPT, context)
        # MODIFIED 11/9/17 NEW:
        try:
            # By default, result should be returned as np.ndarray with same dimensionality as input
            result = variable * slope + intercept
        except TypeError:
            if hasattr(variable, "dtype"):
                # If variable is an array with mixed sizes or types, try item-by-item operation
                if variable.dtype == object:
                    result = np.zeros_like(variable)
                    for i, item in enumerate(variable):
                        # use the enumerated item directly (was variable[i], leaving item unused)
                        result[i] = item * slope + intercept
                else:
                    raise FunctionError("Unrecognized type for {} of {} ({})".format(VARIABLE, self.name, variable))
            # KAM 6/28/18: If the variable does not have a "dtype" attr but made it to this line, then it must be of a
            # type that even np does not recognize -- typically a custom OutputPort variable with items of different
            # shapes (e.g. variable = [[0.0], [0.0], array([[0.0, 0.0]])] )
            elif isinstance(variable, list):
                result = []
                for variable_item in variable:
                    result.append(np.multiply(variable_item, slope) + intercept)
            else:
                raise FunctionError("Unrecognized type for {} of {} ({})".format(VARIABLE, self.name, variable))
        return self.convert_output_type(result)
    @handle_external_context()
    def derivative(self, input=None, output=None, context=None):
        """
        derivative(input)
        Derivative of `function <Linear._function>` at **input**.
        Arguments
        ---------
        input : number
            value of the input to the Linear transform at which derivative is to be taken.
        Returns
        -------
        Slope of function :  number or array
        """
        return self._get_current_parameter_value(SLOPE, context)
    def _is_identity(self, context=None, defaults=False):
        # Linear with slope 1 and intercept 0 is the identity transform.
        if defaults:
            slope = self.defaults.slope
            intercept = self.defaults.intercept
        else:
            slope = self.parameters.slope._get(context)
            intercept = self.parameters.intercept._get(context)
        return slope == 1 and intercept == 0
# **********************************************************************************************************************
# Exponential
# **********************************************************************************************************************
class Exponential(TransferFunction):  # --------------------------------------------------------------------------------
    """
    Exponential( \
         default_variable, \
         rate=1.0, \
         bias=0.0, \
         scale=1.0, \
         offset=0.0, \
         params=None, \
         owner=None, \
         name=None, \
         prefs=None \
         )
    .. _Exponential:
    `function <Exponential._function>` returns exponential transform of `variable <Exponential.variable>`:
    .. math::
         scale * e^{rate*variable+bias} + offset
    `derivative <Exponential.derivative>` returns the derivative of the Exponential:
    .. math::
        rate*scale*e^{rate*input+bias}
    Arguments
    ---------
    default_variable : number or array : default class_defaults.variable
        specifies a template for the value to be transformed.
    rate : float : default 1.0
        specifies a value by which to multiply `variable <Exponential.variable>` before exponentiation.
    bias : float : default 0.0
        specifies a value to add to `variable <Exponential.variable>` after multiplying by `rate <Exponential.rate>`
        and before exponentiation.
    scale : float : default 1.0
        specifies a value by which to multiply the exponentiated value of `variable <Exponential.variable>`.
    offset : float : default 0.0
        specifies value to add to the exponentiated value of `variable <Exponential.variable>`
        after multiplying by `scale <Exponential.scale>`.
    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function. Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.
    owner : Component
        `component <Component>` to which to assign the Function.
    name : str : default see `name <Function.name>`
        specifies the name of the Function.
    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
    Attributes
    ----------
    variable : number or array
        contains value to be transformed.
    rate : float
        value by which `variable <Exponential.variable>` is multiplied before exponentiation;
        assigned as *MULTIPLICATIVE_PARAM* of the Exponential Function.
    bias : float
        value added to `variable <Exponential.variable>` after multiplying by `rate <Exponential.rate>`
        and before exponentiation;  assigned as *ADDITIVE_PARAM* of the Exponential Function.
    scale : float
        value by which the exponentiated value is multiplied.
    offset : float
        value added to exponentiated value after multiplying by `scale <Exponential.scale>`.
    bounds : (0, None)
    owner : Component
        `component <Component>` to which the Function has been assigned.
    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """
    componentName = EXPONENTIAL_FUNCTION
    class Parameters(TransferFunction.Parameters):
        """
            Attributes
            ----------
                bias
                    see `bias <Exponential.bias>`
                    :default value: 0.0
                    :type: ``float``
                offset
                    see `offset <Exponential.offset>`
                    :default value: 0.0
                    :type: ``float``
                rate
                    see `rate <Exponential.rate>`
                    :default value: 1.0
                    :type: ``float``
                scale
                    see `scale <Exponential.scale>`
                    :default value: 1.0
                    :type: ``float``
        """
        rate = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
        bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
        scale = Parameter(1.0, modulable=True)
        offset = Parameter(0.0, modulable=True)
        bounds = (0, None)
    @tc.typecheck
    def __init__(self,
                 default_variable=None,
                 rate: tc.optional(parameter_spec) = None,
                 scale: tc.optional(parameter_spec) = None,
                 bias: tc.optional(parameter_spec) = None,
                 offset: tc.optional(parameter_spec) = None,
                 params=None,
                 owner=None,
                 prefs: tc.optional(is_pref_set) = None):
        super().__init__(
            default_variable=default_variable,
            rate=rate,
            bias=bias,
            scale=scale,
            offset=offset,
            params=params,
            owner=owner,
            prefs=prefs,
        )
    def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
        """Emit LLVM IR computing scale*e^(rate*x+bias)+offset (or its derivative) for one element."""
        ptri = builder.gep(vi, [ctx.int32_ty(0), index])
        ptro = builder.gep(vo, [ctx.int32_ty(0), index])
        rate_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, RATE)
        bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
        scale_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SCALE)
        offset_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, OFFSET)
        rate = pnlvm.helpers.load_extract_scalar_array_one(builder, rate_ptr)
        bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
        scale = pnlvm.helpers.load_extract_scalar_array_one(builder, scale_ptr)
        offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr)
        exp_f = ctx.get_builtin("exp", [ctx.float_ty])
        # e^(rate*x + bias) is shared by both the function and its derivative.
        val = builder.load(ptri)
        val = builder.fmul(val, rate)
        val = builder.fadd(val, bias)
        val = builder.call(exp_f, [val])
        if "derivative" in tags:
            # f'(x) = s*r*e^(r*x + b)
            val = builder.fmul(val, scale)
            val = builder.fmul(val, rate)
        else:
            # f(x) = s*e^(r*x + b) + o
            val = builder.fmul(val, scale)
            val = builder.fadd(val, offset)
        builder.store(val, ptro)
    def _function(self,
                  variable=None,
                  context=None,
                  params=None,
                  ):
        """
        Arguments
        ---------
        variable : number or array : default class_defaults.variable
            a single value or array to be exponentiated.
        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function. Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.
        Returns
        -------
        Exponential transformation of variable : number or array
        """
        rate = self._get_current_parameter_value(RATE, context)
        bias = self._get_current_parameter_value(BIAS, context)
        scale = self._get_current_parameter_value(SCALE, context)
        offset = self._get_current_parameter_value(OFFSET, context)
        # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416)
        # result = scale * np.exp(rate * variable + bias) + offset
        result = scale * e**(rate * variable + bias) + offset
        return self.convert_output_type(result)
    @handle_external_context()
    def derivative(self, input, output=None, context=None):
        """
        derivative(input)
        Derivative of `function <Exponential._function>` at **input**.
        Arguments
        ---------
        input : number
            value of the input to the Exponential transform at which derivative is to be taken.
        Returns
        -------
        derivative :  number or array
        """
        rate = self._get_current_parameter_value(RATE, context)
        scale = self._get_current_parameter_value(SCALE, context)
        bias = self._get_current_parameter_value(BIAS, context)
        # d/dx [s*e^(r*x+b) + o] = r*s*e^(r*x+b)
        return rate * scale * e**(rate * input + bias)
# **********************************************************************************************************************
# Logistic
# **********************************************************************************************************************
class Logistic(TransferFunction): # ------------------------------------------------------------------------------------
"""
Logistic( \
default_variable, \
gain=1.0, \
bias=0.0, \
x_0=0.0, \
offset=0.0, \
scale=1.0, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _Logistic_Function:
`function <Logistic._function>` returns logistic transform of `variable <Logistic.variable>`:
.. math::
\\frac{1}{1 + e^{ - gain ( variable + bias - x_{0}) + offset}}
(this is an offset and scaled version of the `Tanh`, which is centered on origin).
.. note::
The **bias** and **x_0** arguments are identical, apart from opposite signs: **bias** is included to
accomodate the convention in the machine learning community; **x_0** is included to match the `standard
form of the Logistic Function <https://en.wikipedia.org/wiki/Logistic_function>`_ (in which **gain**
corresponds to the *k* parameter and **scale** corresponds to the *L* parameter).
`derivative <Logistic.derivative>` returns the derivative of the Logistic using its **output**:
.. math::
gain * scale * output * (1-output)
Arguments
---------
default_variable : number or array : default class_defaults.variable
specifies a template for the value to be transformed.
gain : float : default 1.0
specifies value by which to multiply `variable <Logistic.variable>` before logistic transformation
bias : float : default 0.0
specifies value to add to each element of `variable <Logistic.variable>` before applying `gain <Logistic.gain>`
and before logistic transformation. This argument is identical to x_0, with the opposite sign.
x_0 : float : default 0.0
specifies value to subtract from each element of `variable <Logistic.variable>` before applying `gain <Logistic.gain>`
and before logistic transformation. This argument is identical to bias, with the opposite sign.
offset : float : default 0.0
specifies value to add to each element of `variable <Logistic.variable>` after applying `gain <Logistic.gain>`
but before logistic transformation.
scale : float : default 0.0
specifies value by which each element is multiplied after applying the logistic transformation.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
contains value to be transformed.
gain : float : default 1.0
value by which each element of `variable <Logistic.variable>` is multiplied before applying the
`bias <Logistic.bias>` (if it is specified).
bias : float : default 0.0
value added to each element of `variable <Logistic.variable>` before applying the `gain <Logistic.gain>`
(if it is specified). This attribute is identical to x_0, with the opposite sign.
x_0 : float : default 0.0
value subtracted from each element of `variable <Logistic.variable>` before applying the `gain <Logistic.gain>`
(if it is specified). This attribute is identical to bias, with the opposite sign.
offset : float : default 0.0
value to added to each element of `variable <Logistic.variable>` after applying `gain <Logistic.gain>`
but before logistic transformation.
scale : float : default 0.0
value by which each element is multiplied after applying the Logistic transform.
bounds : (0,1)
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentName = LOGISTIC_FUNCTION
parameter_keywords.update({GAIN, BIAS, OFFSET})
_model_spec_class_name_is_generic = True
class Parameters(TransferFunction.Parameters):
"""
Attributes
----------
bias
see `bias <Logistic.bias>`
:default value: 0.0
:type: ``float``
gain
see `gain <Logistic.gain>`
:default value: 1.0
:type: ``float``
offset
see `offset <Logistic.offset>`
:default value: 0.0
:type: ``float``
scale
see `scale <Logistic.scale>`
:default value: 1.0
:type: ``float``
x_0
see `x_0 <Logistic.x_0>`
:default value: 0.0
:type: ``float``
"""
gain = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
x_0 = Parameter(0.0, modulable=True)
bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
offset = Parameter(0.0, modulable=True)
scale = Parameter(1.0, modulable=True)
bounds = (0, 1)
@tc.typecheck
def __init__(self,
default_variable=None,
gain: tc.optional(parameter_spec) = None,
x_0=None,
bias=None,
offset: tc.optional(parameter_spec) = None,
scale: tc.optional(parameter_spec) = None,
params=None,
owner=None,
prefs: tc.optional(is_pref_set) = None):
super().__init__(
default_variable=default_variable,
gain=gain,
x_0=x_0,
bias=bias,
offset=offset,
scale=scale,
params=params,
owner=owner,
prefs=prefs,
)
def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
ptri = builder.gep(vi, [ctx.int32_ty(0), index])
ptro = builder.gep(vo, [ctx.int32_ty(0), index])
gain_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, GAIN)
bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
x_0_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, X_0)
scale_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SCALE)
offset_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, OFFSET)
gain = pnlvm.helpers.load_extract_scalar_array_one(builder, gain_ptr)
bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
x_0 = pnlvm.helpers.load_extract_scalar_array_one(builder, x_0_ptr)
offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr)
scale = pnlvm.helpers.load_extract_scalar_array_one(builder, scale_ptr)
exp_f = ctx.get_builtin("exp", [ctx.float_ty])
val = builder.load(ptri)
val = builder.fadd(val, bias)
val = builder.fsub(val, x_0)
val = builder.fmul(val, gain)
val = builder.fsub(offset, val)
val = builder.call(exp_f, [val])
val = builder.fadd(ctx.float_ty(1), val)
val = builder.fdiv(ctx.float_ty(1), val)
val = builder.fmul(val, scale)
if "derivative" in tags:
# f(x) = g * s * o * (1-o)
function_val = val
val = builder.fsub(ctx.float_ty(1), function_val)
val = builder.fmul(function_val, val)
val = builder.fmul(gain, val)
val = builder.fmul(scale, val)
builder.store(val, ptro)
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Arguments
---------
variable : number or array : default class_defaults.variable
a single value or array to be transformed.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
Logistic transformation of variable : number or array
"""
gain = self._get_current_parameter_value(GAIN, context)
bias = self._get_current_parameter_value(BIAS, context)
x_0 = self._get_current_parameter_value(X_0, context)
offset = self._get_current_parameter_value(OFFSET, context)
scale = self._get_current_parameter_value(SCALE, context)
# The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416)
# result = 1. / (1 + np.exp(-gain * (variable - bias) + offset))
result = scale * (1. / (1 + e**(-gain * (variable + bias - x_0) + offset)))
return self.convert_output_type(result)
@handle_external_context()
def derivative(self, input=None, output=None, context=None):
"""
derivative(input=None, output=None)
Derivative of `function <Exponential._function>` at either **input** or **output**.
Either **input** or **ouput** must be specified. If **output** is not specified, it is computed from **input**.
If both are specified, **input** is ignored unless paramValidationPref is set, in which case
an error is generated if **output** does not correspond to `function <Logistic._function>`\\(**input**).
Arguments
---------
input : number
value of the input to the Logistic transform at which derivative is to be taken.
output : number
value of the output of the Logistic transform at which derivative is to be taken.
Returns
-------
Deriviative of logistic transform at output: number or array
"""
if output is not None and input is not None and self.prefs.paramValidationPref:
if isinstance(input, numbers.Number):
valid = output == self.function(input, context=context)
else:
valid = all(output[i] == self.function(input, context=context)[i] for i in range(len(input)))
if not valid:
raise FunctionError("Value of {} arg passed to {} ({}) "
"does not match the value expected for specified {} ({})".
format(repr('output'), self.__class__.__name__ + '.' + 'derivative', output,
repr('input'), input))
gain = self._get_current_parameter_value(GAIN, context)
scale = self._get_current_parameter_value(SCALE, context)
if output is None:
output = self.function(input, context=context)
return gain * scale * output * (1 - output)
# **********************************************************************************************************************
# Tanh
# **********************************************************************************************************************
class Tanh(TransferFunction): # ------------------------------------------------------------------------------------
"""
Tanh( \
default_variable, \
gain=1.0, \
bias=0.0, \
x_0=0.0, \
offset=0.0, \
scale=1.0, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _Tanh_Function:
`function <Logistic._function>` returns hyperbolic tangent of `variable <Logistic.variable>`:
.. math::
\\scale*frac{1 - e^{-2(gain*(variable+bias-x\\_0)+offset)}}{1 + e^{-2(gain*(variable+bias-x\\_0)+offset)}}
.. note::
The `Logistic` function is an offset and scaled version of this function.
The parameters used here have the same meaning as those used for the `Logistic` Function.
`derivative <Tanh.derivative>` returns the derivative of the hyperbolic tangent at its **input**:
.. math::
\\frac{gain*scale}{(\\frac{1+e^{-2(gain*(variable+bias-x\\_0)+offset)}}{2e^{-(gain*(
variable+bias-x\\_0)+offset)}})^2}
Arguments
---------
default_variable : number or array : default class_defaults.variable
specifies template for the value to be transformed.
gain : float : default 1.0
specifies value by which to multiply `variable <Tanh.variable>` before logistic transformation
bias : float : default 0.0
specifies value to add to each element of `variable <Tanh.variable>` before applying `gain <Tanh.gain>`
and before logistic transformation. This argument is identical to x_0, with the opposite sign.
x_0 : float : default 0.0
specifies value to subtract from each element of `variable <Tanh.variable>` before applying `gain <Tanh.gain>`
and before logistic transformation. This argument is identical to bias, with the opposite sign.
offset : float : default 0.0
specifies value to add to each element of `variable <Tanh.variable>` after applying `gain <Tanh.gain>`
but before logistic transformation.
scale : float : default 1.0
specifies value by which to multiply each element after applying Tanh transform.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
contains value to be transformed.
gain : float : default 1.0
value by which each element of `variable <Tanh.variable>` is multiplied before applying the
`bias <Tanh.bias>` (if it is specified).
bias : float : default 0.0
value added to each element of `variable <Tanh.variable>` before applying the `gain <Tanh.gain>`
(if it is specified). This attribute is identical to x_0, with the opposite sign.
x_0 : float : default 0.0
value subtracted from each element of `variable <Tanh.variable>` before applying the `gain <Tanh.gain>`
(if it is specified). This attribute is identical to bias, with the opposite sign.
offset : float : default 0.0
value to added to each element of `variable <Tanh.variable>` after applying `gain <Tanh.gain>`
but before logistic transformation.
scale : float : default 1.0
value by which element is multiplied after applying Tanh transform.
bounds : (0,1)
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentName = TANH_FUNCTION
parameter_keywords.update({GAIN, BIAS, OFFSET})
class Parameters(TransferFunction.Parameters):
"""
Attributes
----------
bias
see `bias <Tanh.bias>`
:default value: 0.0
:type: ``float``
gain
see `gain <Tanh.gain>`
:default value: 1.0
:type: ``float``
offset
see `offset <Tanh.offset>`
:default value: 0.0
:type: ``float``
scale
see `scale <Tanh.scale>`
:default value: 1.0
:type: ``float``
x_0
see `x_0 <Tanh.x_0>`
:default value: 0.0
:type: ``float``
"""
gain = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
x_0 = Parameter(0.0, modulable=True)
bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
offset = Parameter(0.0, modulable=True)
scale = Parameter(1.0, modulable=True)
bounds = (0, 1)
@tc.typecheck
def __init__(self,
default_variable=None,
gain: tc.optional(parameter_spec) = None,
x_0=None,
bias=None,
offset: tc.optional(parameter_spec) = None,
scale: tc.optional(parameter_spec) = None,
params=None,
owner=None,
prefs: tc.optional(is_pref_set) = None):
super().__init__(
default_variable=default_variable,
gain=gain,
x_0=x_0,
bias=bias,
offset=offset,
scale=scale,
params=params,
owner=owner,
prefs=prefs,
)
def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
ptri = builder.gep(vi, [ctx.int32_ty(0), index])
ptro = builder.gep(vo, [ctx.int32_ty(0), index])
gain_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, GAIN)
bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
x_0_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, X_0)
offset_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, OFFSET)
scale_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SCALE)
gain = pnlvm.helpers.load_extract_scalar_array_one(builder, gain_ptr)
bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
x_0 = pnlvm.helpers.load_extract_scalar_array_one(builder, x_0_ptr)
offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr)
scale = pnlvm.helpers.load_extract_scalar_array_one(builder, scale_ptr)
variable = builder.load(ptri)
exp_f = ctx.get_builtin("exp", [ctx.float_ty])
if "derivative" in tags:
exponent = builder.fadd(variable, bias)
exponent = builder.fsub(exponent, x_0)
exponent = builder.fmul(gain, exponent)
exponent = builder.fadd(exponent, offset)
exponent = builder.fmul(exponent.type(-2), exponent)
mult = builder.fmul(gain, scale)
mult = builder.fmul(mult.type(-2), mult)
exp_val = builder.call(exp_f, [exponent])
numerator = builder.fmul(exp_val.type(-2), exp_val)
denominator = builder.fadd(exp_val.type(1), exp_val)
denominator = builder.fmul(denominator, denominator)
val = builder.fdiv(numerator, denominator)
val = builder.fmul(val, mult)
else:
exp_val = builder.fadd(variable, bias)
exp_val = builder.fsub(exp_val, x_0)
exp_val = builder.fmul(exp_val, gain)
exp_val = builder.fadd(exp_val, offset)
exp_val = builder.fmul(exp_val.type(-2), exp_val)
val = builder.call(exp_f, [exp_val])
val1 = builder.fsub(val.type(1), val)
val2 = builder.fadd(val.type(1), val)
val = builder.fdiv(val1, val2)
val = builder.fmul(val, scale)
builder.store(val, ptro)
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Arguments
---------
variable : number or array : default class_defaults.variable
a single value or array to be transformed.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
hyperbolic tangent of variable : number or array
"""
gain = self._get_current_parameter_value(GAIN, context)
bias = self._get_current_parameter_value(BIAS, context)
x_0 = self._get_current_parameter_value(X_0, context)
offset = self._get_current_parameter_value(OFFSET, context)
scale = self._get_current_parameter_value(SCALE, context)
# The following probably doesn't work with autograd (https://github.com/HIPS/autograd/issues/416)
# (since np.exp doesn't work)
# result = 1. / (1 + np.tanh(-gain * (variable - bias) + offset))
exponent = -2 * (gain * (variable + bias - x_0) + offset)
result = scale * (1 - e**exponent)/ (1 + e**exponent)
return self.convert_output_type(result)
@handle_external_context()
def derivative(self, input, output=None, context=None):
"""
derivative(input)
Derivative of `function <Tanh._function>` at **input**.
Arguments
---------
input : number
value of the input to the Tanh transform at which derivative is to be taken.
Returns
-------
derivative : number or array
"""
gain = self._get_current_parameter_value(GAIN, context)
bias = self._get_current_parameter_value(BIAS, context)
x_0 = self._get_current_parameter_value(X_0, context)
offset = self._get_current_parameter_value(OFFSET, context)
scale = self._get_current_parameter_value(SCALE, context)
exponent = -2 * (gain * (input + bias - x_0) + offset)
mult = -2 * gain * scale
numerator = -2 * e**(exponent)
denominator = (1 + e**(exponent))**2
return mult * (numerator / denominator)
# **********************************************************************************************************************
# ReLU
# **********************************************************************************************************************
class ReLU(TransferFunction): # ------------------------------------------------------------------------------------
"""
ReLU( \
default_variable, \
gain=1.0, \
bias=0.0, \
leak=0.0, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _RelU_Function:
`function <ReLU._function>` returns rectified linear tranform of `variable <ReLU.variable>`:
.. math::
x = gain*(variable - bias)
.. math::
max(x, leak * x)
Commonly used by `ReLU <https://en.wikipedia.org/wiki/Rectifier_(neural_networks>`_ units in neural networks.
`derivative <ReLU.derivative>` returns the derivative of of the rectified linear tranform at its **input**:
.. math::
gain\\ if\\ input > 0,\\ gain*leak\\ otherwise
Arguments
---------
default_variable : number or array : default class_defaults.variable
specifies a template for the value to be transformed.
gain : float : default 1.0
specifies a value by which to multiply `variable <ReLU.variable>` after `bias <ReLU.bias>` is subtracted
from it.
bias : float : default 0.0
specifies a value to subtract from each element of `variable <ReLU.variable>`.
leak : float : default 0.0
specifies a scaling factor between 0 and 1 when (variable - bias) is less than or equal to 0.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
contains value to be transformed.
gain : float : default 1.0
value by which to multiply `variable <ReLU.variable>` after `bias <ReLU.bias>` is subtracted
from it.
bias : float : default 0.0
value to subtract from each element of `variable <ReLU.variable>`.
leak : float : default 0.0
scaling factor between 0 and 1 when (variable - bias) is less than or equal to 0.
bounds : (None,None)
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentName = RELU_FUNCTION
parameter_keywords.update({GAIN, BIAS, LEAK})
class Parameters(TransferFunction.Parameters):
"""
Attributes
----------
bias
see `bias <ReLU.bias>`
:default value: 0.0
:type: ``float``
gain
see `gain <ReLU.gain>`
:default value: 1.0
:type: ``float``
leak
see `leak <ReLU.leak>`
:default value: 0.0
:type: ``float``
"""
gain = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
leak = Parameter(0.0, modulable=True)
bounds = (None, None)
@tc.typecheck
def __init__(self,
default_variable=None,
gain: tc.optional(parameter_spec) = None,
bias: tc.optional(parameter_spec) = None,
leak: tc.optional(parameter_spec) = None,
params=None,
owner=None,
prefs: tc.optional(is_pref_set) = None):
super().__init__(
default_variable=default_variable,
gain=gain,
bias=bias,
leak=leak,
params=params,
owner=owner,
prefs=prefs,
)
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Arguments
---------
variable : number or array : default class_defaults.variable
a single value or array to be transformed.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
ReLU transformation of variable : number or array
"""
gain = self._get_current_parameter_value(GAIN, context)
bias = self._get_current_parameter_value(BIAS, context)
leak = self._get_current_parameter_value(LEAK, context)
# KAM modified 2/15/19 to match https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Leaky_ReLUs
x = gain * (variable - bias)
result = np.maximum(x, leak * x)
return self.convert_output_type(result)
def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
ptri = builder.gep(vi, [ctx.int32_ty(0), index])
ptro = builder.gep(vo, [ctx.int32_ty(0), index])
gain_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, GAIN)
bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
leak_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, LEAK)
gain = pnlvm.helpers.load_extract_scalar_array_one(builder, gain_ptr)
bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
leak = pnlvm.helpers.load_extract_scalar_array_one(builder, leak_ptr)
# Maxnum for some reason needs full function prototype
max_f = ctx.get_builtin("maxnum", [ctx.float_ty])
var = builder.load(ptri)
if "derivative" in tags:
predicate = builder.fcmp_ordered('>', var, var.type(0))
val = builder.select(predicate, gain, builder.fmul(gain, leak))
else:
val = builder.fsub(var, bias)
val1 = builder.fmul(val, gain)
val2 = builder.fmul(val1, leak)
val = builder.call(max_f, [val1, val2])
builder.store(val, ptro)
@handle_external_context()
def derivative(self, input, output=None, context=None):
"""
derivative(input)
Derivative of `function <ReLU._function>` at **input**.
Arguments
---------
input : number
value of the input to the ReLU transform at which derivative is to be taken.
Returns
-------
derivative : number or array
"""
gain = self._get_current_parameter_value(GAIN, context)
leak = self._get_current_parameter_value(LEAK, context)
input = np.asarray(input).copy()
input[input>0] = gain
input[input<=0] = gain * leak
return input
# **********************************************************************************************************************
# Angle
# **********************************************************************************************************************
# FIX: VALIDATE LEN(VARIABLE)>=2
class Angle(TransferFunction):  # -------------------------------------------------------------------------------------
    """
    Angle(                 \
         default_variable, \
         params=None,      \
         owner=None,       \
         name=None,        \
         prefs=None        \
         )

    .. _Angle_Function:

    `function <Angle._function>` returns Angle transform of vector in `variable <Angle.variable>`:

    COMMENT:
    FIX: WITH PROPER MATHEMATICAL DEFN
    .. math::
        slope * variable + intercept

    `derivative <Angle.derivative>` returns `slope <Angle.slope>`.
    COMMENT

    Arguments
    ---------

    default_variable : 1d array : default class_defaults.variable
        specifies a template for the value to be transformed; length must be at least 2.

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function. Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.

    owner : Component
        `component <Component>` to which to assign the Function.

    name : str : default see `name <Function.name>`
        specifies the name of the Function.

    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).

    Attributes
    ----------

    variable : 1d array
        contains value to be transformed.

    owner : Component
        `component <Component>` to which the Function has been assigned.

    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).

    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """

    componentName = ANGLE_FUNCTION

    classPreferences = {
        PREFERENCE_SET_NAME: 'AngleClassPreferences',
        REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
    }

    _model_spec_class_name_is_generic = True

    class Parameters(TransferFunction.Parameters):
        """
        Attributes
        ----------

            variable
                see `variable <Angle.variable>`

                :default value: numpy.array([1, 1])
                :type: ``numpy.ndarray``
                :read only: True
        """
        variable = Parameter(np.array([1,1]),
                             read_only=True,
                             pnl_internal=True,
                             constructor_argument='default_variable')

        def _validate_variable(self, variable):
            # Angle needs at least two coordinates to define a point on a sphere.
            variable = np.squeeze(variable)
            if variable.ndim != 1 or len(variable) < 2:
                return "must be list or 1d array of length 2 or greater."

    @tc.typecheck
    def __init__(self,
                 default_variable=None,
                 params=None,
                 owner=None,
                 prefs: tc.optional(is_pref_set) = None):
        """Construct an Angle Function."""
        super().__init__(
            default_variable=default_variable,
            params=params,
            owner=owner,
            prefs=prefs,
        )

    # def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
    #     ptri = builder.gep(vi, [ctx.int32_ty(0), index])
    #     ptro = builder.gep(vo, [ctx.int32_ty(0), index])
    #     slope_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SLOPE)
    #     intercept_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, INTERCEPT)
    #
    #     slope = pnlvm.helpers.load_extract_scalar_array_one(builder, slope_ptr)
    #     intercept = pnlvm.helpers.load_extract_scalar_array_one(builder, intercept_ptr)
    #
    #
    #     if "derivative" in tags:
    #         # f'(x) = m
    #         val = slope
    #     else:
    #         # f(x) = mx + b
    #         val = builder.load(ptri)
    #         val = builder.fmul(val, slope)
    #         val = builder.fadd(val, intercept)
    #
    #     builder.store(val, ptro)

    def _function(self,
                  variable=None,
                  context=None,
                  params=None,
                  ):
        """
        Arguments
        ---------

        variable : ndarray : default class_defaults.variable
            an array of coordinates on a sphere to be transformed to n+1d angular coordinates; must be at least 2d.

        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function. Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.

        Returns
        -------

        Angle transformation of variable : ndarray of variable.ndim+1
        """
        try:
            # By default, result should be returned as np.ndarray with same dimensionality as input
            result = self._angle(variable)
        except TypeError:
            if hasattr(variable, "dtype"):
                # If variable is an array with mixed sizes or types, try item-by-item operation
                if variable.dtype == object:
                    result = np.zeros_like(variable)
                    for i, item in enumerate(variable):
                        result[i] = self._angle(variable[i])
                else:
                    raise FunctionError("Unrecognized type for {} of {} ({})".format(VARIABLE, self.name, variable))
            # KAM 6/28/18: If the variable does not have a "dtype" attr but made it to this line, then it must be of a
            # type that even np does not recognize -- typically a custom OutputPort variable with items of different
            # shapes (e.g. variable = [[0.0], [0.0], array([[0.0, 0.0]])] )
            elif isinstance(variable, list):
                result = []
                for variable_item in variable:
                    result.append(self._angle(variable_item))
            else:
                raise FunctionError("Unrecognized type for {} of {} ({})".format(VARIABLE, self.name, variable))
        return self.convert_output_type(result)

    def _angle(self, value):
        """Take nd value and return n+1d coordinates for angle on a sphere"""
        value = np.squeeze(value)
        dim = len(value) + 1
        angle = np.zeros(dim)
        angle[0] = np.cos(value[0])
        # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0.
        prod = np.prod([np.sin(value[k]) for k in range(1, dim - 1)])
        n_prod = prod
        # Peel one sine factor per coordinate and pair it with the cosine term.
        # NOTE(review): divides by sin(value[j+1]) — zero-valued angles would
        # produce a divide-by-zero; confirm inputs exclude multiples of pi.
        for j in range(dim - 2):
            n_prod /= np.sin(value[j + 1])
            amt = n_prod * np.cos(value[j + 1])
            angle[j + 1] = amt
        angle[dim - 1] = prod
        return angle

    # @handle_external_context()
    # def derivative(self, input=None, output=None, context=None):
    #     """
    #     derivative(input)
    #
    #     Derivative of `function <Angle._function>` at **input**.
    #
    #     Arguments
    #     ---------
    #
    #     input : number
    #         value of the input to the Angle transform at which derivative is to be taken.
    #
    #     Returns
    #     -------
    #
    #     Slope of function : number or array
    #
    #     """
    #
    #     return self._get_current_parameter_value(SLOPE, context)
    #
    # def _is_identity(self, context=None):
    #     return (
    #         self.parameters.slope._get(context) == 1
    #         and self.parameters.intercept._get(context) == 0
    #     )
# **********************************************************************************************************************
# Gaussian
# **********************************************************************************************************************
class Gaussian(TransferFunction): # -----------------------------------------------------------------------------------
"""
Gaussian( \
default_variable, \
standard_deviation=1.0, \
bias=0.0, \
scale=1.0, \
offset=0.0, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _Gaussian_Function:
`function <Gaussian._function>` returns Gaussian transform of `variable <Gaussian.variable>`:
.. math::
scale*\\frac{e^{-\\frac{(variable-bias)^{2}}{2\\sigma^{2}}}}{\\sqrt{2\\pi}\\sigma}+offset
where :math:`\\sigma` = `standard_deviation <Gaussian.standard_deviation>`
.. note::
the value returned is deterministic (i.e., the value of the probability density function at variable),
not a randomly chosen sample from the Gaussian distribution; for the latter, use `GaussianDistort`.
`derivative <Gaussian.derivative>` returns derivative of the Gaussian transform of `variable <Gaussian.variable>`:
.. math::
\\frac{-(variable-bias)*e^{-\\frac{(variable-bias)^{2}}{2\\sigma^{2}}}}{\\sqrt{2\\pi}\\sigma^{3}}
Arguments
---------
default_variable : number or array : default class_defaults.variable
specifies a template for the value used as the mean for the Guassian transform.
standard_deviation : float : default 1.0
specifies "width" of the Gaussian transform applied to each element of `variable <Gaussian.variable>`.
bias : float : default 0.0
value to add to each element of `variable <Gaussian.variable>` before applying Gaussian transform.
offset : float : default 0.0
value to add to each element after applying Gaussian transform and `scale <Gaussian.scale>`.
scale : float : default 1.0
value by which to multiply each element after applying Gaussian transform.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
value used as the mean of the Gaussian transform.
standard_deviation : float : default 1.0
standard_deviation used for Gaussian transform.
bias : float : default 0.0
value added to each element of `variable <Gaussian.variable>` before applying the Gaussian transform.
scale : float : default 1.0
value by which each element is multiplied after applying the Gaussian transform.
offset : float : default 0.0
value added to each element after applying the Gaussian transform and scale.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentName = GAUSSIAN_FUNCTION
# parameter_keywords.update({STANDARD_DEVIATION, BIAS, SCALE, OFFSET})
class Parameters(TransferFunction.Parameters):
"""
Attributes
----------
bias
see `bias <Gaussian.bias>`
:default value: 0.0
:type: ``float``
offset
see `offset <Gaussian.offset>`
:default value: 0.0
:type: ``float``
scale
see `scale <Gaussian.scale>`
:default value: 1.0
:type: ``float``
standard_deviation
see `standard_deviation <Gaussian.standard_deviation>`
:default value: 1.0
:type: ``float``
"""
standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
scale = Parameter(1.0, modulable=True)
offset = Parameter(0.0, modulable=True)
bounds = (None, None)
@tc.typecheck
def __init__(self,
             default_variable=None,
             standard_deviation: tc.optional(parameter_spec) = None,
             bias: tc.optional(parameter_spec) = None,
             scale: tc.optional(parameter_spec) = None,
             offset: tc.optional(parameter_spec) = None,
             params=None,
             owner=None,
             prefs: tc.optional(is_pref_set) = None):
    # All arguments are forwarded unchanged to the base class; passing None
    # leaves the class default (declared in Parameters above) in effect.
    super().__init__(
        default_variable=default_variable,
        standard_deviation=standard_deviation,
        bias=bias,
        scale=scale,
        offset=offset,
        params=params,
        owner=owner,
        prefs=prefs,
    )
def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
    # Compiled (LLVM) elementwise equivalent of _function:
    #   out[i] = scale * exp(-(in[i] - bias)**2 / (2 * sd**2)) / sqrt(2*pi*sd) + offset
    ptri = builder.gep(vi, [ctx.int32_ty(0), index])
    ptro = builder.gep(vo, [ctx.int32_ty(0), index])

    standard_deviation_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, STANDARD_DEVIATION)
    bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
    scale_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SCALE)
    offset_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, OFFSET)

    standard_deviation = pnlvm.helpers.load_extract_scalar_array_one(builder, standard_deviation_ptr)
    bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
    scale = pnlvm.helpers.load_extract_scalar_array_one(builder, scale_ptr)
    offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr)

    exp_f = ctx.get_builtin("exp", [ctx.float_ty])
    sqrt_f = ctx.get_builtin("sqrt", [ctx.float_ty])

    var = builder.load(ptri)

    # exponent numerator: -(x - bias)**2
    exp_num = builder.fsub(var, bias)
    exp_num = builder.fmul(exp_num, exp_num)
    exp_num = pnlvm.helpers.fneg(builder, exp_num)

    # exponent denominator: 2 * sd**2
    exp_denom = builder.fmul(standard_deviation, standard_deviation)
    exp_denom = builder.fmul(exp_denom.type(2), exp_denom)
    exp = builder.fdiv(exp_num, exp_denom)
    numerator = builder.call(exp_f, [exp])

    # NOTE(review): denominator is sqrt(2*pi*sd) rather than the standard normal
    # pdf's sd*sqrt(2*pi); this matches the Python _function — confirm intended.
    denom = builder.fmul(standard_deviation.type(2 * pi), standard_deviation)
    denom = builder.call(sqrt_f, [denom])
    val = builder.fdiv(numerator, denom)

    # apply scale then offset
    val = builder.fmul(scale, val)
    val = builder.fadd(offset, val)

    builder.store(val, ptro)
def _function(self,
              variable=None,
              context=None,
              params=None,
              ):
    """
    Arguments
    ---------

    variable : number or array : default class_defaults.variable
        a single value or array to be transformed.

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function.  Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.

    Returns
    -------

    Gaussian transformation of variable : number or array
    """
    sd = self._get_current_parameter_value(STANDARD_DEVIATION, context)
    bias = self._get_current_parameter_value(BIAS, context)
    scale = self._get_current_parameter_value(SCALE, context)
    offset = self._get_current_parameter_value(OFFSET, context)

    # Gaussian density evaluated at (variable - bias).
    # NOTE(review): the denominator is sqrt(2*pi*sd), not sd*sqrt(2*pi) as in the
    # standard normal pdf; this matches the compiled (LLVM) implementation — confirm intended.
    exponent = -(variable - bias) ** 2 / (2 * sd ** 2)
    gaussian = e ** exponent / sqrt(2 * pi * sd)

    return self.convert_output_type(scale * gaussian + offset)
@handle_external_context()
def derivative(self, input, output=None, context=None):
    """
    derivative(input)

    Derivative of `function <Gaussian._function>` at **input**.

    Arguments
    ---------

    input : number
        value of the input to the Gaussian transform at which the derivative is taken.

    Returns
    -------

    Derivative of Gaussian of variable : number or array
    """
    sigma = self._get_current_parameter_value(STANDARD_DEVIATION, context)
    bias = self._get_current_parameter_value(BIAS, context)

    x = input - bias
    # NOTE(review): sigma**3 appears under the sqrt here; the analytic derivative of the
    # standard normal pdf has sigma**3 outside the sqrt — matches the original, confirm intended.
    numerator = -x * e ** (-(x ** 2 / (2 * sigma ** 2)))
    return self.convert_output_type(numerator / sqrt(2 * pi * sigma ** 3))
# **********************************************************************************************************************
# GaussianDistort
# **********************************************************************************************************************
class GaussianDistort(TransferFunction):  #-----------------------------------------------------------------------------
    """
    GaussianDistort( \
         default_variable, \
         variance=1.0, \
         bias=0.0, \
         scale=1.0, \
         offset=0.0, \
         seed=None, \
         params=None, \
         owner=None, \
         name=None, \
         prefs=None \
         )

    .. _GaussianDistort_Function:

    `function <GaussianDistort._function>` returns random value from a Gaussian distribution with
    mean = `variable <GaussianDistort.variable>` and variance = `variance <GaussianDistort.variance>`

    .. note::
        if the Gaussian transform of `variable <GaussianDistort.variable>` is desired (i.e., the value of the
        probability density function at `variable <GaussianDistort.variable>`, not a randomly chosen sample from the
        Gaussian distribution), then use `Gaussian`.

    COMMENT:
    `derivative <Gaussian.derivative>` returns derivative of the Gaussian transform of `variable <Logistic.variable>`:

    .. math::

       \\frac{-(variable-bias)*e^{-\\frac{(variable-bias)^{2}}{2\\sigma^{2}}}}{\\sqrt{2\\pi}\\sigma^{3}}
    COMMENT

    Arguments
    ---------

    default_variable : number or array : default class_defaults.variable
        specifies a template for the value(s) used as the mean of the Gaussian distribution from which each sample is
        drawn.

    variance : float : default 1.0
        specifies "width" of the Gaussian distribution around each element of `variable <GaussianDistort.variable>`
        from which sample is drawn.

    bias : float : default 0.0
        specifies value to add to each element of `variable <GaussianDistort.variable>` before drawing sample.

    scale : float : default 1.0
        specifies value by which to multiply each sample.

    offset : float : default 0.0
        specifies value to add to each sample after it is drawn and `scale <GaussianDistort.scale>` is applied

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function.  Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.

    owner : Component
        `component <Component>` to which to assign the Function.

    name : str : default see `name <Function.name>`
        specifies the name of the Function.

    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).

    Attributes
    ----------

    variable : number or array
        each element determines mean of the Gaussian distribution from which each sample is drawn.

    variance : float
        determines variance of Gaussian distribution from which each sample is drawn.

    bias : float
        determines value added to each element of `variable <GaussianDistort.variable>` before drawing sample.

    scale : float
        determines value by which each sample is multiplied after it is drawn.

    offset : float
        determines value added to each sample after it is drawn and `scale <GaussianDistort.scale>` is applied

    random_state : numpy.RandomState
        private pseudorandom number generator

    owner : Component
        `component <Component>` to which the Function has been assigned.

    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).

    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """

    componentName = GAUSSIAN_DISTORT_FUNCTION

    # parameter_keywords.update({VARIANCE, BIAS, SCALE, OFFSET})

    class Parameters(TransferFunction.Parameters):
        """
        Attributes
        ----------

            bias
                see `bias <GaussianDistort.bias>`

                :default value: 0.0
                :type: ``float``

            offset
                see `offset <GaussianDistort.offset>`

                :default value: 0.0
                :type: ``float``

            random_state
                see `random_state <GaussianDistort.random_state>`

                :default value: None
                :type: ``numpy.random.RandomState``

            scale
                see `scale <GaussianDistort.scale>`

                :default value: 1.0
                :type: ``float``

            variance
                see `variance <GaussianDistort.variance>`

                :default value: 1.0
                :type: ``float``
        """
        variance = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
        bias = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM])
        scale = Parameter(1.0, modulable=True)
        offset = Parameter(0.0, modulable=True)
        # random_state is derived from (and regenerated with) seed
        random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed')
        seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter)
        bounds = (None, None)

    @tc.typecheck
    def __init__(self,
                 default_variable=None,
                 variance: tc.optional(parameter_spec) = None,
                 bias: tc.optional(parameter_spec) = None,
                 scale: tc.optional(parameter_spec) = None,
                 offset: tc.optional(parameter_spec) = None,
                 seed=None,
                 params=None,
                 owner=None,
                 prefs: tc.optional(is_pref_set) = None):
        # All arguments forwarded unchanged; None leaves class defaults in effect.
        super().__init__(
            default_variable=default_variable,
            variance=variance,
            bias=bias,
            scale=scale,
            offset=offset,
            seed=seed,
            params=params,
            owner=owner,
            prefs=prefs,
        )

    def _gen_llvm_transfer(self, builder, index, ctx, vi, vo, params, state, *, tags:frozenset):
        # Compiled (LLVM) elementwise equivalent of _function:
        #   out[i] = scale * ((in[i] + bias) + N(0, 1) * variance) + offset
        ptri = builder.gep(vi, [ctx.int32_ty(0), index])
        ptro = builder.gep(vo, [ctx.int32_ty(0), index])

        variance_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, VARIANCE)
        bias_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, BIAS)
        scale_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, SCALE)
        offset_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, OFFSET)

        variance = pnlvm.helpers.load_extract_scalar_array_one(builder, variance_ptr)
        bias = pnlvm.helpers.load_extract_scalar_array_one(builder, bias_ptr)
        scale = pnlvm.helpers.load_extract_scalar_array_one(builder, scale_ptr)
        offset = pnlvm.helpers.load_extract_scalar_array_one(builder, offset_ptr)

        # draw a standard-normal sample into a stack slot using the compiled RNG state
        rvalp = builder.alloca(ptri.type.pointee)
        rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params)
        normal_f = ctx.get_normal_dist_function_by_state(rand_state_ptr)
        builder.call(normal_f, [rand_state_ptr, rvalp])

        # NOTE(review): the sample is scaled by `variance` directly (not sqrt(variance)),
        # i.e. variance is used as the standard deviation; this matches the Python
        # implementation below — confirm intended.
        rval = builder.load(rvalp)
        rval = builder.fmul(rval, variance)
        val = builder.load(ptri)
        val = builder.fadd(val, bias)
        val = builder.fadd(rval, val)
        val = builder.fmul(val, scale)
        val = builder.fadd(offset, val)

        builder.store(val, ptro)

    def _function(self,
                  variable=None,
                  context=None,
                  params=None,
                  ):
        """
        Arguments
        ---------

        variable : number or array : default class_defaults.variable
            a single value or array to be transformed.

        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function.  Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.

        Returns
        -------

        Sample from Gaussian distribution for each element of variable : number or array
        """
        variance = self._get_current_parameter_value(VARIANCE, context)
        bias = self._get_current_parameter_value(BIAS, context)
        scale = self._get_current_parameter_value(SCALE, context)
        offset = self._get_current_parameter_value(OFFSET, context)
        random_state = self._get_current_parameter_value('random_state', context)

        # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416)
        # NOTE(review): numpy's normal() interprets its second argument as the standard
        # deviation (scale), so `variance` is effectively used as an SD here — confirm intended.
        result = scale * random_state.normal(variable + bias, variance) + offset

        return self.convert_output_type(result)

    # def derivative(self, output, input=None, context=None):
    #     """
    #     derivative(output, input):
    #
    #     Derivative of `function <Logistic.function>`:
    #
    #         -input/:math:`{variance^3}*\\sqrt{2\\pi}`
    #
    #
    #     Returns
    #     -------
    #
    #     Derivative of Gaussian of variable :  number or array
    #
    #     """
    #     variance = self._get_current_parameter_value(VARIANCE, context)
    #     bias = self._get_current_parameter_value(BIAS, context)
    #     scale = self._get_current_parameter_value(SCALE, context)
    #     offset = self._get_current_parameter_value(OFFSET, context)
    #
    #     # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416)
    #     f = scale * np.random.normal(input+bias, variance) + offset
    #
    #     # FIX: SHOULD THIS BE variance**1.5 (since variance = sd**2 and term below is supposed to be sd**3)??
    #     df = -input(variance**3 * np.sqrt(2 * np.pi))
    #
    #     return self.convert_output_type(df*f)
# **********************************************************************************************************************
# SoftMax
# **********************************************************************************************************************
class SoftMax(TransferFunction):
    """
    SoftMax( \
         default_variable, \
         gain=1.0, \
         output=ALL, \
         params=None, \
         owner=None, \
         name=None, \
         prefs=None \
         )

    .. _SoftMax:

    SoftMax transform of `variable <SoftMax.variable>`

    `function <SoftMax._function>` returns SoftMax transform of `variable <SoftMax.variable>`:

    .. math::

        \\frac{e^{gain * variable_i}}{\\sum\\limits^{len(variable)}e^{gain * variable}}

    filtered by `output <SoftMax.output>` specification (see `The Softmax function and its derivative
    <http://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/>`_ for a nice discussion).

    `derivative <SoftMax.derivative>` returns the derivative of the SoftMax.  If *OUTPUT_TYPE* for the SoftMax
    is *ALL*, returns Jacobian matrix (derivative for each element of the output array with respect to each of the
    others):

    .. math::
        D_jS_i = S_i(\\delta_{i,j} - S_j),\\ where\\ \\delta_{i,j}=1\\ if\\ i=j\\ and\\ \\delta_{i,j}=0\\ if\\ i≠j.

    If *OUTPUT_TYPE* is *MAX_VAL* or *MAX_INDICATOR*, returns 1d array of the derivatives of the maximum
    value with respect to the others (calculated as above). If *OUTPUT_TYPE* is *PROB*, raises an exception
    (since it is ambiguous as to which element would have been chosen by the SoftMax function)

    Arguments
    ---------

    default_variable : 1d array : default class_defaults.variable
        specifies a template for the value to be transformed.

    gain : float : default 1.0
        specifies a value by which to multiply `variable <Linear.variable>` before SoftMax transformation.

    output : ALL, MAX_VAL, MAX_INDICATOR, or PROB : default ALL
        specifies the format of array returned by `function <SoftMax._function>`
        (see `output <SoftMax.output>` for details).

    per_item : boolean : default True
        for 2d variables, determines whether the SoftMax function will be applied to the entire variable (per_item =
        False), or applied to each item in the variable separately (per_item = True).

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function.  Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.

    owner : Component
        `component <Component>` to which to assign the Function.

    name : str : default see `name <Function.name>`
        specifies the name of the Function.

    prefs : PreferenceSet or specification dict : default Function.classPreferences
        specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).

    Attributes
    ----------

    variable : 1d array
        contains value to be transformed.

    gain : float
        value by which `variable <Logistic.variable>` is multiplied before the SoftMax transformation;  determines
        the "sharpness" of the distribution.

    output : ALL, MAX_VAL, MAX_INDICATOR, or PROB
        determines how the SoftMax-transformed values of the elements in `variable <SoftMax.variable>` are reported
        in the array returned by `function <SoftMax._function>`:
            * **ALL**: array of all SoftMax-transformed values (the default);
            * **MAX_VAL**: SoftMax-transformed value for the element with the maximum such value, 0 for all others;
            * **MAX_INDICATOR**: 1 for the element with the maximum SoftMax-transformed value, 0 for all others;
            * **PROB**: probabilistically chosen element based on SoftMax-transformed values after setting the
              sum of values to 1 (i.e., their `Luce Ratio <https://en.wikipedia.org/wiki/Luce%27s_choice_axiom>`_),
              0 for all others.

    per_item : boolean : default True
        for 2d variables, determines whether the SoftMax function will be applied to the entire variable (per_item =
        False), or applied to each item in the variable separately (per_item = True).

    bounds : None if `output <SoftMax.output>` == MAX_VAL, else (0,1) : default (0,1)

    owner : Component
        `component <Component>` to which the Function has been assigned.

    name : str
        the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
        assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).

    prefs : PreferenceSet or specification dict : Function.classPreferences
        the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
        constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
        for details).
    """

    componentName = SOFTMAX_FUNCTION

    class Parameters(TransferFunction.Parameters):
        """
        Attributes
        ----------

            variable
                see `variable <SoftMax.variable>`

                :default value: numpy.array(0.)
                :type: ``numpy.ndarray``
                :read only: True

            bounds
                see `bounds <SoftMax.bounds>`

                :default value: (0, 1)
                :type: <class 'tuple'>

            gain
                see `gain <SoftMax.gain>`

                :default value: 1.0
                :type: ``float``

            output
                see `output <SoftMax.output>`

                :default value: `ALL`
                :type: ``str``

            per_item
                see `per_item <SoftMax.per_item>`

                :default value: True
                :type: ``bool``
        """
        variable = Parameter(np.array([[0.0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
        gain = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
        bounds = (0, 1)
        output = ALL
        per_item = Parameter(True, pnl_internal=True)
        # helper used to produce MAX_VAL / MAX_INDICATOR / PROB outputs
        one_hot_function = Parameter(OneHot, stateful=False, loggable=False)

        def _validate_output(self, output):
            # Parameter validator: return None if valid, otherwise an error string
            options = {ALL, MAX_VAL, MAX_INDICATOR, PROB}
            if output in options:
                return None
            else:
                return 'not one of {0}'.format(options)

    @tc.typecheck
    def __init__(self,
                 default_variable=None,
                 gain: tc.optional(parameter_spec) = None,
                 output=None,
                 per_item=None,
                 params: tc.optional(tc.optional(dict)) = None,
                 owner=None,
                 prefs: tc.optional(is_pref_set) = None):
        try:
            # needed because one_hot_function is initialized here based
            # on output argument, which may also be passed in params
            output = params['output']
        except (TypeError, KeyError):
            pass

        # for any non-ALL output mode, delegate selection to a OneHot function
        if output not in {None, ALL}:
            one_hot_function = OneHot(mode=output)
        else:
            one_hot_function = None

        super().__init__(
            default_variable=default_variable,
            gain=gain,
            per_item=per_item,
            output=output,
            one_hot_function=one_hot_function,
            params=params,
            owner=owner,
            prefs=prefs,
        )

    def _parse_one_hot_function_variable(self, variable):
        # Build the default variable for one_hot_function from this function's variable
        if self.defaults.per_item and len(np.shape(variable)) > 1:
            variable = variable[0]

        if self.defaults.output in {PROB, PROB_INDICATOR}:
            prob_dist = np.asarray(variable)
            # creates probability distribution in shape of variable
            prob_dist = np.ones(variable.shape) / safe_len(prob_dist)

            variable = np.asarray([variable, prob_dist])

        return variable

    def _validate_variable(self, variable, context=None):
        # Fall back to class defaults when no variable is provided
        if variable is None:
            try:
                return self.defaults.variable
            except AttributeError:
                return self.class_defaults.variable

        return np.asarray(variable)

    def __gen_llvm_exp_sum_max(self, builder, index, ctx, vi, gain, max_ptr, exp_sum_ptr, max_ind_ptr):
        # First compiled pass: accumulate sum of exp(gain * x) and track the
        # running maximum exp value and its index.
        # NOTE(review): unlike apply_softmax, no max-shift is applied before exp —
        # large inputs can overflow here; confirm intended.
        ptri = builder.gep(vi, [ctx.int32_ty(0), index])

        exp_f = ctx.get_builtin("exp", [ctx.float_ty])
        orig_val = builder.load(ptri)
        val = builder.fmul(orig_val, gain)
        exp_val = builder.call(exp_f, [val])

        exp_sum = builder.load(exp_sum_ptr)
        new_exp_sum = builder.fadd(exp_sum, exp_val)
        builder.store(new_exp_sum, exp_sum_ptr)

        old_max = builder.load(max_ptr)
        gt = builder.fcmp_ordered(">", exp_val, old_max)
        new_max = builder.select(gt, exp_val, old_max)
        builder.store(new_max, max_ptr)

        old_index = builder.load(max_ind_ptr)
        new_index = builder.select(gt, index, old_index)
        builder.store(new_index, max_ind_ptr)

    def __gen_llvm_exp_div(self, builder, index, ctx, vi, vo, gain, exp_sum):
        # Second compiled pass (ALL output): out[i] = exp(gain * in[i]) / exp_sum
        assert self.output == ALL
        ptro = builder.gep(vo, [ctx.int32_ty(0), index])
        ptri = builder.gep(vi, [ctx.int32_ty(0), index])
        exp_f = ctx.get_builtin("exp", [ctx.float_ty])
        orig_val = builder.load(ptri)
        val = builder.fmul(orig_val, gain)
        val = builder.call(exp_f, [val])
        val = builder.fdiv(val, exp_sum)

        builder.store(val, ptro)

    def __gen_llvm_apply(self, ctx, builder, params, _, arg_in, arg_out):
        # Compiled softmax over a single 1d item
        exp_sum_ptr = builder.alloca(ctx.float_ty)
        builder.store(exp_sum_ptr.type.pointee(0), exp_sum_ptr)

        max_ptr = builder.alloca(ctx.float_ty)
        builder.store(max_ptr.type.pointee(float('-inf')), max_ptr)

        max_ind_ptr = builder.alloca(ctx.int32_ty)
        builder.store(max_ind_ptr.type.pointee(-1), max_ind_ptr)

        gain_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, GAIN)
        gain = pnlvm.helpers.load_extract_scalar_array_one(builder, gain_ptr)

        with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_sum_max") as args:
            self.__gen_llvm_exp_sum_max(*args, ctx=ctx, vi=arg_in,
                                        max_ptr=max_ptr, gain=gain,
                                        max_ind_ptr=max_ind_ptr,
                                        exp_sum_ptr=exp_sum_ptr)

        output_type = self.output
        exp_sum = builder.load(exp_sum_ptr)
        index = builder.load(max_ind_ptr)
        ptro = builder.gep(arg_out, [ctx.int32_ty(0), index])

        if output_type == ALL:
            # full softmax vector
            with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as args:
                self.__gen_llvm_exp_div(ctx=ctx, vi=arg_in, vo=arg_out,
                                        gain=gain, exp_sum=exp_sum, *args)
        elif output_type == MAX_VAL:
            # zero out the output array
            with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i):
                b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i]))

            # write the softmax value only at the index of the maximum element
            ptri = builder.gep(arg_in, [ctx.int32_ty(0), index])
            exp_f = ctx.get_builtin("exp", [ctx.float_ty])
            orig_val = builder.load(ptri)
            val = builder.fmul(orig_val, gain)
            val = builder.call(exp_f, [val])
            val = builder.fdiv(val, exp_sum)
            builder.store(val, ptro)
        elif output_type == MAX_INDICATOR:
            # zero out the output array
            with pnlvm.helpers.array_ptr_loop(builder, arg_in, "zero_output") as (b,i):
                b.store(ctx.float_ty(0), b.gep(arg_out, [ctx.int32_ty(0), i]))
            # write 1 at the index of the maximum element
            builder.store(ctx.float_ty(1), ptro)

        return builder

    def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, tags:frozenset):
        # per_item: apply the compiled softmax to each inner array independently
        if self.parameters.per_item.get():
            assert isinstance(arg_in.type.pointee.element, pnlvm.ir.ArrayType)
            assert isinstance(arg_out.type.pointee.element, pnlvm.ir.ArrayType)
            for i in range(arg_in.type.pointee.count):
                inner_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(i)])
                inner_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(i)])
                builder = self.__gen_llvm_apply(ctx, builder, params, _, inner_in, inner_out)
            return builder
        else:
            return self.__gen_llvm_apply(ctx, builder, params, _, arg_in, arg_out)

    def apply_softmax(self, input_value, gain, output_type):
        # Modulate input_value by gain
        v = gain * input_value
        # Shift by max to avoid extreme values:
        v = v - np.max(v)
        # Exponentiate
        v = np.exp(v)
        # Normalize (to sum to 1)
        sm = v / np.sum(v, axis=0)

        # Generate one-hot encoding based on selected output_type
        if output_type in {MAX_VAL, MAX_INDICATOR}:
            return self.one_hot_function(sm)
        elif output_type in {PROB, PROB_INDICATOR}:
            return self.one_hot_function([input_value, sm])
        else:
            return sm

    def _function(self,
                  variable=None,
                  context=None,
                  params=None,
                  ):
        """
        Arguments
        ---------

        variable : 1d array : default class_defaults.variable
           an array to be transformed.

        params : Dict[param keyword: param value] : default None
            a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
            function.  Values specified for parameters in the dictionary override any assigned to those parameters in
            arguments of the constructor.

        Returns
        -------

        SoftMax transformation of variable : number or array
        """
        # Assign the params and return the result
        output_type = self._get_current_parameter_value(OUTPUT_TYPE, context)
        gain = self._get_current_parameter_value(GAIN, context)
        per_item = self._get_current_parameter_value(PER_ITEM, context)

        # Compute softmax and assign to sm
        if per_item and len(np.shape(variable)) > 1:
            output = []
            for item in variable:
                output.append(self.apply_softmax(item, gain, output_type))
        else:
            output = self.apply_softmax(variable, gain, output_type)

        return self.convert_output_type(output)

    @handle_external_context()
    def derivative(self, output, input=None, context=None):
        """
        derivative(output)

        Returns
        -------

        derivative of values returned by SoftMax :  1d or 2d array (depending on *OUTPUT_TYPE* of SoftMax)
        """
        output_type = self.output_type
        size = len(output)
        sm = self.function(output, params={OUTPUT_TYPE: ALL}, context=context)

        if output_type == ALL:
            # Return full Jacobian matrix of derivatives
            derivative = np.empty([size, size])
            for j in range(size):
                for i, val in zip(range(size), output):
                    if i == j:
                        d = 1
                    else:
                        d = 0
                    derivative[j, i] = sm[i] * (d - sm[j])
        elif output_type in {MAX_VAL, MAX_INDICATOR}:
            # Return 1d array of derivatives for max element (i.e., the one chosen by SoftMax)
            derivative = np.empty(size)
            # Get the element of output returned as non-zero when output_type is not ALL
            # NOTE(review): int(np.where(...)) raises TypeError when the maximum is not
            # unique (np.where returns a multi-element array); np.argmax would pick the
            # first occurrence — confirm desired behavior for ties.
            index_of_max = int(np.where(output == np.max(output))[0])
            max_val = sm[index_of_max]
            for i in range(size):
                if i == index_of_max:
                    d = 1
                else:
                    d = 0
                derivative[i] = sm[i] * (d - max_val)
        else:
            raise FunctionError("Can't assign derivative for SoftMax function{} since OUTPUT_TYPE is PROB "
                                "(and therefore the relevant element is ambiguous)".format(self.owner_name))

        return derivative
# **********************************************************************************************************************
# LinearMatrix
# **********************************************************************************************************************
class LinearMatrix(TransferFunction): # -------------------------------------------------------------------------------
"""
LinearMatrix( \
default_variable, \
matrix=None, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _LinearMatrix:
Matrix transform of `variable <LinearMatrix.variable>`.
`function <LinearMatrix._function>` returns dot product of variable with matrix:
.. math::
variable \\bullet matrix
COMMENT: [CONVERT TO FIGURE]
----------------------------------------------------------------------------------------------------------
MATRIX FORMAT <shape: (3,5)>
INDICES:
Output elements:
0 1 2 3 4
0 [0,0] [0,1] [0,2] [0,3] [0,4]
Input elements: 1 [1,0] [1,1] [1,2] [1,3] [1,4]
2 [2,0] [2,1] [2,2] [2,3] [2,4]
matrix.shape => (input/rows, output/cols)
----------------------------------------------------------------------------------------------------------
ARRAY FORMAT
INDICES
[ [ Input 0 (row0) ], [ Input 1 (row1) ]... ]
[ [ out0, out1, out2, out3 ], [ out0, out1, out2, out3 ]... ]
matrix[input/rows, output/cols]: [ [ row0, row0, row0, row0 ], [ row1, row1, row1, row1 ]... ]
[ [ col0, col1, col2, col3 ], [ col0, col1, col2, col3 ]... ]
[ [[0,0], [0,1], [0,2], [0,3] ], [[1,0], [1,1], [1,2], [1,3] ]... ]
----------------------------------------------------------------------------------------------------------
COMMENT
Arguments
---------
variable : list or 1d array : default class_defaults.variable
specifies a template for the value to be transformed; length must equal the number of rows of `matrix
<LinearMatrix.matrix>`.
matrix : number, list, 1d or 2d np.ndarray, np.matrix, function, or matrix keyword : default IDENTITY_MATRIX
specifies matrix used to transform `variable <LinearMatrix.variable>`
(see `matrix <LinearMatrix.matrix>` for specification details).
When LinearMatrix is the `function <Projection_Base._function>` of a projection:
- the matrix specification must be compatible with the variables of the `sender <Projection_Base.sender>`
and `receiver <Projection_Base.receiver>`
- a matrix keyword specification generates a matrix based on the sender and receiver shapes
When LinearMatrix is instantiated on its own, or as the function of a `Mechanism <Mechanism>` or `Port`:
- the matrix specification must be compatible with the function's own `variable <LinearMatrix.variable>`
- if matrix is not specified, a square identity matrix is generated based on the number of columns in
`variable <LinearMatrix.variable>`
- matrix keywords are not valid matrix specifications
bounds : None
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : 1d array
contains value to be transformed.
matrix : 2d array
matrix used to transform `variable <LinearMatrix.variable>`.
Can be specified as any of the following:
* number - used as the filler value for all elements of the :keyword:`matrix` (call to np.fill);
* list of arrays, 2d array or np.matrix - assigned as the value of :keyword:`matrix`;
* matrix keyword - see `MatrixKeywords` for list of options.
Rows correspond to elements of the input array (outer index), and
columns correspond to elements of the output array (inner index).
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `PreferenceSet`
for details).
"""
componentName = LINEAR_MATRIX_FUNCTION

# value used when a single number is given as the matrix specification (filler)
DEFAULT_FILLER_VALUE = 0
class Parameters(TransferFunction.Parameters):
    """
    Attributes
    ----------

        matrix
            see `matrix <LinearMatrix.matrix>`

            :default value: None
            :type:
    """
    matrix = Parameter(None, modulable=True)
    bounds = None
# def is_matrix_spec(m):
# if m is None:
# return True
# if m in MATRIX_KEYWORD_VALUES:
# return True
# if isinstance(m, (list, np.ndarray, np.matrix, types.FunctionType)):
# return True
# return False
@tc.typecheck
def __init__(self,
             default_variable=None,
             matrix=None,
             params=None,
             owner=None,
             prefs: tc.optional(is_pref_set) = None):
    # Note: this calls _validate_variable and _validate_params which are overridden below;
    # the latter implements the matrix if required
    # super(LinearMatrix, self).__init__(default_variable=default_variable,
    super().__init__(
        default_variable=default_variable,
        matrix=matrix,
        params=params,
        owner=owner,
        prefs=prefs,
    )

    # replace the raw matrix specification (number, keyword, list, function, ...)
    # with the fully instantiated matrix value
    self.parameters.matrix.set(
        self.instantiate_matrix(self.parameters.matrix.get()),
        skip_log=True,
    )
# def _validate_variable(self, variable, context=None):
# """Insure that variable passed to LinearMatrix is a max 2D array
#
# :param variable: (max 2D array)
# :param context:
# :return:
# """
# variable = super()._validate_variable(variable, context)
#
# # Check that variable <= 2D
# try:
# if not variable.ndim <= 2:
# raise FunctionError("variable ({0}) for {1} must be a numpy.ndarray of dimension at most 2".format(variable, self.__class__.__name__))
# except AttributeError:
# raise FunctionError("PROGRAM ERROR: variable ({0}) for {1} should be a numpy.ndarray".
# format(variable, self.__class__.__name__))
#
# return variable
def _validate_params(self, request_set, target_set=None, context=None):
    """Validate params and assign to targets.

    This overrides the class method, to perform more detailed type checking (see explanation in class method).
    Note: this method (or the class version) is called only if the parameter_validation attribute is `True`

    :param request_set: (dict) - params to be validated
    :param target_set: (dict) - destination of validated params
    :param context: (str)
    :return none:
    :raises FunctionError: for a malformed RECEIVER or MATRIX specification.
    """
    super()._validate_params(request_set, target_set, context)
    param_set = target_set
    # proxy for checking whether the owner is a projection
    if hasattr(self.owner, "receiver"):
        sender = self.defaults.variable
        # width (number of columns) of the sender (variable) treated as 2d
        sender_len = np.size(np.atleast_2d(self.defaults.variable), 1)

        # FIX: RELABEL sender -> input AND receiver -> output
        # FIX: THIS NEEDS TO BE CLEANED UP:
        #      - AT LEAST CHANGE THE NAME FROM kwReceiver TO output_template OR SOMETHING LIKE THAT
        #      - MAKE ARG?  OR ADD OTHER PARAMS:  E.G., FILLER?
        #      - OR REFACTOR TO INCLUDE AS MATRIX SPEC:
        #                 IF MATRIX IS 1D, USE AS OUTPUT TEMPLATE
        #                     IF ALL ITS VALUES ARE 1'S => FULL CONNECTIVITY MATRIX
        #                     IF ALL ITS VALUES ARE 0'S => RANDOM CONNECTIVITY MATRIX
        #                 NOTE: NO NEED FOR IDENTITY MATRIX, AS THAT WOULD BE SQUARE SO NO NEED FOR OUTPUT TEMPLATE
        #      - DOCUMENT WHEN DONE
        # MODIFIED 3/26/17 OLD:
        # Check for and validate kwReceiver first, since it may be needed to validate and/or construct the matrix
        # First try to get receiver from specification in params
        if RECEIVER in param_set:
            self.receiver = param_set[RECEIVER]
            # Check that specification is a list of numbers or an array
            if ((isinstance(self.receiver, list) and all(
                    isinstance(elem, numbers.Number) for elem in self.receiver)) or
                    isinstance(self.receiver, np.ndarray)):
                self.receiver = np.atleast_1d(self.receiver)
            else:
                raise FunctionError("receiver param ({0}) for {1} must be a list of numbers or an np.array".
                                    format(self.receiver, self.name))
        # No receiver, so use sender as template (assuming square -- e.g., identity -- matrix)
        else:
            if (self.owner and self.owner.prefs.verbosePref) or self.prefs.verbosePref:
                print("Identity matrix requested but kwReceiver not specified; sender length ({0}) will be used".
                      format(sender_len))
            self.receiver = param_set[RECEIVER] = sender

        receiver_len = len(self.receiver)

        # Check rest of params
        message = ""
        for param_name, param_value in param_set.items():

            # Receiver param already checked above
            if param_name == RECEIVER:
                continue

            # Not currently used here
            if param_name in function_keywords:
                continue

            if param_name == HAS_INITIALIZERS:
                continue

            # Matrix specification param
            elif param_name == MATRIX:

                # A number (to be used as a filler), so OK
                if isinstance(param_value, numbers.Number):
                    continue

                # np.matrix or np.ndarray provided, so validate that it is numeric and check dimensions
                elif isinstance(param_value, (list, np.ndarray, np.matrix)):
                    # get dimensions specified by:
                    #   variable (sender): width/cols/outer index
                    #   kwReceiver param: height/rows/inner index

                    weight_matrix = np.atleast_2d(param_value)
                    if 'U' in repr(weight_matrix.dtype):
                        raise FunctionError("Non-numeric entry in MATRIX "
                                            "specification ({}) for the {} "
                                            "function of {}".format(param_value,
                                                                    self.name,
                                                                    self.owner_name))

                    if weight_matrix.ndim != 2:
                        # BUG FIX: args were previously (ndim, name, owner_name),
                        # filling the message placeholders in the wrong order
                        raise FunctionError("The matrix provided for the {} function of {} must be 2d (it is {}d".
                                            format(self.name, self.owner_name, weight_matrix.ndim))

                    matrix_rows = weight_matrix.shape[0]
                    matrix_cols = weight_matrix.shape[1]

                    # Check that number of rows equals length of sender vector (variable)
                    if matrix_rows != sender_len:
                        raise FunctionError("The number of rows ({}) of the "
                                            "matrix provided for {} function "
                                            "of {} does not equal the length "
                                            "({}) of the sender vector "
                                            "(variable)".format(matrix_rows,
                                                                self.name,
                                                                self.owner_name,
                                                                sender_len))

                # Auto, full or random connectivity matrix requested (using keyword):
                # Note: assume that these will be properly processed by caller
                #       (e.g., MappingProjection._instantiate_receiver)
                elif param_value in MATRIX_KEYWORD_VALUES:
                    continue

                # Identity matrix requested (using keyword), so check send_len == receiver_len
                # NOTE(review): if IDENTITY_MATRIX/HOLLOW_MATRIX are members of
                # MATRIX_KEYWORD_VALUES, the branch above makes this one
                # unreachable -- confirm intended keyword-set membership
                elif param_value in {IDENTITY_MATRIX, HOLLOW_MATRIX}:
                    # Receiver length doesn't equal sender length
                    if not (self.receiver.shape == sender.shape and self.receiver.size == sender.size):
                        # if self.owner.prefs.verbosePref:
                        #     print ("Identity matrix requested, but length of receiver ({0})"
                        #            " does not match length of sender ({1});  sender length will be used".
                        #            format(receiver_len, sender_len))
                        # # Set receiver to sender
                        # param_set[kwReceiver] = sender
                        raise FunctionError("{} requested for the {} function of {}, "
                                            "but length of receiver ({}) does not match length of sender ({})".
                                            format(param_value, self.name, self.owner_name, receiver_len,
                                                   sender_len))
                    continue

                # list used to describe matrix, so convert to 2D array and pass to validation of matrix below
                elif isinstance(param_value, list):
                    try:
                        param_value = np.atleast_2d(param_value)
                    except (ValueError, TypeError) as error_msg:
                        raise FunctionError(
                            "Error in list specification ({}) of matrix for the {} function of {}: {})".
                            # format(param_value, self.__class__.__name__, error_msg))
                            format(param_value, self.name, self.owner_name, error_msg))

                # string used to describe matrix, so convert to np.matrix and pass to validation of matrix below
                elif isinstance(param_value, str):
                    try:
                        param_value = np.atleast_2d(param_value)
                    except (ValueError, TypeError) as error_msg:
                        raise FunctionError("Error in string specification ({}) of the matrix "
                                            "for the {} function of {}: {})".
                                            # format(param_value, self.__class__.__name__, error_msg))
                                            format(param_value, self.name, self.owner_name, error_msg))

                # function so:
                # - assume it uses random.rand()
                # - call with two args as place markers for cols and rows
                # - validate that it returns an array or np.matrix
                elif isinstance(param_value, types.FunctionType):
                    test = param_value(1, 1)
                    if not isinstance(test, (np.ndarray, np.matrix)):
                        raise FunctionError("A function is specified for the matrix of the {} function of {}: {}) "
                                            "that returns a value ({}) that is neither a matrix nor an array".
                                            # format(param_value, self.__class__.__name__, test))
                                            format(self.name, self.owner_name, param_value, test))

                elif param_value is None:
                    raise FunctionError("TEMP ERROR: param value is None.")

                else:
                    raise FunctionError("Value of {} param ({}) for the {} function of {} "
                                        "must be a matrix, a number (for filler), or a matrix keyword ({})".
                                        format(param_name,
                                               param_value,
                                               self.name,
                                               self.owner_name,
                                               MATRIX_KEYWORD_NAMES))
            else:
                continue

        if message:
            raise FunctionError(message)

    # owner is a mechanism, state
    # OR function was defined on its own (no owner)
    else:
        if MATRIX in param_set:
            param_value = param_set[MATRIX]

            # numeric value specified; verify that it is compatible with variable
            if isinstance(param_value, (float, list, np.ndarray, np.matrix)):
                param_size = np.size(np.atleast_2d(param_value), 0)
                param_shape = np.shape(np.atleast_2d(param_value))
                variable_size = np.size(np.atleast_2d(self.defaults.variable), 1)
                variable_shape = np.shape(np.atleast_2d(self.defaults.variable))
                if param_size != variable_size:
                    raise FunctionError("Specification of matrix and/or default_variable for {} is not valid. The "
                                        "shapes of variable {} and matrix {} are not compatible for multiplication".
                                        format(self.name, variable_shape, param_shape))

            # keyword matrix specified - not valid outside of a projection
            elif param_value in MATRIX_KEYWORD_VALUES:
                raise FunctionError("{} is not a valid specification for the matrix parameter of {}. Keywords "
                                    "may only be used to specify the matrix parameter of a Projection's "
                                    "LinearMatrix function. When the LinearMatrix function is implemented in a "
                                    "mechanism, such as {}, the correct matrix cannot be determined from a "
                                    "keyword. Instead, the matrix must be fully specified as a float, list, "
                                    "np.ndarray, or np.matrix".
                                    format(param_value, self.name, self.owner.name))

            # The only remaining valid option is matrix = None (sorted out in instantiate_attribs_before_fn)
            elif param_value is not None:
                raise FunctionError("Value of the matrix param ({}) for the {} function of {} "
                                    "must be a matrix, a number (for filler), or a matrix keyword ({})".
                                    format(param_value,
                                           self.name,
                                           self.owner_name,
                                           MATRIX_KEYWORD_NAMES))
def _instantiate_attributes_before_function(self, function=None, context=None):
    # replicates setting of receiver in _validate_params
    # NOTE(review): `Projection` is assumed to be in scope at module level here
    # (instantiate_matrix imports it locally) -- confirm the top-of-file imports
    if isinstance(self.owner, Projection):
        self.receiver = self.defaults.variable

    matrix = self.parameters.matrix._get(context)

    # No matrix specified and owner is not a projection (no receiver to size
    # against): fall back to an identity matrix sized to the variable width
    if matrix is None and not hasattr(self.owner, "receiver"):
        variable_length = np.size(np.atleast_2d(self.defaults.variable), 1)
        matrix = np.identity(variable_length)
    self.parameters.matrix._set(self.instantiate_matrix(matrix), context)
def instantiate_matrix(self, specification, context=None):
    """Implements matrix indicated by specification.

    Specification is derived from MATRIX param (passed to self.__init__ or self._function)

    Specification (validated in _validate_params):
        + single number (used to fill self.matrix)
        + matrix keyword (see get_matrix)
        + 2D list or np.ndarray of numbers

    :return matrix: (2D list)
    :raises FunctionError: if the owner is a Projection and no receiver has
        been assigned, or the specification cannot be resolved to a matrix.
    """
    from psyneulink.core.components.projections.projection import Projection
    if isinstance(self.owner, Projection):
        # Matrix provided (and validated in _validate_params); convert to array
        if isinstance(specification, np.matrix):
            return np.array(specification)

        sender = self.defaults.variable
        sender_len = sender.shape[0]
        try:
            receiver = self.receiver
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; only a missing attribute means the receiver is unassigned
        except AttributeError:
            raise FunctionError("Can't instantiate matrix specification ({}) for the {} function of {} "
                                "since its receiver has not been specified".
                                format(specification, self.name, self.owner_name))
            # receiver = sender
        receiver_len = receiver.shape[0]

        matrix = get_matrix(specification, rows=sender_len, cols=receiver_len, context=context)

        # This should never happen (should have been picked up in validate_param or above)
        if matrix is None:
            raise FunctionError("MATRIX param ({}) for the {} function of {} must be a matrix, a function "
                                "that returns one, a matrix specification keyword ({}), or a number (filler)".
                                format(specification, self.name, self.owner_name, MATRIX_KEYWORD_NAMES))
        else:
            return matrix
    else:
        return np.array(specification)
def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, tags:frozenset):
    # Emit LLVM IR computing vector-times-matrix via the __pnl_builtin_vxm kernel.
    # Restrict to 1d arrays: when defaults indicate a 2d value, step into the
    # first (outer) element so the kernel sees a flat vector.
    if self.defaults.variable.ndim != 1:
        warnings.warn("Shape mismatch: {} (in {}) got 2D input: {}".format(self, self.owner, self.defaults.variable))
        arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)])
    if self.defaults.value.ndim != 1:
        warnings.warn("Shape mismatch: {} (in {}) has 2D output: {}".format(self, self.owner, self.defaults.value))
        arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)])

    matrix = pnlvm.helpers.get_param_ptr(builder, self, params, MATRIX)

    # Convert array pointer to pointer to the first element
    matrix = builder.gep(matrix, [ctx.int32_ty(0), ctx.int32_ty(0)])
    vec_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)])
    vec_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)])

    # Lengths are taken from the static LLVM array types of the in/out args
    input_length = ctx.int32_ty(arg_in.type.pointee.count)
    output_length = ctx.int32_ty(arg_out.type.pointee.count)
    builtin = ctx.import_llvm_function("__pnl_builtin_vxm")
    builder.call(builtin, [vec_in, matrix, input_length, output_length, vec_out])
    return builder
def _function(self,
              variable=None,
              context=None,
              params=None,
              ):
    """Return the dot product of `variable` with `matrix <LinearMatrix.matrix>`.

    Arguments
    ---------

    variable : list or 1d array
        array to be transformed; length must equal the number of rows of `matrix <LinearMatrix.matrix>`.

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
        function. Values specified for parameters in the dictionary override any assigned to those parameters in
        arguments of the constructor.

    Returns
    ---------

    dot product of variable and matrix : 1d array
        length of the array returned equals the number of columns of `matrix <LinearMatrix.matrix>`.

    """
    # Look up the (possibly modulated) matrix for this execution context,
    # project the input through it, then coerce to the configured output type.
    weight_matrix = self._get_current_parameter_value(MATRIX, context)
    transformed = np.dot(variable, weight_matrix)
    return self.convert_output_type(transformed)
@staticmethod
def keyword(obj, keyword):
    """Resolve a matrix `keyword` to a concrete matrix for `obj`.

    When `obj` is a MappingProjection, the matrix dimensions are taken from
    its sender's default value and its receiver; otherwise get_matrix is
    called with unspecified (None) dimensions.
    """

    from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
    rows = None
    cols = None
    # use of variable attribute here should be ok because it's using it as a format/type
    if isinstance(obj, MappingProjection):
        sender_value = obj.sender.defaults.value
        rows = 1 if isinstance(sender_value, numbers.Number) else len(sender_value)
        receiver_variable = obj.receiver.defaults.variable
        cols = 1 if isinstance(receiver_variable, numbers.Number) else obj.receiver.socket_width
    matrix = get_matrix(keyword, rows, cols)

    if matrix is None:
        raise FunctionError("Unrecognized keyword ({}) specified for the {} function of {}".
                            format(keyword, obj.name, obj.owner_name))
    return matrix
def param_function(owner, function):
    """Call `function` with the sender and receiver lengths of `owner`.

    Used to resolve a callable matrix specification: the callable receives
    (sender length, receiver length) and is expected to return a matrix.
    """
    n_sender = len(owner.sender.defaults.value)
    n_receiver = len(owner.receiver.defaults.variable)
    return function(n_sender, n_receiver)
def _is_identity(self, context=None, defaults=False):
if defaults:
matrix = self.defaults.matrix
else:
matrix = self.parameters.matrix._get(context)
# if matrix is not an np array with at least one dimension,
# this isn't an identity matrix
try:
size = matrix.shape[0]
except (AttributeError, IndexError):
return False
# check if the matrix is the same as the identity matrix
# note that we can use the first dimension size to create the identity matrix
# because if the matrix is not square, this comparison will fail anyway
identity_matrix = np.identity(size)
# numpy has deprecated == comparisons of arrays
try:
return np.array_equal(matrix, identity_matrix)
except TypeError:
return matrix == identity_matrix
# def is_matrix_spec(m):
# if m is None:
# return True
# if isinstance(m, (list, np.ndarray, np.matrix, types.FunctionType)):
# return True
# if m in MATRIX_KEYWORD_VALUES:
# return True
# return False
# **********************************************************************************************************************
#                                              TransferWithCosts
# **********************************************************************************************************************

# Keywords for TransferWithCosts arguments, cost functions and their parameters ----------------------------------------

# Make accessible externally
# (extends the module's public API with the TransferWithCosts cost keywords)
__all__.extend(['ENABLED_COST_FUNCTIONS',
                'INTENSITY_COST',
                'INTENSITY_COST_FUNCTION',
                'INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM',
                'INTENSITY_COST_FCT_ADDITIVE_PARAM',
                'ADJUSTMENT_COST',
                'ADJUSTMENT_COST_FUNCTION',
                'ADJUSTMENT_COST_FCT_MULTIPLICATIVE_PARAM',
                'ADJUSTMENT_COST_FCT_ADDITIVE_PARAM',
                'DURATION_COST',
                'DURATION_COST_FUNCTION',
                'DURATION_COST_FCT_MULTIPLICATIVE_PARAM',
                'DURATION_COST_FCT_ADDITIVE_PARAM',
                'COMBINED_COSTS',
                'COMBINE_COSTS_FUNCTION',
                'COMBINE_COSTS_FCT_MULTIPLICATIVE_PARAM',
                'COMBINE_COSTS_FCT_ADDITIVE_PARAM',
                'costFunctionNames', 'CostFunctions'
                ])
ENABLED_COST_FUNCTIONS = 'enabled_cost_functions'

# These are assigned to TransferWithCosts Function to make them accessible for modulation
INTENSITY_COST = 'intensity_cost'
INTENSITY_COST_FUNCTION = 'intensity_cost_fct'
INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM = 'intensity_cost_fct_mult_param'
INTENSITY_COST_FCT_ADDITIVE_PARAM = 'intensity_cost_fct_add_param'

ADJUSTMENT_COST = 'adjustment_cost'
ADJUSTMENT_COST_FUNCTION = 'adjustment_cost_fct'
ADJUSTMENT_COST_FCT_MULTIPLICATIVE_PARAM = 'adjustment_cost_fct_mult_param'
ADJUSTMENT_COST_FCT_ADDITIVE_PARAM = 'adjustment_cost_fct_add_param'

DURATION_COST = 'duration_cost'
DURATION_COST_FUNCTION = 'duration_cost_fct'
DURATION_COST_FCT_MULTIPLICATIVE_PARAM = 'duration_cost_fct_mult_param'
DURATION_COST_FCT_ADDITIVE_PARAM = 'duration_cost_fct_add_param'

COMBINED_COSTS = 'combined_costs'
COMBINE_COSTS_FUNCTION = 'combine_costs_fct'
COMBINE_COSTS_FCT_MULTIPLICATIVE_PARAM = 'combine_costs_fct_mult_param'
COMBINE_COSTS_FCT_ADDITIVE_PARAM = 'combine_costs_fct_add_param'

# Names of the constituent cost-function parameters of TransferWithCosts
costFunctionNames = [INTENSITY_COST_FUNCTION,
                     ADJUSTMENT_COST_FUNCTION,
                     DURATION_COST_FUNCTION,
                     COMBINE_COSTS_FUNCTION]
class CostFunctions(IntFlag):
    """Options for selecting constituent cost functions to be used by a `TransferWithCosts` Function.

    These can be used alone or in combination with one another, by enabling or disabling each using the
    `TransferWithCosts` Function's `enable_costs <TransferWithCosts.enable_costs>`,
    `disable_costs <TransferWithCosts.disable_costs>`, `toggle_cost <TransferWithCosts.toggle_cost>` and
    `assign_costs <TransferWithCosts.assign_costs>` methods.

    Attributes
    ----------

    NONE
        `cost <TransferWithCosts.cost>` is not computed.

    INTENSITY
        `intensity_cost_fct` is used to calculate a contribution to the `cost <TransferWithCosts.cost>`
        based on its current `intensity <TransferWithCosts.intensity>` value.

    ADJUSTMENT
        `adjustment_cost_fct` is used to calculate a contribution to the `cost <TransferWithCosts.cost>`
        based on the change in its `intensity <TransferWithCosts.intensity>` from its last value.

    DURATION
        `duration_cost_fct` is used to calculate a contribution to the `cost <TransferWithCosts.cost>`
        based on its integral (i.e., its accumulated value over multiple executions).

    ALL
        all of the cost functions are used to calculate `cost <TransferWithCosts.cost>`.

    DEFAULTS
        assigns the default set of cost functions (currently `NONE`).
    """
    NONE = 0
    # NOTE(review): bit 0 is skipped (flags start at 1 << 1) -- confirm intentional
    INTENSITY = 1 << 1
    ADJUSTMENT = 1 << 2
    DURATION = 1 << 3
    ALL = INTENSITY | ADJUSTMENT | DURATION
    # Default set of enabled cost functions (no cost functions enabled)
    DEFAULTS = NONE
# Keywords naming the constituent function parameters of TransferWithCosts
TRANSFER_FCT = 'transfer_fct'
INTENSITY_COST_FCT = 'intensity_cost_fct'
ADJUSTMENT_COST_FCT = 'adjustment_cost_fct'
DURATION_COST_FCT = 'duration_cost_fct'
COMBINE_COSTS_FCT = 'combine_costs_fct'
class TransferWithCosts(TransferFunction):
"""
TransferWithCosts( \
default_variable=None, \
size=None, \
transfer_fct=Linear, \
enabled_cost_functions=None, \
intensity_cost_fct=Exponential, \
adjustment_cost_fct=Linear, \
duration_cost_fct=SimpleIntegrator, \
combine_costs_fct=LinearCombination, \
params=None, \
owner=None, \
prefs=None \
)
.. _TransferWithCosts:
returns value of `variable <TransferWithCosts.variable>` transformed by `transfer_fct
<TransferWithCosts.transfer_fct>`, after calling any cost functions that are enabled and assigning
the result(s) to the corresponding parameter(s), as described below.
.. _TransferWithCosts_Cost_Functions:
**Cost Functions**
The TransferWithCosts function has three individual cost functions that it can execute when its `function
<TransferWithCosts._function>` is executed, which assign their results to the attributes indicated below:
* `intensity_cost_fct <TransferWithCosts.intensity_cost_fct>` -> `intensity_cost <TransferWithCosts.intensity_cost>`;
* `adjustment_cost_fct <TransferWithCosts.adjustment_cost_fct>` -> `adjustment_cost <TransferWithCosts.adjustment_cost>`;
* `duration_cost_fct <TransferWithCosts.duration_cost_fct>` -> `duration_cost <TransferWithCosts.duration_cost>`;
Which functions are called is determined by the settings in `enabled_cost_functions
<TransferWithCosts.enabled_cost_functions>`, that can be initialized in the constructor using the
**enabled_cost_functions** argument, and later modified using the `enable_costs <TransferWithCosts.enable_costs>`,
`disable_costs <TransferWithCosts.disable_costs>`, `toggle_cost <TransferWithCosts.toggle_cost>` and
`assign_costs <TransferWithCosts.assign_costs>` methods. The value of any cost for which its function has
*never* been enabled is None; otherwise, it is the value assigned when it was last enabled and executed
(see `duration_cost_fct <TransferWithCosts.duration_cost_fct>` for additional details concerning that function).
If any cost functions are enabled, then the `combine_costs_fct <TransferWithCosts.combine_costs_fct>` function
is executed, which sums the results of those that are enabled (Hadamard style, if the costs are arrays), and
stores the result in the `combined_costs <TransferWithCosts.combined_costs>` attribute. Its value is None if no
cost functions have ever been enabled; otherwise it is the value assigned the last time one or more cost functions
were enabled.
.. _TransferWithCosts_Modulation_of_Cost_Params:
**Modulation of Cost Function Parameters**
The `multiplicative_param <Function_Modulatory_Params>` and `additive_param <Function_Modulatory_Params>` of each
`cost function <TransferWithCosts_Cost_Functions>` is assigned as a parameter of the TransferWithCost `Function`.
This makes them accessible for `modulation <ModulatorySignal_Modulation>` when the Function is assigned to a
`Port` (e.g., as the default `function <ControlSignal._function>` of a `ControlSignal`), or a `Mechanism
<Mechanism>`. They can be referred to in the **modulation** argument of a `ModulatorySignal`\\'s constructor
(see `ModulatorySignal_Types`) using the following keywords:
*INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM*
*INTENSITY_COST_FCT_ADDITIVE_PARAM*
*ADJUSTMENT_COST_FCT_MULTIPLICATIVE_PARAM*
*ADJUSTMENT_COST_FCT_ADDITIVE_PARAM*
*DURATION_COST_FCT_MULTIPLICATIVE_PARAM*
*DURATION_COST_FCT_ADDITIVE_PARAM*
*COMBINE_COSTS_FCT_MULTIPLICATIVE_PARAM*
*COMBINE_COSTS_FCT_ADDITIVE_PARAM*
|
See `example <ControlSignal_Example_Modulate_Costs>` of how these keywords can be used to
modulate the parameters of the cost functions of a TransferMechanism assigned to a ControlSignal.
Arguments
---------
variable : list or 1d array of numbers: Default class_defaults.variable
specifies shape and default value of the array for variable used by `transfer_fct
<TransferWithCosts.transfer_fct>`
on which costs are calculated.
size : int : None
specifies length of the array for `variable <TransferWithCosts.variable>` used by `function
<TransferWithCosts._function>` and on which costs are calculated; can be used in place of
default_value, in which case zeros are assigned as the value(s). An error is generated if both are
specified but size != len(default_value).
transfer_fct : TransferFunction : Linear
specifies the primary function, used to generate the value it returns.
enabled_cost_functions : CostFunctions or List[CostFunctions] : None
specifies the costs to execute when `function <TransferWithCosts._function>` is called, and
include in the computation of `combined_costs <TransferWithCosts.combined_costs>`.
intensity_cost_fct : Optional[`TransferFunction`] : default `Exponential`
specifies the function used to compute the `intensity_cost <TransferWithCosts.intensity_cost>`.
adjustment_cost_fct : Optional[`TransferFunction`] : default `Linear`
specifies the function used to compute the `adjustment_cost <TransferWithCosts.adjustment_cost>`.
duration_cost_fct : `IntegratorFunction` : default `IntegratorFunction`
specifies the function used to compute the `duration_cost <TransferWithCosts.duration_cost>`.
combine_costs_fct : function : default `LinearCombination`
specifies the function used to compute `combined_costs <TransferWithCosts.combined_costs>`.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : 1d array
value used by `function <TransferWithCosts._function>`, and on which `intensity <TransferWithCosts.intensity>`
and associated costs are calculated.
size : int
length of array for `variable <TransferWithCosts.variable>`.
intensity : 1d array
the result of the transfer_fct <TransferWithCosts.transfer_fct>`, and the value returned by
`function <TransferWithCosts._function>`.
function : TransferFunction
primary function, specified by **transfer_fct** argument of constructor, and also stored in
`transfer_fct <TransferWithCosts.transfer_fct>`.
transfer_fct : TransferMechanism
the TransferWithCosts Function's primary function, used to generate the value it returns;
same as `function <TransferWithCosts._function>`.
enabled_cost_functions : CostFunctions or None
boolean combination of currently enabled CostFunctions; determines which `cost functions
<TransferWithCosts_Cost_Functions>` are calculated when `function <TransferWithCosts._function>`
is called, and are included in the computation of `combined_costs <TransferWithCosts.combined_costs>`
(see `Cost Functions <TransferWithCosts_Cost_Functions>` for additional details).
intensity_cost : float or None
cost computed by `intensity_cost_fct <TransferWithCosts.intensity_cost_fct>` for current `intensity
<TransferWithCosts.intensity>`. Value is None if `intensity_cost_fct <TransferWithCosts.intensity_cost_fct>`
has not been enabled (see `Cost Functions <TransferWithCosts_Cost_Functions>` for additional details).
intensity_cost_fct : TransferFunction
calculates `intensity_cost` from the current value of `intensity <TransferWithCosts.intensity>`.
It can be any `TransferFunction`, or any other function that takes and returns a scalar value.
The default is `Exponential`.
intensity_cost_fct_mult_param : value
references value of the `multiplicative_param <Function_Modulatory_Params>` of `intensity_cost_fct
<TransferWithCosts.intensity_cost_fct>`.
intensity_cost_fct_add_param : value
references value of the `additive_param <Function_Modulatory_Params>` of `intensity_cost_fct
<TransferWithCosts.intensity_cost_fct>`.
adjustment_cost : float or None
cost of change in `intensity <TransferWithCosts.intensity>` from the last time `function
<TransferWithCosts._function>` was executed. Value is None if `adjustment_cost_fct
<TransferWithCosts.adjustment_cost_fct>` has not been enabled (see `Cost Functions
<TransferWithCosts_Cost_Functions>` for additional details).
adjustment_cost_fct : TransferFunction
calculates `adjustment_cost <TransferWithCosts.adjustment_cost>` based on the change in `intensity
<TransferWithCosts.intensity>` from its value the last time `function <TransferWithCosts._function>` was
executed. It can be any `TransferFunction`, or any other function that takes and returns a scalar value.
adjustment_cost_fct_mult_param : value
references value of the `multiplicative_param <Function_Modulatory_Params>` of `adjustment_cost_fct
<TransferWithCosts.adjustment_cost_fct>`.
adjustment_cost_fct_add_param : value
references value of the `additive_param <Function_Modulatory_Params>` of `adjustment_cost_fct
<TransferWithCosts.adjustment_cost_fct>`.
duration_cost : float or None
integral of `intensity <intensity <TransferWithCosts.intensity>`, computed by `duration_cost_fct
<TransferWithCosts.duration_cost_fct>`. Value is None if `duration_cost_fct
<TransferWithCosts.duration_cost_fct>` has not been enabled; otherwise, the integral of
`intensity <intensity <TransferWithCosts.intensity>` is only for those executions of `function
<TransferWithCosts._function>` in which `function <TransferWithCosts.duration_cost_fct>` was enabled.
duration_cost_fct : IntegratorFunction
calculates an integral of `intensity <TransferWithCosts.intensity>`. It can be any `IntegratorFunction`,
or any other function that takes a list or array of two values and returns a scalar value.
duration_cost_fct_mult_param : value
references value of the `multiplicative_param <Function_Modulatory_Params>` of `duration_cost_fct
<TransferWithCosts.duration_cost_fct>`.
duration_cost_fct_add_param : value
references value of the `additive_param <Function_Modulatory_Params>` of `duration_cost_fct
<TransferWithCosts.duration_cost_fct>`.
combined_costs : float or None
combined result of all `cost functions <TransferWithCosts_Cost_Functions>` that are enabled;
computed by `combined_costs_fct <TransferWithCosts.combined_costs_fct>` for current `intensity
<TransferWithCosts.intensity>`. Value is None if no costs have been enabled (see
`Cost Functions <TransferWithCosts_Cost_Functions>` for additional details).
combine_costs_fct : function
combines the results of all `cost functions <TransferWithCosts_Cost_Functions>` that are enabled, and assigns
the result to `cost <TransferWithCosts.cost>`. It can be any function that takes an array and returns a scalar
value.
combined_costs_fct_mult_param : value
references value of the `multiplicative_param <Function_Modulatory_Params>` of `combined_costs_fct
<TransferWithCosts.combined_costs_fct>`.
combined_costs_fct_add_param : value
references value of the `additive_param <Function_Modulatory_Params>` of `combined_costs_fct
<TransferWithCosts.combined_costs_fct>`.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
name : str
name of the Function.
owner : Component
`component <Component>` to which to assign the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
determines the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
"""
componentName = TRANSFER_WITH_COSTS_FUNCTION
classPreferences = {
PREFERENCE_SET_NAME: 'TransferWithCostssClassPreferences',
REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
}
class Parameters(TransferFunction.Parameters):
    """
    Attributes
    ----------

        variable
            see `variable <TransferWithCosts.variable>`

            :default value: numpy.array([0])
            :type: ``numpy.ndarray``

        LinearCombination
            see `LinearCombination <TransferWithCosts.LinearCombination>`

            :default value: `LinearCombination`
            :type: `Function`

        SimpleIntegrator
            see `SimpleIntegrator <TransferWithCosts.SimpleIntegrator>`

            :default value: `SimpleIntegrator`
            :type: `Function`

        adjustment_cost
            see `adjustment_cost <TransferWithCosts.adjustment_cost>`

            :default value: None
            :type:

        adjustment_cost_fct
            see `adjustment_cost_fct <TransferWithCosts.adjustment_cost_fct>`

            :default value: `Linear`
            :type: `Function`

        adjustment_cost_fct_add_param
            see `adjustment_cost_fct_add_param <TransferWithCosts.adjustment_cost_fct_add_param>`

            :default value: None
            :type:

        adjustment_cost_fct_mult_param
            see `adjustment_cost_fct_mult_param <TransferWithCosts.adjustment_cost_fct_mult_param>`

            :default value: None
            :type:

        combine_costs_fct
            see `combine_costs_fct <TransferWithCosts.combine_costs_fct>`

            :default value: `LinearCombination`
            :type: `Function`

        combine_costs_fct_add_param
            see `combine_costs_fct_add_param <TransferWithCosts.combine_costs_fct_add_param>`

            :default value: None
            :type:

        combine_costs_fct_mult_param
            see `combine_costs_fct_mult_param <TransferWithCosts.combine_costs_fct_mult_param>`

            :default value: None
            :type:

        combined_costs
            see `combined_costs <TransferWithCosts.combined_costs>`

            :default value: None
            :type:

        duration_cost
            see `duration_cost <TransferWithCosts.duration_cost>`

            :default value: None
            :type:

        duration_cost_fct
            see `duration_cost_fct <TransferWithCosts.duration_cost_fct>`

            :default value: `SimpleIntegrator`
            :type: `Function`

        duration_cost_fct_add_param
            see `duration_cost_fct_add_param <TransferWithCosts.duration_cost_fct_add_param>`

            :default value: None
            :type:

        duration_cost_fct_mult_param
            see `duration_cost_fct_mult_param <TransferWithCosts.duration_cost_fct_mult_param>`

            :default value: None
            :type:

        enabled_cost_functions
            see `enabled_cost_functions <TransferWithCosts.enabled_cost_functions>`

            :default value: CostFunctions.INTENSITY
            :type: `CostFunctions`

        intensity
            see `intensity <TransferWithCosts.intensity>`

            :default value: numpy.array([0])
            :type: ``numpy.ndarray``

        intensity_cost
            see `intensity_cost <TransferWithCosts.intensity_cost>`

            :default value: None
            :type:

        intensity_cost_fct
            see `intensity_cost_fct <TransferWithCosts.intensity_cost_fct>`

            :default value: `Exponential`
            :type: `Function`

        intensity_cost_fct_add_param
            see `intensity_cost_fct_add_param <TransferWithCosts.intensity_cost_fct_add_param>`

            :default value: None
            :type:

        intensity_cost_fct_mult_param
            see `intensity_cost_fct_mult_param <TransferWithCosts.intensity_cost_fct_mult_param>`

            :default value: None
            :type:

        transfer_fct
            see `transfer_fct <TransferWithCosts.transfer_fct>`

            :default value: `Linear`
            :type: `Function`

        transfer_fct_add_param
            see `transfer_fct_add_param <TransferWithCosts.transfer_fct_add_param>`

            :default value: None
            :type:

        transfer_fct_mult_param
            see `transfer_fct_mult_param <TransferWithCosts.transfer_fct_mult_param>`

            :default value: None
            :type:
    """
    # history_min_length=1 keeps one prior value, so adjustment_cost can
    # compare the current intensity against the previous one.
    variable = Parameter(np.array([0]),
                         history_min_length=1)
    intensity = Parameter(np.zeros_like(variable.default_value),
                          history_min_length=1)

    # Create primary functions' modulation params for TransferWithCosts
    transfer_fct = Parameter(Linear, stateful=False)
    _validate_transfer_fct = get_validator_by_function(is_function_type)
    # FunctionParameter entries below proxy the MULTIPLICATIVE/ADDITIVE
    # modulatory params of the corresponding function, so they can be
    # modulated directly on the TransferWithCosts Function.
    transfer_fct_mult_param = FunctionParameter(
        aliases=MULTIPLICATIVE_PARAM,
        modulation_combination_function=PRODUCT,
        function_name='transfer_fct',
        function_parameter_name=MULTIPLICATIVE_PARAM,
    )
    transfer_fct_add_param = FunctionParameter(
        aliases=ADDITIVE_PARAM,
        modulation_combination_function=SUM,
        function_name='transfer_fct',
        function_parameter_name=ADDITIVE_PARAM,
    )
    enabled_cost_functions = Parameter(
        CostFunctions.DEFAULTS,
        valid_types=(CostFunctions, list)
    )

    # Create versions of cost functions' modulation params for TransferWithCosts
    intensity_cost = None
    intensity_cost_fct = Parameter(Exponential, stateful=False)
    _validate_intensity_cost_fct = get_validator_by_function(is_function_type)
    intensity_cost_fct_mult_param = FunctionParameter(
        modulation_combination_function=PRODUCT,
        function_name='intensity_cost_fct',
        function_parameter_name=MULTIPLICATIVE_PARAM,
    )
    intensity_cost_fct_add_param = FunctionParameter(
        modulation_combination_function=SUM,
        function_name='intensity_cost_fct',
        function_parameter_name=ADDITIVE_PARAM,
    )

    adjustment_cost = None
    adjustment_cost_fct = Parameter(Linear, stateful=False)
    _validate_adjustment_cost_fct = get_validator_by_function(is_function_type)
    adjustment_cost_fct_mult_param = FunctionParameter(
        modulation_combination_function=PRODUCT,
        function_name='adjustment_cost_fct',
        function_parameter_name=MULTIPLICATIVE_PARAM,
    )
    adjustment_cost_fct_add_param = FunctionParameter(
        modulation_combination_function=SUM,
        function_name='adjustment_cost_fct',
        function_parameter_name=ADDITIVE_PARAM,
    )

    duration_cost = None
    duration_cost_fct = Parameter(SimpleIntegrator, stateful=False)
    _validate_duration_cost_fct = get_validator_by_function(is_function_type)
    duration_cost_fct_mult_param = FunctionParameter(
        modulation_combination_function=PRODUCT,
        function_name='duration_cost_fct',
        function_parameter_name=MULTIPLICATIVE_PARAM,
    )
    duration_cost_fct_add_param = FunctionParameter(
        modulation_combination_function=SUM,
        function_name='duration_cost_fct',
        function_parameter_name=ADDITIVE_PARAM,
    )

    combined_costs = None
    combine_costs_fct = Parameter(LinearCombination, stateful=False)
    _validate_combine_costs_fct = get_validator_by_function(is_function_type)
    combine_costs_fct_mult_param = FunctionParameter(
        modulation_combination_function=PRODUCT,
        function_name='combine_costs_fct',
        function_parameter_name=MULTIPLICATIVE_PARAM,
    )
    combine_costs_fct_add_param = FunctionParameter(
        modulation_combination_function=SUM,
        function_name='combine_costs_fct',
        function_parameter_name=ADDITIVE_PARAM,
    )
@tc.typecheck
def __init__(self,
             default_variable=None,
             size=None,
             transfer_fct:tc.optional(is_function_type)=None,
             enabled_cost_functions:tc.optional(tc.any(CostFunctions, list))=None,
             intensity_cost_fct:tc.optional(is_function_type)=None,
             adjustment_cost_fct:tc.optional(is_function_type)=None,
             duration_cost_fct:tc.optional(is_function_type)=None,
             combine_costs_fct:tc.optional(is_function_type)=None,
             params=None,
             owner=None,
             prefs: tc.optional(is_pref_set) = None):
    """Construct a TransferWithCosts Function.

    The `*_fct` arguments accept a Function (instance or class) or a plain
    callable; they are resolved to Function instances later, in
    `_instantiate_cost_functions`. `None` selects the class default for each.

    NOTE(review): `size` is accepted but currently ignored — the validation
    below is commented out; confirm whether it should be honored or removed.
    """
    # if size:
    #     if default_variable is None:
    #         default_variable = np.zeros(size)
    #     elif size != len(default_variable):
    #         raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({size}) "
    #                             f"are specified for {self.name} but are {SIZE}!=len({DEFAULT_VARIABLE}).")

    super().__init__(
        default_variable=default_variable,
        transfer_fct=transfer_fct,
        enabled_cost_functions=enabled_cost_functions,
        intensity_cost_fct=intensity_cost_fct,
        adjustment_cost_fct=adjustment_cost_fct,
        duration_cost_fct=duration_cost_fct,
        combine_costs_fct=combine_costs_fct,
        params=params,
        owner=owner,
        prefs=prefs,
    )

    # # MODIFIED 6/12/19 NEW: [JDC]
    # self._variable_shape_flexibility = DefaultsFlexibility.FLEXIBLE
    # # MODIFIED 6/12/19 END
def _instantiate_attributes_before_function(self, function=None, context=None):
    """Instantiate `cost functions <TransferWithCosts_Cost_Functions>` specified in `enabled_cost_functions
    <TransferWithCosts.enabled_cost_functions>`.
    """
    # BUG FIX: previously passed context=None to super(), discarding the
    # caller's context; forward it so superclass attribute instantiation and
    # _instantiate_cost_functions below run in the same execution context.
    super()._instantiate_attributes_before_function(function=function, context=context)
    self._instantiate_cost_functions(context=context)
def _instantiate_cost_functions(self, context):
    """Instantiate cost functions and the multiplicative and additive modulatory parameters for them.

    Parse specification of cost functions to enable.
    Instantiate cost functions specified in constructor arguments, and enable ones in enabled_cost_functions.
    Assign default value for multiplicative and additive parameters for each, from the values of those
    parameters on the respective cost functions just instantiated.
    Initialize intensity_cost.
    """
    if self.enabled_cost_functions:
        self.assign_costs(self.enabled_cost_functions)

    def instantiate_fct(fct_name, fct):
        # Resolve a cost-function spec to a Function instance, or disable
        # the corresponding cost if no function was specified.
        if not fct:
            self.toggle_cost(fct_name, OFF)
            return None
        elif isinstance(fct, Function):
            return fct
        elif isinstance(fct, (types.FunctionType, types.MethodType)):
            # Wrap a plain callable as a UserDefinedFunction
            from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction
            return UserDefinedFunction(custom_function=fct,
                                       owner=self,
                                       context=context)
        elif issubclass(fct, Function):
            return fct()
        else:
            raise FunctionError(f"{fct} is not a valid cost function for {fct_name}.")

    self.intensity_cost_fct = instantiate_fct(INTENSITY_COST_FUNCTION, self.intensity_cost_fct)
    # Initialize default_value for TransferWithCosts' modulation params from intensity_cost_fct's values
    self.parameters.intensity_cost_fct_mult_param.default_value = \
        self.parameters.intensity_cost_fct_mult_param.get()
    self.parameters.intensity_cost_fct_add_param.default_value = \
        self.parameters.intensity_cost_fct_add_param.get()

    self.adjustment_cost_fct = instantiate_fct(ADJUSTMENT_COST_FUNCTION, self.adjustment_cost_fct)
    # Initialize default_value for TransferWithCosts' modulation params from adjustment_cost_fct's values
    self.parameters.adjustment_cost_fct_mult_param.default_value = \
        self.parameters.adjustment_cost_fct_mult_param.get()
    self.parameters.adjustment_cost_fct_add_param.default_value = \
        self.parameters.adjustment_cost_fct_add_param.get()

    self.duration_cost_fct = instantiate_fct(DURATION_COST_FUNCTION, self.duration_cost_fct)
    # Initialize default_value for TransferWithCosts' modulation params from duration_cost_fct's values
    # BUG FIX: mult_param was previously initialized from *add_param*.get()
    # (copy-paste error); initialize it from the mult param's own value, as
    # is done for intensity_, adjustment_ and combine_ above/below.
    self.parameters.duration_cost_fct_mult_param.default_value = \
        self.parameters.duration_cost_fct_mult_param.get()
    self.parameters.duration_cost_fct_add_param.default_value = \
        self.parameters.duration_cost_fct_add_param.get()

    self.combine_costs_fct = instantiate_fct(COMBINE_COSTS_FUNCTION, self.combine_costs_fct)
    # Initialize default_value for TransferWithCosts' modulation params from combine_costs_fct's values
    self.parameters.combine_costs_fct_mult_param.default_value = \
        self.parameters.combine_costs_fct_mult_param.get()
    self.parameters.combine_costs_fct_add_param.default_value = \
        self.parameters.combine_costs_fct_add_param.get()

    # Initialize intensity attributes from the appropriate default variable
    if self.enabled_cost_functions:
        if self.owner:
            if self.owner.context.initialization_status != ContextFlags.DEFERRED_INIT:
                self.intensity_cost = self.intensity_cost_fct(self.owner.defaults.variable)
            else:
                self.intensity_cost = self.intensity_cost_fct(self.owner.class_defaults.variable)
        else:
            self.intensity_cost = self.intensity_cost_fct(self.defaults.variable)
        self.defaults.intensity_cost = self.intensity_cost
def _function(self,
              variable=None,
              params=None,
              context=None):
    """
    Arguments
    ---------

    variable : number or array : default class_defaults.variable
        a single value or array to be transformed.

    params : Dict[param keyword: param value] : default None
        a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function.
        Values specified for parameters in the dictionary override any assigned to those parameters in arguments
        of the constructor.

    Returns
    -------

    transformation of variable using `transfer_fct <TransferWithCosts.transfer_fct>` : number or array
    """
    self._check_args(variable=variable, params=params, context=context)

    # FIRST, DEAL WITH CURRENT INTENSITY
    # Compute current intensity
    intensity = self.parameters.transfer_fct._get(context)(variable, context=context)

    # THEN, DEAL WITH COSTS
    # Note: only compute costs that are enabled; others are left as None, or with their value when last enabled.

    # Get costs for each cost function that is enabled in enabled_cost_functions
    enabled_cost_functions = self.parameters.enabled_cost_functions._get(context)
    enabled_costs = []  # aggregates enabled costs; passed to combine_costs_fct below
    if enabled_cost_functions:
        # Compute intensity_cost
        if enabled_cost_functions & CostFunctions.INTENSITY:
            intensity_cost = self.intensity_cost_fct(intensity, context=context)
            self.parameters.intensity_cost._set(intensity_cost, context)
            enabled_costs.append(intensity_cost)

        # Compute adjustment_cost
        if enabled_cost_functions & CostFunctions.ADJUSTMENT:
            # Compute intensity change relative to the stored previous intensity
            try:
                intensity_change = np.abs(intensity - self.parameters.intensity._get(context))
            except TypeError:
                # No previous intensity yet (stored value is None): treat the change as zero.
                # BUG FIX: was `self.parameters_intensity`, which raised
                # AttributeError here and masked the intended fallback.
                intensity_change = np.zeros_like(self.parameters.intensity._get(context))
            adjustment_cost = self.adjustment_cost_fct(intensity_change, context=context)
            self.parameters.adjustment_cost._set(adjustment_cost, context)
            enabled_costs.append(adjustment_cost)

        # Compute duration_cost
        if enabled_cost_functions & CostFunctions.DURATION:
            duration_cost = self.duration_cost_fct(intensity, context=context)
            self.parameters.duration_cost._set(duration_cost, context)
            enabled_costs.append(duration_cost)

        # Always execute combine_costs_fct if *any* costs are enabled
        combined_costs = self.combine_costs_fct(enabled_costs,
                                                context=context)
        self.parameters.combined_costs._set(combined_costs, context)

    # Store current intensity (becomes the "previous" value for the next call)
    self.parameters.intensity._set(intensity, context)

    return intensity
def _is_identity(self, context=None, defaults=False):
    """Return True iff this Function passes its input through unchanged:
    the transfer function is an identity AND no cost functions are enabled."""
    xfer = self.parameters.transfer_fct.get()
    enabled = (self.defaults.enabled_cost_functions if defaults
               else self.parameters.enabled_cost_functions.get(context))
    return xfer._is_identity(context, defaults=defaults) and enabled == CostFunctions.NONE
@tc.typecheck
def assign_costs(self, cost_functions: tc.any(CostFunctions, list), execution_context=None):
    """Enable exactly the specified cost function(s); all others are disabled.

    Arguments
    ---------

    cost_functions: CostFunctions or List[CostFunctions]
        `cost function <TransferWithCosts_Cost_Functions>` or list of ones to be used; all others are disabled.

    Returns
    -------

    enabled_cost_functions :  boolean combination of CostFunctions
        current value of `enabled_cost_functions <TransferWithCosts.enabled_cost_functions>`.
    """
    requested = [cost_functions] if isinstance(cost_functions, CostFunctions) else cost_functions
    # Clear everything, then delegate to enable_costs for the requested set.
    self.parameters.enabled_cost_functions.set(CostFunctions.NONE, execution_context)
    return self.enable_costs(requested, execution_context)
@tc.typecheck
def enable_costs(self, cost_functions: tc.any(CostFunctions, list), execution_context=None):
    """Enable the specified `cost functions <TransferWithCosts_Cost_Functions>`,
    leaving the settings of all other cost functions intact.

    Arguments
    ---------

    cost_functions: CostFunctions or List[CostFunctions]
        `cost function <TransferWithCosts_Cost_Functions>` or list of ones to be enabled,
        in addition to any that are already enabled.

    Returns
    -------

    enabled_cost_functions :  boolean combination of CostFunctions
        current value of `enabled_cost_functions <TransferWithCosts.enabled_cost_functions>`.
    """
    requested = [cost_functions] if isinstance(cost_functions, CostFunctions) else cost_functions
    enabled = self.parameters.enabled_cost_functions.get(execution_context)
    # OR each requested flag into the current bitmask.
    for flag in requested:
        enabled = enabled | flag
    self.parameters.enabled_cost_functions.set(enabled, execution_context)
    return enabled
@tc.typecheck
def disable_costs(self, cost_functions: tc.any(CostFunctions, list), execution_context=None):
    """Disable the specified `cost functions <TransferWithCosts_Cost_Functions>`,
    leaving the settings of all other cost functions intact.

    Arguments
    ---------

    cost_functions: CostFunction or List[CostFunctions]
        `cost function <TransferWithCosts_Cost_Functions>` or list of ones to be disabled.

    Returns
    -------

    enabled_cost_functions :  boolean combination of CostFunctions
        current value of `enabled_cost_functions <TransferWithCosts.enabled_cost_functions>`.
    """
    requested = [cost_functions] if isinstance(cost_functions, CostFunctions) else cost_functions
    enabled = self.parameters.enabled_cost_functions.get(execution_context)
    # Clear each requested flag from the current bitmask.
    for flag in requested:
        enabled = enabled & ~flag
    self.parameters.enabled_cost_functions.set(enabled, execution_context)
    return enabled
def toggle_cost(self, cost_function_name:tc.any(str, CostFunctions),
                assignment:bool=ON,
                execution_context=None):
    """Enable or disable one `cost function <TransferWithCosts_Cost_Functions>`.

    Arguments
    ---------

    cost_function_name : str or CostFunction
        Must be the name of a `cost function <TransferWithCosts_Cost_Functions>` or a value of CostFunction enum.

    Returns
    -------

    enabled_cost_functions :  boolean combination of CostFunctions
        current value of `enabled_cost_functions <TransferWithCosts.enabled_cost_functions>`.
    """
    # Normalize the spec (keyword string or CostFunctions flag) to a
    # (canonical name, flag) pair via a lookup table.
    for canonical_name, flag in ((INTENSITY_COST_FUNCTION, CostFunctions.INTENSITY),
                                 (ADJUSTMENT_COST_FUNCTION, CostFunctions.ADJUSTMENT),
                                 (DURATION_COST_FUNCTION, CostFunctions.DURATION)):
        if cost_function_name in {canonical_name, flag}:
            cost_function = flag
            cost_function_name = canonical_name
            break
    else:
        if cost_function_name == COMBINE_COSTS_FUNCTION:
            raise FunctionError("{} cannot be disabled".format(COMBINE_COSTS_FUNCTION))
        raise FunctionError("toggle_cost: unrecognized cost function: {}".format(cost_function_name))

    enabled_cost_functions = self.parameters.enabled_cost_functions.get(execution_context)
    if assignment:
        # Can only enable a cost whose function has actually been assigned.
        if cost_function_name not in self.parameters.names():
            raise FunctionError("Unable to toggle {} ON as function assignment is \'None\'".
                                format(cost_function_name))
        enabled_cost_functions = (cost_function if not enabled_cost_functions
                                  else enabled_cost_functions | cost_function)
    else:
        enabled_cost_functions &= ~cost_function

    self.parameters.enabled_cost_functions.set(enabled_cost_functions, execution_context)
    return enabled_cost_functions
def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
    """Emit LLVM IR applying transfer_fct to arg_in and writing the result to arg_out.

    Cost computations are not yet implemented in the compiled path (see TODO below).
    """
    # Run transfer function first
    transfer_f = self.parameters.transfer_fct
    trans_f = ctx.import_llvm_function(transfer_f.get())
    trans_p = pnlvm.helpers.get_param_ptr(builder, self, params, transfer_f.name)
    trans_s = pnlvm.helpers.get_state_ptr(builder, self, state, transfer_f.name)
    trans_in = arg_in
    if trans_in.type != trans_f.args[2].type:
        # Input shape differs from the transfer function's expected arg type;
        # warn and peel one level of nesting with a GEP before the call.
        warnings.warn("Shape mismatch: {} input does not match the transfer function ({}): {} vs. {}".format(self, transfer_f.get(), self.defaults.variable, transfer_f.get().defaults.variable))
        trans_in = builder.gep(trans_in, [ctx.int32_ty(0), ctx.int32_ty(0)])
    trans_out = arg_out
    if trans_out.type != trans_f.args[3].type:
        # Same adjustment for the output pointer.
        warnings.warn("Shape mismatch: {} output does not match the transfer function ({}): {} vs. {}".format(self, transfer_f.get(), self.defaults.value, transfer_f.get().defaults.value))
        trans_out = builder.gep(trans_out, [ctx.int32_ty(0), ctx.int32_ty(0)])
    builder.call(trans_f, [trans_p, trans_s, trans_in, trans_out])
    # TODO: Implement cost calculations
    return builder
|
{"hexsha": "e04beea9d192547fa647471f0760b4ec2da65d3b", "size": 177428, "ext": "py", "lang": "Python", "max_stars_repo_path": "psyneulink/core/components/functions/nonstateful/transferfunctions.py", "max_stars_repo_name": "MetaCell/PsyNeuLink", "max_stars_repo_head_hexsha": "aeddf3e8ea62504a5d928b100b59aa18e593156c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 67, "max_stars_repo_stars_event_min_datetime": "2018-01-05T22:18:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T11:27:31.000Z", "max_issues_repo_path": "psyneulink/core/components/functions/nonstateful/transferfunctions.py", "max_issues_repo_name": "MetaCell/PsyNeuLink", "max_issues_repo_head_hexsha": "aeddf3e8ea62504a5d928b100b59aa18e593156c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1064, "max_issues_repo_issues_event_min_datetime": "2017-12-01T18:58:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:22:24.000Z", "max_forks_repo_path": "psyneulink/core/components/functions/nonstateful/transferfunctions.py", "max_forks_repo_name": "MetaCell/PsyNeuLink", "max_forks_repo_head_hexsha": "aeddf3e8ea62504a5d928b100b59aa18e593156c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-12-01T20:27:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T21:49:39.000Z", "avg_line_length": 41.8462264151, "max_line_length": 197, "alphanum_fraction": 0.601759587, "include": true, "reason": "import numpy", "num_tokens": 36485}
|
'''
Created on Sep 19, 2013
@author: johannes
'''
# create some test cases
import scipy as SP
from limix_legacy.ensemble import lmm_forest_utils as utils
import h5py
from limix_legacy.ensemble.lmm_forest import Forest as MF
import os
import unittest
class TestMixedForest(unittest.TestCase):
    """Regression tests for the legacy limix mixed-effect random forest (MF)."""

    def setUp(self, n=100, m=1):
        # NOTE(review): non-standard setUp signature — several tests call
        # self.setUp(m=...) directly to rebuild fixtures with new shapes.
        self.dir_name = os.path.dirname(os.path.realpath(__file__))
        self.data = h5py.File(os.path.join(self.dir_name,
                                           'test_data/lmm_forest_toy_data.h5'),
                              'r')
        # Fixed seed: expected values below depend on the exact RNG stream.
        SP.random.seed(1)
        self.x, self.y = utils.lin_data_cont_predictors(n=n, m=m)
        self.n, self.m = self.x.shape
        [self.train, self.test] = utils.crossValidationScheme(2, self.n)
        self.n_estimators = 100

    @unittest.skip("someone has to fix it")
    def test_toy_data_rand(self):
        # Compare forest predictions and feature scores against reference
        # values stored in the toy-data HDF5 file.
        # NOTE(review): h5py removed Dataset.value in h5py 3.0; the modern
        # spelling is ds[()] — confirm the pinned h5py version before upgrading.
        y_conf = self.data['y_conf'].value
        kernel = self.data['kernel'].value
        X = self.data['X'].value
        # This is a non-random cross validation
        (training, test) = utils.crossValidationScheme(2, y_conf.size)
        lm_forest = MF(kernel=kernel[SP.ix_(training, training)],
                       sampsize=.5, verbose=0, n_estimators=100)
        lm_forest.fit(X[training], y_conf[training])
        response_tot = lm_forest.predict(X[test],
                                         kernel[SP.ix_(test, training)])
        # 'iid' kernel -> plain random forest baseline
        random_forest = MF(kernel='iid')
        random_forest.fit(X[training], y_conf[training])
        response_iid = random_forest.predict(X[test])
        response_fixed = lm_forest.predict(X[test])
        feature_scores_lmf = lm_forest.log_importance
        feature_scores_rf = random_forest.log_importance
        # All consistency checks (loose tolerances against stored references)
        err = (feature_scores_lmf-self.data['feature_scores_lmf'].value).sum()
        self.assertTrue(SP.absolute(err) < 10)
        err = (feature_scores_rf-self.data['feature_scores_rf'].value).sum()
        self.assertTrue(SP.absolute(err) < 10)
        err = SP.absolute(self.data['response_tot'] - response_tot).sum()
        self.assertTrue(SP.absolute(err) < 2)
        err = SP.absolute(self.data['response_fixed'] - response_fixed).sum()
        self.assertTrue(SP.absolute(err) < 4)
        err = SP.absolute(self.data['response_iid'] - response_iid).sum()
        self.assertTrue(SP.absolute(err) < 8)

    def test_delta_updating(self):
        # Smoke test: fit/predict with update_delta=False vs. True.
        # NOTE(review): there are no assertions — this only verifies that the
        # calls complete without raising.
        n_sample = 100
        # n_sample x 2 predictor matrix: a linear ramp and uniform noise
        X = SP.empty((n_sample, 2))
        X[:, 0] = SP.arange(0, 1, 1.0/n_sample)
        X[:, 1] = SP.random.rand(n_sample)
        sd_noise = .5
        sd_conf = .5
        noise = SP.random.randn(n_sample, 1)*sd_noise
        # print 'true delta equals', (sd_noise**2)/(sd_conf**2)
        # Here, the observed y is a step function of the first column of X
        # plus a little independent gaussian noise
        y_fixed = (X[:, 0:1] > .5)*1.0
        y_fn = y_fixed + noise
        # Divide into training and test sample using 2/3 of data for training
        training_sample = SP.zeros(n_sample, dtype='bool')
        training_sample[
            SP.random.permutation(n_sample)[:SP.int_(.66*n_sample)]] = True
        test_sample = ~training_sample
        # Smooth quadratic kernel, jittered for numerical stability
        kernel = utils.getQuadraticKernel(X[:, 0], d=0.0025) +\
            1e-3*SP.eye(n_sample)
        # The confounded version of y_fn is computed as
        y_conf = sd_conf*SP.random.multivariate_normal(SP.zeros(n_sample),
                                                       kernel, 1).reshape(-1, 1)
        y_tot = y_fn + y_conf
        # Selects rows and columns
        kernel_train = kernel[SP.ix_(training_sample, training_sample)]
        kernel_test = kernel[SP.ix_(test_sample, training_sample)]
        lm_forest = MF(kernel=kernel_train, update_delta=False, max_depth=1,
                       verbose=0)
        # Returns prediction for random effect
        lm_forest.fit(X[training_sample], y_tot[training_sample])
        response_lmf = lm_forest.predict(X[test_sample], k=kernel_test)
        # print 'fitting forest (delta-update)'
        # learn random forest with delta updating enabled
        random_forest = MF(kernel=kernel_train, update_delta=True, max_depth=5,
                           verbose=0)
        random_forest.fit(X[training_sample], y_tot[training_sample])
        response_rf = random_forest.predict(X[test_sample], k=kernel_test)

    def test_kernel_builing(self):
        # NOTE(review): method name has a typo ("builing"); kept as-is because
        # renaming would change the test id reported by unittest.
        X = (SP.random.rand(5, 10) > .5)*1.0
        kernel = utils.estimateKernel(X, scale=False)
        small_kernel = utils.estimateKernel(X[:, 0:5], scale=False)
        # Updating the full kernel with the second half of the columns should
        # reproduce the kernel estimated from the first half alone
        # (presumably update_Kernel downdates — verify in lmm_forest_utils).
        small_kernel_test = utils.update_Kernel(kernel, X[:, 5:], scale=False)
        self.assertAlmostEqual((small_kernel -
                                small_kernel_test).sum(), 0)

    def test_depth_building(self):
        # Growing the forest beyond the optimal depth must not change
        # predictions requested at depth=opt_depth.
        self.setUp(m=10)
        X = self.x.copy()
        X -= X.mean(axis=0)
        X /= X.std(axis=0)
        kernel = SP.dot(X, X.T)
        train = SP.where(self.train)[0]
        test = SP.where(~self.train)[0]
        model = MF(fit_optimal_depth=True, max_depth=3,
                   kernel=kernel[SP.ix_(train, train)])
        model.fit(self.x[self.train], self.y[self.train],
                  fit_optimal_depth=True)
        # NOTE(review): kernel[test, train] pairs indices elementwise (it only
        # works here because both halves have equal size); SP.ix_(test, train)
        # would select the full cross-covariance block — confirm which is intended.
        prediction_1 = model.predict(X[test], k=kernel[test, train],
                                     depth=model.opt_depth)
        # Grow to end
        model.further()
        # Prediction again
        prediction_2 = model.predict(X[test], k=kernel[test, train],
                                     depth=model.opt_depth)
        self.assertEqual((prediction_1 - prediction_2).sum(), 0.0)

    @unittest.skip("someone has to fix it")
    def test_forest_stump_recycling(self):
        # Refitting with recycle=True should give predictions close to the
        # original fit.
        self.setUp(m=5)
        SP.random.seed(42)
        model = MF(fit_optimal_depth=True, kernel='iid',
                   build_to_opt_depth=True)
        model.fit(self.x[self.train], self.y[self.train])
        prediction_1 = model.predict(self.x[self.test], depth=model.opt_depth)
        model.fit(self.x[self.train], self.y[self.train], recycle=True)
        prediction_2 = model.predict(self.x[self.test], depth=model.opt_depth)
        self.assertGreater(.7, ((prediction_1 - prediction_2)**2).sum())

    @unittest.skip("someone has to fix it")
    def test_normalization_kernel(self):
        # Predictions should be invariant to standardizing the predictors,
        # given the same kernel and the same RNG seed.
        #SP.random.seed(42)
        n = 50
        m = 100
        X = (SP.random.rand(n, m) > .5)*1.
        X_test = (SP.random.rand(10, m) > .5)*1.
        K = utils.estimateKernel(X)
        y = SP.random.rand(n, 1)
        SP.random.seed(1)
        mf = MF(kernel=K)
        mf.fit(X, y)
        results_1 = mf.predict(X_test)
        # Standardize predictors and refit with an identical RNG stream
        X -= X.mean(axis=0)
        X /= X.std(axis=0)
        X_test -= X_test.mean(axis=0)
        X_test /= X_test.std(axis=0)
        SP.random.seed(1)
        mf = MF(kernel=K)
        mf.fit(X, y)
        results_2 = mf.predict(X_test)
        self.assertEqual(results_1.sum(), results_2.sum())

    def polynom(self, x):
        # Ground-truth response surface: f(x) = -x + x^3
        return -x + x**3

    def complete_sample(self, x, mean=0, var=.3**2):
        # Ground truth plus i.i.d. Gaussian noise with the given mean/variance
        return self.polynom(x) + SP.random.randn(x.size) * SP.sqrt(var) + mean

    def test_covariate_shift(self):
        # Train on a biased covariate distribution, test on an unbiased one;
        # the kernel-based prediction (depth=0) should still track the
        # ground-truth polynomial within a loose tolerance.
        n_sample = 100
        # Biased training
        var_bias = .5**2
        mean_bias = .7
        x_train = SP.random.randn(n_sample)*SP.sqrt(var_bias) + mean_bias
        y_train = self.complete_sample(x_train)
        # Unbiased test set
        var = .3**2
        mean = 0
        x_test = SP.random.randn(n_sample)*SP.sqrt(var) + mean
        x_complete = SP.hstack((x_train, x_test))
        # Smooth kernel plus a linear term over the pooled covariates
        kernel = utils.getQuadraticKernel(x_complete, d=1) +\
            10 * SP.dot(x_complete.reshape(-1, 1), x_complete.reshape(1, -1))
        kernel = utils.scale_K(kernel)
        kernel_train = kernel[SP.ix_(SP.arange(x_train.size),
                                     SP.arange(x_train.size))]
        kernel_test = kernel[SP.ix_(SP.arange(x_train.size, x_complete.size),
                                    SP.arange(x_train.size))]
        mf = MF(n_estimators=100, kernel=kernel_train, min_depth=0,
                subsampling=False)
        mf.fit(x_train.reshape(-1, 1), y_train.reshape(-1, 1))
        response_gp = mf.predict(x_test.reshape(-1, 1), kernel_test, depth=0)
        self.assertTrue(((response_gp - self.polynom(x_test))**2).sum() < 2.4)
# Allow running this test module directly (python test_lmm_forest.py).
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "7f95d5fbd911415024080d45b6f958627f9b4fb3", "size": 8456, "ext": "py", "lang": "Python", "max_stars_repo_path": "limix_legacy/test/lmm_forest/test_lmm_forest.py", "max_stars_repo_name": "michoel-lab/limix-legacy", "max_stars_repo_head_hexsha": "cd6c9887a2c411372beeddde3a86979b2aa21837", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-08T19:23:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-05T14:44:20.000Z", "max_issues_repo_path": "limix_legacy/test/lmm_forest/test_lmm_forest.py", "max_issues_repo_name": "michoel-lab/limix-legacy", "max_issues_repo_head_hexsha": "cd6c9887a2c411372beeddde3a86979b2aa21837", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-10T12:29:46.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-10T12:29:46.000Z", "max_forks_repo_path": "limix_legacy/test/lmm_forest/test_lmm_forest.py", "max_forks_repo_name": "michoel-lab/limix-legacy", "max_forks_repo_head_hexsha": "cd6c9887a2c411372beeddde3a86979b2aa21837", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-20T18:33:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-18T13:29:27.000Z", "avg_line_length": 41.2487804878, "max_line_length": 80, "alphanum_fraction": 0.5976821192, "include": true, "reason": "import scipy", "num_tokens": 2110}
|
import numpy as np
import pandas as pd
import random
import subprocess
from termcolor import colored
import matplotlib.pyplot as plt
# Sweep of input sizes for the generated test cases.
MIN_NUMBERS = 8
MAX_NUMBERS = 24
NUMBER_STEP = 1
# Repetitions per input size (best-of-N is used for timing runs).
TEST_REPEAT = 5
# Value range of the randomly generated heights.
MIN_RANGE = 0
MAX_RANGE = 100
# Sentinel used by the C implementation under test as the neutral element
# of the max-scan (smallest finite single-precision float).
FLOAT_MIN = -3.40282e+38
def compute_angles(numbers, sentinel=FLOAT_MIN):
    """Reference max-prescan of viewing angles for the Line-of-Sight problem.

    For each point i > 0, the viewing angle from the observer at numbers[0]
    is arctan((numbers[i] - numbers[0]) / i), rounded to 5 decimals. The
    result holds, at each position, the maximum angle over all *previous*
    positions (an exclusive scan / max-prescan), using `sentinel` as the
    neutral element.

    Args:
        numbers: sequence of heights; numbers[0] is the observer's height.
        sentinel: neutral element of the max-scan. Generalized from the
            hard-coded module constant; defaults to FLOAT_MIN so existing
            callers see identical results.

    Returns:
        list of floats, same length as `numbers`.
    """
    angles = [round(np.arctan((height - numbers[0]) / (idx + 1)), 5)
              for idx, height in enumerate(numbers[1:])]
    angles.insert(0, sentinel)
    # Inclusive max-scan, then shift right by one (drop the overall maximum,
    # prepend the neutral element) to obtain the exclusive max-prescan.
    # np.maximum.accumulate replaces the previous pandas Series.cummax
    # round-trip with the same result.
    prescan = np.maximum.accumulate(angles).tolist()
    del prescan[-1]
    prescan.insert(0, sentinel)
    return prescan
def run_test_check():
    """Validate every ./test.sh variant against the Python reference.

    For each input size and random sample, runs the external implementation
    with options 1..3 and compares its parsed output to compute_angles();
    prints a colored pass/fail line per case and a diff dump on failure.
    """
    for numbers_count in range(MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP):
        for _ in range(TEST_REPEAT):
            input_num = [random.randint(MIN_RANGE, MAX_RANGE) for _ in range(numbers_count)]
            input_str = ','.join(map(str, input_num))
            reference_output = compute_angles(input_num)
            for option in (1, 2, 3):
                raw = subprocess.check_output(["./test.sh", input_str, str(option)])
                tokens = raw.decode("utf-8").split("\n")[0].split(",")
                # Round everything except the sentinel, mirroring the reference.
                out_num = [float(tok) if float(tok) == FLOAT_MIN else round(float(tok), 5)
                           for tok in tokens]
                if out_num == reference_output:
                    print(colored("Test (" + str(numbers_count) + " - " + str(option) + ") successful.", 'green'))
                else:
                    print(colored("Test (" + str(numbers_count) + " - " + str(option) + ") unsuccessful.", 'red'))
                    print("-----------------------------------------------------")
                    print(input_num)
                    print(reference_output)
                    print(out_num)
                    print("-----------------------------------------------------")
def create_graph(elapsed_time, samples):
    """Plot the three timing series against the sample counts.

    Saves the figure to plot.png and also shows it interactively.
    """
    fig, ax = plt.subplots()
    print(samples, elapsed_time[0])
    # One line per implementation variant: blue / red / green.
    for series, colour in zip(elapsed_time, ('b', 'r', 'g')):
        ax.plot(samples, series, linestyle='-', marker='o', color=colour)
    ax.set(xlabel='n - points count', ylabel='time (us)',
           title='Line-of-Sight')
    ax.grid()
    fig.savefig("plot.png")
    plt.show()
def rewrite_results(results, filename):
    """Overwrite `filename` with one result per line.

    The file must already exist (it is opened 'r+', preserving the original
    contract that a missing file raises FileNotFoundError).

    BUG FIX: the previous version seeked to 0 and wrote without truncating,
    so when the new content was shorter than the old, stale trailing bytes
    survived in the file. It also performed a pointless full read first.
    """
    with open(filename, 'r+') as f:
        f.seek(0)
        for result in results:
            f.write(str(result) + '\n')
        f.truncate()  # drop any leftover bytes from the previous contents
def run_test_measure():
    """Benchmark the three variants of ./test.sh over growing input sizes.

    For each size the program is run TEST_REPEAT times per variant and the
    minimum time is kept.  The per-variant series are persisted to
    results_{1,2,3}.txt and finally plotted via create_graph().
    """
    elapsed_times = [[], [], []]
    for numbers_count in range(MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP):
        print(numbers_count)
        # One sub-list of repeated timings per variant (options 1..3).
        sub_times = [[], [], []]
        for _ in range(TEST_REPEAT):
            input_num = [random.randint(MIN_RANGE, MAX_RANGE)
                         for _ in range(numbers_count)]
            input_str = ','.join(str(number) for number in input_num)
            for option in range(1, 4):
                out = subprocess.check_output(["./test.sh", input_str, str(option)])
                sub_times[option - 1].append(float(out.decode("utf-8").split("\n")[0]))
        # Keep the best (minimum) timing per variant for this size.
        for variant in range(3):
            elapsed_times[variant].append(min(sub_times[variant]))
    for variant in range(3):
        rewrite_results(elapsed_times[variant], "results_%d.txt" % (variant + 1))
    create_graph(elapsed_times, range(MIN_NUMBERS, MAX_NUMBERS, NUMBER_STEP))
# Entry point: run the correctness checks; the timing run is kept available
# but disabled by default.
if __name__ == '__main__':
    run_test_check()
    # run_test_measure()
|
{"hexsha": "3e64f2c66628474f8a76cf12e98c699e62cda4be", "size": 4104, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "xstupi00/Line-of-Sight", "max_stars_repo_head_hexsha": "1d4b00d655e34017cd152c088a7329a12cd3baaa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "xstupi00/Line-of-Sight", "max_issues_repo_head_hexsha": "1d4b00d655e34017cd152c088a7329a12cd3baaa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "xstupi00/Line-of-Sight", "max_forks_repo_head_hexsha": "1d4b00d655e34017cd152c088a7329a12cd3baaa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.75, "max_line_length": 116, "alphanum_fraction": 0.5850389864, "include": true, "reason": "import numpy", "num_tokens": 976}
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dopamine.discrete_domains.atari_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from dopamine.discrete_domains import atari_lib
import gym
import mock
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class AtariLibTest(tf.test.TestCase):
  """Tests for atari_lib.create_atari_environment."""

  def testCreateAtariEnvironmentWithoutGameName(self):
    # Omitting the game name must be rejected with an assertion.
    with self.assertRaises(AssertionError):
      atari_lib.create_atari_environment()

  @mock.patch.object(atari_lib, 'AtariPreprocessing')
  @mock.patch.object(gym, 'make')
  def testCreateAtariEnvironment(self, mock_gym_make, mock_atari_lib):

    class FakeGymEnv(object):
      """Records the gym id it was built from in `env`."""

      def __init__(self, env_name):
        self.env = 'gym({})'.format(env_name)

    # gym.make returns the fake env; AtariPreprocessing tags its input.
    mock_gym_make.side_effect = lambda name: FakeGymEnv(name)
    # pylint: disable=unnecessary-lambda
    mock_atari_lib.side_effect = lambda x: 'atari({})'.format(x)
    # pylint: enable=unnecessary-lambda
    env = atari_lib.create_atari_environment('Test')
    # The game name must be expanded to the NoFrameskip-v0 gym id and the
    # env wrapped by AtariPreprocessing.
    self.assertEqual('atari(gym(TestNoFrameskip-v0))', env)
class MockALE(object):
  """Minimal stand-in for the internal ALE used by the preprocessing tests.

  `screen_value` is assigned externally (e.g. by MockEnvironment) before
  getScreenGrayscale is called.
  """

  def __init__(self):
    pass

  def lives(self):
    # Always report a single remaining life.
    return 1

  def getScreenGrayscale(self, screen):  # pylint: disable=invalid-name
    # Fill the caller-provided buffer in place; like ndarray.fill this
    # returns None.
    screen.fill(self.screen_value)
class MockEnvironment(object):
  """Mock environment whose screen darkens by 2 every step.

  Note: get_observation() forwards the return value of
  MockALE.getScreenGrayscale, which fills the buffer in place and
  returns None — reset()/step() therefore yield None observations, and
  the wrapper under test is expected to read the ALE screen itself.
  """

  def __init__(self, screen_size=10, max_steps=10):
    self.screen_size = screen_size
    self.max_steps = max_steps
    self.ale = MockALE()
    self.observation_space = np.empty((screen_size, screen_size))
    self.game_over = False

  def reset(self):
    # Start each episode with a bright screen and a fresh step counter.
    self.num_steps = 0
    self.ale.screen_value = 10
    return self.get_observation()

  def get_observation(self):
    buf = np.empty((self.screen_size, self.screen_size))
    return self.ale.getScreenGrayscale(buf)

  def step(self, action):
    # Action 0 is rewarded (+1); any positive action is penalised (-1).
    reward = 1. if action <= 0 else -1.
    self.num_steps += 1
    self.ale.screen_value -= 2
    return (self.get_observation(), reward,
            self.num_steps >= self.max_steps, 0)

  def render(self, mode):
    pass
class AtariPreprocessingTest(tf.test.TestCase):
  """Behavioural tests for atari_lib.AtariPreprocessing."""

  def testResetPassesObservation(self):
    # reset() must produce a (screen_size, screen_size, 1) frame.
    wrapped = atari_lib.AtariPreprocessing(
        MockEnvironment(), frame_skip=1, screen_size=16)
    observation = wrapped.reset()
    self.assertEqual(observation.shape, (16, 16, 1))

  def testTerminalPassedThrough(self):
    max_steps = 10
    wrapped = atari_lib.AtariPreprocessing(
        MockEnvironment(max_steps=max_steps), frame_skip=1)
    wrapped.reset()
    # The first max_steps - 1 steps must be non-terminal; the last one
    # must signal termination.
    for _ in range(max_steps - 1):
      _, _, is_terminal, _ = wrapped.step(0)
      self.assertFalse(is_terminal)
    _, _, is_terminal, _ = wrapped.step(0)
    self.assertTrue(is_terminal)

  def testFrameSkipAccumulatesReward(self):
    frame_skip = 2
    wrapped = atari_lib.AtariPreprocessing(
        MockEnvironment(), frame_skip=frame_skip)
    wrapped.reset()
    # Action 0 yields reward 1 per underlying step, summed over the skip.
    _, reward, _, _ = wrapped.step(0)
    self.assertEqual(reward, frame_skip)

  def testMaxFramePooling(self):
    frame_skip = 2
    wrapped = atari_lib.AtariPreprocessing(
        MockEnvironment(), frame_skip=frame_skip)
    wrapped.reset()
    # The screen starts at 10 and drops by 2 per step: the two pooled
    # frames hold 8 and 6, so max-pooling must keep 8.
    observation, _, _, _ = wrapped.step(0)
    self.assertTrue((observation == 8).all())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
|
{"hexsha": "c70b3687ccdd277d412fcd8a7f5f08bebc97526c", "size": 4265, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/dopamine/discrete_domains/atari_lib_test.py", "max_stars_repo_name": "aghinsa/dopamine", "max_stars_repo_head_hexsha": "e7d780d7c80954b7c396d984325002d60557f7d1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-06T14:34:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-12T16:12:40.000Z", "max_issues_repo_path": "tests/dopamine/discrete_domains/atari_lib_test.py", "max_issues_repo_name": "aghinsa/dopamine", "max_issues_repo_head_hexsha": "e7d780d7c80954b7c396d984325002d60557f7d1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/dopamine/discrete_domains/atari_lib_test.py", "max_forks_repo_name": "aghinsa/dopamine", "max_forks_repo_head_hexsha": "e7d780d7c80954b7c396d984325002d60557f7d1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-05T15:52:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-23T01:34:06.000Z", "avg_line_length": 28.0592105263, "max_line_length": 74, "alphanum_fraction": 0.7172332943, "include": true, "reason": "import numpy", "num_tokens": 1083}
|
import os
import numpy as np
import argparse
from sklearn.model_selection import StratifiedKFold
#from data.image_folder import make_dataset
#from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import json
import pandas as pd
import numpy as np
import os, sys
import glob
import re
import hashlib
import pathlib
import cv2
# TODO: add isTB to the command-line options instead of hard-coding it here.
# When True, the TB-positive subset is selected further below.
isTB = True
#from options.train_options import TrainOptions
#from data import create_dataset
#from models import create_model
#from rxwgan.models import *
#from rxwgan.wgangp import wgangp_optimizer
#from rxcore import stratified_train_val_test_splits
def run(command: object) -> object:
    """Echo *command*, execute it through the shell, and abort on failure.

    Any non-zero exit status terminates the whole script with code 1.
    """
    print(command)
    status = os.system(command)
    if status > 0:
        exit(1)
# Recognised raster-image file extensions (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.tif', '.TIF', '.tiff', '.TIFF',
]


def is_image_file(filename):
    """Return True when *filename* ends with a known image extension."""
    # str.endswith accepts a tuple of suffixes, checking them in one call.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def expand_folder(path, extension):
    """Return the sorted list of files directly under *path* with *extension*.

    The extension is given without the leading dot (e.g. 'txt').
    """
    matches = glob.glob(path + '/*.' + extension)
    return sorted(matches)
def get_md5(path):
    """Return the hexadecimal MD5 digest of the file at *path*."""
    with open(path, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()
#import numpy as np
#
# Split train/val/test splits
#
def stratified_train_val_test_splits(df_kfold, seed=512):
    """Build nested stratified 10x10 train/val/test splits.

    The outer 10-fold split separates the test folds; for each outer fold
    the remaining rows are split again into 10 train/validation folds,
    stratified on the `target` column.

    Returns:
        A list with one entry per outer fold; each entry is a list of
        (train_index_values, val_index_values, test_positions) tuples.
    """
    outer_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    inner_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    sorts_train_test = []
    for train_val_idx, test_idx in outer_cv.split(df_kfold.values, df_kfold.target.values):
        train_val_df = df_kfold.iloc[train_val_idx]
        inner_sorts = [
            (train_val_df.index[train_idx].values,
             train_val_df.index[val_idx].values,
             test_idx)
            for train_idx, val_idx in inner_cv.split(train_val_df.values,
                                                     train_val_df.target.values)
        ]
        sorts_train_test.append(inner_sorts)
    return sorts_train_test
def make_dataset(dir, max_dataset_size=float("inf")):
    """Recursively collect image-file paths under *dir*.

    Directories are visited in sorted os.walk order; at most
    *max_dataset_size* paths are returned.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        images.extend(os.path.join(root, fname)
                      for fname in fnames if is_image_file(fname))
    # min(inf, len) collapses to len(images) for the default argument.
    return images[:min(max_dataset_size, len(images))]
def prepare_my_table(clinical_path, images_path, masks_path, combine = False):
    """Assemble the dataset table from clinical reports, images and masks.

    Args:
        clinical_path: directory holding one ``.txt`` clinical report per
            image; a ``_1.txt`` suffix marks a TB-positive case.
        images_path: directory holding the raw ``.png`` X-ray images.
        masks_path: directory holding the segmentation-mask images.
        combine: when True, also writes side-by-side (mask | image) pairs
            into a sibling ``foldAB`` directory (pix2pix-style input).

    Returns:
        pandas.DataFrame with one row per clinical report.
    """
    # Column accumulators; each list grows by one entry per report.
    d = {
        'target': [],
        'image_ID': [],
        'raw_image_path': [],
        'mask_image_path': [],
        'paired_image_path': [],
        'raw_image_md5': [],
        'age': [],
        'sex': [],
        'comment': [],
    }
    def treat_string(lines):
        # Collapse the report lines into one whitespace-normalised string.
        string = ''
        for s in lines:
            string += s.replace('\n', '').replace('\t', '')
        return re.sub(' +', ' ', string)
    for idx, path in enumerate(expand_folder(clinical_path, 'txt')):
        with open(path, 'r') as f:
            lines = f.readlines()
        # First report line carries sex and age (digits only are kept).
        sex = 'male' if 'male' in lines[0] else 'female'  # 1 for male and 0 for female
        age = int(re.sub('\D', '', lines[0]))
        # TB label comes from the file name (_1.txt is PTB, _0.txt is NTB).
        target = 1 if '_1.txt' in path else 0
        filename = path.split('/')[-1]
        image_filename = filename.replace('txt', 'png')
        # image_path = images_path+('/tb/' if target else '/no_tb/')+image_filename
        image_path = images_path + '/' + image_filename
        d['target'].append(target)
        d['age'].append(age)
        d['sex'].append(sex)
        d['raw_image_path'].append(image_path)
        d['raw_image_md5'].append(get_md5(image_path))
        # Mask/paired paths start empty and are filled in the loop below.
        d['mask_image_path'].append('')
        d['paired_image_path'].append('')
        d['comment'].append(treat_string(lines[1::]))
        d['image_ID'].append(filename.replace('.txt', ''))
    # NOTE(review): this mask-matching loop sits OUTSIDE the clinical loop,
    # so `image_path` still refers to the LAST report only — only that one
    # image can ever receive a mask / paired image. This looks like an
    # indentation bug (the loop probably belongs inside the loop above);
    # confirm the intent before relying on the mask columns.
    l_masks = make_dataset(masks_path)
    for mask in l_masks:
        # Match raw image and mask by the trailing 17 characters of the path
        # (assumes fixed-width file names — TODO confirm).
        if image_path[-17:] == mask[-17:]:
            idx = np.where(np.array(d['raw_image_path']) == image_path)[0][0]
            d['mask_image_path'][idx] = mask
            if combine == True:
                # Write the concatenated (mask | image) pair under foldAB.
                path_paired = image_path[:-25] + 'foldAB'
                path_paired_img = path_paired + '/' + image_path[-17:]
                d['paired_image_path'][idx] = path_paired_img
                if not os.path.isdir(path_paired):
                    os.makedirs(path_paired)
                im_A = cv2.imread(image_path)
                im_B = cv2.imread(mask)
                # Horizontal concatenation: mask on the left, raw on the right.
                im_AB = np.concatenate([im_B, im_A], 1)
                cv2.imwrite(path_paired_img, im_AB)
    return pd.DataFrame(d)
# NOTE: this is optional.
#from rxcore import allow_tf_growth
#allow_tf_growth()
#
# Start your job here
#
#job = json.load(open(args.job, 'r'))
#sort = job['sort']
#target = 1 # tb active
#test = job['test']
seed = 512
#epochs = 1000
#batch_size = 32
# Hard-coded dataset layout (China CXR set, unaligned pix2pix folders).
base_data_raw_path = '/Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
clinical_path = base_data_raw_path + '/ClinicalReadings'
images_path = base_data_raw_path + '/trainA'
masks_path = base_data_raw_path + '/trainB'
df = prepare_my_table(clinical_path, images_path, masks_path, combine = True)
# Use the first outer fold's first inner split for train/validation.
splits = stratified_train_val_test_splits(df, seed)[0]
training_data = df.iloc[splits[0][0]]
validation_data = df.iloc[splits[0][1]]
if isTB:
    # Select with each frame's OWN target column; indexing with df.target
    # (a differently-sized boolean Series) triggers pandas' reindexing
    # warning and is fragile.
    train_tb = training_data.loc[training_data.target == 1]
    val_tb = validation_data.loc[validation_data.target == 1]
else:
    train_ntb = training_data.loc[training_data.target == 0]
    val_ntb = validation_data.loc[validation_data.target == 0]
#training_data = training_data.loc[training_data.target==target]
#validation_data = validation_data.loc[validation_data.target==target]
# BUGFIX: `sort`, `test` and `target` are only defined by the commented-out
# job loading above, so building extra_d raised NameError at import time.
# It is disabled until the job loading is restored (its only consumer, the
# optimizer.fit call below, is commented out as well).
#extra_d = {'sort' : sort, 'test':test, 'target':target, 'seed':seed}
# Run!
#history = optimizer.fit( train_generator , val_generator, extra_d=extra_d, wandb=wandb )
combine_ab = 'python datasets/combine_A_and_B.py --fold_A /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainA --fold_B /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainB --fold_AB /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
run(combine_ab)
# pix2pix train/test (kept for reference, currently disabled):
#train_cmd = 'python train.py --model pix2pix --name ' + 'test_%d_sort_%d'%(test,sort) + '--dataroot . --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1'
#run(train_cmd)
#run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
|
{"hexsha": "d1fc417939f9071ab9558bc0b04de9acb77ad0f7", "size": 6734, "ext": "py", "lang": "Python", "max_stars_repo_path": "versions/v1/v1_tb/aux_kfold.py", "max_stars_repo_name": "otavares93/rxpix2pix", "max_stars_repo_head_hexsha": "cc72ff165769bc4f0c312372fe7c3b52ecda45a0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "versions/v1/v1_tb/aux_kfold.py", "max_issues_repo_name": "otavares93/rxpix2pix", "max_issues_repo_head_hexsha": "cc72ff165769bc4f0c312372fe7c3b52ecda45a0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "versions/v1/v1_tb/aux_kfold.py", "max_forks_repo_name": "otavares93/rxpix2pix", "max_forks_repo_head_hexsha": "cc72ff165769bc4f0c312372fe7c3b52ecda45a0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8391959799, "max_line_length": 298, "alphanum_fraction": 0.6421146421, "include": true, "reason": "import numpy", "num_tokens": 1709}
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
⊢ HasSum (fun x => 0) 0
[PROOFSTEP]
simp [HasSum, tendsto_const_nhds]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
inst✝ : IsEmpty β
⊢ HasSum f 0
[PROOFSTEP]
convert @hasSum_zero α β _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
h : ¬Summable f
⊢ ∑' (b : β), f b = 0
[PROOFSTEP]
simp [tsum_def, h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
g : γ → β
hg : Injective g
hf : ∀ (x : β), ¬x ∈ Set.range g → f x = 0
⊢ HasSum (f ∘ g) a ↔ HasSum f a
[PROOFSTEP]
simp only [HasSum, Tendsto, comp_apply, hg.map_atTop_finset_sum_eq hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
g : β → γ
hg : Injective g
⊢ HasSum (extend g f 0) a ↔ HasSum f a
[PROOFSTEP]
rw [← hg.hasSum_iff, extend_comp hg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
g : β → γ
hg : Injective g
⊢ ∀ (x : γ), ¬x ∈ Set.range g → extend g f 0 x = 0
[PROOFSTEP]
exact extend_apply' _ _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
s : Set β
hf : support f ⊆ s
⊢ ∀ (x : β), (¬x ∈ Set.range fun a => ↑a) → f x = 0
[PROOFSTEP]
simpa using support_subset_iff'.1 hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
s : Set β
⊢ HasSum (f ∘ Subtype.val) a ↔ HasSum (Set.indicator s f) a
[PROOFSTEP]
rw [← Set.indicator_range_comp, Subtype.range_coe, hasSum_subtype_iff_of_support_subset Set.support_indicator_subset]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b : α
s✝ s : Finset β
f : β → α
⊢ HasSum (f ∘ Subtype.val) (∑ b in s, f b)
[PROOFSTEP]
rw [← sum_attach]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b : α
s✝ s : Finset β
f : β → α
⊢ HasSum (f ∘ Subtype.val) (∑ x in attach s, f ↑x)
[PROOFSTEP]
exact hasSum_fintype _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b : α
s✝ : Finset β
s : Set β
hs : Set.Finite s
f : β → α
⊢ Summable (f ∘ Subtype.val)
[PROOFSTEP]
have := hs.toFinset.summable f
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b : α
s✝ : Finset β
s : Set β
hs : Set.Finite s
f : β → α
this : Summable (f ∘ Subtype.val)
⊢ Summable (f ∘ Subtype.val)
[PROOFSTEP]
rwa [hs.coe_toFinset] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
h : Set.Finite (support f)
⊢ Summable f
[PROOFSTEP]
apply summable_of_ne_finset_zero (s := h.toFinset)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
h : Set.Finite (support f)
⊢ ∀ (b : β), ¬b ∈ Set.Finite.toFinset h → f b = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
ha : Summable f
⊢ HasSum f (∑' (b : β), f b)
[PROOFSTEP]
simp only [tsum_def, ha, dite_true]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
ha : Summable f
⊢ HasSum f (if Set.Finite (support fun b => f b) then ∑ᶠ (b : β), f b else Classical.choose (_ : Summable fun b => f b))
[PROOFSTEP]
by_cases H : (support f).Finite
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
ha : Summable f
H : Set.Finite (support f)
⊢ HasSum f (if Set.Finite (support fun b => f b) then ∑ᶠ (b : β), f b else Classical.choose (_ : Summable fun b => f b))
[PROOFSTEP]
simp [H, hasSum_sum_of_ne_finset_zero, finsum_eq_sum]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
ha : Summable f
H : ¬Set.Finite (support f)
⊢ HasSum f (if Set.Finite (support fun b => f b) then ∑ᶠ (b : β), f b else Classical.choose (_ : Summable fun b => f b))
[PROOFSTEP]
simpa [H] using Classical.choose_spec ha
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b✝ : α
s : Finset β
f : β → α
b : β
hf : ∀ (b' : β), b' ≠ b → f b' = 0
⊢ ∀ (b_1 : β), ¬b_1 ∈ {b} → f b_1 = 0
[PROOFSTEP]
simpa [hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a b✝ : α
s : Finset β
f : β → α
b : β
hf : ∀ (b' : β), b' ≠ b → f b' = 0
this : HasSum f (∑ b' in {b}, f b')
⊢ HasSum f (f b)
[PROOFSTEP]
simpa using this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b✝ : α
s : Finset β
b : β
inst✝ : DecidablePred fun x => x = b
a : α
⊢ HasSum (fun b' => if b' = b then a else 0) a
[PROOFSTEP]
convert @hasSum_single _ _ _ _ (fun b' => if b' = b then a else 0) b (fun b' hb' => if_neg hb')
[GOAL]
case h.e'_6
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b✝ : α
s : Finset β
b : β
inst✝ : DecidablePred fun x => x = b
a : α
⊢ a = if b = b then a else 0
[PROOFSTEP]
exact (if_pos rfl).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b✝ : α
s : Finset β
inst✝ : DecidableEq β
b : β
a : α
⊢ HasSum (Pi.single b a) a
[PROOFSTEP]
convert hasSum_ite_eq b a
[GOAL]
case h.e'_5.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b✝ : α
s : Finset β
inst✝ : DecidableEq β
b : β
a : α
x✝ : β
⊢ Pi.single b a x✝ = if x✝ = b then a else 0
[PROOFSTEP]
simp [Pi.single_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
e : γ ≃ β
⊢ ∀ (x : β), ¬x ∈ Set.range ↑e → f x = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
g : γ → α
e : ↑(support f) ≃ ↑(support g)
he : ∀ (x : ↑(support f)), g ↑(↑e x) = f ↑x
⊢ HasSum f a ↔ HasSum g a
[PROOFSTEP]
have : (g ∘ (↑)) ∘ e = f ∘ (↑) := funext he
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
g : γ → α
e : ↑(support f) ≃ ↑(support g)
he : ∀ (x : ↑(support f)), g ↑(↑e x) = f ↑x
this : (g ∘ Subtype.val) ∘ ↑e = f ∘ Subtype.val
⊢ HasSum f a ↔ HasSum g a
[PROOFSTEP]
rw [← hasSum_subtype_support, ← this, e.hasSum_iff, hasSum_subtype_support]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommMonoid α
inst✝⁴ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
inst✝³ : AddCommMonoid γ
inst✝² : TopologicalSpace γ
G : Type u_5
G' : Type u_6
inst✝¹ : AddMonoidHomClass G α γ
inst✝ : AddMonoidHomClass G' γ α
g : G
g' : G'
hg : Continuous ↑g
hg' : Continuous ↑g'
hinv : LeftInverse ↑g' ↑g
h : Summable (↑g ∘ f)
⊢ Summable f
[PROOFSTEP]
have := h.map _ hg'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommMonoid α
inst✝⁴ : TopologicalSpace α
f g✝ : β → α
a b : α
s : Finset β
inst✝³ : AddCommMonoid γ
inst✝² : TopologicalSpace γ
G : Type u_5
G' : Type u_6
inst✝¹ : AddMonoidHomClass G α γ
inst✝ : AddMonoidHomClass G' γ α
g : G
g' : G'
hg : Continuous ↑g
hg' : Continuous ↑g'
hinv : LeftInverse ↑g' ↑g
h : Summable (↑g ∘ f)
this : Summable (↑g' ∘ ↑g ∘ f)
⊢ Summable f
[PROOFSTEP]
rwa [← Function.comp.assoc, hinv.id] at this
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s : Finset β
inst✝ : T2Space α
f : ℕ → α
a : α
hf : Summable f
⊢ HasSum f a ↔ Tendsto (fun n => ∑ i in range n, f i) atTop (𝓝 a)
[PROOFSTEP]
refine' ⟨fun h => h.tendsto_sum_nat, fun h => _⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s : Finset β
inst✝ : T2Space α
f : ℕ → α
a : α
hf : Summable f
h : Tendsto (fun n => ∑ i in range n, f i) atTop (𝓝 a)
⊢ HasSum f a
[PROOFSTEP]
rw [tendsto_nhds_unique h hf.hasSum.tendsto_sum_nat]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s : Finset β
inst✝ : T2Space α
f : ℕ → α
a : α
hf : Summable f
h : Tendsto (fun n => ∑ i in range n, f i) atTop (𝓝 a)
⊢ HasSum f (∑' (b : ℕ), f b)
[PROOFSTEP]
exact hf.hasSum
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
hf : HasSum f a
hg : HasSum g b
⊢ HasSum (fun b => f b + g b) (a + b)
[PROOFSTEP]
dsimp only [HasSum] at hf hg ⊢
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
hf : Tendsto (fun s => ∑ b in s, f b) atTop (𝓝 a)
hg : Tendsto (fun s => ∑ b in s, g b) atTop (𝓝 b)
⊢ Tendsto (fun s => ∑ b in s, (f b + g b)) atTop (𝓝 (a + b))
[PROOFSTEP]
simp_rw [sum_add_distrib]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
hf : Tendsto (fun s => ∑ b in s, f b) atTop (𝓝 a)
hg : Tendsto (fun s => ∑ b in s, g b) atTop (𝓝 b)
⊢ Tendsto (fun s => ∑ x in s, f x + ∑ x in s, g x) atTop (𝓝 (a + b))
[PROOFSTEP]
exact hf.add hg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
f : γ → β → α
a : γ → α
s : Finset γ
⊢ (∀ (i : γ), i ∈ ∅ → HasSum (f i) (a i)) → HasSum (fun b => ∑ i in ∅, f i b) (∑ i in ∅, a i)
[PROOFSTEP]
simp only [hasSum_zero, sum_empty, forall_true_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
f : γ → β → α
a : γ → α
s : Finset γ
⊢ ∀ ⦃a_1 : γ⦄ {s : Finset γ},
¬a_1 ∈ s →
((∀ (i : γ), i ∈ s → HasSum (f i) (a i)) → HasSum (fun b => ∑ i in s, f i b) (∑ i in s, a i)) →
(∀ (i : γ), i ∈ insert a_1 s → HasSum (f i) (a i)) →
HasSum (fun b => ∑ i in insert a_1 s, f i b) (∑ i in insert a_1 s, a i)
[PROOFSTEP]
simp (config := { contextual := true }) only [mem_insert, forall_eq_or_imp, not_false_iff, sum_insert, and_imp]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
f : γ → β → α
a : γ → α
s : Finset γ
⊢ ∀ ⦃a_1 : γ⦄ {s : Finset γ},
¬a_1 ∈ s →
((∀ (i : γ), i ∈ s → HasSum (f i) (a i)) → HasSum (fun b => ∑ i in s, f i b) (∑ i in s, a i)) →
HasSum (f a_1) (a a_1) →
(∀ (a_5 : γ), a_5 ∈ s → HasSum (f a_5) (a a_5)) →
HasSum (fun b => f a_1 b + ∑ i in s, f i b) (a a_1 + ∑ i in s, a i)
[PROOFSTEP]
exact fun x s _ IH hx h ↦ hx.add (IH h)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
s t : Set β
hs : Disjoint s t
ha : HasSum (f ∘ Subtype.val) a
hb : HasSum (f ∘ Subtype.val) b
⊢ HasSum (f ∘ Subtype.val) (a + b)
[PROOFSTEP]
rw [hasSum_subtype_iff_indicator] at *
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
s t : Set β
hs : Disjoint s t
ha : HasSum (Set.indicator s f) a
hb : HasSum (Set.indicator t f) b
⊢ HasSum (Set.indicator (s ∪ t) f) (a + b)
[PROOFSTEP]
rw [Set.indicator_union_of_disjoint hs]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
s t : Set β
hs : Disjoint s t
ha : HasSum (Set.indicator s f) a
hb : HasSum (Set.indicator t f) b
⊢ HasSum (fun a => Set.indicator s f a + Set.indicator t f a) (a + b)
[PROOFSTEP]
exact ha.add hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
ι : Type u_5
s : Finset ι
t : ι → Set β
a : ι → α
hs : Set.Pairwise (↑s) (Disjoint on t)
hf : ∀ (i : ι), i ∈ s → HasSum (f ∘ Subtype.val) (a i)
⊢ HasSum (f ∘ Subtype.val) (∑ i in s, a i)
[PROOFSTEP]
simp_rw [hasSum_subtype_iff_indicator] at *
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
ι : Type u_5
s : Finset ι
t : ι → Set β
a : ι → α
hs : Set.Pairwise (↑s) (Disjoint on t)
hf : ∀ (i : ι), i ∈ s → HasSum (Set.indicator (t i) f) (a i)
⊢ HasSum (Set.indicator (⋃ (i : ι) (_ : i ∈ s), t i) f) (∑ i in s, a i)
[PROOFSTEP]
rw [Set.indicator_finset_biUnion _ _ hs]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
ι : Type u_5
s : Finset ι
t : ι → Set β
a : ι → α
hs : Set.Pairwise (↑s) (Disjoint on t)
hf : ∀ (i : ι), i ∈ s → HasSum (Set.indicator (t i) f) (a i)
⊢ HasSum (fun a => ∑ i in s, Set.indicator (t i) f a) (∑ i in s, a i)
[PROOFSTEP]
exact hasSum_sum hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a b : α
s✝ : Finset β
inst✝ : ContinuousAdd α
s t : Set β
hs : IsCompl s t
ha : HasSum (f ∘ Subtype.val) a
hb : HasSum (f ∘ Subtype.val) b
⊢ HasSum f (a + b)
[PROOFSTEP]
simpa [← hs.compl_eq] using (hasSum_subtype_iff_indicator.1 ha).add (hasSum_subtype_iff_indicator.1 hb)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
f : ℕ → α
he : HasSum (fun k => f (2 * k)) a
ho : HasSum (fun k => f (2 * k + 1)) b
⊢ HasSum f (a + b)
[PROOFSTEP]
have := mul_right_injective₀ (two_ne_zero' ℕ)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
f : ℕ → α
he : HasSum (fun k => f (2 * k)) a
ho : HasSum (fun k => f (2 * k + 1)) b
this : Injective ((fun x x_1 => x * x_1) 2)
⊢ HasSum f (a + b)
[PROOFSTEP]
replace he := this.hasSum_range_iff.2 he
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
f : ℕ → α
ho : HasSum (fun k => f (2 * k + 1)) b
this : Injective ((fun x x_1 => x * x_1) 2)
he : HasSum (fun x => f ↑x) a
⊢ HasSum f (a + b)
[PROOFSTEP]
replace ho := ((add_left_injective 1).comp this).hasSum_range_iff.2 ho
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
f : ℕ → α
this : Injective ((fun x x_1 => x * x_1) 2)
he : HasSum (fun x => f ↑x) a
ho : HasSum (fun x => f ↑x) b
⊢ HasSum f (a + b)
[PROOFSTEP]
refine' he.add_isCompl _ ho
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a b : α
s : Finset β
inst✝ : ContinuousAdd α
f : ℕ → α
this : Injective ((fun x x_1 => x * x_1) 2)
he : HasSum (fun x => f ↑x) a
ho : HasSum (fun x => f ↑x) b
⊢ IsCompl (Set.range ((fun x x_1 => x * x_1) 2)) (Set.range ((fun x => x + 1) ∘ (fun x x_1 => x * x_1) 2))
[PROOFSTEP]
simpa [(· ∘ ·)] using Nat.isCompl_even_odd
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
⊢ HasSum g a
[PROOFSTEP]
refine' (atTop_basis.tendsto_iff (closed_nhds_basis a)).mpr _
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
⊢ ∀ (ib : Set α), ib ∈ 𝓝 a ∧ IsClosed ib → ∃ ia, True ∧ ∀ (x : Finset β), x ∈ Set.Ici ia → ∑ b in x, g b ∈ id ib
[PROOFSTEP]
rintro s ⟨hs, hsc⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
⊢ ∃ ia, True ∧ ∀ (x : Finset β), x ∈ Set.Ici ia → ∑ b in x, g b ∈ id s
[PROOFSTEP]
rcases mem_atTop_sets.mp (ha hs) with ⟨u, hu⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
hu : ∀ (b : Finset ((b : β) × γ b)), b ≥ u → b ∈ (fun s => ∑ b in s, f b) ⁻¹' s
⊢ ∃ ia, True ∧ ∀ (x : Finset β), x ∈ Set.Ici ia → ∑ b in x, g b ∈ id s
[PROOFSTEP]
use u.image Sigma.fst, trivial
[GOAL]
case right
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
hu : ∀ (b : Finset ((b : β) × γ b)), b ≥ u → b ∈ (fun s => ∑ b in s, f b) ⁻¹' s
⊢ ∀ (x : Finset β), x ∈ Set.Ici (image Sigma.fst u) → ∑ b in x, g b ∈ id s
[PROOFSTEP]
intro bs hbs
[GOAL]
case right
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
hu : ∀ (b : Finset ((b : β) × γ b)), b ≥ u → b ∈ (fun s => ∑ b in s, f b) ⁻¹' s
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
⊢ ∑ b in bs, g b ∈ id s
[PROOFSTEP]
simp only [Set.mem_preimage, ge_iff_le, Finset.le_iff_subset] at hu
[GOAL]
case right
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
⊢ ∑ b in bs, g b ∈ id s
[PROOFSTEP]
have : Tendsto (fun t : Finset (Σ b, γ b) => ∑ p in t.filter fun p => p.1 ∈ bs, f p) atTop (𝓝 <| ∑ b in bs, g b) :=
by
simp only [← sigma_preimage_mk, sum_sigma]
refine' tendsto_finset_sum _ fun b _ => _
change Tendsto (fun t => (fun t => ∑ s in t, f ⟨b, s⟩) (preimage t (Sigma.mk b) _)) atTop (𝓝 (g b))
exact (hf b).comp (tendsto_finset_preimage_atTop_atTop (sigma_mk_injective))
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
⊢ Tendsto (fun t => ∑ p in filter (fun p => p.fst ∈ bs) t, f p) atTop (𝓝 (∑ b in bs, g b))
[PROOFSTEP]
simp only [← sigma_preimage_mk, sum_sigma]
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
⊢ Tendsto
(fun t =>
∑ a in bs,
∑ s in preimage t (Sigma.mk a) (_ : Set.InjOn (Sigma.mk a) (Sigma.mk a ⁻¹' ↑t)), f { fst := a, snd := s })
atTop (𝓝 (∑ b in bs, g b))
[PROOFSTEP]
refine' tendsto_finset_sum _ fun b _ => _
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b✝ : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
b : β
x✝ : b ∈ bs
⊢ Tendsto
(fun t => ∑ s in preimage t (Sigma.mk b) (_ : Set.InjOn (Sigma.mk b) (Sigma.mk b ⁻¹' ↑t)), f { fst := b, snd := s })
atTop (𝓝 (g b))
[PROOFSTEP]
change Tendsto (fun t => (fun t => ∑ s in t, f ⟨b, s⟩) (preimage t (Sigma.mk b) _)) atTop (𝓝 (g b))
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b✝ : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
b : β
x✝ : b ∈ bs
⊢ Tendsto
(fun t =>
(fun t => ∑ s in t, f { fst := b, snd := s })
(preimage t (Sigma.mk b) (_ : Set.InjOn (Sigma.mk b) (Sigma.mk b ⁻¹' ↑t))))
atTop (𝓝 (g b))
[PROOFSTEP]
exact (hf b).comp (tendsto_finset_preimage_atTop_atTop (sigma_mk_injective))
[GOAL]
case right
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
this : Tendsto (fun t => ∑ p in filter (fun p => p.fst ∈ bs) t, f p) atTop (𝓝 (∑ b in bs, g b))
⊢ ∑ b in bs, g b ∈ id s
[PROOFSTEP]
refine' hsc.mem_of_tendsto this (eventually_atTop.2 ⟨u, fun t ht => hu _ fun x hx => _⟩)
[GOAL]
case right
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s✝ : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : RegularSpace α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum f a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
s : Set α
hs : s ∈ 𝓝 a
hsc : IsClosed s
u : Finset ((b : β) × γ b)
bs : Finset β
hbs : bs ∈ Set.Ici (image Sigma.fst u)
hu : ∀ (b : Finset ((b : β) × γ b)), u ⊆ b → ∑ b in b, f b ∈ s
this : Tendsto (fun t => ∑ p in filter (fun p => p.fst ∈ bs) t, f p) atTop (𝓝 (∑ b in bs, g b))
t : Finset ((b : β) × γ b)
ht : t ≥ u
x : (b : β) × γ b
hx : x ∈ u
⊢ x ∈ filter (fun p => p.fst ∈ bs) t
[PROOFSTEP]
exact mem_filter.2 ⟨ht hx, hbs <| mem_image_of_mem _ hx⟩
[GOAL]
α : Type u_1
β : Type u_2
γ✝ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g✝ : β → α
a✝ b : α
s : Finset β
inst✝¹ : ContinuousAdd α
inst✝ : T3Space α
γ : β → Type u_5
f : (b : β) × γ b → α
g : β → α
a : α
ha : HasSum g a
hf : ∀ (b : β), HasSum (fun c => f { fst := b, snd := c }) (g b)
hf' : Summable f
⊢ HasSum f a
[PROOFSTEP]
simpa [(hf'.hasSum.sigma hf).unique ha] using hf'.hasSum
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
⊢ a + x = a' + f b
[PROOFSTEP]
have : ∀ b', f b' + ite (b' = b) x 0 = update f b x b' + ite (b' = b) (f b) 0 :=
by
intro b'
split_ifs with hb'
· simpa only [Function.update_apply, hb', eq_self_iff_true] using add_comm (f b) x
· simp only [Function.update_apply, hb', if_false]
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
⊢ ∀ (b' : β), (f b' + if b' = b then x else 0) = update f b x b' + if b' = b then f b else 0
[PROOFSTEP]
intro b'
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
b' : β
⊢ (f b' + if b' = b then x else 0) = update f b x b' + if b' = b then f b else 0
[PROOFSTEP]
split_ifs with hb'
[GOAL]
case pos
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
b' : β
hb' : b' = b
⊢ f b' + x = update f b x b' + f b
[PROOFSTEP]
simpa only [Function.update_apply, hb', eq_self_iff_true] using add_comm (f b) x
[GOAL]
case neg
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
b' : β
hb' : ¬b' = b
⊢ f b' + 0 = update f b x b' + 0
[PROOFSTEP]
simp only [Function.update_apply, hb', if_false]
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
this : ∀ (b' : β), (f b' + if b' = b then x else 0) = update f b x b' + if b' = b then f b else 0
⊢ a + x = a' + f b
[PROOFSTEP]
have h := hf.add (hasSum_ite_eq b x)
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
this : ∀ (b' : β), (f b' + if b' = b then x else 0) = update f b x b' + if b' = b then f b else 0
h : HasSum (fun b_1 => f b_1 + if b_1 = b then x else 0) (a + x)
⊢ a + x = a' + f b
[PROOFSTEP]
simp_rw [this] at h
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a a' : α
hf : HasSum f a
b : β
x : α
hf' : HasSum (update f b x) a'
this : ∀ (b' : β), (f b' + if b' = b then x else 0) = update f b x b' + if b' = b then f b else 0
h : HasSum (fun b_1 => update f b x b_1 + if b_1 = b then f b else 0) (a + x)
⊢ a + x = a' + f b
[PROOFSTEP]
exact HasSum.unique h (hf'.add (hasSum_ite_eq b (f b)))
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a : α
hf : HasSum f a
b : β
a' : α
hf' : HasSum (fun n => if n = b then 0 else f n) a'
⊢ a = a' + f b
[PROOFSTEP]
refine' (add_zero a).symm.trans (hf.update' b 0 _)
[GOAL]
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a : α
hf : HasSum f a
b : β
a' : α
hf' : HasSum (fun n => if n = b then 0 else f n) a'
⊢ HasSum (update f b 0) a'
[PROOFSTEP]
convert hf'
[GOAL]
case h.e'_5.h
α✝ : Type u_1
β✝ : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommMonoid α✝
inst✝⁵ : TopologicalSpace α✝
f✝ g : β✝ → α✝
a✝ b✝ : α✝
s : Finset β✝
inst✝⁴ : ContinuousAdd α✝
α : Type u_5
β : Type u_6
inst✝³ : TopologicalSpace α
inst✝² : AddCommMonoid α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
a : α
hf : HasSum f a
b : β
a' : α
hf' : HasSum (fun n => if n = b then 0 else f n) a'
x✝ : β
⊢ update f b 0 x✝ = if x✝ = b then 0 else f x✝
[PROOFSTEP]
apply update_apply
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
s t : Set β
h : s = t
⊢ ∑' (x : ↑s), f ↑x = ∑' (x : ↑t), f ↑x
[PROOFSTEP]
rw [h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
hf : Set.Finite (support f)
⊢ ∑' (b : β), f b = ∑ᶠ (b : β), f b
[PROOFSTEP]
simp [tsum_def, summable_of_finite_support hf, hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
⊢ ∑' (b : β), f b = ∑ b in s, f b
[PROOFSTEP]
have I : support f ⊆ s := by
intros x hx
contrapose! hx
rw [nmem_support]
exact hf _ hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
⊢ support f ⊆ ↑s
[PROOFSTEP]
intros x hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
x : β
hx : x ∈ support f
⊢ x ∈ ↑s
[PROOFSTEP]
contrapose! hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
x : β
hx : ¬x ∈ ↑s
⊢ ¬x ∈ support f
[PROOFSTEP]
rw [nmem_support]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
x : β
hx : ¬x ∈ ↑s
⊢ f x = 0
[PROOFSTEP]
exact hf _ hx
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
I : support f ⊆ ↑s
⊢ ∑' (b : β), f b = ∑ b in s, f b
[PROOFSTEP]
simp [tsum_def, summable_of_ne_finset_zero hf, Set.Finite.subset (finite_toSet s) I]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
s : Finset β
hf : ∀ (b : β), ¬b ∈ s → f b = 0
I : support f ⊆ ↑s
⊢ ∑ᶠ (b : β), f b = ∑ b in s, f b
[PROOFSTEP]
exact finsum_eq_sum_of_support_subset f I
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
⊢ ∑' (x : β), 0 = 0
[PROOFSTEP]
rw [tsum_eq_finsum]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
⊢ ∑ᶠ (b : β), 0 = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
⊢ Set.Finite (support fun x => 0)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝ : IsEmpty β
⊢ ∑' (b : β), f b = 0
[PROOFSTEP]
rw [tsum_eq_sum (s := (∅ : Finset β))]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝ : IsEmpty β
⊢ ∑ b in ∅, f b = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝ : IsEmpty β
⊢ ∀ (b : β), ¬b ∈ ∅ → f b = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : Fintype β
f : β → α
⊢ ∑' (b : β), f b = ∑ b : β, f b
[PROOFSTEP]
apply tsum_eq_sum
[GOAL]
case hf
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : Fintype β
f : β → α
⊢ ∀ (b : β), ¬b ∈ univ → f b = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : Bool → α
⊢ ∑' (i : Bool), f i = f (decide False) + f (decide True)
[PROOFSTEP]
rw [tsum_fintype, Finset.sum_eq_add]
[GOAL]
case hn
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : Bool → α
⊢ decide False ≠ decide True
[PROOFSTEP]
simp
[GOAL]
case h₀
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : Bool → α
⊢ ∀ (c : Bool), c ∈ univ → c ≠ decide False ∧ c ≠ decide True → f c = 0
[PROOFSTEP]
simp
[GOAL]
case ha
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : Bool → α
⊢ ¬decide False ∈ univ → f (decide False) = 0
[PROOFSTEP]
simp
[GOAL]
case hb
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : Bool → α
⊢ ¬decide True ∈ univ → f (decide True) = 0
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
b : β
hf : ∀ (b' : β), b' ≠ b → f b' = 0
⊢ ∑' (b : β), f b = f b
[PROOFSTEP]
rw [tsum_eq_sum (s := { b }), sum_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
b : β
hf : ∀ (b' : β), b' ≠ b → f b' = 0
⊢ ∀ (b_1 : β), ¬b_1 ∈ {b} → f b_1 = 0
[PROOFSTEP]
exact fun b' hb' ↦ hf b' (by simpa using hb')
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
b : β
hf : ∀ (b' : β), b' ≠ b → f b' = 0
b' : β
hb' : ¬b' ∈ {b}
⊢ b' ≠ b
[PROOFSTEP]
simpa using hb'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
b : β
inst✝ : DecidablePred fun x => x = b
a : α
⊢ (∑' (b' : β), if b' = b then a else 0) = a
[PROOFSTEP]
rw [tsum_eq_single b]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
b : β
inst✝ : DecidablePred fun x => x = b
a : α
⊢ (if b = b then a else 0) = a
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
b : β
inst✝ : DecidablePred fun x => x = b
a : α
⊢ ∀ (b' : β), b' ≠ b → (if b' = b then a else 0) = 0
[PROOFSTEP]
intro b' hb'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
b : β
inst✝ : DecidablePred fun x => x = b
a : α
b' : β
hb' : b' ≠ b
⊢ (if b' = b then a else 0) = 0
[PROOFSTEP]
simp [hb']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
inst✝ : DecidableEq β
b : β
a : α
⊢ ∑' (b' : β), Pi.single b a b' = a
[PROOFSTEP]
rw [tsum_eq_single b]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
inst✝ : DecidableEq β
b : β
a : α
⊢ Pi.single b a b = a
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
inst✝ : DecidableEq β
b : β
a : α
⊢ ∀ (b' : β), b' ≠ b → Pi.single b a b' = 0
[PROOFSTEP]
intro b' hb'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f g : β → α
a✝ a₁ a₂ : α
inst✝ : DecidableEq β
b : β
a : α
b' : β
hb' : b' ≠ b
⊢ Pi.single b a b' = 0
[PROOFSTEP]
simp [hb']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
s : Finset β
f : β → α
⊢ ∑' (x : { x // x ∈ s }), f ↑x = ∑ x in s, f x
[PROOFSTEP]
rw [← sum_attach]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
s : Finset β
f : β → α
⊢ ∑' (x : { x // x ∈ s }), f ↑x = ∑ x in attach s, f ↑x
[PROOFSTEP]
exact tsum_fintype _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
s : Finset β
f : β → α
⊢ ∑' (x : ↑↑s), f ↑x = ∑ x in s, f x
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝¹ : AddCommMonoid α
inst✝ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
b : β
f : β → α
⊢ ∑' (x : ↑{b}), f ↑x = f b
[PROOFSTEP]
rw [← coe_singleton, Finset.tsum_subtype', sum_singleton]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → ¬P → α
⊢ (∑' (b : β), if h : P then 0 else x b h) = if h : P then 0 else ∑' (b : β), x b h
[PROOFSTEP]
by_cases hP : P
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → ¬P → α
hP : P
⊢ (∑' (b : β), if h : P then 0 else x b h) = if h : P then 0 else ∑' (b : β), x b h
[PROOFSTEP]
simp [hP]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → ¬P → α
hP : ¬P
⊢ (∑' (b : β), if h : P then 0 else x b h) = if h : P then 0 else ∑' (b : β), x b h
[PROOFSTEP]
simp [hP]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → P → α
⊢ (∑' (b : β), if h : P then x b h else 0) = if h : P then ∑' (b : β), x b h else 0
[PROOFSTEP]
by_cases hP : P
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → P → α
hP : P
⊢ (∑' (b : β), if h : P then x b h else 0) = if h : P then ∑' (b : β), x b h else 0
[PROOFSTEP]
simp [hP]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
P : Prop
inst✝ : Decidable P
x : β → P → α
hP : ¬P
⊢ (∑' (b : β), if h : P then x b h else 0) = if h : P then ∑' (b : β), x b h else 0
[PROOFSTEP]
simp [hP]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f✝ g✝ : β → α
a a₁ a₂ : α
inst✝² : T2Space α
α' : Type u_5
inst✝¹ : AddCommMonoid α'
inst✝ : TopologicalSpace α'
e : α' → α
hes : Surjective e
h0 : e 0 = 0
f : β → α
g : γ → α'
h : ∀ {a : α'}, HasSum f (e a) ↔ HasSum g a
hg : ¬Summable g
⊢ ∑' (b : β), f b = e (∑' (c : γ), g c)
[PROOFSTEP]
have hf : ¬Summable f := mt (hes.summable_iff_of_hasSum_iff @h).1 hg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f✝ g✝ : β → α
a a₁ a₂ : α
inst✝² : T2Space α
α' : Type u_5
inst✝¹ : AddCommMonoid α'
inst✝ : TopologicalSpace α'
e : α' → α
hes : Surjective e
h0 : e 0 = 0
f : β → α
g : γ → α'
h : ∀ {a : α'}, HasSum f (e a) ↔ HasSum g a
hg : ¬Summable g
hf : ¬Summable f
⊢ ∑' (b : β), f b = e (∑' (c : γ), g c)
[PROOFSTEP]
simp [tsum_def, hf, hg, h0]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g✝ : β → α
a a₁ a₂ : α
inst✝ : T2Space α
g : γ → β
f : β → α
hg : Injective g
⊢ ∑' (x : ↑(Set.range g)), f ↑x = ∑' (x : γ), f (g x)
[PROOFSTEP]
rw [← Set.image_univ, tsum_image f (hg.injOn _)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f✝ g✝ : β → α
a a₁ a₂ : α
inst✝ : T2Space α
g : γ → β
f : β → α
hg : Injective g
⊢ ∑' (x : ↑Set.univ), f (g ↑x) = ∑' (x : γ), f (g x)
[PROOFSTEP]
simp_rw [← comp_apply (g := g), tsum_univ (f ∘ g)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
n : β
⊢ f n = (if n = b then f n else 0) + update f b 0 n
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
n : β
h : n = b
⊢ f n = f n + update f b 0 n
[PROOFSTEP]
simp [update_apply, h]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
n : β
h : ¬n = b
⊢ f n = 0 + update f b 0 n
[PROOFSTEP]
simp [update_apply, h]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
⊢ (∑' (x : β), if x = b then f x else 0) + ∑' (x : β), update f b 0 x =
(if b = b then f b else 0) + ∑' (x : β), update f b 0 x
[PROOFSTEP]
congr
[GOAL]
case e_a
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
⊢ (∑' (x : β), if x = b then f x else 0) = if b = b then f b else 0
[PROOFSTEP]
exact tsum_eq_single b fun b' hb' => if_neg hb'
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : ContinuousAdd α
f : β → α
b : β
hf : Summable (update f b 0)
⊢ (if b = b then f b else 0) + ∑' (x : β), update f b 0 x = f b + ∑' (x : β), if x = b then 0 else f x
[PROOFSTEP]
simp only [update, eq_self_iff_true, if_true, eq_rec_constant, dite_eq_ite]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁷ : AddCommMonoid α
inst✝⁶ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝⁵ : T2Space α
inst✝⁴ : ContinuousAdd α
inst✝³ : AddCommMonoid δ
inst✝² : TopologicalSpace δ
inst✝¹ : T3Space δ
inst✝ : ContinuousAdd δ
f : β → γ → δ
h : Summable (uncurry f)
h₁ : ∀ (b : β), Summable (f b)
h₂ : ∀ (c : γ), Summable fun b => f b c
⊢ ∑' (c : γ) (b : β), f b c = ∑' (b : β) (c : γ), f b c
[PROOFSTEP]
erw [← tsum_prod' h h₁, ← tsum_prod' h.prod_symm h₂, ← (Equiv.prodComm γ β).tsum_eq (uncurry f)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁷ : AddCommMonoid α
inst✝⁶ : TopologicalSpace α
f✝ g : β → α
a a₁ a₂ : α
inst✝⁵ : T2Space α
inst✝⁴ : ContinuousAdd α
inst✝³ : AddCommMonoid δ
inst✝² : TopologicalSpace δ
inst✝¹ : T3Space δ
inst✝ : ContinuousAdd δ
f : β → γ → δ
h : Summable (uncurry f)
h₁ : ∀ (b : β), Summable (f b)
h₂ : ∀ (c : γ), Summable fun b => f b c
⊢ ∑' (p : γ × β), uncurry f (Prod.swap p) = ∑' (c : γ × β), uncurry f (↑(Equiv.prodComm γ β) c)
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
⊢ ∑' (i : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b) = ∑' (b : γ), m (s b)
[PROOFSTEP]
have H : ∀ n, m (⨆ b ∈ decode₂ γ n, s b) ≠ 0 → (decode₂ γ n).isSome :=
by
intro n h
generalize decode₂ γ n = foo at *
cases' foo with b
· refine' (h <| by simp [m0]).elim
· exact rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
⊢ ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
[PROOFSTEP]
intro n h
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
n : ℕ
h : m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0
⊢ Option.isSome (decode₂ γ n) = true
[PROOFSTEP]
generalize decode₂ γ n = foo at *
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
n : ℕ
foo : Option γ
h : m (⨆ (b : γ) (_ : b ∈ foo), s b) ≠ 0
⊢ Option.isSome foo = true
[PROOFSTEP]
cases' foo with b
[GOAL]
case none
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
n : ℕ
h : m (⨆ (b : γ) (_ : b ∈ none), s b) ≠ 0
⊢ Option.isSome none = true
[PROOFSTEP]
refine' (h <| by simp [m0]).elim
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
n : ℕ
h : m (⨆ (b : γ) (_ : b ∈ none), s b) ≠ 0
⊢ m (⨆ (b : γ) (_ : b ∈ none), s b) = 0
[PROOFSTEP]
simp [m0]
[GOAL]
case some
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
n : ℕ
b : γ
h : m (⨆ (b_1 : γ) (_ : b_1 ∈ some b), s b_1) ≠ 0
⊢ Option.isSome (some b) = true
[PROOFSTEP]
exact rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ ∑' (i : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b) = ∑' (b : γ), m (s b)
[PROOFSTEP]
symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ ∑' (b : γ), m (s b) = ∑' (i : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
[PROOFSTEP]
refine' tsum_eq_tsum_of_ne_zero_bij (fun a => Option.get _ (H a.1 a.2)) _ _ _
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ ∀ ⦃x y : ↑(support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b))⦄,
(fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)) x =
(fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)) y →
↑x = ↑y
[PROOFSTEP]
dsimp only []
[GOAL]
case refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ ∀ ⦃x y : ↑(support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b))⦄,
Option.get (decode₂ γ ↑x) (_ : Option.isSome (decode₂ γ ↑x) = true) =
Option.get (decode₂ γ ↑y) (_ : Option.isSome (decode₂ γ ↑y) = true) →
↑x = ↑y
[PROOFSTEP]
rintro ⟨m, hm⟩ ⟨n, hn⟩ e
[GOAL]
case refine'_1.mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m✝ : β → α
m0 : m✝ ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
m : ℕ
hm : m ∈ support fun i => m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
n : ℕ
hn : n ∈ support fun i => m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
e :
Option.get (decode₂ γ ↑{ val := m, property := hm })
(_ : Option.isSome (decode₂ γ ↑{ val := m, property := hm }) = true) =
Option.get (decode₂ γ ↑{ val := n, property := hn })
(_ : Option.isSome (decode₂ γ ↑{ val := n, property := hn }) = true)
⊢ ↑{ val := m, property := hm } = ↑{ val := n, property := hn }
[PROOFSTEP]
have := mem_decode₂.1 (Option.get_mem (H n hn))
[GOAL]
case refine'_1.mk.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m✝ : β → α
m0 : m✝ ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
m : ℕ
hm : m ∈ support fun i => m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
n : ℕ
hn : n ∈ support fun i => m✝ (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
e :
Option.get (decode₂ γ ↑{ val := m, property := hm })
(_ : Option.isSome (decode₂ γ ↑{ val := m, property := hm }) = true) =
Option.get (decode₂ γ ↑{ val := n, property := hn })
(_ : Option.isSome (decode₂ γ ↑{ val := n, property := hn }) = true)
this : encode (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true)) = n
⊢ ↑{ val := m, property := hm } = ↑{ val := n, property := hn }
[PROOFSTEP]
rwa [← e, mem_decode₂.1 (Option.get_mem (H m hm))] at this
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ (support fun b => m (s b)) ⊆ Set.range fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)
[PROOFSTEP]
intro b h
[GOAL]
case refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
b : γ
h : b ∈ support fun b => m (s b)
⊢ b ∈ Set.range fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)
[PROOFSTEP]
refine' ⟨⟨encode b, _⟩, _⟩
[GOAL]
case refine'_2.refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
b : γ
h : b ∈ support fun b => m (s b)
⊢ encode b ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
[PROOFSTEP]
simp only [mem_support, encodek₂] at h ⊢
[GOAL]
case refine'_2.refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
b : γ
h : m (s b) ≠ 0
⊢ m (⨆ (b_1 : γ) (_ : b_1 ∈ some b), s b_1) ≠ 0
[PROOFSTEP]
convert h
[GOAL]
case h.e'_2.h.e'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
b : γ
h : m (s b) ≠ 0
⊢ ⨆ (b_1 : γ) (_ : b_1 ∈ some b), s b_1 = s b
[PROOFSTEP]
simp [Set.ext_iff, encodek₂]
[GOAL]
case refine'_2.refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
b : γ
h : b ∈ support fun b => m (s b)
⊢ (fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true))
{ val := encode b, property := (_ : encode b ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)) } =
b
[PROOFSTEP]
exact Option.get_of_mem _ (encodek₂ _)
[GOAL]
case refine'_3
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
⊢ ∀ (x : ↑(support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b))),
m (s ((fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)) x)) =
m (⨆ (b : γ) (_ : b ∈ decode₂ γ ↑x), s b)
[PROOFSTEP]
rintro ⟨n, h⟩
[GOAL]
case refine'_3.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ m (s ((fun a => Option.get (decode₂ γ ↑a) (_ : Option.isSome (decode₂ γ ↑a) = true)) { val := n, property := h })) =
m (⨆ (b : γ) (_ : b ∈ decode₂ γ ↑{ val := n, property := h }), s b)
[PROOFSTEP]
dsimp only [Subtype.coe_mk]
[GOAL]
case refine'_3.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ m (s (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))) = m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b)
[PROOFSTEP]
trans
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ m (s (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))) = ?m.392413
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ ?m.392413 = m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b)
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ α
[PROOFSTEP]
swap
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ ?m.392413 = m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b)
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ m (s (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))) = ?m.392413
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ α
[PROOFSTEP]
rw [show decode₂ γ n = _ from Option.get_mem (H n h)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ m (s (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))) =
m (⨆ (b : γ) (_ : b ∈ some (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))), s b)
[PROOFSTEP]
congr
[GOAL]
case e_a
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Encodable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
s : γ → β
H : ∀ (n : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ n), s b) ≠ 0 → Option.isSome (decode₂ γ n) = true
n : ℕ
h : n ∈ support fun i => m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b)
⊢ s (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true)) =
⨆ (b : γ) (_ : b ∈ some (Option.get (decode₂ γ n) (_ : Option.isSome (decode₂ γ n) = true))), s b
[PROOFSTEP]
simp [ext_iff, -Option.some_get]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s : γ → β
⊢ R (m (⨆ (b : γ), s b)) (∑' (b : γ), m (s b))
[PROOFSTEP]
cases nonempty_encodable γ
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s : γ → β
val✝ : Encodable γ
⊢ R (m (⨆ (b : γ), s b)) (∑' (b : γ), m (s b))
[PROOFSTEP]
rw [← iSup_decode₂, ← tsum_iSup_decode₂ _ m0 s]
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s : γ → β
val✝ : Encodable γ
⊢ R (m (⨆ (i : ℕ) (b : γ) (_ : b ∈ decode₂ γ i), s b)) (∑' (i : ℕ), m (⨆ (b : γ) (_ : b ∈ decode₂ γ i), s b))
[PROOFSTEP]
exact m_iSup _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s : δ → β
t : Finset δ
⊢ R (m (⨆ (d : δ) (_ : d ∈ t), s d)) (∑ d in t, m (s d))
[PROOFSTEP]
rw [iSup_subtype', ← Finset.tsum_subtype]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s : δ → β
t : Finset δ
⊢ R (m (⨆ (x : { d // d ∈ t }), s ↑x)) (∑' (x : { x // x ∈ t }), m (s ↑x))
[PROOFSTEP]
exact rel_iSup_tsum m m0 R m_iSup _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s₁ s₂ : β
⊢ R (m (s₁ ⊔ s₂)) (m s₁ + m s₂)
[PROOFSTEP]
convert rel_iSup_tsum m m0 R m_iSup fun b => cond b s₁ s₂
[GOAL]
case h.e'_1.h.e'_1
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s₁ s₂ : β
⊢ s₁ ⊔ s₂ = ⨆ (b : Bool), bif b then s₁ else s₂
[PROOFSTEP]
simp only [iSup_bool_eq, cond]
[GOAL]
case h.e'_2
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
f g : β → α
a a₁ a₂ : α
inst✝² : T2Space α
inst✝¹ : Countable γ
inst✝ : CompleteLattice β
m : β → α
m0 : m ⊥ = 0
R : α → α → Prop
m_iSup : ∀ (s : ℕ → β), R (m (⨆ (i : ℕ), s i)) (∑' (i : ℕ), m (s i))
s₁ s₂ : β
⊢ m s₁ + m s₂ = ∑' (b : Bool), m (bif b then s₁ else s₂)
[PROOFSTEP]
rw [tsum_fintype, Fintype.sum_bool, cond, cond]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
h : HasSum f a
⊢ HasSum (fun b => -f b) (-a)
[PROOFSTEP]
simpa only using h.map (-AddMonoidHom.id α) continuous_neg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
hf : Summable fun b => -f b
⊢ Summable f
[PROOFSTEP]
simpa only [neg_neg] using hf.neg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
hf : HasSum f a₁
hg : HasSum g a₂
⊢ HasSum (fun b => f b - g b) (a₁ - a₂)
[PROOFSTEP]
simp only [sub_eq_add_neg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
hf : HasSum f a₁
hg : HasSum g a₂
⊢ HasSum (fun b => f b + -g b) (a₁ + -a₂)
[PROOFSTEP]
exact hf.add hg.neg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
hg : Summable g
hfg : Summable fun b => f b - g b
⊢ Summable f
[PROOFSTEP]
simpa only [sub_add_cancel] using hfg.add hg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
hfg : Summable fun b => f b - g b
hf : Summable f
⊢ Summable fun b => g b - f b
[PROOFSTEP]
simpa only [neg_sub] using hfg.neg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
⊢ HasSum (Function.update f b a) (a - f b + a₁)
[PROOFSTEP]
convert (hasSum_ite_eq b (a - f b)).add hf
[GOAL]
case h.e'_5.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
x✝ : β
⊢ Function.update f b a x✝ = (if x✝ = b then a - f b else 0) + f x✝
[PROOFSTEP]
rename_i b'
[GOAL]
case h.e'_5.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
b' : β
⊢ Function.update f b a b' = (if b' = b then a - f b else 0) + f b'
[PROOFSTEP]
by_cases h : b' = b
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
b' : β
h : b' = b
⊢ Function.update f b a b' = (if b' = b then a - f b else 0) + f b'
[PROOFSTEP]
rw [h, update_same]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
b' : β
h : b' = b
⊢ a = (if b = b then a - f b else 0) + f b
[PROOFSTEP]
simp [eq_self_iff_true, if_true, sub_add_cancel]
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a✝ a₁ a₂ : α
hf : HasSum f a₁
b : β
inst✝ : DecidableEq β
a : α
b' : β
h : ¬b' = b
⊢ Function.update f b a b' = (if b' = b then a - f b else 0) + f b'
[PROOFSTEP]
simp only [h, update_noteq, if_false, Ne.def, zero_add, not_false_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Set β
hf : HasSum (f ∘ Subtype.val) a₁
⊢ HasSum (f ∘ Subtype.val) a₂ ↔ HasSum f (a₁ + a₂)
[PROOFSTEP]
refine' ⟨fun h => hf.add_compl h, fun h => _⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Set β
hf : HasSum (f ∘ Subtype.val) a₁
h : HasSum f (a₁ + a₂)
⊢ HasSum (f ∘ Subtype.val) a₂
[PROOFSTEP]
rw [hasSum_subtype_iff_indicator] at hf ⊢
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Set β
hf : HasSum (Set.indicator s f) a₁
h : HasSum f (a₁ + a₂)
⊢ HasSum (Set.indicator sᶜ f) a₂
[PROOFSTEP]
rw [Set.indicator_compl]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Set β
hf : HasSum (Set.indicator s f) a₁
h : HasSum f (a₁ + a₂)
⊢ HasSum (f - Set.indicator s f) a₂
[PROOFSTEP]
simpa only [add_sub_cancel'] using h.sub hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Set β
hf : HasSum (f ∘ Subtype.val) a₁
⊢ HasSum f (a₁ + (a₂ - a₁)) ↔ HasSum f a₂
[PROOFSTEP]
rw [add_sub_cancel'_right]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
s : Finset β
⊢ HasSum f (∑ b in s, f b + a) ↔ HasSum f (a + ∑ i in s, f i)
[PROOFSTEP]
rw [add_comm]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : DecidableEq β
hf : HasSum f a
b : β
⊢ HasSum (fun n => if n = b then 0 else f n) (a - f b)
[PROOFSTEP]
convert hf.update b 0 using 1
[GOAL]
case h.e'_5
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : DecidableEq β
hf : HasSum f a
b : β
⊢ (fun n => if n = b then 0 else f n) = update f b 0
[PROOFSTEP]
ext n
[GOAL]
case h.e'_5.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : DecidableEq β
hf : HasSum f a
b n : β
⊢ (if n = b then 0 else f n) = update f b 0 n
[PROOFSTEP]
rw [Function.update_apply]
[GOAL]
case h.e'_6
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : DecidableEq β
hf : HasSum f a
b : β
⊢ a - f b = 0 - f b + a
[PROOFSTEP]
rw [sub_add_eq_add_sub, zero_add]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
⊢ ∑' (b : β), -f b = -∑' (b : β), f b
[PROOFSTEP]
by_cases hf : Summable f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
hf : Summable f
⊢ ∑' (b : β), -f b = -∑' (b : β), f b
[PROOFSTEP]
exact hf.hasSum.neg.tsum_eq
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
hf : ¬Summable f
⊢ ∑' (b : β), -f b = -∑' (b : β), f b
[PROOFSTEP]
simp [tsum_eq_zero_of_not_summable hf, tsum_eq_zero_of_not_summable (mt Summable.of_neg hf)]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommGroup α
inst✝³ : TopologicalSpace α
inst✝² : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : DecidableEq β
hf : Summable f
b : β
⊢ ∑' (n : β), f n = f b + ∑' (n : β), if n = b then 0 else f n
[PROOFSTEP]
rw [(hasSum_ite_sub_hasSum hf.hasSum b).tsum_eq]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommGroup α
inst✝³ : TopologicalSpace α
inst✝² : TopologicalAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝¹ : T2Space α
inst✝ : DecidableEq β
hf : Summable f
b : β
⊢ ∑' (n : β), f n = f b + (∑' (b : β), f b - f b)
[PROOFSTEP]
exact (add_sub_cancel'_right _ _).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : ℕ → α
k : ℕ
a : α
⊢ HasSum (fun n => f (n + k)) a ↔ HasSum f (a + ∑ i in range k, f i)
[PROOFSTEP]
refine' Iff.trans _ (range k).hasSum_compl_iff
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : ℕ → α
k : ℕ
a : α
⊢ HasSum (fun n => f (n + k)) a ↔ HasSum (fun x => f ↑x) a
[PROOFSTEP]
rw [← (notMemRangeEquiv k).symm.hasSum_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : ℕ → α
k : ℕ
a : α
⊢ HasSum (fun n => f (n + k)) a ↔ HasSum ((fun x => f ↑x) ∘ ↑(notMemRangeEquiv k).symm) a
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : ℕ → α
k : ℕ
a : α
⊢ HasSum (fun n => f (n + k)) (a - ∑ i in range k, f i) ↔ HasSum f a
[PROOFSTEP]
simp [hasSum_nat_add_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
M : Type u_5
inst✝² : AddCommMonoid M
inst✝¹ : TopologicalSpace M
inst✝ : ContinuousAdd M
f : ℕ → M
k : ℕ
a : M
h : HasSum (fun n => f (n + k)) a
⊢ HasSum f (∑ i in range k, f i + a)
[PROOFSTEP]
refine ((range k).hasSum f).add_compl ?_
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α
inst✝⁴ : TopologicalSpace α
inst✝³ : TopologicalAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
M : Type u_5
inst✝² : AddCommMonoid M
inst✝¹ : TopologicalSpace M
inst✝ : ContinuousAdd M
f : ℕ → M
k : ℕ
a : M
h : HasSum (fun n => f (n + k)) a
⊢ HasSum (f ∘ Subtype.val) a
[PROOFSTEP]
rwa [← (notMemRangeEquiv k).symm.hasSum_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁶ : AddCommGroup α
inst✝⁵ : TopologicalSpace α
inst✝⁴ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
M : Type u_5
inst✝³ : AddCommMonoid M
inst✝² : TopologicalSpace M
inst✝¹ : ContinuousAdd M
inst✝ : T2Space M
f : ℕ → M
hf : Summable fun n => f (n + 1)
⊢ ∑' (b : ℕ), f b = f 0 + ∑' (b : ℕ), f (b + 1)
[PROOFSTEP]
simpa only [sum_range_one] using (sum_add_tsum_nat_add' hf).symm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
⊢ Tendsto (fun i => ∑' (k : ℕ), f (k + i)) atTop (𝓝 0)
[PROOFSTEP]
by_cases hf : Summable f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : Summable f
⊢ Tendsto (fun i => ∑' (k : ℕ), f (k + i)) atTop (𝓝 0)
[PROOFSTEP]
have h₀ : (fun i => ∑' i, f i - ∑ j in range i, f j) = fun i => ∑' k : ℕ, f (k + i) :=
by
ext1 i
rw [sub_eq_iff_eq_add, add_comm, sum_add_tsum_nat_add i hf]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : Summable f
⊢ (fun i => ∑' (i : ℕ), f i - ∑ j in range i, f j) = fun i => ∑' (k : ℕ), f (k + i)
[PROOFSTEP]
ext1 i
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : Summable f
i : ℕ
⊢ ∑' (i : ℕ), f i - ∑ j in range i, f j = ∑' (k : ℕ), f (k + i)
[PROOFSTEP]
rw [sub_eq_iff_eq_add, add_comm, sum_add_tsum_nat_add i hf]
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : Summable f
h₀ : (fun i => ∑' (i : ℕ), f i - ∑ j in range i, f j) = fun i => ∑' (k : ℕ), f (k + i)
⊢ Tendsto (fun i => ∑' (k : ℕ), f (k + i)) atTop (𝓝 0)
[PROOFSTEP]
have h₁ : Tendsto (fun _ : ℕ => ∑' i, f i) atTop (𝓝 (∑' i, f i)) := tendsto_const_nhds
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : Summable f
h₀ : (fun i => ∑' (i : ℕ), f i - ∑ j in range i, f j) = fun i => ∑' (k : ℕ), f (k + i)
h₁ : Tendsto (fun x => ∑' (i : ℕ), f i) atTop (𝓝 (∑' (i : ℕ), f i))
⊢ Tendsto (fun i => ∑' (k : ℕ), f (k + i)) atTop (𝓝 0)
[PROOFSTEP]
simpa only [h₀, sub_self] using Tendsto.sub h₁ hf.hasSum.tendsto_sum_nat
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : ¬Summable f
⊢ Tendsto (fun i => ∑' (k : ℕ), f (k + i)) atTop (𝓝 0)
[PROOFSTEP]
convert tendsto_const_nhds (α := α) (β := ℕ) (a := 0) (f := atTop)
[GOAL]
case h.e'_3.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : ¬Summable f
x✝ : ℕ
⊢ ∑' (k : ℕ), f (k + x✝) = 0
[PROOFSTEP]
rename_i i
[GOAL]
case h.e'_3.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
hf : ¬Summable f
i : ℕ
⊢ ∑' (k : ℕ), f (k + i) = 0
[PROOFSTEP]
rw [← summable_nat_add_iff i] at hf
[GOAL]
case h.e'_3.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : TopologicalSpace α
inst✝¹ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝ : T2Space α
f : ℕ → α
i : ℕ
hf : ¬Summable fun n => f (n + i)
⊢ ∑' (k : ℕ), f (k + i) = 0
[PROOFSTEP]
exact tsum_eq_zero_of_not_summable hf
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
⊢ HasSum (Int.rec f g) (a + b)
[PROOFSTEP]
have h₁ : Injective ((↑) : ℕ → ℤ) := @Int.ofNat.inj
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
⊢ HasSum (Int.rec f g) (a + b)
[PROOFSTEP]
have h₂ : Injective Int.negSucc := @Int.negSucc.inj
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ HasSum (Int.rec f g) (a + b)
[PROOFSTEP]
have : IsCompl (Set.range ((↑) : ℕ → ℤ)) (Set.range Int.negSucc) :=
by
constructor
· rw [disjoint_iff_inf_le]
rintro _ ⟨⟨i, rfl⟩, ⟨j, ⟨⟩⟩⟩
· rw [codisjoint_iff_le_sup]
rintro (i | j) _
exacts [Or.inl ⟨_, rfl⟩, Or.inr ⟨_, rfl⟩]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ IsCompl (Set.range Nat.cast) (Set.range Int.negSucc)
[PROOFSTEP]
constructor
[GOAL]
case disjoint
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ Disjoint (Set.range Nat.cast) (Set.range Int.negSucc)
[PROOFSTEP]
rw [disjoint_iff_inf_le]
[GOAL]
case disjoint
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ Set.range Nat.cast ⊓ Set.range Int.negSucc ≤ ⊥
[PROOFSTEP]
rintro _ ⟨⟨i, rfl⟩, ⟨j, ⟨⟩⟩⟩
[GOAL]
case codisjoint
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ Codisjoint (Set.range Nat.cast) (Set.range Int.negSucc)
[PROOFSTEP]
rw [codisjoint_iff_le_sup]
[GOAL]
case codisjoint
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
⊢ ⊤ ≤ Set.range Nat.cast ⊔ Set.range Int.negSucc
[PROOFSTEP]
rintro (i | j) _
[GOAL]
case codisjoint.ofNat
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
i : ℕ
a✝ : Int.ofNat i ∈ ⊤
⊢ Int.ofNat i ∈ Set.range Nat.cast ⊔ Set.range Int.negSucc
case codisjoint.negSucc
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
j : ℕ
a✝ : Int.negSucc j ∈ ⊤
⊢ Int.negSucc j ∈ Set.range Nat.cast ⊔ Set.range Int.negSucc
[PROOFSTEP]
exacts [Or.inl ⟨_, rfl⟩, Or.inr ⟨_, rfl⟩]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f g : ℕ → α
hf : HasSum f a
hg : HasSum g b
h₁ : Injective Nat.cast
h₂ : Injective Int.negSucc
this : IsCompl (Set.range Nat.cast) (Set.range Int.negSucc)
⊢ HasSum (Int.rec f g) (a + b)
[PROOFSTEP]
exact HasSum.add_isCompl this (h₁.hasSum_range_iff.mpr hf) (h₂.hasSum_range_iff.mpr hg)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hnonneg : HasSum (fun n => f ↑n) a
hneg : HasSum (fun n => f (-↑(Nat.succ n))) b
⊢ HasSum f (a + b)
[PROOFSTEP]
simp_rw [← Int.negSucc_coe] at hneg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hnonneg : HasSum (fun n => f ↑n) a
hneg : HasSum (fun n => f (Int.negSucc n)) b
⊢ HasSum f (a + b)
[PROOFSTEP]
convert hnonneg.int_rec hneg using 1
[GOAL]
case h.e'_5
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hnonneg : HasSum (fun n => f ↑n) a
hneg : HasSum (fun n => f (Int.negSucc n)) b
⊢ f = Int.rec (fun n => f ↑n) fun n => f (Int.negSucc n)
[PROOFSTEP]
ext (i | j)
[GOAL]
case h.e'_5.h.ofNat
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hnonneg : HasSum (fun n => f ↑n) a
hneg : HasSum (fun n => f (Int.negSucc n)) b
i : ℕ
⊢ f (Int.ofNat i) = Int.rec (fun n => f ↑n) (fun n => f (Int.negSucc n)) (Int.ofNat i)
[PROOFSTEP]
rfl
[GOAL]
case h.e'_5.h.negSucc
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hnonneg : HasSum (fun n => f ↑n) a
hneg : HasSum (fun n => f (Int.negSucc n)) b
j : ℕ
⊢ f (Int.negSucc j) = Int.rec (fun n => f ↑n) (fun n => f (Int.negSucc n)) (Int.negSucc j)
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g : β → α
a a₁ a₂ b : α
f : ℤ → α
hpos : HasSum (fun n => f (↑n + 1)) a
hneg : HasSum (fun n => f (-↑(Nat.succ n))) b
⊢ ∀ (g : ℕ → α), HasSum (fun k => g (k + 1)) a → HasSum g (a + g 0)
[PROOFSTEP]
intro g hg
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : TopologicalSpace α
inst✝ : TopologicalAddGroup α
f✝ g✝ : β → α
a a₁ a₂ b : α
f : ℤ → α
hpos : HasSum (fun n => f (↑n + 1)) a
hneg : HasSum (fun n => f (-↑(Nat.succ n))) b
g : ℕ → α
hg : HasSum (fun k => g (k + 1)) a
⊢ HasSum g (a + g 0)
[PROOFSTEP]
simpa using (hasSum_nat_add_iff _).mp hg
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
⊢ HasSum (fun n => f ↑n + f (-↑n)) (a + f 0)
[PROOFSTEP]
apply (hf.add (hasSum_ite_eq (0 : ℤ) (f 0))).hasSum_of_sum_eq fun u => ?_
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
⊢ ∃ v,
∀ (v' : Finset ℕ), v ⊆ v' → ∃ u', u ⊆ u' ∧ ∑ x in u', (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
refine' ⟨u.image Int.natAbs, fun v' hv' => _⟩
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
⊢ ∃ u', u ⊆ u' ∧ ∑ x in u', (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
let u1 := v'.image fun x : ℕ => (x : ℤ)
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
⊢ ∃ u', u ⊆ u' ∧ ∑ x in u', (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
let u2 := v'.image fun x : ℕ => -(x : ℤ)
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
⊢ ∃ u', u ⊆ u' ∧ ∑ x in u', (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
have A : u ⊆ u1 ∪ u2 := by
intro x hx
simp only [mem_union, mem_image, exists_prop]
rcases le_total 0 x with (h'x | h'x)
· left
refine' ⟨Int.natAbs x, hv' _, _⟩
· simp only [mem_image, exists_prop]
exact ⟨x, hx, rfl⟩
· simp only [h'x, Int.coe_natAbs, abs_eq_self]
· right
refine' ⟨Int.natAbs x, hv' _, _⟩
· simp only [mem_image, exists_prop]
exact ⟨x, hx, rfl⟩
· simp only [abs_of_nonpos h'x, Int.coe_natAbs, neg_neg]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
⊢ u ⊆ u1 ∪ u2
[PROOFSTEP]
intro x hx
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
⊢ x ∈ u1 ∪ u2
[PROOFSTEP]
simp only [mem_union, mem_image, exists_prop]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
⊢ (∃ a, a ∈ v' ∧ ↑a = x) ∨ ∃ a, a ∈ v' ∧ -↑a = x
[PROOFSTEP]
rcases le_total 0 x with (h'x | h'x)
[GOAL]
case inl
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : 0 ≤ x
⊢ (∃ a, a ∈ v' ∧ ↑a = x) ∨ ∃ a, a ∈ v' ∧ -↑a = x
[PROOFSTEP]
left
[GOAL]
case inl.h
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : 0 ≤ x
⊢ ∃ a, a ∈ v' ∧ ↑a = x
[PROOFSTEP]
refine' ⟨Int.natAbs x, hv' _, _⟩
[GOAL]
case inl.h.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : 0 ≤ x
⊢ Int.natAbs x ∈ image Int.natAbs u
[PROOFSTEP]
simp only [mem_image, exists_prop]
[GOAL]
case inl.h.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : 0 ≤ x
⊢ ∃ a, a ∈ u ∧ Int.natAbs a = Int.natAbs x
[PROOFSTEP]
exact ⟨x, hx, rfl⟩
[GOAL]
case inl.h.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : 0 ≤ x
⊢ ↑(Int.natAbs x) = x
[PROOFSTEP]
simp only [h'x, Int.coe_natAbs, abs_eq_self]
[GOAL]
case inr
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : x ≤ 0
⊢ (∃ a, a ∈ v' ∧ ↑a = x) ∨ ∃ a, a ∈ v' ∧ -↑a = x
[PROOFSTEP]
right
[GOAL]
case inr.h
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : x ≤ 0
⊢ ∃ a, a ∈ v' ∧ -↑a = x
[PROOFSTEP]
refine' ⟨Int.natAbs x, hv' _, _⟩
[GOAL]
case inr.h.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : x ≤ 0
⊢ Int.natAbs x ∈ image Int.natAbs u
[PROOFSTEP]
simp only [mem_image, exists_prop]
[GOAL]
case inr.h.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : x ≤ 0
⊢ ∃ a, a ∈ u ∧ Int.natAbs a = Int.natAbs x
[PROOFSTEP]
exact ⟨x, hx, rfl⟩
[GOAL]
case inr.h.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
x : ℤ
hx : x ∈ u
h'x : x ≤ 0
⊢ -↑(Int.natAbs x) = x
[PROOFSTEP]
simp only [abs_of_nonpos h'x, Int.coe_natAbs, neg_neg]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∃ u', u ⊆ u' ∧ ∑ x in u', (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
refine' ⟨u1 ∪ u2, A, _⟩
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∑ x in u1 ∪ u2, (f x + if x = 0 then f 0 else 0) = ∑ b in v', (f ↑b + f (-↑b))
[PROOFSTEP]
calc
(∑ x in u1 ∪ u2, (f x + ite (x = 0) (f 0) 0)) = (∑ x in u1 ∪ u2, f x) + ∑ x in u1 ∩ u2, f x :=
by
rw [sum_add_distrib]
congr 1
refine' (sum_subset_zero_on_sdiff inter_subset_union _ _).symm
· intro x hx
suffices x ≠ 0 by simp only [this, if_false]
rintro rfl
simp at hx
· intro x hx
simp only [mem_inter, mem_image, exists_prop] at hx
have : x = 0 := by
apply le_antisymm
· rcases hx.2 with ⟨a, _, rfl⟩
simp only [Right.neg_nonpos_iff, Nat.cast_nonneg]
· rcases hx.1 with ⟨a, _, rfl⟩
simp only [Nat.cast_nonneg]
simp only [this, eq_self_iff_true, if_true]
_ = (∑ x in u1, f x) + ∑ x in u2, f x := sum_union_inter
_ = (∑ b in v', f b) + ∑ b in v', f (-b) := by simp
_ = ∑ b in v', (f b + f (-b)) := sum_add_distrib.symm
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∑ x in u1 ∪ u2, (f x + if x = 0 then f 0 else 0) = ∑ x in u1 ∪ u2, f x + ∑ x in u1 ∩ u2, f x
[PROOFSTEP]
rw [sum_add_distrib]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ (∑ x in u1 ∪ u2, f x + ∑ x in u1 ∪ u2, if x = 0 then f 0 else 0) = ∑ x in u1 ∪ u2, f x + ∑ x in u1 ∩ u2, f x
[PROOFSTEP]
congr 1
[GOAL]
case e_a
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ (∑ x in u1 ∪ u2, if x = 0 then f 0 else 0) = ∑ x in u1 ∩ u2, f x
[PROOFSTEP]
refine' (sum_subset_zero_on_sdiff inter_subset_union _ _).symm
[GOAL]
case e_a.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∀ (x : ℤ), x ∈ (u1 ∪ u2) \ (u1 ∩ u2) → (if x = 0 then f 0 else 0) = 0
[PROOFSTEP]
intro x hx
[GOAL]
case e_a.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : x ∈ (u1 ∪ u2) \ (u1 ∩ u2)
⊢ (if x = 0 then f 0 else 0) = 0
[PROOFSTEP]
suffices x ≠ 0 by simp only [this, if_false]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : x ∈ (u1 ∪ u2) \ (u1 ∩ u2)
this : x ≠ 0
⊢ (if x = 0 then f 0 else 0) = 0
[PROOFSTEP]
simp only [this, if_false]
[GOAL]
case e_a.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : x ∈ (u1 ∪ u2) \ (u1 ∩ u2)
⊢ x ≠ 0
[PROOFSTEP]
rintro rfl
[GOAL]
case e_a.refine'_1
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
hx : 0 ∈ (u1 ∪ u2) \ (u1 ∩ u2)
⊢ False
[PROOFSTEP]
simp at hx
[GOAL]
case e_a.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∀ (x : ℤ), x ∈ u1 ∩ u2 → f x = if x = 0 then f 0 else 0
[PROOFSTEP]
intro x hx
[GOAL]
case e_a.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : x ∈ u1 ∩ u2
⊢ f x = if x = 0 then f 0 else 0
[PROOFSTEP]
simp only [mem_inter, mem_image, exists_prop] at hx
[GOAL]
case e_a.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : (∃ a, a ∈ v' ∧ ↑a = x) ∧ ∃ a, a ∈ v' ∧ -↑a = x
⊢ f x = if x = 0 then f 0 else 0
[PROOFSTEP]
have : x = 0 := by
apply le_antisymm
· rcases hx.2 with ⟨a, _, rfl⟩
simp only [Right.neg_nonpos_iff, Nat.cast_nonneg]
· rcases hx.1 with ⟨a, _, rfl⟩
simp only [Nat.cast_nonneg]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : (∃ a, a ∈ v' ∧ ↑a = x) ∧ ∃ a, a ∈ v' ∧ -↑a = x
⊢ x = 0
[PROOFSTEP]
apply le_antisymm
[GOAL]
case a
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : (∃ a, a ∈ v' ∧ ↑a = x) ∧ ∃ a, a ∈ v' ∧ -↑a = x
⊢ x ≤ 0
[PROOFSTEP]
rcases hx.2 with ⟨a, _, rfl⟩
[GOAL]
case a.intro.intro
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝¹ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a✝ : α
f : ℤ → α
hf : HasSum f a✝
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
a : ℕ
left✝ : a ∈ v'
hx : (∃ a_1, a_1 ∈ v' ∧ ↑a_1 = -↑a) ∧ ∃ a_1, a_1 ∈ v' ∧ -↑a_1 = -↑a
⊢ -↑a ≤ 0
[PROOFSTEP]
simp only [Right.neg_nonpos_iff, Nat.cast_nonneg]
[GOAL]
case a
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : (∃ a, a ∈ v' ∧ ↑a = x) ∧ ∃ a, a ∈ v' ∧ -↑a = x
⊢ 0 ≤ x
[PROOFSTEP]
rcases hx.1 with ⟨a, _, rfl⟩
[GOAL]
case a.intro.intro
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝¹ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a✝ : α
f : ℤ → α
hf : HasSum f a✝
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
a : ℕ
left✝ : a ∈ v'
hx : (∃ a_1, a_1 ∈ v' ∧ ↑a_1 = ↑a) ∧ ∃ a_1, a_1 ∈ v' ∧ -↑a_1 = ↑a
⊢ 0 ≤ ↑a
[PROOFSTEP]
simp only [Nat.cast_nonneg]
[GOAL]
case e_a.refine'_2
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
x : ℤ
hx : (∃ a, a ∈ v' ∧ ↑a = x) ∧ ∃ a, a ∈ v' ∧ -↑a = x
this : x = 0
⊢ f x = if x = 0 then f 0 else 0
[PROOFSTEP]
simp only [this, eq_self_iff_true, if_true]
[GOAL]
α✝ : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁵ : AddCommGroup α✝
inst✝⁴ : TopologicalSpace α✝
inst✝³ : TopologicalAddGroup α✝
f✝ g : β → α✝
a✝ a₁ a₂ : α✝
α : Type u_5
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
inst✝ : ContinuousAdd α
a : α
f : ℤ → α
hf : HasSum f a
u : Finset ℤ
v' : Finset ℕ
hv' : image Int.natAbs u ⊆ v'
u1 : Finset ℤ := image (fun x => ↑x) v'
u2 : Finset ℤ := image (fun x => -↑x) v'
A : u ⊆ u1 ∪ u2
⊢ ∑ x in u1, f x + ∑ x in u2, f x = ∑ b in v', f ↑b + ∑ b in v', f (-↑b)
[PROOFSTEP]
simp
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
⊢ (CauchySeq fun s => ∑ b in s, f b) ↔ ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
simp only [CauchySeq, cauchy_map_iff, and_iff_right atTop_neBot, prod_atTop_atTop_eq, uniformity_eq_comap_nhds_zero α,
tendsto_comap_iff, (· ∘ ·), atTop_neBot, true_and]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
⊢ Tendsto (fun x => ∑ b in x.snd, f b - ∑ b in x.fst, f b) atTop (𝓝 0) ↔
∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
rw [tendsto_atTop']
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
⊢ (∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s) ↔
∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
constructor
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
⊢ (∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s) →
∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
intro h e he
[GOAL]
case mp
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h : ∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
e : Set α
he : e ∈ 𝓝 0
⊢ ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
obtain ⟨⟨s₁, s₂⟩, h⟩ := h e he
[GOAL]
case mp.intro.mk
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
e : Set α
he : e ∈ 𝓝 0
s₁ s₂ : Finset β
h : ∀ (b : Finset β × Finset β), b ≥ (s₁, s₂) → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
⊢ ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
use s₁ ∪ s₂
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
e : Set α
he : e ∈ 𝓝 0
s₁ s₂ : Finset β
h : ∀ (b : Finset β × Finset β), b ≥ (s₁, s₂) → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
⊢ ∀ (t : Finset β), Disjoint t (s₁ ∪ s₂) → ∑ b in t, f b ∈ e
[PROOFSTEP]
intro t ht
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
e : Set α
he : e ∈ 𝓝 0
s₁ s₂ : Finset β
h : ∀ (b : Finset β × Finset β), b ≥ (s₁, s₂) → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
t : Finset β
ht : Disjoint t (s₁ ∪ s₂)
⊢ ∑ b in t, f b ∈ e
[PROOFSTEP]
specialize h (s₁ ∪ s₂, s₁ ∪ s₂ ∪ t) ⟨le_sup_left, le_sup_of_le_left le_sup_right⟩
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
e : Set α
he : e ∈ 𝓝 0
s₁ s₂ t : Finset β
ht : Disjoint t (s₁ ∪ s₂)
h : ∑ b in (s₁ ∪ s₂, s₁ ∪ s₂ ∪ t).snd, f b - ∑ b in (s₁ ∪ s₂, s₁ ∪ s₂ ∪ t).fst, f b ∈ e
⊢ ∑ b in t, f b ∈ e
[PROOFSTEP]
simpa only [Finset.sum_union ht.symm, add_sub_cancel'] using h
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
⊢ (∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e) →
∀ (s : Set α), s ∈ 𝓝 0 → ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ s
[PROOFSTEP]
rintro h e he
[GOAL]
case mpr
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
⊢ ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
[PROOFSTEP]
rcases exists_nhds_half_neg he with ⟨d, hd, hde⟩
[GOAL]
case mpr.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
⊢ ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
[PROOFSTEP]
rcases h d hd with ⟨s, h⟩
[GOAL]
case mpr.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
⊢ ∃ a, ∀ (b : Finset β × Finset β), b ≥ a → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
[PROOFSTEP]
use(s, s)
[GOAL]
case h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
⊢ ∀ (b : Finset β × Finset β), b ≥ (s, s) → ∑ b in b.snd, f b - ∑ b in b.fst, f b ∈ e
[PROOFSTEP]
rintro ⟨t₁, t₂⟩ ⟨ht₁, ht₂⟩
[GOAL]
case h.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
t₁ t₂ : Finset β
ht₁ : (s, s).fst ≤ (t₁, t₂).fst
ht₂ : (s, s).snd ≤ (t₁, t₂).snd
⊢ ∑ b in (t₁, t₂).snd, f b - ∑ b in (t₁, t₂).fst, f b ∈ e
[PROOFSTEP]
have : ((∑ b in t₂, f b) - ∑ b in t₁, f b) = (∑ b in t₂ \ s, f b) - ∑ b in t₁ \ s, f b := by
rw [← Finset.sum_sdiff ht₁, ← Finset.sum_sdiff ht₂, add_sub_add_right_eq_sub]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
t₁ t₂ : Finset β
ht₁ : (s, s).fst ≤ (t₁, t₂).fst
ht₂ : (s, s).snd ≤ (t₁, t₂).snd
⊢ ∑ b in t₂, f b - ∑ b in t₁, f b = ∑ b in t₂ \ s, f b - ∑ b in t₁ \ s, f b
[PROOFSTEP]
rw [← Finset.sum_sdiff ht₁, ← Finset.sum_sdiff ht₂, add_sub_add_right_eq_sub]
[GOAL]
case h.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
t₁ t₂ : Finset β
ht₁ : (s, s).fst ≤ (t₁, t₂).fst
ht₂ : (s, s).snd ≤ (t₁, t₂).snd
this : ∑ b in t₂, f b - ∑ b in t₁, f b = ∑ b in t₂ \ s, f b - ∑ b in t₁ \ s, f b
⊢ ∑ b in (t₁, t₂).snd, f b - ∑ b in (t₁, t₂).fst, f b ∈ e
[PROOFSTEP]
simp only [this]
[GOAL]
case h.mk.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
h✝ : ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
e : Set α
he : e ∈ 𝓝 0
d : Set α
hd : d ∈ 𝓝 0
hde : ∀ (v : α), v ∈ d → ∀ (w : α), w ∈ d → v - w ∈ e
s : Finset β
h : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ d
t₁ t₂ : Finset β
ht₁ : (s, s).fst ≤ (t₁, t₂).fst
ht₂ : (s, s).snd ≤ (t₁, t₂).snd
this : ∑ b in t₂, f b - ∑ b in t₁, f b = ∑ b in t₂ \ s, f b - ∑ b in t₁ \ s, f b
⊢ ∑ b in t₂ \ s, f b - ∑ b in t₁ \ s, f b ∈ e
[PROOFSTEP]
exact hde _ (h _ Finset.sdiff_disjoint) _ (h _ Finset.sdiff_disjoint)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
⊢ Tendsto (fun s => ∑' (b : { x // ¬x ∈ s }), f ↑b) atTop (𝓝 0)
[PROOFSTEP]
by_cases H : Summable f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : Summable f
⊢ Tendsto (fun s => ∑' (b : { x // ¬x ∈ s }), f ↑b) atTop (𝓝 0)
[PROOFSTEP]
intro e he
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
⊢ e ∈ Filter.map (fun s => ∑' (b : { x // ¬x ∈ s }), f ↑b) atTop
[PROOFSTEP]
rcases exists_mem_nhds_isClosed_subset he with ⟨o, ho, o_closed, oe⟩
[GOAL]
case pos.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
⊢ e ∈ Filter.map (fun s => ∑' (b : { x // ¬x ∈ s }), f ↑b) atTop
[PROOFSTEP]
simp only [le_eq_subset, Set.mem_preimage, mem_atTop_sets, Filter.mem_map, ge_iff_le]
[GOAL]
case pos.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
⊢ ∃ a, ∀ (b : Finset β), a ⊆ b → ∑' (b_1 : { x // ¬x ∈ b }), f ↑b_1 ∈ e
[PROOFSTEP]
obtain ⟨s, hs⟩ : ∃ s : Finset β, ∀ t : Finset β, Disjoint t s → (∑ b : β in t, f b) ∈ o :=
cauchySeq_finset_iff_vanishing.1 (Tendsto.cauchySeq H.hasSum) o ho
[GOAL]
case pos.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
⊢ ∃ a, ∀ (b : Finset β), a ⊆ b → ∑' (b_1 : { x // ¬x ∈ b }), f ↑b_1 ∈ e
[PROOFSTEP]
refine' ⟨s, fun a sa => oe _⟩
[GOAL]
case pos.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
⊢ ∑' (b : { x // ¬x ∈ a }), f ↑b ∈ o
[PROOFSTEP]
have A : Summable fun b : { x // x ∉ a } => f b := a.summable_compl_iff.2 H
[GOAL]
case pos.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
⊢ ∑' (b : { x // ¬x ∈ a }), f ↑b ∈ o
[PROOFSTEP]
refine' IsClosed.mem_of_tendsto o_closed A.hasSum (eventually_of_forall fun b => _)
[GOAL]
case pos.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
⊢ ∑ b in b, (fun b => f ↑b) b ∈ o
[PROOFSTEP]
have : Disjoint (Finset.image (fun i : { x // x ∉ a } => (i : β)) b) s :=
by
refine' disjoint_left.2 fun i hi his => _
rcases mem_image.1 hi with ⟨i', _, rfl⟩
exact i'.2 (sa his)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
⊢ Disjoint (image (fun i => ↑i) b) s
[PROOFSTEP]
refine' disjoint_left.2 fun i hi his => _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
i : β
hi : i ∈ image (fun i => ↑i) b
his : i ∈ s
⊢ False
[PROOFSTEP]
rcases mem_image.1 hi with ⟨i', _, rfl⟩
[GOAL]
case intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
i' : { x // ¬x ∈ a }
left✝ : i' ∈ b
hi : ↑i' ∈ image (fun i => ↑i) b
his : ↑i' ∈ s
⊢ False
[PROOFSTEP]
exact i'.2 (sa his)
[GOAL]
case pos.intro.intro.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
this : Disjoint (image (fun i => ↑i) b) s
⊢ ∑ b in b, (fun b => f ↑b) b ∈ o
[PROOFSTEP]
convert hs _ this using 1
[GOAL]
case h.e'_4
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
this : Disjoint (image (fun i => ↑i) b) s
⊢ ∑ b in b, (fun b => f ↑b) b = ∑ b in image (fun i => ↑i) b, f b
[PROOFSTEP]
rw [sum_image]
[GOAL]
case h.e'_4
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝ a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
this : Disjoint (image (fun i => ↑i) b) s
⊢ ∀ (x : { x // ¬x ∈ a }), x ∈ b → ∀ (y : { x // ¬x ∈ a }), y ∈ b → ↑x = ↑y → x = y
[PROOFSTEP]
intro i _ j _ hij
[GOAL]
case h.e'_4
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a✝² a₁ a₂ : α
f : β → α
H : Summable f
e : Set α
he : e ∈ 𝓝 0
o : Set α
ho : o ∈ 𝓝 0
o_closed : IsClosed o
oe : o ⊆ e
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ o
a : Finset β
sa : s ⊆ a
A : Summable fun b => f ↑b
b : Finset { x // ¬x ∈ a }
this : Disjoint (image (fun i => ↑i) b) s
i : { x // ¬x ∈ a }
a✝¹ : i ∈ b
j : { x // ¬x ∈ a }
a✝ : j ∈ b
hij : ↑i = ↑j
⊢ i = j
[PROOFSTEP]
exact Subtype.ext hij
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : ¬Summable f
⊢ Tendsto (fun s => ∑' (b : { x // ¬x ∈ s }), f ↑b) atTop (𝓝 0)
[PROOFSTEP]
convert tendsto_const_nhds (α := α) (β := Finset β) (f := atTop) (a := 0)
[GOAL]
case h.e'_3.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : ¬Summable f
x✝ : Finset β
⊢ ∑' (b : { x // ¬x ∈ x✝ }), f ↑b = 0
[PROOFSTEP]
apply tsum_eq_zero_of_not_summable
[GOAL]
case h.e'_3.h.h
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommGroup α
inst✝¹ : UniformSpace α
inst✝ : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
f : β → α
H : ¬Summable f
x✝ : Finset β
⊢ ¬Summable fun b => f ↑b
[PROOFSTEP]
rwa [Finset.summable_compl_iff]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
⊢ Summable f ↔ ∀ (e : Set α), e ∈ 𝓝 0 → ∃ s, ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
[PROOFSTEP]
rw [summable_iff_cauchySeq_finset, cauchySeq_finset_iff_vanishing]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
hf : Summable f
h : ∀ (b : β), g b = 0 ∨ g b = f b
e : Set α
he : e ∈ 𝓝 0
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
t : Finset β
ht : Disjoint t s
⊢ ∑ b in filter (fun b => g b = f b) t, g b = ∑ b in t, g b
[PROOFSTEP]
{ refine' Finset.sum_subset (Finset.filter_subset _ _) _
intro b hbt hb
simp only [Finset.mem_filter, and_iff_right hbt] at hb
exact (h b).resolve_right hb
}
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
hf : Summable f
h : ∀ (b : β), g b = 0 ∨ g b = f b
e : Set α
he : e ∈ 𝓝 0
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
t : Finset β
ht : Disjoint t s
⊢ ∑ b in filter (fun b => g b = f b) t, g b = ∑ b in t, g b
[PROOFSTEP]
refine' Finset.sum_subset (Finset.filter_subset _ _) _
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
hf : Summable f
h : ∀ (b : β), g b = 0 ∨ g b = f b
e : Set α
he : e ∈ 𝓝 0
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
t : Finset β
ht : Disjoint t s
⊢ ∀ (x : β), x ∈ t → ¬x ∈ filter (fun b => g b = f b) t → g x = 0
[PROOFSTEP]
intro b hbt hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
hf : Summable f
h : ∀ (b : β), g b = 0 ∨ g b = f b
e : Set α
he : e ∈ 𝓝 0
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
t : Finset β
ht : Disjoint t s
b : β
hbt : b ∈ t
hb : ¬b ∈ filter (fun b => g b = f b) t
⊢ g b = 0
[PROOFSTEP]
simp only [Finset.mem_filter, and_iff_right hbt] at hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
hf : Summable f
h : ∀ (b : β), g b = 0 ∨ g b = f b
e : Set α
he : e ∈ 𝓝 0
s : Finset β
hs : ∀ (t : Finset β), Disjoint t s → ∑ b in t, f b ∈ e
t : Finset β
ht : Disjoint t s
b : β
hbt : b ∈ t
hb : ¬g b = f b
⊢ g b = 0
[PROOFSTEP]
exact (h b).resolve_right hb
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommGroup α
inst✝² : UniformSpace α
inst✝¹ : UniformAddGroup α
f g : β → α
a a₁ a₂ : α
inst✝ : CompleteSpace α
i : γ → β
hf : Summable f
hi : Injective i
⊢ Summable (f ∘ i)
[PROOFSTEP]
simpa only [Set.indicator_range_comp] using
(hi.summable_iff (fun x hx => Set.indicator_of_not_mem hx _)).2 (hf.indicator (Set.range i))
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommGroup α
inst✝³ : UniformSpace α
inst✝² : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : CompleteSpace α
inst✝ : T2Space α
f : β → α
hf : Summable f
s : Finset β
⊢ ∑ x in s, f x + ∑' (x : { x // ¬x ∈ s }), f ↑x = ∑' (x : β), f x
[PROOFSTEP]
rw [← tsum_subtype_add_tsum_subtype_compl hf s]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommGroup α
inst✝³ : UniformSpace α
inst✝² : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : CompleteSpace α
inst✝ : T2Space α
f : β → α
hf : Summable f
s : Finset β
⊢ ∑ x in s, f x + ∑' (x : { x // ¬x ∈ s }), f ↑x = ∑' (x : ↑↑s), f ↑x + ∑' (x : ↑(↑s)ᶜ), f ↑x
[PROOFSTEP]
simp only [Finset.tsum_subtype', add_right_inj]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommGroup α
inst✝³ : UniformSpace α
inst✝² : UniformAddGroup α
f✝ g : β → α
a a₁ a₂ : α
inst✝¹ : CompleteSpace α
inst✝ : T2Space α
f : β → α
hf : Summable f
s : Finset β
⊢ ∑' (x : { x // ¬x ∈ s }), f ↑x = ∑' (x : ↑(↑s)ᶜ), f ↑x
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
⊢ ∃ s, ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
[PROOFSTEP]
letI : UniformSpace G := TopologicalAddGroup.toUniformSpace G
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
this : UniformSpace G := TopologicalAddGroup.toUniformSpace G
⊢ ∃ s, ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
[PROOFSTEP]
letI : UniformAddGroup G := comm_topologicalAddGroup_is_uniform
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
this✝ : UniformSpace G := TopologicalAddGroup.toUniformSpace G
this : UniformAddGroup G := comm_topologicalAddGroup_is_uniform
⊢ ∃ s, ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
[PROOFSTEP]
rcases hf with ⟨y, hy⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
e : Set G
he : e ∈ 𝓝 0
this✝ : UniformSpace G := TopologicalAddGroup.toUniformSpace G
this : UniformAddGroup G := comm_topologicalAddGroup_is_uniform
y : G
hy : HasSum f y
⊢ ∃ s, ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
[PROOFSTEP]
exact cauchySeq_finset_iff_vanishing.1 hy.cauchySeq e he
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
⊢ Tendsto f cofinite (𝓝 0)
[PROOFSTEP]
intro e he
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
⊢ e ∈ Filter.map f cofinite
[PROOFSTEP]
rw [Filter.mem_map]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
⊢ f ⁻¹' e ∈ cofinite
[PROOFSTEP]
rcases hf.vanishing he with ⟨s, hs⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
s : Finset α
hs : ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
⊢ f ⁻¹' e ∈ cofinite
[PROOFSTEP]
refine' s.eventually_cofinite_nmem.mono fun x hx => _
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f : α → G
hf : Summable f
e : Set G
he : e ∈ 𝓝 0
s : Finset α
hs : ∀ (t : Finset α), Disjoint t s → ∑ k in t, f k ∈ e
x : α
hx : ¬x ∈ s
⊢ f x ∈ e
[PROOFSTEP]
simpa using hs { x } (disjoint_singleton_left.2 hx)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f✝ : α → G
f : ℕ → G
hf : Summable f
⊢ Tendsto f atTop (𝓝 0)
[PROOFSTEP]
rw [← Nat.cofinite_eq_atTop]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝² : TopologicalSpace G
inst✝¹ : AddCommGroup G
inst✝ : TopologicalAddGroup G
f✝ : α → G
f : ℕ → G
hf : Summable f
⊢ Tendsto f cofinite (𝓝 0)
[PROOFSTEP]
exact hf.tendsto_cofinite_zero
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
G : Type u_5
inst✝⁴ : TopologicalSpace G
inst✝³ : AddCommGroup G
inst✝² : TopologicalAddGroup G
f : α → G
inst✝¹ : TopologicalSpace.FirstCountableTopology G
inst✝ : T1Space G
hf : Summable f
⊢ Set.Countable (support f)
[PROOFSTEP]
simpa only [sInter_sets_nhds] using hf.tendsto_cofinite_zero.countable_compl_preimage_sInter_sets
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
inst✝¹ : AddCommMonoid γ
inst✝ : TopologicalSpace γ
f : β → α
g : β → γ
a : α
b : γ
hf : HasSum f a
hg : HasSum g b
⊢ HasSum (fun x => (f x, g x)) (a, b)
[PROOFSTEP]
simp [HasSum, ← prod_mk_sum, Filter.Tendsto.prod_mk_nhds hf hg]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ι : Type u_5
π : α → Type u_6
inst✝¹ : (x : α) → AddCommMonoid (π x)
inst✝ : (x : α) → TopologicalSpace (π x)
f : ι → (x : α) → π x
g : (x : α) → π x
⊢ HasSum f g ↔ ∀ (x : α), HasSum (fun i => f i x) (g x)
[PROOFSTEP]
simp only [HasSum, tendsto_pi_nhds, sum_apply]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
ι : Type u_5
π : α → Type u_6
inst✝¹ : (x : α) → AddCommMonoid (π x)
inst✝ : (x : α) → TopologicalSpace (π x)
f : ι → (x : α) → π x
⊢ Summable f ↔ ∀ (x : α), Summable fun i => f i x
[PROOFSTEP]
simp only [Summable, Pi.hasSum, skolem]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f : β → α
a : α
inst✝ : T2Space α
⊢ ∑' (x : β), op (f x) = op (∑' (x : β), f x)
[PROOFSTEP]
by_cases h : Summable f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f : β → α
a : α
inst✝ : T2Space α
h : Summable f
⊢ ∑' (x : β), op (f x) = op (∑' (x : β), f x)
[PROOFSTEP]
exact h.hasSum.op.tsum_eq
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f : β → α
a : α
inst✝ : T2Space α
h : ¬Summable f
⊢ ∑' (x : β), op (f x) = op (∑' (x : β), f x)
[PROOFSTEP]
have ho := summable_op.not.mpr h
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝² : AddCommMonoid α
inst✝¹ : TopologicalSpace α
f : β → α
a : α
inst✝ : T2Space α
h : ¬Summable f
ho : ¬Summable fun a => op (f a)
⊢ ∑' (x : β), op (f x) = op (∑' (x : β), f x)
[PROOFSTEP]
rw [tsum_eq_zero_of_not_summable h, tsum_eq_zero_of_not_summable ho, MulOpposite.op_zero]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
inst✝¹ : StarAddMonoid α
inst✝ : ContinuousStar α
f : β → α
a : α
h : HasSum f a
⊢ HasSum (fun b => Star.star (f b)) (Star.star a)
[PROOFSTEP]
simpa only using h.map (starAddEquiv : α ≃+ α) continuous_star
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝³ : AddCommMonoid α
inst✝² : TopologicalSpace α
inst✝¹ : StarAddMonoid α
inst✝ : ContinuousStar α
f : β → α
a : α
hf : Summable fun b => Star.star (f b)
⊢ Summable f
[PROOFSTEP]
simpa only [star_star] using hf.star
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
inst✝² : StarAddMonoid α
inst✝¹ : ContinuousStar α
f : β → α
a : α
inst✝ : T2Space α
⊢ star (∑' (b : β), f b) = ∑' (b : β), star (f b)
[PROOFSTEP]
by_cases hf : Summable f
[GOAL]
case pos
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
inst✝² : StarAddMonoid α
inst✝¹ : ContinuousStar α
f : β → α
a : α
inst✝ : T2Space α
hf : Summable f
⊢ star (∑' (b : β), f b) = ∑' (b : β), star (f b)
[PROOFSTEP]
exact hf.hasSum.star.tsum_eq.symm
[GOAL]
case neg
α : Type u_1
β : Type u_2
γ : Type u_3
δ : Type u_4
inst✝⁴ : AddCommMonoid α
inst✝³ : TopologicalSpace α
inst✝² : StarAddMonoid α
inst✝¹ : ContinuousStar α
f : β → α
a : α
inst✝ : T2Space α
hf : ¬Summable f
⊢ star (∑' (b : β), f b) = ∑' (b : β), star (f b)
[PROOFSTEP]
rw [tsum_eq_zero_of_not_summable hf, tsum_eq_zero_of_not_summable (mt Summable.ofStar hf), star_zero]
|
{"mathlib_filename": "Mathlib.Topology.Algebra.InfiniteSum.Basic", "llama_tokens": 75911}
|
import os
import json
import xml.etree.ElementTree as ET
from PIL import Image
from collections import defaultdict
import torch
import numpy as np
import pycocotools.mask as mask_util
from torchvision import transforms
from .generalized_dataset import GeneralizedDataset
# The 20 Pascal VOC object categories. The tuple order is significant:
# a class's index in this tuple is used as its integer label throughout
# this module (see VOCDataset.get_target and VOCDataset.ann_labels).
VOC_CLASSES = (
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
)
def target_to_coco_ann(target):
    """Convert one detection target dict into a list of COCO-style annotation dicts.

    :param target: dict with keys "image_id" (1-element tensor), "boxes"
        (N x 4 tensor in xyxy format), "masks" (N binary mask tensors) and
        "labels" (N-element tensor of class indices).
    :return: list of N dicts, one per instance, in COCO annotation format
        (bbox in xywh, segmentation as a UTF-8 decoded RLE, iscrowd=0).
    """
    img_id = target["image_id"].item()
    labels = target["labels"].tolist()
    masks = target["masks"]

    # Convert boxes from (xmin, ymin, xmax, ymax) to COCO's (x, y, w, h).
    x0, y0, x1, y1 = target["boxes"].unbind(1)
    xywh = torch.stack((x0, y0, x1 - x0, y1 - y0), dim=1)
    areas = (xywh[:, 2] * xywh[:, 3]).tolist()
    xywh = xywh.tolist()

    # Encode each binary mask as a Fortran-ordered RLE and decode the byte
    # counts so the result is JSON-serializable.
    rles = []
    for mask in masks:
        rle = mask_util.encode(np.array(mask[:, :, None], dtype=np.uint8, order="F"))[0]
        rle["counts"] = rle["counts"].decode("utf-8")
        rles.append(rle)

    return [
        {
            "image_id": img_id,
            "id": idx,
            "category_id": labels[idx],
            "segmentation": rle,
            "bbox": xywh[idx],
            "area": areas[idx],
            "iscrowd": 0,
        }
        for idx, rle in enumerate(rles)
    ]
class VOCDataset(GeneralizedDataset):
    """Pascal VOC 2012 instance-segmentation dataset.

    Reads images, XML annotations and SegmentationObject masks from a VOC
    directory layout, and can export COCO-style JSON annotations for
    evaluation with pycocotools.
    """

    # download VOC 2012: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
    def __init__(self, data_dir, split, train=False):
        """
        :param data_dir: str, root of the extracted VOC2012 directory.
        :param split: str, name of the ImageSets/Segmentation split file
            (e.g. "train", "val").
        :param train: bool, when True the dataset is checked/filtered for
            training (may be slow on first run).
        """
        super().__init__()
        self.data_dir = data_dir
        self.split = split
        self.train = train

        # instances segmentation task
        id_file = os.path.join(data_dir, "ImageSets/Segmentation/{}.txt".format(split))
        # Fix: close the split file deterministically instead of leaking the
        # handle returned by a bare open().
        with open(id_file) as f:
            self.ids = [id_.strip() for id_ in f]
        # VOC ids look like "2007_000032"; stripping the underscore yields a
        # sortable integer key.
        self.id_compare_fn = lambda x: int(x.replace("_", ""))

        self.ann_file = os.path.join(
            data_dir, "Annotations/instances_{}.json".format(split)
        )
        self._coco = None  # lazily built by the `coco` property

        self.classes = VOC_CLASSES
        # results' labels convert to annotation labels
        self.ann_labels = {self.classes.index(n): i for i, n in enumerate(self.classes)}

        checked_id_file = os.path.join(
            os.path.dirname(id_file), "checked_{}.txt".format(split)
        )
        if train:
            if not os.path.exists(checked_id_file):
                self.make_aspect_ratios()
            self.check_dataset(checked_id_file)

    def make_aspect_ratios(self):
        """Compute width/height ratio for every image from its XML annotation."""
        self._aspect_ratios = []
        for img_id in self.ids:
            anno = ET.parse(
                os.path.join(self.data_dir, "Annotations", "{}.xml".format(img_id))
            )
            size = anno.findall("size")[0]
            width = size.find("width").text
            height = size.find("height").text
            ar = int(width) / int(height)
            self._aspect_ratios.append(ar)

    def get_image(self, img_id):
        """Load the JPEG image for `img_id` and return it as an RGB PIL image."""
        image = Image.open(
            os.path.join(self.data_dir, "JPEGImages/{}.jpg".format(img_id))
        )
        return image.convert("RGB")

    def get_target(self, img_id):
        """Build the target dict (boxes, labels, masks) for `img_id`.

        :return: dict with keys image_id (index tensor), boxes (N x 4 float
            tensor, xyxy), labels (N int tensor) and masks (N x H x W uint8).
        """
        masks = Image.open(
            os.path.join(self.data_dir, "SegmentationObject/{}.png".format(img_id))
        )
        masks = transforms.ToTensor()(masks)
        # Each instance is painted with a distinct gray value; 0 is background
        # and 1 is the "void"/border label, so keep only values strictly
        # between them and turn each into one binary mask.
        uni = masks.unique()
        uni = uni[(uni > 0) & (uni < 1)]
        masks = (masks == uni.reshape(-1, 1, 1)).to(torch.uint8)

        anno = ET.parse(
            os.path.join(self.data_dir, "Annotations", "{}.xml".format(img_id))
        )
        boxes = []
        labels = []
        for obj in anno.findall("object"):
            bndbox = obj.find("bndbox")
            bbox = [
                int(bndbox.find(tag).text) for tag in ["xmin", "ymin", "xmax", "ymax"]
            ]
            name = obj.find("name").text
            label = self.classes.index(name)
            boxes.append(bbox)
            labels.append(label)

        boxes = torch.tensor(boxes, dtype=torch.float32)
        labels = torch.tensor(labels)
        # NOTE: list.index is O(n) per call; acceptable for VOC-sized id lists.
        img_id = torch.tensor([self.ids.index(img_id)])
        target = dict(image_id=img_id, boxes=boxes, labels=labels, masks=masks)
        return target

    @property
    def coco(self):
        """Lazily create and cache a pycocotools COCO object for this split."""
        if self._coco is None:
            from pycocotools.coco import COCO

            self.convert_to_coco_format()
            self._coco = COCO(self.ann_file)
        return self._coco

    def convert_to_coco_format(self, overwrite=False):
        """Write COCO-style JSON annotations for this split to `self.ann_file`.

        :param overwrite: bool, regenerate the file even if it already exists.
        """
        if overwrite or not os.path.exists(self.ann_file):
            print("Generating COCO-style annotations...")
            voc_dataset = VOCDataset(self.data_dir, self.split, True)
            instances = defaultdict(list)
            instances["categories"] = [
                {"id": i, "name": n} for i, n in enumerate(voc_dataset.classes)
            ]

            # Annotation ids must be unique across the whole file, so offset
            # each image's per-instance ids by a running counter.
            ann_id_start = 0
            for image, target in voc_dataset:
                image_id = target["image_id"].item()
                filename = voc_dataset.ids[image_id] + ".jpg"
                # assumes the base dataset yields images as CHW tensors — TODO confirm
                h, w = image.shape[-2:]
                img = {"id": image_id, "file_name": filename, "height": h, "width": w}
                instances["images"].append(img)

                anns = target_to_coco_ann(target)
                for ann in anns:
                    ann["id"] += ann_id_start
                    instances["annotations"].append(ann)
                ann_id_start += len(anns)

            # Fix: close the output file deterministically instead of leaking
            # the handle returned by a bare open().
            with open(self.ann_file, "w") as f:
                json.dump(instances, f)
            print("Created successfully: {}".format(self.ann_file))
|
{"hexsha": "816154425db7b762d5213474d772bd601b9a696f", "size": 5772, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch_mask_rcnn/datasets/voc_dataset.py", "max_stars_repo_name": "JinchengHeRyan/STATS402_Final_MaskRcnn", "max_stars_repo_head_hexsha": "c8103751008afb2f969c7e321a7e843a50c0f681", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytorch_mask_rcnn/datasets/voc_dataset.py", "max_issues_repo_name": "JinchengHeRyan/STATS402_Final_MaskRcnn", "max_issues_repo_head_hexsha": "c8103751008afb2f969c7e321a7e843a50c0f681", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch_mask_rcnn/datasets/voc_dataset.py", "max_forks_repo_name": "JinchengHeRyan/STATS402_Final_MaskRcnn", "max_forks_repo_head_hexsha": "c8103751008afb2f969c7e321a7e843a50c0f681", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5396825397, "max_line_length": 99, "alphanum_fraction": 0.5538808039, "include": true, "reason": "import numpy", "num_tokens": 1407}
|
import keras
import numpy as np
import AxonDeepSeg.ads_utils as ads
from scipy import ndimage
from skimage import exposure
import AxonDeepSeg.ads_utils
from AxonDeepSeg.ads_utils import convert_path
class DataGen(keras.utils.Sequence):
    """Keras Sequence yielding augmented (image, mask) batches for training."""

    def __init__(
        self,
        ids,
        path,
        augmentations,
        batch_size=8,
        image_size=512,
        thresh_indices=[0, 0.2, 0.8],
    ):
        """Initialise the generator.

        :param ids: List of strings, ids of all the images/masks in the training set.
        :param path: directory containing the image_/mask_ PNG files (str or Path).
        :param augmentations: Compose object, data augmentation operations to apply.
        :param batch_size: Int, the batch size used for training.
        :param image_size: Int, side length of the (square) input images.
        :param thresh_indices: thresholds used to discretize the masks.
        """
        self.ids = ids
        # Accept either a plain string or a Path object.
        self.path = convert_path(path)
        self.batch_size = batch_size
        self.image_size = image_size
        self.on_epoch_end()
        self.thresh_indices = thresh_indices
        self.augment = augmentations

    def __load__(self, id_name):
        """Read one image and its discretized mask from disk.

        :param id_name: String, id name of a particular image/mask.
        """
        image_path = self.path / ("image_" + id_name + ".png")
        mask_path = self.path / ("mask_" + id_name + ".png")
        # Image is reshaped to (H, W, 1) single-channel form.
        image = np.reshape(
            ads.imread(str(image_path)), (self.image_size, self.image_size, 1)
        )
        # Mask is discretized into one channel per class.
        mask = descritize_mask(ads.imread(str(mask_path)), self.thresh_indices)
        return (image, mask)

    def __getitem__(self, index):
        """Generate the augmented batch number `index`."""
        start = index * self.batch_size
        batch_ids = self.ids[start:start + self.batch_size]
        loaded = [self.__load__(name) for name in batch_ids]
        images = np.array([pair[0] for pair in loaded])
        masks = np.array([pair[1] for pair in loaded])
        image_aug = []
        mask_aug = []
        # Apply the augmentation pipeline sample by sample.
        for img, msk in zip(images, masks):
            augmented = self.augment(image=img, mask=msk)
            image_aug.append(augmented["image"])
            mask_aug.append(augmented["mask"])
        return (np.array(image_aug), np.array(mask_aug))

    def on_epoch_end(self):
        """Hook called by Keras at the end of each epoch; nothing to do here."""
        pass

    def __len__(self):
        """Number of batches per epoch (last batch may be smaller)."""
        return int(np.ceil(len(self.ids) / float(self.batch_size)))
def labellize_mask_2d(patch, thresh_indices=[0, 0.2, 0.8]):
    """
    Process a patch with 8 bit pixels ([0-255]) so that the pixels between two threshold values are set to the closest threshold, effectively
    enabling the creation of a mask with as many different values as there are thresholds.
    Returns mask in [0-1] domain

    NOTE(review): the body builds `mask` in the [0-1] domain but the final
    statement returns the *original* `patch` unchanged, so the computed mask
    is discarded and the docstring above does not match the actual return.
    Downstream, descritize_mask() compares the returned values against
    8-bit (0-255) thresholds, so it relies on this function returning the
    raw patch -- confirm the intended contract before "fixing" the return.
    """
    mask = np.zeros_like(patch)
    for indice in range(len(thresh_indices) - 1):
        # Threshold band expressed in 8-bit pixel units.
        thresh_inf_8bit = 255 * thresh_indices[indice]
        thresh_sup_8bit = 255 * thresh_indices[indice + 1]
        idx = np.where(
            (patch >= thresh_inf_8bit) & (patch < thresh_sup_8bit)
        )  # returns (x, y) of the corresponding indices
        # Pixels inside this band collapse to the band midpoint, in [0-1].
        mask[idx] = np.mean([thresh_inf_8bit / 255, thresh_sup_8bit / 255])
    # Everything at or above the top threshold maps to 1.
    mask[(patch >= 255 * thresh_indices[-1])] = 1
    return patch
def descritize_mask(mask, thresh_indices):
    """Discretize an 8-bit mask into one binary channel per class.

    Returns an array of shape (H, W, n_classes) of uint8 indicator layers
    (background, myelin, axon for the default three thresholds).
    """
    # Collapse pixel intensities onto the threshold bands first.
    mask = labellize_mask_2d(mask, thresh_indices)  # mask intensity float between 0-1
    # One output layer per class.
    n_classes = len(thresh_indices)
    bounds = [255 * t for t in thresh_indices]
    real_mask = np.zeros([mask.shape[0], mask.shape[1], n_classes])
    # Each class channel is 1 where the pixel falls in [bounds[k], bounds[k+1]).
    for k in range(n_classes - 1):
        real_mask[:, :, k] = (mask >= bounds[k]) * (mask < bounds[k + 1])
    # The last class takes everything at or above the top threshold.
    real_mask[:, :, -1] = mask >= bounds[-1]
    return real_mask.astype(np.uint8)
|
{"hexsha": "913375dc88d7d8c51ea4bdd6cf418d804d1798fc", "size": 4728, "ext": "py", "lang": "Python", "max_stars_repo_path": "AxonDeepSeg/data_management/input_data.py", "max_stars_repo_name": "mariehbourget/axondeepseg", "max_stars_repo_head_hexsha": "23c3f7355f85b9800e16acca980be1f5cae79b21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 115, "max_stars_repo_stars_event_min_datetime": "2017-11-08T02:24:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T19:03:57.000Z", "max_issues_repo_path": "AxonDeepSeg/data_management/input_data.py", "max_issues_repo_name": "vs74/axondeepseg", "max_issues_repo_head_hexsha": "9d54521e61b5e856f60ee57749feb36766f29c92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 511, "max_issues_repo_issues_event_min_datetime": "2017-12-05T15:23:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T19:38:43.000Z", "max_forks_repo_path": "AxonDeepSeg/data_management/input_data.py", "max_forks_repo_name": "vs74/axondeepseg", "max_forks_repo_head_hexsha": "9d54521e61b5e856f60ee57749feb36766f29c92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2017-11-30T13:36:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T18:11:06.000Z", "avg_line_length": 31.9459459459, "max_line_length": 141, "alphanum_fraction": 0.60321489, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1160}
|
/**
* \file dcs/testbed/constant_signal_generator.hpp
*
* \brief Generates constant signals.
*
* \author Marco Guazzone (marco.guazzone@gmail.com)
*
* <hr/>
*
* Copyright 2012 Marco Guazzone (marco.guazzone@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef DCS_TESTBED_CONSTANT_SIGNAL_GENERATOR_HPP
#define DCS_TESTBED_CONSTANT_SIGNAL_GENERATOR_HPP
#include <algorithm>
#include <boost/bind.hpp>
#include <dcs/assert.hpp>
#include <dcs/exception.hpp>
#include <dcs/testbed/base_signal_generator.hpp>
#include <functional>
#include <stdexcept>
namespace dcs { namespace testbed {
template <typename ValueT>
class constant_signal_generator: public base_signal_generator<ValueT>
{
	private: typedef base_signal_generator<ValueT> base_type;
	public: typedef ValueT value_type;
	public: typedef typename base_type::vector_type vector_type;


	/// Build a generator that always emits the signal \a u0.
	public: constant_signal_generator(vector_type const& u0)
	: u_(u0)
	{
	}


	/// Return the (constant) signal value.
	private: vector_type do_generate()
	{
		return u_;
	}

	/// Nothing to do on reset: the signal never changes.
	private: void do_reset()
	{
	}

	/// Validate that no stored value exceeds \a val.
	private: void do_upper_bound(value_type val)
	{
		// pre: for each ui in u_ : ui <= val
		// (equivalently: val < ui holds for no element)
		DCS_ASSERT(::std::count_if(u_.begin(), u_.end(), ::boost::bind(::std::less<value_type>(), val, ::_1)) == 0,
				   DCS_EXCEPTION_THROW(::std::invalid_argument,
									   "Invalid upper bound: some signal value is bigger"));
	}

	/// Validate that no stored value falls below \a val.
	private: void do_lower_bound(value_type val)
	{
		// pre: for each ui in u_ : ui >= val
		// (equivalently: val > ui holds for no element)
		DCS_ASSERT(::std::count_if(u_.begin(), u_.end(), ::boost::bind(::std::greater<value_type>(), val, ::_1)) == 0,
				   DCS_EXCEPTION_THROW(::std::invalid_argument,
									   "Invalid lower bound: some signal value is smaller"));
	}


	private: vector_type u_;
};
}} // Namespace dcs::testbed
#endif // DCS_TESTBED_CONSTANT_SIGNAL_GENERATOR_HPP
|
{"hexsha": "e50b6e7e6504fed6a6e4d737f7605124bc6b4e87", "size": 2348, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "inc/dcs/testbed/constant_signal_generator.hpp", "max_stars_repo_name": "sguazt/dcsxx-testbed", "max_stars_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inc/dcs/testbed/constant_signal_generator.hpp", "max_issues_repo_name": "sguazt/dcsxx-testbed", "max_issues_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inc/dcs/testbed/constant_signal_generator.hpp", "max_forks_repo_name": "sguazt/dcsxx-testbed", "max_forks_repo_head_hexsha": "e7210f0c7f54256d5bf0c90297e0c4f9eaf82da0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9885057471, "max_line_length": 112, "alphanum_fraction": 0.7214650767, "num_tokens": 584}
|
# coding: utf-8
def load_pickle(fname):
    """Deserialize and return the object stored in pickle file `fname`."""
    with open(fname, 'rb') as handle:
        return pickle.load(handle)
## time
def aexp2zred(aexp):
    """Convert a list of expansion factors to redshifts, z = 1/a - 1."""
    return [1.0 / factor - 1.0 for factor in aexp]
def zred2aexp(zred):
    """Convert a list of redshifts to expansion factors, a = 1/(1 + z)."""
    return [1.0 / (1.0 + redshift) for redshift in zred]
def lbt2aexp(lts):
    """Convert lookback times (in Gyr) to expansion factors, using WMAP7."""
    import astropy.units as u
    from astropy.cosmology import WMAP7, z_at_value
    # Invert the WMAP7 lookback-time relation for each input time.
    redshifts = [z_at_value(WMAP7.lookback_time, lt * u.Gyr) for lt in lts]
    return [1.0 / (1 + z) for z in redshifts]
def density_map(x, y, sort=True):
    """Return (x, y, density) reordered from lowest to highest point density.

    Density is a Gaussian KDE evaluated at the sample points and normalised
    so its maximum is 1.  Note: `sort` is unused; kept for interface
    compatibility.
    """
    from scipy.stats import gaussian_kde
    stacked = np.vstack([x, y])
    dens = gaussian_kde(stacked)(stacked)
    dens /= max(dens)
    order = dens.argsort()
    return x[order], y[order], dens[order]
def sigma_clip_ind(c, high, low):
    """
    Return a boolean mask of sigma-clipping-safe elements:
    those strictly inside (mean - low*std, mean + high*std).
    """
    import numpy as np
    mu = np.mean(c)
    sd = np.std(c)
    return (mu - sd * low < c) & (c < mu + sd * high)
def mask_outlier(y, low=1.5, high=1.5):
    """
    Mask outliers assuming a monotonic trend: fit a line, sigma-clip the
    residuals, replace clipped residuals by the mean of the kept ones,
    and re-add the linear trend.
    """
    x = np.arange(len(y))
    # Linear fit (a strong smoother reconstructing a mild curve would be
    # preferable, as the original author noted).
    slope, intercept, _, _, _ = scipy.stats.linregress(x, y)
    trend = slope * x + intercept
    # Work on the detrended residuals.
    resid = y - trend
    keep = sigma_clip_ind(resid, low, high)
    resid[~keep] = np.mean(resid[keep])
    return resid + trend
def smooth(x, beta=5, window_len=20, monotonic=False):
    """
    Kaiser window smoothing.

    :param x: 1-D array-like signal to smooth.
    :param beta: Kaiser window shape parameter (beta=5 is similar to a Hamming).
    :param window_len: length of the smoothing window.
    :param monotonic: if True, remove the overall linear trend before
        smoothing and re-add it afterwards, so border handling does not
        introduce an offset.
    :return: smoothed signal with the same length as `x`.
    """
    half = window_len // 2
    if monotonic:
        # If there is an overall slope, smoothing may result in an offset;
        # compensate by detrending first (same regression call as before).
        slope, intercept, _, _, _ = scipy.stats.linregress(x, y=np.arange(len(x)))
        trend = np.arange(len(x)) * slope + intercept
        x = x - trend
    # Mirror-extend the data at both ends so the window can be applied
    # at the borders without shrinking the output.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, beta)
    y = np.convolve(w / w.sum(), s, mode='valid')
    # BUG FIX: the original sliced with `int(window_len)/2`, which is a
    # float in Python 3 and raises TypeError as a slice index; use
    # integer division instead.
    out = y[half:len(y) - half + 1]
    if monotonic:
        return out + trend
    return out
class MainPrg():
    """Main-progenitor history of one galaxy across snapshots.

    Extracts the main branch of `final_gal` from a consistent-trees
    dataset and accumulates its per-snapshot catalog rows via set_data().

    Relies on the module-level imports `np` (numpy) and `ctu`
    (tree.ctutils); the original class-level imports only created unused
    class attributes (method bodies resolve globals, not class scope) and
    prevented importing this class in isolation, so they were removed.
    """

    def __init__(self, treedata, final_gal, nout_ini=None, nout_fi=None):
        """Build the progenitor track of `final_gal`.

        :param treedata: consistent-trees data array.
        :param final_gal: identifier of the galaxy at the final snapshot.
        :param nout_ini: earliest snapshot to keep (default: tree minimum).
        :param nout_fi: latest snapshot to keep (default: tree maximum).
        """
        temp_tree = ctu.extract_main_tree(treedata, final_gal)
        if nout_ini is None:
            nout_ini = min(temp_tree['nout'])
        if nout_fi is None:
            nout_fi = max(temp_tree['nout'])
        # Snapshots stored in descending order: nout_fi, ..., nout_ini.
        self.nouts = np.arange(nout_fi, nout_ini - 1, -1)
        self.idxs = temp_tree['id']  # nout_ini, nout_fi consideration needed.
        self.ids = temp_tree['Orig_halo_id']
        self.data = None

    def set_data(self, cat, nout):
        """Copy this galaxy's row of catalog `cat` for snapshot `nout`."""
        if nout not in self.nouts:
            return
        if self.data is None:
            # Lazily allocate per-snapshot storage with the catalog's dtype.
            # BUG FIX: the original tested `self.data == None`, which is an
            # ambiguous/elementwise comparison once data is a numpy array.
            self.data = np.zeros(len(self.nouts), dtype=cat.dtype)
        inow = self.nouts == nout
        matches = np.where(cat['idx'] == self.idxs[inow])[0]
        if len(matches) > 0:
            self.data[inow] = cat[matches]

    def clip_non_detection(self):
        """Trim trailing snapshots where the galaxy was not detected."""
        # End of galaxy tree = last non-zero position.
        # Note that 'id' can be 0 if phantom, but a phantom is a valid
        # data point, so detection is keyed on 'idx' instead.
        i_first_nout = max(np.where(self.data['idx'] > 0)[0])
        # NOTE(review): this slice excludes index i_first_nout itself, i.e.
        # the last valid row is dropped -- confirm whether
        # [:i_first_nout + 1] was intended before changing it.
        self.data = self.data[:i_first_nout].copy()
        self.nouts = self.nouts[:i_first_nout].copy()
        self.ids = self.ids[:i_first_nout].copy()
        self.idxs = self.idxs[:i_first_nout].copy()

    def fill_missing_data(self):
        """Interpolate rows whose 'idx' is 0 from the nearest valid rows."""
        assert (self.ids[-1] != 0)
        # Loop over all fields except id, index, and non-physical entries.
        i_bad = np.where(self.data['idx'] == 0)[0]
        for field in self.data.dtype.names:
            # Do not modify index and id fields.
            if field in ["index", "id", "idx"]:
                continue
            arr = self.data[field]  # a view into self.data
            for i_b in i_bad:
                # Neighbouring rows might also be empty; search outwards
                # for the closest valid element on each side.
                i_l = i_b - 1
                while i_l in i_bad:
                    i_l = i_l - 1
                i_r = i_b + 1
                while i_r in i_bad:
                    i_r = i_r + 1
                # BUG FIX: the original computed i_l / i_r but then averaged
                # the *immediate* neighbours arr[i_b - 1] and arr[i_b + 1],
                # which can themselves be empty placeholders.
                arr[i_b] = (arr[i_l] + arr[i_r]) / 2.
# In[2]:
def fixed_ind_Lr(gal):
    """Return, for each snapshot, the index of the fixed-Reff radial bin.

    The raw Reff history is outlier-masked and smoothed; each entry maps the
    smoothed ("real") Reff onto the per-snapshot radial grid, where 1 Reff
    corresponds to 5 points.
    """
    nnouts = len(gal.nouts)
    ind_reff_fix = np.zeros(nnouts, dtype='i4')
    # NOTE(review): the positional 50 binds to `beta`, not `window_len`, in
    # smooth(x, beta=5, window_len=20, ...) -- confirm which was intended.
    smooth_r = smooth(mask_outlier(gal.data['rgal'], 1.5, 1.5), 50, monotonic=False)
    # Fixed-Reff index array: 1 Reff = 5 points on the radial grid.
    for i in range(nnouts):
        reff_real = smooth_r[i]
        reff = gal.data['rgal'][i]
        try:
            ind_reff_fix[i] = np.round(reff_real / reff * 5) - 1
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit.  A zero/NaN reff now leaves the
            # entry at 0 instead of aborting the whole loop.
            pass
    return ind_reff_fix
def smoothed_reff(cat, nout_merger):
    """
    Return a "representative" Reff history assuming monotonic change in Reff.

    During a merger Reff can fluctuate, and lambda at Reff has no physical
    meaning then, so Reff' is derived by linearly interpolating the Reffs
    before and after the merger.

    :param cat: one galaxy catalog over time (structured array with
        'nout' and 'rgal' fields).
    :param nout_merger: snapshot number of the merger.

    Note: relies on the module-level globals sig_lower / sig_upper for the
    sigma-clipping bounds.
    """
    i_merger = np.where(cat['nout'] == nout_merger)[0]
    ind_lower = 20
    ind_upper = 20
    reffs = cat['rgal']
    # Representative values on each side of the merger, via sigma-clipping.
    r_lefts, _, _ = scipy.stats.sigmaclip(
        reffs[max([0, i_merger - ind_lower]):i_merger], sig_lower, sig_upper)
    r_left = r_lefts[-1]
    i_left = np.where(reffs == r_left)[0]
    r_rights, _, _ = scipy.stats.sigmaclip(
        reffs[i_merger:min([i_merger + ind_upper, len(reffs)])], sig_lower, sig_upper)
    r_right = r_rights[0]
    i_right = np.where(reffs == r_right)[0]
    # BUG FIX: the original did `r_prime = reffs`, aliasing cat['rgal'] and
    # silently overwriting the caller's catalog in place; work on a copy.
    r_prime = reffs.copy()
    r_prime[i_left:i_right + 1] = np.linspace(r_left, r_right, i_right - i_left + 1)
    return r_prime
# In[3]:
import numpy as np
import scipy.stats
import tree.ctutils as ctu
import matplotlib.pyplot as plt
# Read a single galaxy evolution catalog.
import pickle
# In[4]:
# Cluster IDs to process; the trailing [:] is a no-op copy of the list.
clusters = ['10002', '04466', '17891', '36415', '35663', '06098', '07206',\
            '49096', '39990', '36413', '01605', '05427'][:]
# parameters used for lambda_arr clipping.
ind_upper = 20
ind_lower = 20
sig_upper = 2.0
sig_lower = 2.0
# Snapshot range considered.
nout_ini = 70
nout_fi = 187
bad = 0
# In[ ]:
base = '/data1/good/'
# Which catalog sub-directory to read; index 1 selects 'easy/'.
cdir = ['catalog/', 'easy/', 'catalog_GM/'][1]
verbose=True
# First pass: count the total number of galaxies over all clusters.
# NOTE(review): the file objects from pickle.load(open(...)) are never
# closed -- consider `with open(...)` blocks.
ngals_tot = 0
for cluster in clusters:
    wdir = base + cluster + '/'
    # main galaxy list
    cat = pickle.load(open(wdir + cdir + 'catalog' + str(nout_fi) + '.pickle', 'rb'))
    ngals_tot = ngals_tot + len(cat['idx'])
nnouts = nout_fi - nout_ini + 1
# Second pass: build a MainPrg (main-progenitor history) per galaxy and
# fill it with the per-snapshot catalog rows.
mpgs = []
for cluster in clusters:
    print(cluster)
    wdir = base + cluster + '/'
    # Serialize catalogs. -> Only main galaxies
    # main galaxy list
    alltrees = ctu.load_tree(wdir, is_gal=True)
    ad = alltrees.data
    tn = ad[ad['nout'] == nout_fi]
    cat = pickle.load(open(wdir + cdir + 'catalog' + str(nout_fi) + '.pickle', 'rb'))
    #idx_all = [tn['id'][tn['Orig_halo_id'] == id_final][0] for id_final in cat['id']]
    idx_all = cat['idx']
    mpg_tmp = []
    for i, idx in enumerate(idx_all):
        mpg_tmp.append(MainPrg(ad, idx))
        print(i, idx)
#    mpg_tmp =[MainPrg(ad, idx) for idx in idx_all]
    # Fill each progenitor object from every snapshot's catalog.
    for nout in range(nout_ini, nout_fi + 1):
        cat = pickle.load(open(wdir + cdir + 'catalog' + str(nout) + '.pickle', 'rb'))
        for gal in mpg_tmp:
            gal.set_data(cat, nout)
        print(nout)
    # Drain mpg_tmp into the global list (releases per-cluster references).
    while len(mpg_tmp) > 0:
        mpgs.append(mpg_tmp.pop())
# Persist all progenitor histories for later analysis.
with open('main_prgs_GM.pickle', 'wb') as f:
    pickle.dump(mpgs, f)
|
{"hexsha": "6ccafc0da1910ad8584e619f6b3913eb1e54324d", "size": 8611, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyram/analysis/save_mpgs3.py", "max_stars_repo_name": "Hoseung/pyRamAn", "max_stars_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T16:11:56.000Z", "max_issues_repo_path": "pyram/analysis/save_mpgs3.py", "max_issues_repo_name": "Hoseung/pyRamAn", "max_issues_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-02-17T13:44:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-25T15:35:05.000Z", "max_forks_repo_path": "pyram/analysis/save_mpgs3.py", "max_forks_repo_name": "Hoseung/pyRamAn", "max_forks_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T16:11:56.000Z", "avg_line_length": 29.3890784983, "max_line_length": 117, "alphanum_fraction": 0.5804203925, "include": true, "reason": "import numpy,import scipy,from scipy,import astropy,from astropy", "num_tokens": 2541}
|
from __future__ import division
from psychopy.visual import TextStim, Window
from psychopy import core, event, gui, data, logging
import numpy as np
import pandas as pd
import os
from routines import Routine

# Code for the choice titration experiment of Weber and Chapman (2005) https://doi.org/10.1016/j.obhdp.2005.01.001

# general settings
expName = 'example_2'
screen_size = [800, 600]  # window size in pixels
frames_per_second = 60
full_screen = False
background_color = '#bfbfbf'

# trial settings
choice_keys = ['a', 'l']  # response keys for the two options
escape_key = 'escape'  # key that aborts the experiment (handled by Routine)
fixation_duration = .5  # seconds of fixation before each choice

# stimuli settings
text_color = 'black'
options_x_offset = 200  # horizontal distance of each option from centre (px)
text_height = 20

# store info about the experiment session
dlg = gui.Dlg(title=expName)
dlg.addField('Participant:', 1)
dlg.addField('Age:', 25)
dlg.addField('Gender:', choices=['female', 'male', 'prefer not to disclose'])
dlg.addField('Handedness:', choices=['right', 'left', 'both'])
dlg.show()
expInfo = dict(zip(['participant', 'age', 'gender', 'hand'], dlg.data))
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName  # add the experiment name
if dlg.OK:  # then the user pressed OK
    print(expInfo)
else:
    # dialog was cancelled: show what was collected and quit cleanly
    print(expInfo)
    core.quit()

# check if data folder exists
directory=os.path.join(os.getcwd(), 'data')
if not os.path.exists(directory):
    os.makedirs(directory)

# create file name for storing data
fileName = os.path.join('data', '%s_%s_%s' % (expName, expInfo['participant'], expInfo['date']))

# save a log file
logFile = logging.LogFile(fileName + '.log', level=logging.DEBUG)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

# create a window
mywin = Window(screen_size, units='pix', color=background_color, fullscr=full_screen)

# create some stimuli: the sure option (text filled in each trial) and the fixed gamble
safe_gamble = TextStim(win=mywin, color=text_color, pos=(-options_x_offset, 0), height=text_height)
risky_gamble = TextStim(win=mywin, text="10% chance of CHF 3000", color=text_color, pos=(options_x_offset, 0), height=text_height)

# Titration state: bisect on the certain amount between value_lower and
# value_upper until the interval is narrow enough (see the break below).
n_trials = 50  # max number of trials...
value_current = 1500
value_upper = 3000
value_lower = 0

# create the dataframe
# NOTE(review): this rebinds the name `data`, shadowing the psychopy `data`
# module imported above (only needed earlier for getDateStr) -- confirm.
data = pd.DataFrame([])

# draw the stimuli
trial_routine = Routine(window=mywin, frames_per_second=frames_per_second, escape_key=escape_key)

for t in range(n_trials):
    # put here things that change at the beginning of every trial
    # certain amount = midpoint of the current bisection interval
    value_current = int(np.round(np.mean([value_upper, value_lower])))
    safe_gamble.text = "100% chance of CHF {}".format(value_current)

    # first event
    trial_routine.wait_for_time_limit(
        components=[],
        time_seconds=fixation_duration,
        label='fixation_cross')

    # second event
    key, rt = trial_routine.wait_for_keys(
        components=[safe_gamble, risky_gamble],
        valid_keys=choice_keys,
        label='gamble_choice')

    data = data.append(
        {'rt':rt, 'choice': key, 'trial': t, 'current': value_current, 'upper': value_upper, 'lower': value_lower, 'difference':value_upper-value_lower},
        ignore_index=True)  # record the responses

    # save data to file
    # (written every trial so an aborted session still leaves data on disk)
    for label in expInfo.keys():
        data[label] = expInfo[label]
    data.to_csv(fileName + '.csv')

    # put here things that change at the end of every trial
    # narrow the bisection interval toward the indifference point; which
    # bound moves depends on the key pressed
    if key == choice_keys[0]:
        value_upper = value_current
    elif key == choice_keys[1]:
        value_lower = value_current

    # stop experiment when the interval is narrow enough:
    if (value_upper-value_lower) < 25:
        break

# cleanup
mywin.close()
core.quit()
|
{"hexsha": "2c85e8a230e5861025c4e5d171799a8b00fda215", "size": 3495, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_2.py", "max_stars_repo_name": "laurafontanesi/psych-routines", "max_stars_repo_head_hexsha": "fe3a1ff055fd5b32bc1ca666a2d86ee19b3a2f49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example_2.py", "max_issues_repo_name": "laurafontanesi/psych-routines", "max_issues_repo_head_hexsha": "fe3a1ff055fd5b32bc1ca666a2d86ee19b3a2f49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example_2.py", "max_forks_repo_name": "laurafontanesi/psych-routines", "max_forks_repo_head_hexsha": "fe3a1ff055fd5b32bc1ca666a2d86ee19b3a2f49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6578947368, "max_line_length": 154, "alphanum_fraction": 0.7104434907, "include": true, "reason": "import numpy", "num_tokens": 905}
|
#include "rotation.h"
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>
namespace cpt {
Eigen::Matrix3f get_angle_axis_rotation_matrix(const Eigen::Vector3f& angleAxis) {
    // Angle-axis encoding: the vector's magnitude is the rotation angle,
    // its direction is the rotation axis.
    const float angle = angleAxis.norm();
    const Eigen::Vector3f axis = angleAxis.normalized();
    return Eigen::AngleAxisf(angle, axis).toRotationMatrix();
}
Eigen::Matrix3f get_euler_xyz_rotation_matrix(const Eigen::Vector3f& xyz) {
    // Compose per-axis rotations; applied right-to-left: R = Rz * Ry * Rx.
    const Eigen::Matrix3f rx = get_angle_axis_rotation_matrix(xyz(0) * Eigen::Vector3f::UnitX());
    const Eigen::Matrix3f ry = get_angle_axis_rotation_matrix(xyz(1) * Eigen::Vector3f::UnitY());
    const Eigen::Matrix3f rz = get_angle_axis_rotation_matrix(xyz(2) * Eigen::Vector3f::UnitZ());
    return rz * ry * rx;
}
void decompose_scale_rotation(const Eigen::Matrix3f& rotScaleMat, Eigen::Matrix3f& rot, Eigen::Vector3f& scale)
{
    // Per-axis scale factors are the column norms of the combined matrix.
    scale = rotScaleMat.colwise().norm();
    // Dividing each column by its norm leaves the rotation part.
    rot = rotScaleMat.array().rowwise() / scale.transpose().array();
    // A negative determinant indicates a reflection; flip the signs of
    // both parts so `rot` is a proper rotation.
    if (rot.determinant() < 0.f) {
        rot = -rot;
        scale = -scale;
    }
}
}
|
{"hexsha": "05314a00d4e969315f3c3bd9668fc77d9527f72b", "size": 913, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/math/rotation.cpp", "max_stars_repo_name": "b3h47pte/cuda-path-tracing", "max_stars_repo_head_hexsha": "b874b86f15b4aca18ecd40e9eb962996298f5fa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/math/rotation.cpp", "max_issues_repo_name": "b3h47pte/cuda-path-tracing", "max_issues_repo_head_hexsha": "b874b86f15b4aca18ecd40e9eb962996298f5fa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/math/rotation.cpp", "max_forks_repo_name": "b3h47pte/cuda-path-tracing", "max_forks_repo_head_hexsha": "b874b86f15b4aca18ecd40e9eb962996298f5fa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.53125, "max_line_length": 111, "alphanum_fraction": 0.6976998905, "num_tokens": 246}
|
"""Script to calculate the value of pre-flop hands for n_players
Examples:
Call this file like so to get the help:
```bash
$ python monte_carlo_rank.py --help
Usage: monte_carlo_rank.py [OPTIONS]
Multithreaded monte carlo pre-flop hand equity calculation.
Over `n_threads` threads, rank the pre-flop hands according to which lead
to winning results. Run for a maximum of `n_iterations` iterations, this
should be a big number!
Options:
--n_threads INTEGER Number of threads.
--n_iterations INTEGER Number of iterations.
--save_path TEXT Save path.
--max_n_players INTEGER Maximum number of players in a game.
--n_ranks INTEGER Number of ranks in a deck of cards.
--print_n_steps INTEGER How many steps until print.
--help Show this message and exit.
```
Call this file like so to compute rankings of pre-flop hands:
```bash
python monte_carlo_rank.py --n_threads 4 --max_n_players 6
```
"""
import collections
import pickle
import queue
from typing import Dict, List
from threading import Thread
import click
import numpy as np
from tqdm import tqdm, trange
from pluribus.poker.card import Card
from pluribus.poker.deck import Deck
from pluribus.poker.evaluation import Evaluator
from pluribus.poker.evaluation import EvaluationCard
class PreflopMatrixEvaluator:
    """Generates poker scenarios and returns a matrix of results."""

    def __init__(self, n_ranks=13):
        """Initialise class.

        :param n_ranks: number of card ranks in the deck (13 for a
            standard 52-card deck).
        """
        self._deck = Deck()
        self._evaluator = Evaluator()
        self._n_ranks = n_ranks

    def __call__(self, n_players: int) -> np.ndarray:
        """Get new delta matrix containing information about which player won."""
        # Compute the per-rank hand grouping for one random deal, then
        # write the normalised scores into an n_ranks x n_ranks matrix.
        rank_to_hands = self._compute_preflop_rankings(n_players)
        delta_matrix = self._compute_delta_matrix(rank_to_hands)
        return delta_matrix

    @property
    def n_ranks(self) -> int:
        """Return the number of ranks in a deck of cards."""
        return self._n_ranks

    def _deal_cards(self, n_players: int):
        """Deal a hand for each player and a table of cards.

        We never pop cards from the deck otherwise we'd have to reconstruct
        the deck each iteration which would add to the computational expense.
        """
        self._deck.shuffle()
        hands = collections.defaultdict(list)
        card_i = 0
        # Two hole cards per player, dealt one at a time around the table.
        for _ in range(2):
            for player_i in range(n_players):
                hands[player_i].append(self._deck[card_i])
                card_i += 1
        table = []
        # "Burn" card.
        card_i += 1
        # The flop: three community cards.
        for _ in range(3):
            table.append(self._deck[card_i])
            card_i += 1
        # Turn and river.
        # NOTE(review): card_i is not advanced after the append below, so
        # only one card is skipped (before the turn) and none before the
        # river.  Statistically harmless for Monte Carlo equity, but it is
        # not the real burn order -- confirm intent.
        for _ in range(2):
            # "Burn" card.
            card_i += 1
            table.append(self._deck[card_i])
        return hands, table

    def _to_eval_cards(self, cards: List[Card]) -> List[EvaluationCard]:
        """Convert cards representation to one suitable for deuces."""
        return [card.eval_card for card in cards]

    def _compute_preflop_rankings(self, n_players: int) -> Dict[int, List[List[Card]]]:
        """Generate random hands (one for each player) and a table and rank."""
        # Get random hands and table.
        hands, table = self._deal_cards(n_players)
        eval_table = self._to_eval_cards(table)
        # Get ranking of hands.  Ties share the same evaluator rank and end
        # up grouped under the same key.
        ranks_to_hands = collections.defaultdict(list)
        for player_i in range(n_players):
            eval_hand = self._to_eval_cards(hands[player_i])
            rank = self._evaluator.evaluate(cards=eval_hand, board=eval_table)
            ranks_to_hands[rank].append(hands[player_i])
        # The maximum score is the number of unique ranks.
        score = len(set(ranks_to_hands.keys()))
        # Ensure the ranks go from `score`, ..., 3, 2, 1 as rank gets worse.
        # (presumably lower evaluator values are stronger, as in deuces --
        # verify against the Evaluator implementation)
        normalised_rank_to_hands = collections.defaultdict(list)
        for rank in sorted(ranks_to_hands.keys()):
            for hand in ranks_to_hands[rank]:
                normalised_rank_to_hands[score].append(hand)
            score -= 1
        # Return a dictionary of hands. The keys are int, and relate to how well
        # the hand(s) did. If the hands drew, they will share the same key. The
        # bigger the key, the better the hand.
        return dict(normalised_rank_to_hands)

    def _compute_delta_matrix(self, rank_to_hands: Dict[int, List[List[Card]]]):
        """Write the results to a numpy matrix mirrored along the diagonal."""
        delta_matrix = np.zeros(shape=(self.n_ranks, self.n_ranks))
        for rank, hands in rank_to_hands.items():
            for hand in hands:
                # Get the range to go between 0 and 13.
                # (assumes Card.rank_int starts at 2 -- confirm)
                rank_0 = hand[0].rank_int - 2
                rank_1 = hand[1].rank_int - 2
                # Write rank to zeroed matrix, symmetric in the two hole cards.
                delta_matrix[rank_0, rank_1] = rank
                delta_matrix[rank_1, rank_0] = rank
        return delta_matrix
def delta_matrix_worker(
    n_ranks: int,
    min_n_players: int,
    max_n_players: int,
    sentinal_queue: queue.Queue,
    delta_matrix_queue: queue.Queue,
):
    """Worker loop: keep producing delta matrices until told to terminate.

    Each sweep generates one delta matrix per player count in
    range(min_n_players, max_n_players) and pushes it onto
    `delta_matrix_queue`.
    """
    evaluator = PreflopMatrixEvaluator(n_ranks=n_ranks)
    while True:
        # Stop as soon as a "terminate" sentinel is available; an empty
        # queue just means "keep working".
        try:
            if sentinal_queue.get(block=False) == "terminate":
                break
        except queue.Empty:
            pass
        for n_players in range(min_n_players, max_n_players):
            delta_matrix_queue.put({
                "n_players": n_players,
                "delta_matrix": evaluator(n_players=n_players),
            })
@click.command()
@click.option(
    '--n_threads', default=1, help='Number of threads.'
)
@click.option(
    '--n_iterations', default=10000000, help='Number of iterations.'
)
@click.option(
    '--save_path', default="./results.pickle", help='Save path.'
)
@click.option(
    '--max_n_players', default=6, help='Maximum number of players in a game.'
)
@click.option(
    '--n_ranks', default=13, help='Number of ranks in a deck of cards.'
)
@click.option(
    '--print_n_steps', default=10000, help='How many steps until print.'
)
def multithreaded_matrix_summation(
    n_threads: int,
    n_iterations: int,
    save_path: str,
    max_n_players: int = 6,
    n_ranks: int = 13,
    print_n_steps: int = 10000,
):
    """Multithreaded monte carlo pre-flop hand equity calculation.

    Over `n_threads` threads, rank the pre-flop hands according to which
    lead to winning results. Run for a maximum of `n_iterations` iterations,
    this should be a big number!
    """
    # How we communicate to our workers.
    delta_matrix_queue = queue.Queue()
    sentinal_queue = queue.Queue()
    threads = []
    min_n_players = 2
    # Workers iterate range(min_n_players, max_n_players + 1) exclusive of
    # the stop value, hence the + 1.
    args = (n_ranks, min_n_players, max_n_players + 1, sentinal_queue, delta_matrix_queue)
    for _ in range(n_threads):
        thread = Thread(target=delta_matrix_worker, args=args)
        threads.append(thread)
        thread.start()
    # Create a matrix of rankings for each hand for every number of
    # players.
    matrices = {
        n_players: np.zeros(shape=(n_ranks, n_ranks))
        for n_players in range(min_n_players, max_n_players + 1)
    }
    np.set_printoptions(precision=2)
    try:
        # Accumulate worker results; each queue item is one deal's matrix.
        for i in trange(n_iterations):
            result = delta_matrix_queue.get()
            n_players = result["n_players"]
            matrices[n_players] += result["delta_matrix"]
            if i > 0 and i % print_n_steps == 0:
                tqdm.write(f"\nStep {i} reached.")
                for n_players, matrix in matrices.items():
                    tqdm.write(
                        f"> For {n_players} players, the normalised matrix "
                        f"looks like:\n{matrix / np.max(matrix)}"
                    )
    except KeyboardInterrupt:
        # NOTE(review): `i` is unbound here if the interrupt arrives before
        # the first loop iteration completes -- would raise NameError.
        print(f"Control-c detected, quitting looping at iteration {i}.")
        pass
    # Save the rankings.
    with open(save_path, 'wb') as f:
        pickle.dump(matrices, f, pickle.HIGHEST_PROTOCOL)
    print(f"Saved matrix to {save_path}")
    # Signal every worker to stop, then wait for each to exit.
    for thread in threads:
        sentinal_queue.put("terminate")
        thread.join()
    print("Program terminated.")


if __name__ == "__main__":
    multithreaded_matrix_summation()
|
{"hexsha": "8fda9163a2c3393cc74bef909c9c6f1aacd09efd", "size": 8483, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/preflop_hand_ranking/monte_carlo_rank.py", "max_stars_repo_name": "keithlee96/pluribus-poker-AI", "max_stars_repo_head_hexsha": "15e52fe73dd09570e782dd0e7b9069865eb5823d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 113, "max_stars_repo_stars_event_min_datetime": "2020-08-06T15:03:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:56:34.000Z", "max_issues_repo_path": "research/preflop_hand_ranking/monte_carlo_rank.py", "max_issues_repo_name": "jumbokun/pluribus-poker-AI", "max_issues_repo_head_hexsha": "15e52fe73dd09570e782dd0e7b9069865eb5823d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "research/preflop_hand_ranking/monte_carlo_rank.py", "max_forks_repo_name": "jumbokun/pluribus-poker-AI", "max_forks_repo_head_hexsha": "15e52fe73dd09570e782dd0e7b9069865eb5823d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2020-08-17T15:51:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:10:44.000Z", "avg_line_length": 35.4937238494, "max_line_length": 90, "alphanum_fraction": 0.6404573854, "include": true, "reason": "import numpy", "num_tokens": 2011}
|
# Elaine Laguerta (github: @elaguerta)
# LBNL GIG
# File created: 19 February 2021
# Create Circuit class to mirror a dss Circuit object
# used by Solution objects to solve powerflow
import numpy as np
import pandas as pd
from . bus_group import BusGroup
from . line_group import LineGroup
from . load_group import LoadGroup
from . capacitor_group import CapacitorGroup
from . transformer_group import TransformerGroup
from . voltage_regulator_group import VoltageRegulatorGroup
from . utils import parse_phase_matrix, set_zip_values_dss
class Circuit():
    """
    Mirror of an OpenDSS Circuit object, used by Solution objects to solve
    powerflow. Aggregates element groups (buses, lines, loads, capacitors,
    transformers, voltage regulators) and exposes matrix views over them.
    """
    @classmethod
    def _set_zip_values(cls, zip_V):
        """
        sets zip values for the Circuit class
        same method as Solution.set_zip_values, just private
        param zip_V: List or nd.array with 7 values
        [a_z_p, a_i_p, a_pq_p, a_z_q, a_i_q, a_pq_q, min voltage pu]
        Note that zip values are set both on the Solution class and Circuit
        class. Users should set zip values via the Solution class.
        """
        cls.ZIP_V = np.asarray(zip_V)
        # Unpack the 7-element vector into named class attributes.
        cls.aZ_p, cls.aI_p, cls.aPQ_p = cls.ZIP_V[0:3]
        cls.aZ_q, cls.aI_q, cls.aPQ_q = cls.ZIP_V[3:6]
        cls.min_voltage_pu = cls.ZIP_V[6]
    def __init__(self, dss, Sbase=10**6):
        """
        initialize Circuit from a dss object
        Note that the Solution class runs 'redirect' on the dss file
        The Circuit does not call opendss functions directly
        param dss: OpenDSS interface object
        param Sbase: power base in VA (default 10**6)
        """
        # NOTE(review): Circuit.ZIP_V must already have been set via
        # _set_zip_values (normally done by the Solution class) before
        # construction, otherwise this line raises AttributeError -- confirm
        # all callers go through Solution.
        set_zip_values_dss(dss, Circuit.ZIP_V)
        self.Sbase = Sbase
        #: The Circuit's BusGroup
        self.buses = BusGroup(dss)
        #: The Circuit's LineGroup
        self.lines = LineGroup(dss, bus_group=self.buses)
        #: The Circuit's LoadGroup
        self.loads = LoadGroup(dss, bus_group=self.buses, zip_v=Circuit.ZIP_V)
        #: The Circuit's CapacitorGroup
        self.capacitors = CapacitorGroup(dss, bus_group=self.buses)
        #: The Circuit's TransformerGroup
        self.transformers = TransformerGroup(dss, bus_group=self.buses)
        #: The Circuit's VoltageRegulatorGroup
        self.voltage_regulators = VoltageRegulatorGroup(dss, bus_group=self.buses)
        # the main line group needs to be aware of transformers and voltage
        # regulators. It can be queried for transformer and voltage regulator
        # indices and topology
        self.lines.transformers = self.transformers
        self.lines.voltage_regulators = self.voltage_regulators
        #: A pointer to the OpenDSS object corresponding to this Circuit
        self.dss = dss
        self._orient = 'rows'  # may be overwritten by Solution
    def set_kW(self, load_name: str, kW: float):
        """
        sets a new kW for the given Load.
        Updates Load.spu, Load.ppu, Load.qpu, and Bus.sum_spu
        param load_name: name of the Load element to modify
        param kW: new real power value in kW
        """
        load = self.loads.get_element(load_name)
        bus = self.buses.get_element(load.related_bus)
        # Capture the old per-unit power so the bus aggregate can be updated
        # by delta rather than recomputed from scratch.
        old_load_spu = load.spu
        load._set_kW(kW)
        new_load_spu = load.spu
        bus._set_spu(old_load_spu, new_load_spu)
    def set_kvar(self, load_name: str, kvar: float):
        """
        sets a new kvar for the given Load.
        Updates Load.spu, Load.ppu, Load.qpu, and Bus.sum_spu
        param load_name: name of the Load element to modify
        param kvar: new reactive power value in kvar
        """
        load = self.loads.get_element(load_name)
        bus = self.buses.get_element(load.related_bus)
        # Same delta-update pattern as set_kW.
        old_load_spu = load.spu
        load._set_kvar(kvar)
        new_load_spu = load.spu
        bus._set_spu(old_load_spu, new_load_spu)
    def get_tx_idx_matrix(self):
        """
        n x 1 matrix of tx bus indices, for all Lines
        Indexed as follows:
        [0, len(Lines) - 1]: Lines
        [len(Lines), len(Transformers)- 1]: Transformers
        [len(Transformers), len(VoltageRegulators)- 1]: VoltageRegulators
        """
        tx_buses = self.lines.get_bus_ids('tx')
        # NOTE(review): the AttributeError fallback presumably covers circuits
        # where the transformer/VR groups expose no get_bus_ids -- confirm
        # which group configurations actually trigger it.
        try:
            tx_buses += self.transformers.get_bus_ids('tx')
            tx_buses += self.voltage_regulators.get_bus_ids('tx')
        except AttributeError:
            pass
        return np.asarray([self.buses.get_idx(bus) for bus in tx_buses])
    def get_rx_idx_matrix(self):
        """
        n x 1 matrix of rx bus indices. Indexed by line index,
        which is the same value as in opendss
        """
        rx_buses = self.lines.get_bus_ids('rx')
        # Same fallback behavior as get_tx_idx_matrix.
        try:
            rx_buses += self.transformers.get_bus_ids('rx')
            rx_buses += self.voltage_regulators.get_bus_ids('rx')
        except AttributeError:
            pass
        return np.asarray([self.buses.get_idx(bus) for bus in rx_buses])
    def _orient_switch(self, matrix):
        # Return the matrix as-is for row orientation, transposed for column
        # orientation.
        # NOTE(review): any other value of self._orient silently returns None;
        # consider raising instead -- left unchanged here.
        if self._orient == 'rows':
            return matrix
        elif self._orient == 'cols':
            return matrix.transpose()
    def get_spu_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of complex spu indexed by bus index
        """
        spu_matrix = self.loads.get_spu_matrix()
        return self._orient_switch(spu_matrix)
    def get_cappu_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of real cappu, columns indexed by bus index
        """
        cappu_matrix = self.capacitors.get_cappu_matrix()
        return self._orient_switch(cappu_matrix)
    def get_aPQ_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of all load.aPQ_p, aggregated by phase on bus,
        columns indexed by bus
        """
        matrix = self.loads._get_zip_val_matrix('aPQ_p')
        return self._orient_switch(matrix)
    def get_aI_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of all load.aI_p, aggregated by phase on bus,
        columns indexed by bus
        """
        matrix = self.loads._get_zip_val_matrix('aI_p')
        return self._orient_switch(matrix)
    def get_aZ_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of all load.aZ_p, aggregated by phase on bus,
        columns indexed by bus
        """
        matrix = self.loads._get_zip_val_matrix('aZ_p')
        return self._orient_switch(matrix)
    def get_wpu_matrix(self) -> np.ndarray:
        """
        3 x n or n x 3 matrix of all real wpu, columns indexed by bus
        Currently set to all zeros.
        TODO: Implement logic to set this as needed.
        """
        return self._orient_switch(np.zeros((self.buses.num_elements, 3),
                                            dtype=float))
    def get_total_lines(self):
        """ returns number of Lines, transformers, and voltage regulators * 2"""
        total = self.lines.num_elements
        # Transformer/VR groups may lack num_elements on some circuits; treat
        # them as contributing zero in that case.
        try:
            total += self.transformers.num_elements
        except AttributeError:
            pass
        try:
            total += self.voltage_regulators.num_elements
        except AttributeError:
            pass
        return total
    def get_nominal_bus_powers(self) -> pd.DataFrame:
        """ 3 x n or n x 3 matrix of total nominal powers by bus"""
        # Capacitors inject reactive power, hence the -1j term.
        data = self.get_spu_matrix() - 1j * self.get_cappu_matrix()
        data_type = complex
        index = self.buses.all_names()
        cols = ['A', 'B', 'C']
        # In column orientation the axes are swapped, so labels swap too.
        if self._orient == 'cols':
            # force a deep copy swap to avoid pointer issues
            temp = [_ for _ in cols]
            cols = [_ for _ in index]
            index = temp
        return pd.DataFrame(data=data, index=index, columns=cols, dtype=data_type)
    def _assign_to_buses(self, ckt_element_group):
        """
        For all elements in the ckt_element_group, gives the bus
        associated with CircuitElement.related_bus a pointer to the element
        """
        for ele in ckt_element_group.get_elements():
            bus = self.buses.get_element(ele.related_bus)
            # e.g. a Load element is appended to bus.loads; the list attribute
            # is created lazily on first use.
            element_list_ptr = f'{ele.__class__.__name__}s'.lower()
            try:
                getattr(bus, element_list_ptr)
            except(AttributeError):
                setattr(bus, element_list_ptr, [])
            getattr(bus, element_list_ptr).append(ele)
|
{"hexsha": "5d83041491983c7cebe106cba0a3c8173462fc7c", "size": 7955, "ext": "py", "lang": "Python", "max_stars_repo_path": "gigpower/src/gigpower/circuit.py", "max_stars_repo_name": "elaguerta/gigpower", "max_stars_repo_head_hexsha": "22e0a6152fa8d7a04f6067f3d500bfee042a98f9", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gigpower/src/gigpower/circuit.py", "max_issues_repo_name": "elaguerta/gigpower", "max_issues_repo_head_hexsha": "22e0a6152fa8d7a04f6067f3d500bfee042a98f9", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-15T16:50:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-15T16:53:23.000Z", "max_forks_repo_path": "gigpower/src/gigpower/circuit.py", "max_forks_repo_name": "msankur/gigpower", "max_forks_repo_head_hexsha": "22e0a6152fa8d7a04f6067f3d500bfee042a98f9", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5235849057, "max_line_length": 82, "alphanum_fraction": 0.6275298554, "include": true, "reason": "import numpy", "num_tokens": 1947}
|
#include <gtest/gtest.h>
#include <boost/math/quaternion.hpp>
#include "test_util.h"
#include "ApproachCube.hpp"
#include "SwarmieSensors.hpp"
#include "Tag.hpp"
// Test fixture: fresh sensors and ApproachCube behaviour per test, primed
// with one Update() so GetAction() is valid before any detections arrive.
class ApproachCubeTest : public testing::Test
{
protected:
   SwarmieSensors sensors;
   ApproachCube approach;
   boost::math::quaternion<double> defaultOrientation;
   // Constructor arguments are an arbitrary (non-identity) tag orientation.
   ApproachCubeTest() : defaultOrientation(1.2, 1.2, 1.2, 2.1) {
      approach.Update(sensors, SwarmieAction());
   }
};
// With no tag detections at all, the behaviour must not command movement.
TEST_F(ApproachCubeTest, noCubeNoMovement)
{
   EXPECT_FALSE(is_moving(approach.GetAction()));
}
// A nest tag (not a cube tag) must never trigger an approach, even after
// many update cycles.
TEST_F(ApproachCubeTest, nestTagOnlyNoMovement)
{
   Tag t = tag_top_left(Tag::NEST_TAG_ID);
   sensors.DetectedTag(t);
   approach.Update(sensors, SwarmieAction());
   // Give the controller up to 30 cycles to (incorrectly) start moving.
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_FALSE(is_moving(approach.GetAction()));
}
// Even a well-aligned nest tag must not trigger an approach: alignment alone
// is not sufficient, the tag id must be a cube's.
TEST_F(ApproachCubeTest, nestTagAlignedNoMovement)
{
   Tag t(Tag::NEST_TAG_ID, -0.023, -0.12, 0.5, defaultOrientation);
   ASSERT_TRUE(fabs(t.Alignment()) < 0.01) << "Tag not aligned: " << t.Alignment() << std::endl;
   sensors.DetectedTag(t);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_FALSE(is_moving(approach.GetAction()));
}
// A cube tag that is far off-center (top-left of the frame) must not start
// an approach.
TEST_F(ApproachCubeTest, tagMisalignedNoMovement)
{
   Tag t = tag_top_left(Tag::CUBE_TAG_ID);
   sensors.DetectedTag(t);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_FALSE(is_moving(approach.GetAction()));
}
// Happy path: an aligned cube tag must cause movement within 30 cycles.
TEST_F(ApproachCubeTest, tagAlignedMovement)
{
   Tag t(Tag::CUBE_TAG_ID, -0.022, -0.12, 0.4, defaultOrientation);
   ASSERT_TRUE(fabs(t.Alignment()) < 0.01) << "Tag not aligned: " << t.Alignment() << std::endl;
   sensors.DetectedTag(t);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
}
// Once the cube tag disappears, movement must stop and stay stopped: the
// long loop guards against a stale integral term keeping the robot moving.
TEST_F(ApproachCubeTest, movementStopsWhenTagVanishes)
{
   Tag t(Tag::CUBE_TAG_ID, -0.022, -0.12, 0.4, defaultOrientation);
   sensors.DetectedTag(t);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
   sensors.ClearDetections();
   // Update many times to test that the integral term is no longer
   // having an effect.
   for(int i = 0; i < 1000; i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_FALSE(is_moving(approach.GetAction()));
}
// This is the actual position of cubes when this bug is triggered.
// Tag[0]{ alignment: -0.0582302 | position: (-0.0812302, -0.0454901, 0.426023) | Orientation: (0.954242,-0.0389091,0.160215,-0.24948)}n
// Tag[0]{ alignment: 0.00210978 | position: (-0.0208902, -0.0438522, 0.423104) | Orientation: (0.0378738,0.948888,0.2493,0.189805)}
// Regression test: an aligned cube tag must still trigger an approach when a
// second, misaligned cube tag (to the left) is detected in the background,
// regardless of the order in which the two detections arrive.
TEST_F(ApproachCubeTest, approachWhenBackgroundTagMisalignedToLeft)
{
   Tag aligned(Tag::CUBE_TAG_ID, -0.0208902, -0.0438522, 0.423104,
               boost::math::quaternion<double>(0.0378738,0.948888,0.2493,0.189805));
   Tag background(Tag::CUBE_TAG_ID, -0.0812302, -0.0454901, 0.426023,
               boost::math::quaternion<double>(0.954242,-0.0389091,0.160215,-0.24948));
   // First ordering: background tag reported before the aligned one.
   sensors.DetectedTag(background);
   sensors.DetectedTag(aligned);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
   sensors.ClearDetections();
   approach.Update(sensors, SwarmieAction());
   // Second ordering: aligned tag reported first.
   sensors.DetectedTag(aligned);
   sensors.DetectedTag(background);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
}
// Mirror of the previous regression test with the background tag misaligned
// to the right of the aligned tag; both detection orders must still approach.
TEST_F(ApproachCubeTest, approachWhenBackgroundTagMisalignedToRight)
{
   Tag aligned(Tag::CUBE_TAG_ID, -0.0208902, -0.0438522, 0.423104,
               boost::math::quaternion<double>(0.0378738,0.948888,0.2493,0.189805));
   Tag background(Tag::CUBE_TAG_ID, 0.1012302, -0.0454901, 0.426023,
               boost::math::quaternion<double>(0.954242,-0.0389091,0.160215,-0.24948));
   sensors.DetectedTag(background);
   sensors.DetectedTag(aligned);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
   sensors.ClearDetections();
   approach.Update(sensors, SwarmieAction());
   // Reversed detection order must behave the same.
   sensors.DetectedTag(aligned);
   sensors.DetectedTag(background);
   approach.Update(sensors, SwarmieAction());
   for(int i = 0; i < 30 && !is_moving(approach.GetAction()); i++) {
      approach.Update(sensors, SwarmieAction());
   }
   EXPECT_TRUE(is_moving(approach.GetAction()));
}
|
{"hexsha": "64466cfa9e4e5adf145dc3647cd7e5cef44da987", "size": 5223, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/behaviours/test/approach_cube_test.cpp", "max_stars_repo_name": "BCLab-UNM/SwarmBaseCode-Modular-Public", "max_stars_repo_head_hexsha": "2061796570baf65deeb74f29444fcaf3b6464aa1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/behaviours/test/approach_cube_test.cpp", "max_issues_repo_name": "BCLab-UNM/SwarmBaseCode-Modular-Public", "max_issues_repo_head_hexsha": "2061796570baf65deeb74f29444fcaf3b6464aa1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/behaviours/test/approach_cube_test.cpp", "max_forks_repo_name": "BCLab-UNM/SwarmBaseCode-Modular-Public", "max_forks_repo_head_hexsha": "2061796570baf65deeb74f29444fcaf3b6464aa1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9155844156, "max_line_length": 136, "alphanum_fraction": 0.6869615164, "num_tokens": 1532}
|
from itertools import combinations_with_replacement as cwr
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
import scipy.sparse as sp
import anndata2ri
from anndata._core.sparse_dataset import SparseDataset
from controller.cellar.utils.exceptions import UserError
from ._neighbors import get_spatial_knn_graph, get_spatial_knn_graph_10x
from app import logger
# Import the R packages this module wraps. A bare `except:` previously
# swallowed KeyboardInterrupt/SystemExit too and discarded the original
# error; narrow the clause and chain the cause so the real failure
# (missing R, missing package, broken rpy2) is visible in the traceback.
try:
    STvEA = importr('STvEA')
    Matrix = importr('Matrix')
except Exception as e:
    raise ImportError("Could not import R Libraries.") from e
def adjScoreProteinsCODEX(
        adata, path_to_df, n_neighbors=5, num_cores=1, num_perms=50,
        key='spatial_nneigh'):
    """
    Computes a neighbors graph using the spatial tile
    and use it to compute a colocalization score of the proteins.
    See https://github.com/CamaraLab/STvEA
    Returns a pandas DataFrame as produced by STvEA.AdjScoreProteins_internal,
    with the 'f' and 'g' protein columns converted back to 0-based indices.
    Parameters
    __________
    adata: anndata.AnnData object
    path_to_df: str
        The path to data.csv file containing spatial information.
        In particular, it must contain the columns 'rx', 'ry', 'rid' (optional)
        where 'rx' and 'ry' correspond to the x and y coordinates,
        while 'rid' contains the order of the cells in adata.
    n_neighbors: int
        Number of neighbors to compute.
    num_cores: int
        Number of cores to use.
    num_perms: int
        Number of permutations to use.
    key: str
        If adata is not None, will use this key to store the adjacency
        matrix in adata.obsp
    """
    # Reuse a cached adjacency matrix only if it was built with the same
    # number of neighbors; otherwise (re)build and cache it under `key`.
    if key in adata.obsp and key in adata.uns and\
            adata.uns[key]['n_neighbors'] == n_neighbors:
        adj = adata.obsp[key]
    else:
        adj = get_spatial_knn_graph(
            path_to_df, n_neighbors=n_neighbors, adata=adata, key=key)
    # We will only consider non-zero rows/cols.
    adata = adata[adj.getnnz(1) > 0]
    # We use the fact that adj is symmetric
    adj = adj[adj.getnnz(1) > 0][:, adj.getnnz(0) > 0]
    x_cords, y_cords = adj.nonzero()
    # Shift coordinates by one because R's sparseMatrix uses 1-based indices.
    adj_mat = Matrix.sparseMatrix(
        i=ro.IntVector(x_cords + 1),
        j=ro.IntVector(y_cords + 1),
        x=1,
        dims=ro.IntVector(np.array([*adj.shape])))
    # Build the protein expression matrix on the R side: sparse path keeps
    # only the nonzero entries, dense path converts the whole array.
    if isinstance(adata.X, SparseDataset) or sp.issparse(adata.X):
        xx_cords, yy_cords = adata.X.nonzero()
        protein_mat = Matrix.sparseMatrix(
            i=ro.IntVector(xx_cords + 1),
            j=ro.IntVector(yy_cords + 1),
            x=adata.X[xx_cords, yy_cords],
            dims=ro.IntVector(np.array([*adata.shape])))
    else:
        protein_mat = ro.numpy2ri.py2rpy(np.array(adata.X))
    # Name the columns 1..n_proteins (1-based) and enumerate all unordered
    # protein pairs (with self-pairs) for the score computation.
    colnames = list(range(1, adata.shape[1] + 1))
    protein_mat = ro.r("`colnames<-`")(protein_mat, ro.IntVector(colnames))
    protein_pairs = np.array(list(cwr(colnames, 2)))
    protein_pairs = ro.numpy2ri.py2rpy(np.array(protein_pairs))
    res = STvEA.AdjScoreProteins_internal(
        adj_mat, protein_mat, protein_pairs,
        num_cores=num_cores, num_perms=num_perms)
    res = ro.pandas2ri.rpy2py_dataframe(res)
    # Careful: Subtract 1 to map the R 1-based protein ids back to 0-based.
    res['f'] -= 1
    res['g'] -= 1
    return res
def adjScoreClustersCODEX(
        adata, path_to_df, n_neighbors=3, key='spatial_nneigh',
        labels_key='labels', num_cores=1):
    """
    Computes a neighbors graph using the spatial tile
    and use it to compute a colocalization score of the clusters.
    See https://github.com/CamaraLab/STvEA
    Returns a pandas DataFrame as produced by
    STvEA.AdjScoreClustersCODEX_internal.
    Parameters
    __________
    adata: anndata.AnnData object
    path_to_df: str
        The path to data.csv file containing spatial information.
        In particular, it must contain the columns 'rx', 'ry', 'rid' (optional)
        where 'rx' and 'ry' correspond to the x and y coordinates,
        while 'rid' contains the order of the cells in adata.
    n_neighbors: int
        Number of neighbors to compute.
    key: str
        If adata is not None, will use this key to store the adjacency
        matrix in adata.obsp
    labels_key: str
        Key in adata.obs holding the cluster labels.
    num_cores: int
        Number of cores to use.
    """
    if labels_key not in adata.obs:
        raise UserError("No labels found in adata. Cannot compute " +
                        "colocalization score.")
    # Reuse a cached adjacency matrix only if built with the same n_neighbors.
    if key in adata.obsp and key in adata.uns and\
            adata.uns[key]['n_neighbors'] == n_neighbors:
        adj = adata.obsp[key]
    else:
        adj = get_spatial_knn_graph(
            path_to_df, n_neighbors=n_neighbors, adata=adata, key=key)
    x_cords, y_cords = adj.nonzero()
    # R's sparseMatrix is 1-indexed, hence the +1 on both coordinate vectors.
    adj_mat = Matrix.sparseMatrix(
        i=ro.IntVector(x_cords + 1),
        j=ro.IntVector(y_cords + 1),
        x=1,
        dims=ro.IntVector(np.array([*adj.shape])))
    labels = ro.IntVector(adata.obs[labels_key])
    res = STvEA.AdjScoreClustersCODEX_internal(
        adj_mat, labels, num_cores=num_cores)
    res = ro.pandas2ri.rpy2py_dataframe(res)
    return res
def adjScoreClusters10x(
        adata, path_to_df, n_neighbors=3, key='spatial_nneigh',
        labels_key='labels', num_cores=1):
    """
    Same thing for 10x spatial transcriptomics data
    Computes a neighbors graph using the spatial tile
    and use it to compute a colocalization score of the clusters.
    See https://github.com/CamaraLab/STvEA
    Returns a pandas DataFrame as produced by
    STvEA.AdjScoreClustersCODEX_internal.
    Parameters
    __________
    adata: anndata.AnnData object
    path_to_df: str
        The path to data.csv file containing spatial information.
        If None, try to use the 'spatial_dict' stored in adata.
    n_neighbors: int
        Number of neighbors to compute.
    key: str
        If adata is not None, will use this key to store the adjacency
        matrix in adata.obsp
    labels_key: str
        Key in adata.obs holding the cluster labels.
    num_cores: int
        Number of cores to use.
    """
    if labels_key not in adata.obs:
        raise UserError("No labels found in adata. Cannot compute " +
                        "colocalization score.")
    # Reuse a cached adjacency matrix only if built with the same n_neighbors;
    # note the 10x-specific graph builder is used here.
    if key in adata.obsp and key in adata.uns and\
            adata.uns[key]['n_neighbors'] == n_neighbors:
        adj = adata.obsp[key]
    else:
        adj = get_spatial_knn_graph_10x(
            path_to_df, n_neighbors=n_neighbors, adata=adata, key=key)
    x_cords, y_cords = adj.nonzero()
    # R's sparseMatrix is 1-indexed, hence the +1 on both coordinate vectors.
    adj_mat = Matrix.sparseMatrix(
        i=ro.IntVector(x_cords + 1),
        j=ro.IntVector(y_cords + 1),
        x=1,
        dims=ro.IntVector(np.array([*adj.shape])))
    labels = ro.IntVector(adata.obs[labels_key])
    res = STvEA.AdjScoreClustersCODEX_internal(
        adj_mat, labels, num_cores=num_cores)
    res = ro.pandas2ri.rpy2py_dataframe(res)
    return res
|
{"hexsha": "60f20d5dd89f7f29faa8d269d502d1919c65205a", "size": 6593, "ext": "py", "lang": "Python", "max_stars_repo_path": "controller/cellar/core/_spatial_scores.py", "max_stars_repo_name": "euxhenh/cellar", "max_stars_repo_head_hexsha": "679387216043f3d287ea29a15f78868f412d2948", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-09-08T16:56:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T03:13:29.000Z", "max_issues_repo_path": "controller/cellar/core/_spatial_scores.py", "max_issues_repo_name": "euxhenh/cellar", "max_issues_repo_head_hexsha": "679387216043f3d287ea29a15f78868f412d2948", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "controller/cellar/core/_spatial_scores.py", "max_forks_repo_name": "euxhenh/cellar", "max_forks_repo_head_hexsha": "679387216043f3d287ea29a15f78868f412d2948", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-20T03:04:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T03:04:44.000Z", "avg_line_length": 33.4670050761, "max_line_length": 79, "alphanum_fraction": 0.6569088427, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1779}
|
[STATEMENT]
lemma equivalent_complements:
assumes \<open>complements F G\<close>
assumes \<open>equivalent_registers G G'\<close>
shows \<open>complements F G'\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. complements F G'
[PROOF STEP]
apply (rule complementsI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. compatible F G'
2. iso_register (F;G')
[PROOF STEP]
apply (metis assms(1) assms(2) compatible_comp_right complements_def equivalent_registers_def iso_register_is_register)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. iso_register (F;G')
[PROOF STEP]
by (metis assms(1) assms(2) complements_def equivalent_registers_def equivalent_registers_pair_right iso_register_comp)
|
{"llama_tokens": 270, "file": "Registers_Laws_Complement_Quantum", "length": 3}
|
# This file was generated, do not modify it. # hide
using HTTP
using MLJ
using PyPlot
import DataFrames: DataFrame, describe
using UrlDownload
MLJ.color_off() # hide
# Download the UCI Wine dataset; the CSV has no header row, so supply
# the 14 column names (class label + 13 chemical features) explicitly.
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
header = ["Class", "Alcool", "Malic acid", "Ash", "Alcalinity of ash",
          "Magnesium", "Total phenols", "Flavanoids",
          "Nonflavanoid phenols", "Proanthcyanins", "Color intensity",
          "Hue", "OD280/OD315 of diluted wines", "Proline"]
data = urldownload(url, true, format=:CSV, header=header);
|
{"hexsha": "75f90ce643128b2e63bd80be5a3013f2e7411e7b", "size": 561, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "__site/assets/end-to-end/wine/code/ex1.jl", "max_stars_repo_name": "giordano/DataScienceTutorials.jl", "max_stars_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2021-08-09T11:35:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T06:20:43.000Z", "max_issues_repo_path": "__site/assets/end-to-end/wine/code/ex1.jl", "max_issues_repo_name": "giordano/DataScienceTutorials.jl", "max_issues_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 56, "max_issues_repo_issues_event_min_datetime": "2019-10-22T00:06:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-21T14:38:09.000Z", "max_forks_repo_path": "__site/assets/end-to-end/wine/code/ex1.jl", "max_forks_repo_name": "giordano/DataScienceTutorials.jl", "max_forks_repo_head_hexsha": "8284298842e0d77061cf8ee767d0899fb7d051ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-11-20T16:25:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-05T11:55:15.000Z", "avg_line_length": 40.0714285714, "max_line_length": 79, "alphanum_fraction": 0.6951871658, "num_tokens": 162}
|
(* 1st-order unification did not work when in competition with pattern unif. *)
(* Regression test: the proof is intentionally left unfinished (see the
   trailing comment) -- the point is that `apply HS` must succeed. *)
Set Implicit Arguments.
Lemma test : forall
  (A : Type)
  (B : Type)
  (f : A -> B)
  (S : B -> Prop)
  (EV : forall y (f':A->B), (forall x', S (f' x')) -> S (f y))
  (HS : forall x', S (f x'))
  (x : A),
  S (f x).
Proof.
  intros. eapply EV. intros.
  (* worked in v8.2 but not in v8.3beta, fixed in r12898 *)
  apply HS.
(* still not compatible with 8.2 because an evar can be solved in
   two different ways and is left open *)
{"author": "mattam82", "repo": "Coq-misc", "sha": "60bc3cbe72083f4fa1aa759914936e4fa3d6b42e", "save_path": "github-repos/coq/mattam82-Coq-misc", "path": "github-repos/coq/mattam82-Coq-misc/Coq-misc-60bc3cbe72083f4fa1aa759914936e4fa3d6b42e/test-suite/bugs/closed/shouldsucceed/2244.v"}
|
"""
Provides functions for processing a video file into numpy arrays
for RGB data and optical flow data, which can be used with the I3D model.
"""
import cv2
import numpy as np
def _raw_numpy_array(video_file, nframes=None):
    """
    Loads a video from the given file. Will set the number
    of frames to `nframes` if this parameter is not `None`.
    Returns:
        - (width, height, arr): The width and height of the (resized) video,
          and a numpy float32 array of shape (1, frames, height, width, 3)
          with the parsed contents of the video.
    """
    # Read video
    cap = cv2.VideoCapture(video_file)
    # Get properties of the video
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    # Min allowed height or width (whatever is smaller), in pixels
    min_dimension = 256.0
    # Determine scaling factors of width and height
    assert min(w, h) > 0, 'Cannot resize {} with W={}, H={}'.format(video_file, w, h)
    scale = min_dimension / min(w, h)
    w = int(w * scale)
    h = int(h * scale)
    buf = np.zeros((1, frame_count, h, w, 3), np.dtype('float32'))
    fc, flag = 0, True
    while fc < frame_count and flag:
        flag, image = cap.read()
        if flag:
            image = cv2.resize(image, (w, h))
            buf[0, fc] = image
            fc += 1
    cap.release()
    if nframes is not None:
        if nframes < frame_count:
            # Keep a centered temporal window of exactly `nframes` frames.
            # (BUGFIX: the old code kept 2*(nframes//2) frames, so an odd
            # `nframes` silently produced one frame too few.)
            t1 = frame_count // 2 - nframes // 2
            buf = buf[:, t1:t1 + nframes, :, :, :]
        elif nframes > frame_count:
            # Pad the time axis to `nframes` by cyclically repeating frames.
            buf = np.resize(buf, (1, nframes, h, w, 3))
    return w, h, buf
def _crop_video(numpy_video, size, desired_size):
"""
Crop a video of the given size (WIDTH, HEIGHT) into a square of `desired_size`.
The video is represented as a numpy array. This func is for internal usage.
"""
w, h = size
h1, h2 = int(h/2) - int(desired_size/2), int(h/2) + int(desired_size/2)
w1, w2 = int(w/2) - int(desired_size/2), int(w/2) + int(desired_size/2)
return numpy_video[:, :, h1:h2, w1:w2, :]
def rgb_data(video_file, size, nframes=None):
    """
    Loads a numpy array of shape (1, nframes, size, size, 3) from a video file.
    Values contained in the array are based on RGB values of each frame in the video.
    Parameter `size` should be an int (pixels) for a square cropping of the video.
    Omitting the parameter `nframes` will preserve the original # frames in the video.
    """
    # Decode the video into a float32 frame buffer.
    width, height, frames = _raw_numpy_array(video_file, nframes=nframes)
    # Map 8-bit pixel values from [0, 255] into [-1, 1].
    frames[0, :] = ((frames[0, :] / 255.0) * 2) - 1
    # Return the centered square crop expected by the model.
    return _crop_video(frames, (width, height), size)
def flow_data(video_file, size, nframes=None):
    """
    Loads a numpy array of shape (1, nframes, size, size, 2) from a video file.
    Values contained in the array are based on optical flow of the video.
    https://docs.opencv.org/3.1.0/d6/d39/classcv_1_1cuda_1_1OpticalFlowDual__TVL1.html
    Parameter `size` should be an integer (pixels) for a square cropping of the video.
    Omitting the parameter `nframes` will preserve the original # frames in the video.
    """
    # Load video into numpy array, and crop the video
    w, h, buf = _raw_numpy_array(video_file, nframes=nframes)
    buf = _crop_video(buf, (w, h), size)
    num_frames = buf.shape[1]
    flow = np.zeros((1, num_frames, size, size, 2), dtype='float32')
    # Convert to grayscale. OpenCV decodes frames in BGR channel order, so the
    # ITU-R 601 luma weights (0.2989 R, 0.5870 G, 0.1140 B) must be listed in
    # B, G, R order. (BUGFIX: they were previously applied in RGB order,
    # swapping the red and blue contributions.)
    buf = np.dot(buf, np.array([0.1140, 0.5870, 0.2989]))
    # Apply optical flow algorithm between consecutive frames. Frame 0 has no
    # predecessor, so flow[0, 0] stays all-zero.
    for i in range(1, num_frames):
        prev, cur = buf[0, i - 1], buf[0, i]
        cur_flow = cv2.calcOpticalFlowFarneback(prev, cur, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        # Truncate values to [-20, 20] and scale into [-1, 1]
        np.clip(cur_flow, -20, 20, out=cur_flow)
        cur_flow /= 20
        flow[0, i] = cur_flow
    return flow
|
{"hexsha": "284896f1a986946cea2667aa9b467c23ce2dd02f", "size": 3839, "ext": "py", "lang": "Python", "max_stars_repo_path": "video-classifier/process_video.py", "max_stars_repo_name": "Sben05/Sportable", "max_stars_repo_head_hexsha": "70a7c315b24a72e32a957978ec901d35507a0456", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "video-classifier/process_video.py", "max_issues_repo_name": "Sben05/Sportable", "max_issues_repo_head_hexsha": "70a7c315b24a72e32a957978ec901d35507a0456", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "video-classifier/process_video.py", "max_forks_repo_name": "Sben05/Sportable", "max_forks_repo_head_hexsha": "70a7c315b24a72e32a957978ec901d35507a0456", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-28T21:02:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-28T21:02:02.000Z", "avg_line_length": 30.9596774194, "max_line_length": 88, "alphanum_fraction": 0.659025788, "include": true, "reason": "import numpy", "num_tokens": 1191}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from dataclasses import dataclass
from typing import Dict, List
import numpy as np
import numpy.testing as npt
from caffe2.python import schema, workspace
from ml.rl import types as rlt
from ml.rl.preprocessing.feature_extractor import (
PredictorFeatureExtractor,
TrainingFeatureExtractor,
WorldModelFeatureExtractor,
id_list_schema,
id_score_list_schema,
map_schema,
)
from ml.rl.preprocessing.identify_types import CONTINUOUS, PROBABILITY
from ml.rl.preprocessing.normalization import MISSING_VALUE, NormalizationParameters
from ml.rl.test.utils import (
ABIdFeatures,
CIdFeatures,
FloatOnlySequence,
IdAndFloatSequence,
IdOnlySequence,
NumpyFeatureProcessor,
SequenceFeatures,
)
class FeatureExtractorTestBase(unittest.TestCase):
    """Shared fixtures for the feature-extractor test cases.

    Supplies normalization parameters, helpers that feed raw caffe2 workspace
    blobs for each input field of the extractor net, and the expected dense /
    sequence feature arrays those raw inputs should extract to.
    """

    def get_state_normalization_parameters(self):
        """State features 1-4: odd ids PROBABILITY, even ids CONTINUOUS."""
        return {
            i: NormalizationParameters(
                feature_type=PROBABILITY if i % 2 else CONTINUOUS, mean=0, stddev=1
            )
            for i in range(1, 5)
        }

    def get_action_normalization_parameters(self):
        """Action features 11-13: odd ids CONTINUOUS, even ids PROBABILITY."""
        # Sorted order: 12, 11, 13
        return {
            i: NormalizationParameters(
                feature_type=CONTINUOUS if i % 2 else PROBABILITY, mean=0, stddev=1
            )
            for i in range(11, 14)
        }

    def setup_state_features(self, ws, field):
        """Feed a 3-row sparse state-feature map (row 1 is empty)."""
        lengths = np.array([3, 0, 5], dtype=np.int32)
        keys = np.array([2, 1, 9, 1, 2, 3, 4, 5], dtype=np.int64)
        values = np.arange(8).astype(np.float32)
        ws.feed_blob(str(field.lengths()), lengths)
        ws.feed_blob(str(field.keys()), keys)
        ws.feed_blob(str(field.values()), values)
        return lengths, keys, values

    def expected_state_features(self, normalize):
        """Dense form of setup_state_features; unknown feature id 9 is dropped."""
        # Feature order: 1, 3, 2, 4
        dense = np.array(
            [
                [1, MISSING_VALUE, 0, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [3, 5, 4, 6],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [1, 3, 2, 4], self.get_state_normalization_parameters()
            )
        return dense

    def setup_next_state_features(self, ws, field):
        """Feed a 3-row sparse next-state-feature map."""
        lengths = np.array([2, 2, 4], dtype=np.int32)
        keys = np.array([2, 1, 9, 1, 2, 3, 4, 5], dtype=np.int64)
        values = np.arange(10, 18).astype(np.float32)
        ws.feed_blob(str(field.lengths()), lengths)
        ws.feed_blob(str(field.keys()), keys)
        ws.feed_blob(str(field.values()), values)
        return lengths, keys, values

    def expected_next_state_features(self, normalize):
        """Dense form of setup_next_state_features."""
        # Feature order: 1, 3, 2, 4
        dense = np.array(
            [
                [11, MISSING_VALUE, 10, MISSING_VALUE],
                [13, MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [MISSING_VALUE, 15, 14, 16],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [1, 3, 2, 4], self.get_state_normalization_parameters()
            )
        return dense

    def expected_tiled_next_state_features(self, normalize):
        """Next-state rows repeated once per possible next action (2 each)."""
        # NOTE: this depends on lengths of possible next action
        # Feature order: 1, 3, 2, 4
        dense = np.array(
            [
                [11, MISSING_VALUE, 10, MISSING_VALUE],
                [11, MISSING_VALUE, 10, MISSING_VALUE],
                [13, MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [13, MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [MISSING_VALUE, 15, 14, 16],
                [MISSING_VALUE, 15, 14, 16],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [1, 3, 2, 4], self.get_state_normalization_parameters()
            )
        return dense

    def id_mapping_config(self):
        """Raw-id -> embedding-index mappings used by sequence features."""
        return {
            "a_mapping": rlt.IdMapping(ids=[20020, 20021]),
            "b_mapping": rlt.IdMapping(ids=[20031, 20030, 20032]),
            "c_mapping": rlt.IdMapping(ids=[20040, 20041, 20042, 20043, 20044]),
        }

    def setup_state_sequence_features(self, ws, id_list_field, id_score_list_field):
        """Feed nested id-list and id-score-list blobs for the state sequence.

        Returns the fed arrays in a nested dict mirroring the blob structure.
        """
        # id_list
        id_list_lengths = np.array([3, 2, 0], dtype=np.int32)
        id_list_values_keys = np.array([2002, 2003, 2004, 2004, 2005], dtype=np.int64)
        id_list_values_values_lengths = np.array([2, 2, 1, 4, 1], dtype=np.int32)
        id_list_values_values_values = np.array(
            [20020, 20021, 20030, 20031, 20040, 20041, 20042, 20043, 20044, 20050],
            dtype=np.int64,
        )
        # id_score_list
        id_score_list_lengths = np.array([1, 2, 3], dtype=np.int32)
        id_score_list_values_keys = np.array(
            [1004, 1004, 1005, 1001, 1002, 1003], dtype=np.int64
        )
        id_score_list_values_values_lengths = np.array(
            [1, 4, 1, 1, 1, 1], dtype=np.int32
        )
        id_score_list_values_values_values_ids = np.array(
            [10040, 10041, 10042, 10043, 10044, 10050, 10010, 10020, 10030],
            dtype=np.int64,
        )
        id_score_list_values_values_values_scores = np.array(
            [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], dtype=np.float32
        )
        ws.feed_blob(str(id_list_field.lengths()), id_list_lengths)
        ws.feed_blob(str(id_list_field["values"].keys()), id_list_values_keys)
        ws.feed_blob(
            str(id_list_field["values"]["values"].lengths()),
            id_list_values_values_lengths,
        )
        ws.feed_blob(
            str(id_list_field["values"]["values"]["values"]()),
            id_list_values_values_values,
        )
        ws.feed_blob(str(id_score_list_field.lengths()), id_score_list_lengths)
        ws.feed_blob(
            str(id_score_list_field["values"].keys()), id_score_list_values_keys
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"].lengths()),
            id_score_list_values_values_lengths,
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"]["values"]["keys"]()),
            id_score_list_values_values_values_ids,
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"]["values"]["values"]()),
            id_score_list_values_values_values_scores,
        )
        return {
            "id_list": {
                "lengths": id_list_lengths,
                "keys": id_list_values_keys,
                "values": {
                    # Fixed: previously mapped "lengths" to the values array
                    # (copy-paste bug).
                    "lengths": id_list_values_values_lengths,
                    "values": id_list_values_values_values,
                },
            },
            "id_score_list": {
                "lengths": id_score_list_lengths,
                "keys": id_score_list_values_keys,
                "values": {
                    "lengths": id_score_list_values_values_lengths,
                    "ids": id_score_list_values_values_values_ids,
                    "scores": id_score_list_values_values_values_scores,
                },
            },
        }

    def expected_state_sequence_features(self):
        """Expected extracted sequence features for setup_state_sequence_features."""
        return SequenceFeatures(
            id_only=IdOnlySequence(
                id_features=ABIdFeatures(
                    a_id=np.array([[1, 2], [0, 0], [0, 0]], dtype=np.int64),
                    b_id=np.array([[2, 1], [0, 0], [0, 0]], dtype=np.int64),
                ),
                float_features=None,
            ),
            id_and_float=IdAndFloatSequence(
                id_features=CIdFeatures(
                    c_id=np.array([[1, 0, 0], [3, 4, 5], [0, 0, 0]], dtype=np.int64)
                ),
                float_features=np.array(
                    [
                        [[0.1], [0.0], [0.0]],
                        [[0.3], [0.4], [0.5]],
                        [[0.0], [0.0], [0.0]],
                    ],
                    dtype=np.float32,
                ),
            ),
            float_only=FloatOnlySequence(
                id_features=None,
                float_features=np.array(
                    [
                        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                        [[0.7, 0.8, 0.9], [0.0, 0.0, 0.0]],
                    ],
                    dtype=np.float32,
                ),
            ),
        )

    def setup_next_state_sequence_features(
        self, ws, id_list_field, id_score_list_field
    ):
        """Feed next-state sequence blobs; rows are a rotation of the state ones."""
        # id_list
        id_list_lengths = np.array([0, 3, 2], dtype=np.int32)
        id_list_values_keys = np.array([2002, 2003, 2004, 2004, 2005], dtype=np.int64)
        id_list_values_values_lengths = np.array([2, 2, 1, 4, 1], dtype=np.int32)
        id_list_values_values_values = np.array(
            [20020, 20021, 20030, 20031, 20040, 20041, 20042, 20043, 20044, 20050],
            dtype=np.int64,
        )
        # id_score_list
        id_score_list_lengths = np.array([3, 1, 2], dtype=np.int32)
        id_score_list_values_keys = np.array(
            [1001, 1002, 1003, 1004, 1004, 1005], dtype=np.int64
        )
        id_score_list_values_values_lengths = np.array(
            [1, 1, 1, 1, 4, 1], dtype=np.int32
        )
        id_score_list_values_values_values_ids = np.array(
            [10010, 10020, 10030, 10040, 10041, 10042, 10043, 10044, 10050],
            dtype=np.int64,
        )
        id_score_list_values_values_values_scores = np.array(
            [0.7, 0.8, 0.9, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=np.float32
        )
        ws.feed_blob(str(id_list_field.lengths()), id_list_lengths)
        ws.feed_blob(str(id_list_field["values"].keys()), id_list_values_keys)
        ws.feed_blob(
            str(id_list_field["values"]["values"].lengths()),
            id_list_values_values_lengths,
        )
        ws.feed_blob(
            str(id_list_field["values"]["values"]["values"]()),
            id_list_values_values_values,
        )
        ws.feed_blob(str(id_score_list_field.lengths()), id_score_list_lengths)
        ws.feed_blob(
            str(id_score_list_field["values"].keys()), id_score_list_values_keys
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"].lengths()),
            id_score_list_values_values_lengths,
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"]["values"]["keys"]()),
            id_score_list_values_values_values_ids,
        )
        ws.feed_blob(
            str(id_score_list_field["values"]["values"]["values"]["values"]()),
            id_score_list_values_values_values_scores,
        )
        return {
            "id_list": {
                "lengths": id_list_lengths,
                "keys": id_list_values_keys,
                "values": {
                    # Fixed: previously mapped "lengths" to the values array
                    # (copy-paste bug).
                    "lengths": id_list_values_values_lengths,
                    "values": id_list_values_values_values,
                },
            },
            "id_score_list": {
                "lengths": id_score_list_lengths,
                "keys": id_score_list_values_keys,
                "values": {
                    "lengths": id_score_list_values_values_lengths,
                    "ids": id_score_list_values_values_values_ids,
                    "scores": id_score_list_values_values_values_scores,
                },
            },
        }

    def expected_next_state_sequence_features(self):
        """Expected extraction for setup_next_state_sequence_features."""
        return SequenceFeatures(
            id_only=IdOnlySequence(
                id_features=ABIdFeatures(
                    a_id=np.array([[0, 0], [1, 2], [0, 0]], dtype=np.int64),
                    b_id=np.array([[0, 0], [2, 1], [0, 0]], dtype=np.int64),
                ),
                float_features=None,
            ),
            id_and_float=IdAndFloatSequence(
                id_features=CIdFeatures(
                    c_id=np.array([[0, 0, 0], [1, 0, 0], [3, 4, 5]], dtype=np.int64)
                ),
                float_features=np.array(
                    [
                        [[0.0], [0.0], [0.0]],
                        [[0.1], [0.0], [0.0]],
                        [[0.3], [0.4], [0.5]],
                    ],
                    dtype=np.float32,
                ),
            ),
            float_only=FloatOnlySequence(
                id_features=None,
                float_features=np.array(
                    [
                        [[0.7, 0.8, 0.9], [0.0, 0.0, 0.0]],
                        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
                    ],
                    dtype=np.float32,
                ),
            ),
        )

    def setup_action(self, ws, field):
        """Feed discrete action indices for 3 rows."""
        action = np.array([1, 0, 1], dtype=np.int64)
        ws.feed_blob(str(field()), action)
        return action

    def setup_next_action(self, ws, field):
        """Feed discrete next-action indices for 3 rows."""
        action = np.array([0, 1, 1], dtype=np.int64)
        ws.feed_blob(str(field()), action)
        return action

    def setup_possible_actions_mask(self, ws, field):
        """Feed a 2-actions-per-row availability mask."""
        lengths = np.array([2, 2, 2], dtype=np.int32)
        actions_mask = np.array([0, 1, 1, 1, 0, 1], dtype=np.int64)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"]()), actions_mask)
        return lengths, actions_mask

    def setup_possible_next_actions_mask(self, ws, field):
        """Feed a 2-actions-per-row next-step availability mask."""
        lengths = np.array([2, 2, 2], dtype=np.int32)
        actions_mask = np.array([1, 1, 1, 0, 0, 0], dtype=np.int64)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"]()), actions_mask)
        return lengths, actions_mask

    def setup_action_features(self, ws, field):
        """Feed a sparse parametric-action feature map (feature 14 is unknown)."""
        lengths = np.array([2, 4, 2], dtype=np.int32)
        keys = np.array([11, 12, 14, 11, 12, 13, 13, 12], dtype=np.int64)
        values = np.arange(20, 28).astype(np.float32)
        ws.feed_blob(str(field.lengths()), lengths)
        ws.feed_blob(str(field.keys()), keys)
        ws.feed_blob(str(field.values()), values)
        return lengths, keys, values

    def expected_action_features(self, normalize):
        """Dense form of setup_action_features."""
        # Feature order: 12, 11, 13
        dense = np.array(
            [[21, 20, MISSING_VALUE], [24, 23, 25], [27, MISSING_VALUE, 26]],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [12, 11, 13], self.get_action_normalization_parameters()
            )
        return dense

    def setup_next_action_features(self, ws, field):
        """Feed a sparse parametric next-action feature map."""
        lengths = np.array([4, 2, 2], dtype=np.int32)
        keys = np.array([11, 12, 14, 13, 12, 13, 11, 13], dtype=np.int64)
        values = np.arange(30, 38).astype(np.float32)
        ws.feed_blob(str(field.lengths()), lengths)
        ws.feed_blob(str(field.keys()), keys)
        ws.feed_blob(str(field.values()), values)
        return lengths, keys, values

    def expected_next_action_features(self, normalize):
        """Dense form of setup_next_action_features."""
        # Feature order: 12, 11, 13
        dense = np.array(
            [[31, 30, 33], [34, MISSING_VALUE, 35], [MISSING_VALUE, 36, 37]],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [12, 11, 13], self.get_action_normalization_parameters()
            )
        return dense

    def setup_possible_actions_features(self, ws, field):
        """Feed nested sparse features for each possible action (2 per row)."""
        lengths = np.array([2, 2, 2], dtype=np.int32)
        values_lengths = np.array([1, 0, 2, 3, 0, 0], dtype=np.int32)
        keys = np.array([11, 12, 14, 11, 13, 12], dtype=np.int64)
        values = np.arange(50, 56).astype(np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"].lengths()), values_lengths)
        ws.feed_blob(str(field["values"].keys()), keys)
        ws.feed_blob(str(field["values"].values()), values)
        return lengths, values_lengths, keys, values

    def expected_possible_actions_features(self, normalize):
        """Dense form of setup_possible_actions_features, one row per action."""
        # Feature order: 12, 11, 13
        dense = np.array(
            [
                [MISSING_VALUE, 50, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [51, MISSING_VALUE, MISSING_VALUE],
                [55, 53, 54],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [12, 11, 13], self.get_action_normalization_parameters()
            )
        return dense

    def setup_possible_next_actions_features(self, ws, field):
        """Feed nested sparse features for each possible next action."""
        lengths = np.array([2, 2, 2], dtype=np.int32)
        values_lengths = np.array([1, 0, 2, 3, 0, 0], dtype=np.int32)
        keys = np.array([11, 12, 14, 11, 13, 12], dtype=np.int64)
        values = np.arange(40, 46).astype(np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"].lengths()), values_lengths)
        ws.feed_blob(str(field["values"].keys()), keys)
        ws.feed_blob(str(field["values"].values()), values)
        return lengths, values_lengths, keys, values

    def expected_possible_next_actions_features(self, normalize):
        """Dense form of setup_possible_next_actions_features."""
        # Feature order: 12, 11, 13
        dense = np.array(
            [
                [MISSING_VALUE, 40, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [41, MISSING_VALUE, MISSING_VALUE],
                [45, 43, 44],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [12, 11, 13], self.get_action_normalization_parameters()
            )
        return dense

    def setup_reward(self, ws, field):
        """Feed per-row rewards."""
        reward = np.array([0.5, 0.6, 0.7], dtype=np.float32)
        ws.feed_blob(str(field()), reward)
        return reward

    def setup_not_terminal(self, ws, field):
        """Feed the not-terminal flag (all rows non-terminal)."""
        not_terminal = np.array([1, 1, 1], dtype=np.int32)
        ws.feed_blob(str(field()), not_terminal)
        return not_terminal

    def setup_step(self, ws, field):
        """Feed the multi-step horizon per row."""
        step = np.array([1, 2, 3], dtype=np.int32)
        ws.feed_blob(str(field()), step)
        return step

    def setup_time_diff(self, ws, field):
        """Feed per-row time deltas."""
        time_diff = np.array([1, 1, 1], dtype=np.int32)
        ws.feed_blob(str(field()), time_diff)
        return time_diff

    def setup_mdp_id(self, ws, field):
        """Feed per-row MDP ids (byte strings)."""
        # np.bytes_ is the canonical spelling; np.string_ was an alias of it
        # and is removed in NumPy 2.0.
        mdp_id = np.array(["1", "2", "3"], dtype=np.bytes_)
        ws.feed_blob(str(field()), mdp_id)
        return mdp_id

    def setup_seq_num(self, ws, field):
        """Feed per-row sequence numbers within each MDP."""
        seq_num = np.array([1, 1, 2], dtype=np.int32)
        ws.feed_blob(str(field()), seq_num)
        return seq_num

    def create_ws_and_net(self, extractor):
        """Build the extractor nets, run init, and register input blobs."""
        net, init_net = extractor.create_net()
        ws = workspace.Workspace()
        ws.create_net(init_net)
        ws.run(init_net)
        for b in net.input_record().field_blobs():
            ws.create_blob(str(b))
        ws.create_net(net)
        return ws, net

    def check_create_net_spec(
        self, extractor, expected_input_record, expected_output_record
    ):
        """Verify the created net's wiring and input/output schemas."""
        net, init_net = extractor.create_net()
        # First, check that all outputs of init_net are used in net
        for b in init_net.external_outputs:
            self.assertTrue(net.is_external_input(b))
        # Second, check that input and output records are set
        input_record = net.input_record()
        output_record = net.output_record()
        self.assertIsNotNone(input_record)
        self.assertIsNotNone(output_record)
        # Third, check that the fields match what is expected
        self.assertEqual(
            set(expected_input_record.field_names()), set(input_record.field_names())
        )
        # Output must match positionally since it's used by exporting
        self.assertEqual(
            expected_output_record.field_names(), output_record.field_names()
        )
class TestWorldModelFeatureExtractor(FeatureExtractorTestBase):
    """Tests WorldModelFeatureExtractor.

    Overrides the base fixtures to wrap each input in an outer list of
    length 1, so extracted tensors gain a leading batch dimension of 1
    over SEQ_LEN steps.
    """

    # Trajectory length of each (single-element) batch.
    SEQ_LEN = 3

    def expected_state_features(self, normalize):
        """Base expectation reshaped to (batch=1, seq, features)."""
        dense = super(TestWorldModelFeatureExtractor, self).expected_state_features(
            normalize
        )
        dense = dense.reshape((1, dense.shape[0], dense.shape[1]))
        return dense

    def expected_next_state_features(self, normalize):
        """Base expectation reshaped to (batch=1, seq, features)."""
        dense = super(
            TestWorldModelFeatureExtractor, self
        ).expected_next_state_features(normalize)
        dense = dense.reshape((1, dense.shape[0], dense.shape[1]))
        return dense

    def expected_action_features(self, normalize):
        """Base expectation reshaped to (batch=1, seq, features)."""
        dense = super(TestWorldModelFeatureExtractor, self).expected_action_features(
            normalize
        )
        dense = dense.reshape((1, dense.shape[0], dense.shape[1]))
        return dense

    def create_extra_input_record(self, net):
        """Extend the net's input record with a per-step reward list."""
        return net.input_record() + schema.NewRecord(
            net, schema.Struct(("reward", schema.List(schema.Scalar())))
        )

    def setup_state_features(self, ws, field):
        """Feed the base sparse state map nested under a length-1 outer list."""
        lengths = np.array([1], dtype=np.int32)
        values_lengths = np.array([3, 0, 5], dtype=np.int32)
        keys = np.array([2, 1, 9, 1, 2, 3, 4, 5], dtype=np.int64)
        values = np.arange(8).astype(np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"].lengths()), values_lengths)
        ws.feed_blob(str(field["values"].keys()), keys)
        ws.feed_blob(str(field["values"].values()), values)
        return lengths, values_lengths, keys, values

    def setup_next_state_features(self, ws, field):
        """Feed the base sparse next-state map nested under a length-1 list."""
        lengths = np.array([1], dtype=np.int32)
        values_lengths = np.array([2, 2, 4], dtype=np.int32)
        keys = np.array([2, 1, 9, 1, 2, 3, 4, 5], dtype=np.int64)
        values = np.arange(10, 18).astype(np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"].lengths()), values_lengths)
        ws.feed_blob(str(field["values"].keys()), keys)
        ws.feed_blob(str(field["values"].values()), values)
        return lengths, values_lengths, keys, values

    def setup_action_features(self, ws, field):
        """Feed the base sparse action map nested under a length-1 list."""
        lengths = np.array([1], dtype=np.int32)
        values_lengths = np.array([2, 4, 2], dtype=np.int32)
        keys = np.array([11, 12, 14, 11, 12, 13, 13, 12], dtype=np.int64)
        values = np.arange(20, 28).astype(np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"].lengths()), values_lengths)
        ws.feed_blob(str(field["values"].keys()), keys)
        ws.feed_blob(str(field["values"].values()), values)
        return lengths, values_lengths, keys, values

    def setup_action(self, ws, field):
        """Feed a single trajectory of SEQ_LEN discrete actions."""
        lengths = np.array([3], dtype=np.int32)
        action = np.array([1, 0, 1], dtype=np.int64)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"]()), action)
        return lengths, action

    def setup_reward(self, ws, field):
        """Feed a single trajectory of SEQ_LEN rewards."""
        lengths = np.array([3], dtype=np.int32)
        reward = np.array([0.5, 0.6, 0.7], dtype=np.float32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"]()), reward)
        return lengths, reward

    def setup_not_terminal(self, ws, field):
        """Feed a single trajectory of SEQ_LEN not-terminal flags."""
        lengths = np.array([3], dtype=np.int32)
        not_terminal = np.array([1, 1, 1], dtype=np.int32)
        ws.feed_blob(str(field["lengths"]()), lengths)
        ws.feed_blob(str(field["values"]()), not_terminal)
        return lengths, not_terminal

    def test_extract_parametric_action(self):
        self._test_extract_parametric_action(normalize=False)

    def test_extract_parametric_action_normalize(self):
        self._test_extract_parametric_action(normalize=True)

    def _test_extract_parametric_action(self, normalize):
        """Extract a parametric-action world-model batch and check all fields."""
        extractor = WorldModelFeatureExtractor(
            self.SEQ_LEN,
            self.get_state_normalization_parameters(),
            self.get_action_normalization_parameters(),
            normalize=normalize,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        self.setup_action_features(ws, input_record.action)
        _, reward = self.setup_reward(ws, input_record.reward)
        _, not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        npt.assert_array_equal(reward.reshape(1, 3), o.reward.numpy())
        npt.assert_array_equal(not_terminal.reshape(1, 3), o.not_terminal.numpy())
        npt.assert_allclose(
            self.expected_action_features(normalize),
            o.action.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.numpy(),
            rtol=1e-6,
        )

    def test_extract_discrete_action(self):
        self._test_extract_discrete_action(normalize=False)

    def test_extract_discrete_action_normalize(self):
        self._test_extract_discrete_action(normalize=True)

    def _test_extract_discrete_action(self, normalize):
        """Extract a discrete-action batch; actions become one-hot vectors."""
        num_actions = 2
        extractor = WorldModelFeatureExtractor(
            self.SEQ_LEN,
            self.get_state_normalization_parameters(),
            discrete_action_names=["ACT1", "ACT2"],
            normalize=normalize,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        _, action = self.setup_action(ws, input_record.action)
        _, reward = self.setup_reward(ws, input_record.reward)
        _, not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        npt.assert_array_equal(reward.reshape(1, 3), o.reward.numpy())
        npt.assert_array_equal(not_terminal.reshape(1, 3), o.not_terminal.numpy())
        # One-hot comparison: broadcast (1, 3, 1) against action index range.
        npt.assert_array_equal(
            action.reshape(-1, 3, 1) == np.arange(num_actions),
            o.action.float_features.numpy(),
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.numpy(),
            rtol=1e-6,
        )
class TestTrainingFeatureExtractor(FeatureExtractorTestBase):
    def create_extra_input_record(self, net):
        """Extend the net's input record with training-only scalar fields."""
        return net.input_record() + schema.NewRecord(
            net,
            schema.Struct(
                ("reward", schema.Scalar()),
                ("action_probability", schema.Scalar()),
                ("step", schema.Scalar()),
                ("mdp_id", schema.Scalar()),
                ("sequence_number", schema.Scalar()),
            ),
        )

    def setup_extra_data(self, ws, input_record):
        """Feed behavior-policy action probabilities; returns the ExtraData."""
        extra_data = rlt.ExtraData(
            action_probability=np.array([0.11, 0.21, 0.13], dtype=np.float32)
        )
        ws.feed_blob(
            str(input_record.action_probability()), extra_data.action_probability
        )
        return extra_data
    # Thin wrappers running the max-Q discrete-action case with and without
    # feature normalization.
    def test_extract_max_q_discrete_action(self):
        self._test_extract_max_q_discrete_action(normalize=False)

    def test_extract_max_q_discrete_action_normalize(self):
        self._test_extract_max_q_discrete_action(normalize=True)
    def _test_extract_max_q_discrete_action(self, normalize):
        """Max-Q discrete-action extraction with possible-action masks and
        multi-step returns; checks every extracted training-input field."""
        num_actions = 2
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            include_possible_actions=True,
            normalize=normalize,
            max_num_actions=num_actions,
            multi_steps=3,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        action = self.setup_action(ws, input_record.action)
        next_action = self.setup_next_action(ws, input_record.next_action)
        possible_actions_mask = self.setup_possible_actions_mask(
            ws, input_record.possible_actions_mask
        )
        possible_next_actions_mask = self.setup_possible_next_actions_mask(
            ws, input_record.possible_next_actions_mask
        )
        reward = self.setup_reward(ws, input_record.reward)
        not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        time_diff = self.setup_time_diff(ws, input_record.time_diff)
        mdp_id = self.setup_mdp_id(ws, input_record.mdp_id)
        sequence_number = self.setup_seq_num(ws, input_record.sequence_number)
        step = self.setup_step(ws, input_record.step)
        extra_data = self.setup_extra_data(ws, input_record)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        e = res.extras
        # Scalar fields come back as column vectors.
        npt.assert_array_equal(reward.reshape(-1, 1), o.reward.numpy())
        npt.assert_array_equal(time_diff.reshape(-1, 1), o.time_diff.numpy())
        npt.assert_array_equal(not_terminal.reshape(-1, 1), o.not_terminal.numpy())
        npt.assert_array_equal(step.reshape(-1, 1), o.step.numpy())
        npt.assert_array_equal(
            sequence_number.reshape(-1, 1), e.sequence_number.numpy()
        )
        npt.assert_array_equal(mdp_id.reshape(-1, 1), e.mdp_id)
        npt.assert_array_equal(
            extra_data.action_probability.reshape(-1, 1),
            res.extras.action_probability.numpy(),
        )
        # Discrete actions are extracted as one-hot vectors.
        npt.assert_array_equal(
            action.reshape(-1, 1) == np.arange(num_actions), o.action.numpy()
        )
        npt.assert_array_equal(
            next_action.reshape(-1, 1) == np.arange(num_actions), o.next_action.numpy()
        )
        npt.assert_array_equal(
            possible_actions_mask[1], o.possible_actions_mask.numpy().flatten()
        )
        npt.assert_array_equal(
            possible_next_actions_mask[1],
            o.possible_next_actions_mask.numpy().flatten(),
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.float_features.numpy(),
            rtol=1e-6,
        )
    def test_extract_max_q_discrete_action_with_sequence(self):
        """Max-Q discrete extraction with id-list / id-score-list sequence
        features; verifies dense fields plus state and next-state sequence
        feature groups (id_only, id_and_float, float_only)."""
        normalize = True
        num_actions = 2
        model_feature_config = rlt.ModelFeatureConfig(
            id_mapping_config=self.id_mapping_config(),
            sequence_features_type=SequenceFeatures,
            float_feature_infos=[],
        )
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            include_possible_actions=True,
            normalize=normalize,
            max_num_actions=num_actions,
            model_feature_config=model_feature_config,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        self.setup_state_sequence_features(
            ws,
            input_record.state_id_list_features,
            input_record.state_id_score_list_features,
        )
        self.setup_next_state_sequence_features(
            ws,
            input_record.next_state_id_list_features,
            input_record.next_state_id_score_list_features,
        )
        action = self.setup_action(ws, input_record.action)
        next_action = self.setup_next_action(ws, input_record.next_action)
        possible_actions_mask = self.setup_possible_actions_mask(
            ws, input_record.possible_actions_mask
        )
        possible_next_actions_mask = self.setup_possible_next_actions_mask(
            ws, input_record.possible_next_actions_mask
        )
        reward = self.setup_reward(ws, input_record.reward)
        not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        time_diff = self.setup_time_diff(ws, input_record.time_diff)
        mdp_id = self.setup_mdp_id(ws, input_record.mdp_id)
        sequence_number = self.setup_seq_num(ws, input_record.sequence_number)
        extra_data = self.setup_extra_data(ws, input_record)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        e = res.extras
        npt.assert_array_equal(reward.reshape(-1, 1), o.reward.numpy())
        npt.assert_array_equal(time_diff.reshape(-1, 1), o.time_diff.numpy())
        npt.assert_array_equal(not_terminal.reshape(-1, 1), o.not_terminal.numpy())
        npt.assert_array_equal(
            sequence_number.reshape(-1, 1), e.sequence_number.numpy()
        )
        npt.assert_array_equal(mdp_id.reshape(-1, 1), e.mdp_id)
        npt.assert_array_equal(
            extra_data.action_probability.reshape(-1, 1),
            res.extras.action_probability.numpy(),
        )
        npt.assert_array_equal(
            action.reshape(-1, 1) == np.arange(num_actions), o.action.numpy()
        )
        npt.assert_array_equal(
            next_action.reshape(-1, 1) == np.arange(num_actions), o.next_action.numpy()
        )
        npt.assert_array_equal(
            possible_actions_mask[1], o.possible_actions_mask.numpy().flatten()
        )
        npt.assert_array_equal(
            possible_next_actions_mask[1],
            o.possible_next_actions_mask.numpy().flatten(),
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.float_features.numpy(),
            rtol=1e-6,
        )
        # Check state sequence features
        expected_state_sequence_features = self.expected_state_sequence_features()
        id_only = o.state.sequence_features.id_only
        expected_id_only = expected_state_sequence_features.id_only
        self.assertEqual(expected_id_only.float_features, id_only.float_features)
        npt.assert_array_equal(
            expected_id_only.id_features.a_id, id_only.id_features.a_id
        )
        npt.assert_array_equal(
            expected_id_only.id_features.b_id, id_only.id_features.b_id
        )
        id_and_float = o.state.sequence_features.id_and_float
        expected_id_and_float = expected_state_sequence_features.id_and_float
        npt.assert_array_equal(
            expected_id_and_float.float_features, id_and_float.float_features
        )
        npt.assert_array_equal(
            expected_id_and_float.id_features.c_id, id_and_float.id_features.c_id
        )
        float_only = o.state.sequence_features.float_only
        expected_float_only = expected_state_sequence_features.float_only
        npt.assert_array_equal(
            expected_float_only.float_features, float_only.float_features
        )
        self.assertEqual(expected_float_only.id_features, float_only.id_features)
        # Check next state sequence features
        expected_next_state_sequence_features = (
            self.expected_next_state_sequence_features()
        )
        id_only = o.next_state.sequence_features.id_only
        expected_id_only = expected_next_state_sequence_features.id_only
        self.assertEqual(expected_id_only.float_features, id_only.float_features)
        npt.assert_array_equal(
            expected_id_only.id_features.a_id, id_only.id_features.a_id
        )
        npt.assert_array_equal(
            expected_id_only.id_features.b_id, id_only.id_features.b_id
        )
        id_and_float = o.next_state.sequence_features.id_and_float
        expected_id_and_float = expected_next_state_sequence_features.id_and_float
        npt.assert_array_equal(
            expected_id_and_float.float_features, id_and_float.float_features
        )
        npt.assert_array_equal(
            expected_id_and_float.id_features.c_id, id_and_float.id_features.c_id
        )
        float_only = o.next_state.sequence_features.float_only
        expected_float_only = expected_next_state_sequence_features.float_only
        npt.assert_array_equal(
            expected_float_only.float_features, float_only.float_features
        )
        self.assertEqual(expected_float_only.id_features, float_only.id_features)
    # Thin wrappers running the SARSA discrete-action case with and without
    # feature normalization.
    def test_extract_sarsa_discrete_action(self):
        self._test_extract_sarsa_discrete_action(normalize=False)

    def test_extract_sarsa_discrete_action_normalize(self):
        self._test_extract_sarsa_discrete_action(normalize=True)
    def _test_extract_sarsa_discrete_action(self, normalize):
        """SARSA discrete-action extraction (no possible-action masks);
        checks extracted training-input and extras fields."""
        num_actions = 2
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            include_possible_actions=False,
            normalize=normalize,
            max_num_actions=num_actions,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        action = self.setup_action(ws, input_record.action)
        next_action = self.setup_next_action(ws, input_record.next_action)
        reward = self.setup_reward(ws, input_record.reward)
        not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        time_diff = self.setup_time_diff(ws, input_record.time_diff)
        mdp_id = self.setup_mdp_id(ws, input_record.mdp_id)
        sequence_number = self.setup_seq_num(ws, input_record.sequence_number)
        extra_data = self.setup_extra_data(ws, input_record)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        e = res.extras
        npt.assert_array_equal(reward.reshape(-1, 1), o.reward.numpy())
        npt.assert_array_equal(time_diff.reshape(-1, 1), o.time_diff.numpy())
        npt.assert_array_equal(not_terminal.reshape(-1, 1), o.not_terminal.numpy())
        npt.assert_array_equal(
            sequence_number.reshape(-1, 1), e.sequence_number.numpy()
        )
        npt.assert_array_equal(mdp_id.reshape(-1, 1), e.mdp_id)
        npt.assert_array_equal(
            extra_data.action_probability.reshape(-1, 1),
            res.extras.action_probability.numpy(),
        )
        # Discrete actions are extracted as one-hot vectors.
        npt.assert_array_equal(
            action.reshape(-1, 1) == np.arange(num_actions), o.action.numpy()
        )
        npt.assert_array_equal(
            next_action.reshape(-1, 1) == np.arange(num_actions), o.next_action.numpy()
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.float_features.numpy(),
            rtol=1e-6,
        )
    # Thin wrappers running the max-Q parametric-action case with and without
    # feature normalization.
    def test_extract_max_q_parametric_action(self):
        self._test_extract_max_q_parametric_action(normalize=False)

    def test_extract_max_q_parametric_action_normalize(self):
        self._test_extract_max_q_parametric_action(normalize=True)
    def _test_extract_max_q_parametric_action(self, normalize):
        """Max-Q parametric-action extraction; checks dense action features,
        possible-action features/masks, and tiled next-state features."""
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            action_normalization_parameters=self.get_action_normalization_parameters(),
            include_possible_actions=True,
            normalize=normalize,
            max_num_actions=2,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        self.setup_action_features(ws, input_record.action)
        self.setup_next_action_features(ws, input_record.next_action)
        self.setup_possible_actions_features(ws, input_record.possible_actions)
        possible_actions_mask = self.setup_possible_actions_mask(
            ws, input_record.possible_actions_mask
        )
        self.setup_possible_next_actions_features(
            ws, input_record.possible_next_actions
        )
        possible_next_actions_mask = self.setup_possible_next_actions_mask(
            ws, input_record.possible_next_actions_mask
        )
        reward = self.setup_reward(ws, input_record.reward)
        not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        time_diff = self.setup_time_diff(ws, input_record.time_diff)
        mdp_id = self.setup_mdp_id(ws, input_record.mdp_id)
        sequence_number = self.setup_seq_num(ws, input_record.sequence_number)
        extra_data = self.setup_extra_data(ws, input_record)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        e = res.extras
        npt.assert_array_equal(reward.reshape(-1, 1), o.reward.numpy())
        npt.assert_array_equal(time_diff.reshape(-1, 1), o.time_diff.numpy())
        npt.assert_array_equal(not_terminal.reshape(-1, 1), o.not_terminal.numpy())
        npt.assert_array_equal(
            sequence_number.reshape(-1, 1), e.sequence_number.numpy()
        )
        npt.assert_array_equal(mdp_id.reshape(-1, 1), e.mdp_id)
        npt.assert_array_equal(
            extra_data.action_probability.reshape(-1, 1),
            res.extras.action_probability.numpy(),
        )
        npt.assert_allclose(
            self.expected_action_features(normalize),
            o.action.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_possible_actions_features(normalize),
            o.possible_actions.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_array_equal(
            possible_actions_mask[1], o.possible_actions_mask.numpy().flatten()
        )
        npt.assert_allclose(
            self.expected_possible_next_actions_features(normalize),
            o.possible_next_actions.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_array_equal(
            possible_next_actions_mask[1],
            o.possible_next_actions_mask.numpy().flatten(),
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        # Next state is tiled once per possible action for max-Q evaluation.
        npt.assert_allclose(
            self.expected_tiled_next_state_features(normalize),
            o.tiled_next_state.float_features.numpy(),
            rtol=1e-6,
        )
    def test_extract_sarsa_parametric_action(self):
        """SARSA parametric-action extraction with raw (unnormalized) values."""
        self._test_extract_sarsa_parametric_action(normalize=False)
    def test_extract_sarsa_parametric_action_normalize(self):
        """SARSA parametric-action extraction with normalized values."""
        self._test_extract_sarsa_parametric_action(normalize=True)
    def _test_extract_sarsa_parametric_action(self, normalize):
        """End-to-end extraction for the SARSA parametric-action case
        (``include_possible_actions=False``): feed raw blobs into the
        workspace, run the extractor net, and compare every extracted
        tensor against the expected values (normalized when
        ``normalize`` is True)."""
        extractor = TrainingFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            action_normalization_parameters=self.get_action_normalization_parameters(),
            include_possible_actions=False,
            normalize=normalize,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = self.create_extra_input_record(net)
        self.setup_state_features(ws, input_record.state_features)
        self.setup_next_state_features(ws, input_record.next_state_features)
        self.setup_action_features(ws, input_record.action)
        self.setup_next_action_features(ws, input_record.next_action)
        reward = self.setup_reward(ws, input_record.reward)
        not_terminal = self.setup_not_terminal(ws, input_record.not_terminal)
        time_diff = self.setup_time_diff(ws, input_record.time_diff)
        mdp_id = self.setup_mdp_id(ws, input_record.mdp_id)
        sequence_number = self.setup_seq_num(ws, input_record.sequence_number)
        extra_data = self.setup_extra_data(ws, input_record)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        o = res.training_input
        e = res.extras
        # Scalar per-example fields come back as column vectors, hence the
        # reshape(-1, 1) on the expected arrays.
        npt.assert_array_equal(reward.reshape(-1, 1), o.reward.numpy())
        npt.assert_array_equal(time_diff.reshape(-1, 1), o.time_diff.numpy())
        npt.assert_array_equal(not_terminal.reshape(-1, 1), o.not_terminal.numpy())
        npt.assert_array_equal(
            sequence_number.reshape(-1, 1), e.sequence_number.numpy()
        )
        npt.assert_array_equal(mdp_id.reshape(-1, 1), e.mdp_id)
        npt.assert_array_equal(
            extra_data.action_probability.reshape(-1, 1),
            res.extras.action_probability.numpy(),
        )
        # Dense float features are compared with a tolerance because
        # normalization involves float arithmetic.
        npt.assert_allclose(
            self.expected_action_features(normalize),
            o.action.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_action_features(normalize),
            o.next_action.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            o.state.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_next_state_features(normalize),
            o.next_state.float_features.numpy(),
            rtol=1e-6,
        )
def test_create_net_max_q_discrete_action(self):
extractor = TrainingFeatureExtractor(
state_normalization_parameters=self.get_state_normalization_parameters(),
include_possible_actions=True,
max_num_actions=2,
)
expected_input_record = schema.Struct(
("state_features", map_schema()),
("next_state_features", map_schema()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("possible_actions_mask", schema.List(schema.Scalar())),
("possible_next_actions_mask", schema.List(schema.Scalar())),
("time_diff", schema.Scalar()),
)
expected_output_record = schema.Struct(
("state_features", schema.Scalar()),
("next_state_features", schema.Scalar()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
("possible_actions_mask", schema.Scalar()),
("possible_next_actions_mask", schema.Scalar()),
)
self.check_create_net_spec(
extractor, expected_input_record, expected_output_record
)
def test_create_net_sarsa_discrete_action(self):
extractor = TrainingFeatureExtractor(
state_normalization_parameters=self.get_state_normalization_parameters(),
include_possible_actions=False,
max_num_actions=2,
)
expected_input_record = schema.Struct(
("state_features", map_schema()),
("next_state_features", map_schema()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
)
expected_output_record = schema.Struct(
("state_features", schema.Scalar()),
("next_state_features", schema.Scalar()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
)
self.check_create_net_spec(
extractor, expected_input_record, expected_output_record
)
    def test_create_net_max_q_parametric_action(self):
        """Schema check without normalization."""
        self._test_create_net_max_q_parametric_action(normalize=False)
    def test_create_net_max_q_parametric_action_normalize(self):
        """Schema check with normalization."""
        self._test_create_net_max_q_parametric_action(normalize=True)
def _test_create_net_max_q_parametric_action(self, normalize):
extractor = TrainingFeatureExtractor(
state_normalization_parameters=self.get_state_normalization_parameters(),
action_normalization_parameters=self.get_action_normalization_parameters(),
include_possible_actions=True,
normalize=normalize,
max_num_actions=2,
)
expected_input_record = schema.Struct(
("state_features", map_schema()),
("next_state_features", map_schema()),
("action", map_schema()),
("next_action", map_schema()),
("not_terminal", schema.Scalar()),
("possible_actions", schema.List(map_schema())),
("possible_actions_mask", schema.List(schema.Scalar())),
("possible_next_actions", schema.List(map_schema())),
("possible_next_actions_mask", schema.List(schema.Scalar())),
("time_diff", schema.Scalar()),
)
expected_output_record = schema.Struct(
("state_features", schema.Scalar()),
("next_state_features", schema.Scalar()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
("possible_actions_mask", schema.Scalar()),
("possible_next_actions_mask", schema.Scalar()),
("possible_actions", schema.Scalar()),
("possible_next_actions", schema.Scalar()),
)
self.check_create_net_spec(
extractor, expected_input_record, expected_output_record
)
    def test_create_net_sarsa_parametric_action(self):
        """Schema check without normalization."""
        self._test_create_net_sarsa_parametric_action(normalize=False)
    def test_create_net_sarsa_parametric_action_normalize(self):
        """Schema check with normalization."""
        self._test_create_net_sarsa_parametric_action(normalize=True)
def _test_create_net_sarsa_parametric_action(self, normalize):
extractor = TrainingFeatureExtractor(
state_normalization_parameters=self.get_state_normalization_parameters(),
action_normalization_parameters=self.get_action_normalization_parameters(),
include_possible_actions=False,
normalize=normalize,
max_num_actions=2,
)
expected_input_record = schema.Struct(
("state_features", map_schema()),
("next_state_features", map_schema()),
("action", map_schema()),
("next_action", map_schema()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
)
expected_output_record = schema.Struct(
("state_features", schema.Scalar()),
("next_state_features", schema.Scalar()),
("action", schema.Scalar()),
("next_action", schema.Scalar()),
("not_terminal", schema.Scalar()),
("time_diff", schema.Scalar()),
)
self.check_create_net_spec(
extractor, expected_input_record, expected_output_record
)
class TestPredictorFeatureExtractor(FeatureExtractorTestBase):
    """Tests for PredictorFeatureExtractor: extraction of state (and
    optionally action) features from one flat sparse float-feature map,
    plus schema checks for the generated nets."""

    def setup_float_features(self, ws, field):
        """Feed a 3-example sparse float-feature map into the workspace
        and return the raw (lengths, keys, values) arrays.

        Lengths are written as sums -- presumably state + action feature
        counts per example (TODO confirm against the key id split).
        """
        lengths = np.array([3 + 2, 0 + 4, 5 + 2], dtype=np.int32)
        keys = np.array(
            [2, 11, 12, 1, 14, 11, 12, 13, 9, 1, 13, 12, 2, 3, 4, 5], dtype=np.int64
        )
        values = np.array(
            [0, 20, 21, 1, 22, 23, 24, 25, 2, 3, 26, 27, 4, 5, 6, 7], dtype=np.float32
        )
        ws.feed_blob(str(field.lengths()), lengths)
        ws.feed_blob(str(field.keys()), keys)
        ws.feed_blob(str(field.values()), values)
        return lengths, keys, values

    def expected_state_features(self, normalize):
        """Dense state matrix expected from setup_float_features; ids
        absent in an example become MISSING_VALUE."""
        # Feature order: 1, 3, 2, 4
        dense = np.array(
            [
                [1, MISSING_VALUE, 0, MISSING_VALUE],
                [MISSING_VALUE, MISSING_VALUE, MISSING_VALUE, MISSING_VALUE],
                [3, 5, 4, 6],
            ],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [1, 3, 2, 4], self.get_state_normalization_parameters()
            )
        return dense

    def expected_action_features(self, normalize):
        """Dense action matrix expected from setup_float_features."""
        # Feature order: 12, 11, 13
        dense = np.array(
            [[21, 20, MISSING_VALUE], [24, 23, 25], [27, MISSING_VALUE, 26]],
            dtype=np.float32,
        )
        if normalize:
            dense = NumpyFeatureProcessor.preprocess_array(
                dense, [12, 11, 13], self.get_action_normalization_parameters()
            )
        return dense

    def test_extract_no_action(self):
        """State-only extraction, raw values."""
        self._test_extract_no_action(normalize=False)

    def test_extract_no_action_normalize(self):
        """State-only extraction, normalized values."""
        self._test_extract_no_action(normalize=True)

    def _test_extract_no_action(self, normalize):
        """Feed the float-feature map, run the net, and check the
        extracted state features."""
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            normalize=normalize,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = net.input_record()
        self.setup_float_features(ws, input_record.float_features)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        npt.assert_allclose(
            self.expected_state_features(normalize),
            res.state.float_features.numpy(),
            rtol=1e-6,
        )

    def test_extract_with_sequence(self):
        """Extraction with id-list / id-score-list sequence features in
        addition to the dense state features."""
        model_feature_config = rlt.ModelFeatureConfig(
            id_mapping_config=self.id_mapping_config(),
            sequence_features_type=SequenceFeatures,
            float_feature_infos=[],
        )
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            normalize=True,
            model_feature_config=model_feature_config,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = net.input_record()
        self.setup_state_features(ws, input_record.float_features)
        self.setup_state_sequence_features(
            ws, input_record.id_list_features, input_record.id_score_list_features
        )
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        # Dense features come from the base-class fixture here (note the
        # super() call), not from this class's setup_float_features.
        npt.assert_allclose(
            super().expected_state_features(normalize=True),
            res.state.float_features.numpy(),
            rtol=1e-6,
        )
        # Check state sequence features
        expected_state_sequence_features = self.expected_state_sequence_features()
        id_only = res.state.sequence_features.id_only
        expected_id_only = expected_state_sequence_features.id_only
        self.assertEqual(expected_id_only.float_features, id_only.float_features)
        npt.assert_array_equal(
            expected_id_only.id_features.a_id, id_only.id_features.a_id
        )
        npt.assert_array_equal(
            expected_id_only.id_features.b_id, id_only.id_features.b_id
        )
        id_and_float = res.state.sequence_features.id_and_float
        expected_id_and_float = expected_state_sequence_features.id_and_float
        npt.assert_array_equal(
            expected_id_and_float.float_features, id_and_float.float_features
        )
        npt.assert_array_equal(
            expected_id_and_float.id_features.c_id, id_and_float.id_features.c_id
        )
        float_only = res.state.sequence_features.float_only
        expected_float_only = expected_state_sequence_features.float_only
        npt.assert_array_equal(
            expected_float_only.float_features, float_only.float_features
        )
        self.assertEqual(expected_float_only.id_features, float_only.id_features)

    def test_extract_parametric_action(self):
        """State + action extraction, raw values."""
        self._test_extract_parametric_action(normalize=False)

    def test_extract_parametric_action_normalize(self):
        """State + action extraction, normalized values."""
        self._test_extract_parametric_action(normalize=True)

    def _test_extract_parametric_action(self, normalize):
        """With action normalization parameters supplied, the extractor
        splits the flat feature map into state and action tensors."""
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            action_normalization_parameters=self.get_action_normalization_parameters(),
            normalize=normalize,
        )
        # Setup
        ws, net = self.create_ws_and_net(extractor)
        input_record = net.input_record()
        self.setup_float_features(ws, input_record.float_features)
        # Run
        ws.run(net)
        res = extractor.extract(ws, input_record, net.output_record())
        npt.assert_allclose(
            self.expected_action_features(normalize),
            res.action.float_features.numpy(),
            rtol=1e-6,
        )
        npt.assert_allclose(
            self.expected_state_features(normalize),
            res.state.float_features.numpy(),
            rtol=1e-6,
        )

    def test_create_net_sarsa_no_action(self):
        """Schema check without normalization."""
        self._test_create_net_sarsa_no_action(normalize=False)

    def test_create_net_sarsa_no_action_normalize(self):
        """Schema check with normalization."""
        self._test_create_net_sarsa_no_action(normalize=True)

    def _test_create_net_sarsa_no_action(self, normalize):
        """State-only predictor net: one float-feature input, one state
        output."""
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            normalize=normalize,
        )
        expected_input_record = schema.Struct(("float_features", map_schema()))
        expected_output_record = schema.Struct(
            ("state:float_features", schema.Scalar())
        )
        self.check_create_net_spec(
            extractor, expected_input_record, expected_output_record
        )

    def test_create_net_sarsa_no_action_with_sequence(self):
        """Predictor net with sequence features: id-list and
        id-score-list inputs, one output per sequence feature."""
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            normalize=True,
            model_feature_config=rlt.ModelFeatureConfig(
                id_mapping_config=self.id_mapping_config(),
                sequence_features_type=SequenceFeatures,
                float_feature_infos=[],
            ),
        )
        expected_input_record = schema.Struct(
            ("float_features", map_schema()),
            ("id_list_features", id_list_schema()),
            ("id_score_list_features", id_score_list_schema()),
        )
        expected_output_record = schema.Struct(
            ("state:float_features", schema.Scalar()),
            ("state:sequence_features:id_only:id_features:a_id", schema.Scalar()),
            ("state:sequence_features:id_only:id_features:b_id", schema.Scalar()),
            ("state:sequence_features:id_and_float:id_features:c_id", schema.Scalar()),
            ("state:sequence_features:id_and_float:float_features", schema.Scalar()),
            ("state:sequence_features:float_only:float_features", schema.Scalar()),
        )
        self.check_create_net_spec(
            extractor, expected_input_record, expected_output_record
        )

    def test_create_net_parametric_action(self):
        """Schema check without normalization."""
        self._test_create_net_parametric_action(normalize=False)

    def test_create_net_parametric_action_normalize(self):
        """Schema check with normalization."""
        self._test_create_net_parametric_action(normalize=True)

    def _test_create_net_parametric_action(self, normalize):
        """State + action predictor net: flat input map, state and
        action outputs."""
        extractor = PredictorFeatureExtractor(
            state_normalization_parameters=self.get_state_normalization_parameters(),
            action_normalization_parameters=self.get_action_normalization_parameters(),
            normalize=normalize,
        )
        expected_input_record = schema.Struct(("float_features", map_schema()))
        expected_output_record = schema.Struct(
            ("state:float_features", schema.Scalar()), ("action", schema.Scalar())
        )
        self.check_create_net_spec(
            extractor, expected_input_record, expected_output_record
        )
|
{"hexsha": "62d9e03ebfc781345b411924ee43757b6c4201f3", "size": 62107, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml/rl/test/preprocessing/test_feature_extractor.py", "max_stars_repo_name": "michaeltashman/Horizon", "max_stars_repo_head_hexsha": "ee310b34adeb807bbae379a6e1703d0f725f26a9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-30T06:15:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-30T06:15:20.000Z", "max_issues_repo_path": "ml/rl/test/preprocessing/test_feature_extractor.py", "max_issues_repo_name": "michaeltashman/Horizon", "max_issues_repo_head_hexsha": "ee310b34adeb807bbae379a6e1703d0f725f26a9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml/rl/test/preprocessing/test_feature_extractor.py", "max_forks_repo_name": "michaeltashman/Horizon", "max_forks_repo_head_hexsha": "ee310b34adeb807bbae379a6e1703d0f725f26a9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-05T15:52:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-05T15:52:18.000Z", "avg_line_length": 41.4046666667, "max_line_length": 87, "alphanum_fraction": 0.6215241438, "include": true, "reason": "import numpy", "num_tokens": 13732}
|
import pandas as pd
from os import path
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.ticker as mticker
from cartopy.feature.nightshade import Nightshade
from datetime import datetime
import numpy as np
import shapely.geometry as sgeom
from scipy.ndimage.filters import gaussian_filter
from math import radians, atan2, sin, cos, sqrt
from matplotlib.image import imread
import spiceypy as spice
import main
def GlobePlotter(solarlat, solarlon, lattrace, lontrace):
    """Plot an occultation ground trace on an orthographic Mars globe.

    Args:
        solarlat, solarlon: sub-solar latitude/longitude in degrees,
            forwarded to the (customised) Nightshade feature.
        lattrace, lontrace: sequences of latitudes/longitudes (degrees)
            describing the occultation surface trace.

    Shows the figure interactively; returns nothing.
    """
    # Center the globe on the middle of the trace.
    midlat = np.mean(lattrace)
    midlon = np.mean(lontrace)
    projection = ccrs.Orthographic(midlon, midlat)
    ax = plt.axes(projection=projection)
    # Mars surface texture, shipped next to this module.
    file_location = path.abspath(path.dirname(__file__))
    path_to_pic = file_location + '/images/2k_mars.jpg'
    source_proj = ccrs.PlateCarree()
    ax.imshow(imread(path_to_pic), origin='upper', transform=source_proj,
              extent=[-180, 180, -90, 90])
    # plot the ground feature labels
    df = pd.read_csv('mars.csv', encoding='latin-1')
    df.sort_values(by=["Diameter"], inplace=True, ascending=False)
    minlen = 30  # plot only the largest 30 objects on the surface of mars. Maybe update to the best known
    df = df.head(min(minlen, len(df)))
    for index, row in df.iterrows():
        text = row['Clean_Feature_Name']
        x, y, s = row['Center_Longitude'], row['Center_Latitude'], row['Diameter']
        ax.text(x, y, text, transform=ccrs.PlateCarree(),
                ha='left', va='center', fontsize=8, color='#ebc334')
        ax.scatter(x, y, transform=ccrs.PlateCarree(),
                   s=10, color='#ebc334', edgecolor='None', lw=0)
    # adjusting the gridlines to fit the occtrace location [10 MIGHT BE TOO COARSE]
    # STILL NEED BETTER SOLUTION
    minlat = (np.floor(np.min(lattrace)/10))*10
    maxlat = (np.ceil(np.max(lattrace)/10))*10
    minlon = (np.floor(np.min(lontrace)/10))*10
    maxlon = (np.ceil(np.max(lontrace)/10))*10
    track = sgeom.LineString(zip(lontrace, lattrace))
    gl = ax.gridlines(crs=ccrs.PlateCarree(), linewidth=2,
                      color='k', alpha=0.2, linestyle='--', draw_labels=True)
    gl.top_labels = False
    gl.left_labels = False
    gl.right_labels = False
    gl.xlines = True
    gl.ylocator = mticker.FixedLocator(
        list(np.arange(int(minlat), int(maxlat), 10)))
    gl.xlocator = mticker.FixedLocator(
        list(np.arange(int(minlon), int(maxlon), 10)))
    gl.xlabel_style = {'color': 'k'}
    # <<<< this involves a custom nightshade function, go into the cartopy
    # libs and edit to include lat lon
    ax.add_feature(Nightshade(solarlat, solarlon))
    # Plot the occultation track ONCE, on top of the nightshade.  (Bug fix:
    # the original added the identical geometry twice, which stacked the
    # 0.5-alpha strokes and rendered the track darker than intended.)
    ax.add_geometries([track], source_proj,
                      edgecolor='#C852C8', linewidth=8, alpha=0.5, facecolor='none')
    plt.show()
def f():
    """Demo driver: load the ExoMars-2016 and Mars Express SPICE
    meta-kernels, compute an occultation surface trace at a fixed
    ephemeris time, and plot it on the Mars globe."""
    here = path.abspath(path.dirname(__file__))
    # NOTE(review): here[:-39] strips a machine-specific directory suffix
    # before appending the kernel checkout paths -- verify on other setups.
    PathtoMetaKernel1 = here[:-39] + \
        '/git/exomars2016/kernels/mk/em16_ops.tm'
    PathtoMetaKernel2 = here[:-39] + \
        '/git/mars-express/kernels/mk/MEX_OPS.tm'
    print(PathtoMetaKernel1)
    print(PathtoMetaKernel2)
    spice.furnsh(PathtoMetaKernel1)
    spice.furnsh(PathtoMetaKernel2)
    sv = main.SpiceVariables()
    # Fixed ephemeris time for the demo occultation.
    et = 657605100
    # get an actual occultation trace
    [trace, altTrace] = main.occSurfaceTrace(et, sv)
    #lattrace = np.linspace(10, 40, 10)
    #lontrace = np.linspace(90, 170, 10)
    # print('the starting locations are ', lontrace[1] + ',', lattrace[1])
    # Trace columns: [:, 0] latitude, [:, 1] longitude.
    lattrace = trace[:, 0]
    lontrace = trace[:, 1]
    print('the starting locations are ', lontrace[1], ',', lattrace[1])
    # Sub-solar point hard-coded at (lat=20, lon=10) for the nightshade.
    GlobePlotter(20, 10, lattrace, lontrace)
# Script entry point.
if __name__ == '__main__':
    f()
|
{"hexsha": "e6c6a5a8b83ab4e4449445ff1101893df71b76e9", "size": 3880, "ext": "py", "lang": "Python", "max_stars_repo_path": "freshmapping.py", "max_stars_repo_name": "JacobParrott/OccultationProfiler", "max_stars_repo_head_hexsha": "f537555d44239e29bbba69c83d2413eec14f9c88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-27T16:12:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T12:08:02.000Z", "max_issues_repo_path": "freshmapping.py", "max_issues_repo_name": "JacobParrott/Occultation-Geometry-Analyser", "max_issues_repo_head_hexsha": "f537555d44239e29bbba69c83d2413eec14f9c88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "freshmapping.py", "max_forks_repo_name": "JacobParrott/Occultation-Geometry-Analyser", "max_forks_repo_head_hexsha": "f537555d44239e29bbba69c83d2413eec14f9c88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3076923077, "max_line_length": 112, "alphanum_fraction": 0.6706185567, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1099}
|
# Python 2 script: compare photometric redshifts against spectroscopic
# redshifts from a whitespace-separated catalog (sys.argv[1]) and save a
# histogram (RedshiftErrors.ps) and a scatter plot (RedshiftScatter.ps).
import os
import MySQLdb
import os, sys, anydbm, time
#from config import datb, dataloc
#db = anydbm.open("./db/" + cluster,'c')
import lib
#lib.galextinct(cluster, db)
#db[sys.argv[0][:-3]] = 'Started/' + time.asctime()
# Optional second CLI argument selects the 'spec' mode (only echoed in
# the debug print below).
spectype = 'full'
if len(sys.argv) > 2:
    if sys.argv[2] == 'spec': spectype = 'spec'
listfile = []
import os
import MySQLdb
colnames = ['B','V','R','I','z']
# Emit an SM (SuperMongo) macro driver file for a separate plot.
kfile = open('lk.sm','w')
kfile.write("device postlandfile spec.ps\nerase macro read plotbpz zs\ndevice x11\n")
legendlist = []
varps = []
bl1 = 0
bl2 = 0
# Parse the catalog: skip '#' comment lines.  Column 2 is the photometric
# redshift and column 23 presumably the spectroscopic one (the index was
# changed from 48, see the OLD line below -- confirm against the catalog
# format).
file = open(sys.argv[1],'r').readlines()
results = []
for line in file:
    if line[0] != '#':
        import re
        res = re.split('\s+',line)
        for i in range(len(res)):
            print res[i],i
        #results.append([float(res[2]),float(res[48])]) # OLD
        results.append([float(res[2]),float(res[23])])
#raw_input()
# Normalised error (photz - specz)/(1 + specz) per object.
diff = []
z = []
z_spec = []
print results[0:3]
for line in results:
    diff_val = (line[0] - line[1])/(1 + line[1])
    if 1==1: #(0.48 > float(line[1]) or float(line[1]) > 0.53):
        print line, spectype
        diff.append(diff_val)
        z.append(line[0])
        z_spec.append(line[1])
list = diff[:]
import pylab
from scipy import arange
# Histogram of the normalised errors.
a, b, varp = pylab.hist(diff,bins=arange(-0.2,0.2,0.016))
print a,b,varp
varps.append(varp[0])
#pylab.legend(varps,legendlist)
import scipy
# Clip to |error| < 0.1 before fitting the Gaussian (rejects outliers).
diffB = []
for d in diff:
    if abs(d) < 0.1:
        diffB.append(d)
diff = diffB
list = scipy.array(diff)
mu = list.mean()
sigma = list.std()
print 'mu', mu
print 'sigma', sigma
#print 'std', scipy.std(a)
#print 'mean', scipy.mean(a)
from scipy import stats
#x = scipy.linspace(list.min(), list.max(), 100)
# Overlay the fitted normal pdf, scaled to the histogram counts.
pdf = scipy.stats.norm.pdf(b, mu, sigma)
print 'pdf', pdf
#s = subplot(111)
height = scipy.array(a).max()
pylab.plot(b,len(diff)*pdf/pdf.sum(),'r')
pylab.xlabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.ylabel("Number of Galaxies")
pylab.savefig('RedshiftErrors.ps')
pylab.clf()
# Scatter of error vs. spectroscopic redshift.
# NOTE(review): the x data is z_spec but the axis label says "PhotZ" --
# confirm which was intended.
pylab.scatter(z_spec,diff)
pylab.xlim(0,1)
pylab.ylim(-0.5,0.5)
pylab.ylabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.xlabel("PhotZ")
pylab.savefig('RedshiftScatter.ps')
|
{"hexsha": "5047f4fe4c83b950445227a5fb82de08f547793c", "size": 2197, "ext": "py", "lang": "Python", "max_stars_repo_path": "mkplotsspecme.py", "max_stars_repo_name": "deapplegate/wtgpipeline", "max_stars_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-15T04:01:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-15T04:01:19.000Z", "max_issues_repo_path": "mkplotsspecme.py", "max_issues_repo_name": "deapplegate/wtgpipeline", "max_issues_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-12-11T00:11:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-09T17:05:16.000Z", "max_forks_repo_path": "mkplotsspecme.py", "max_forks_repo_name": "deapplegate/wtgpipeline", "max_forks_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-15T21:19:11.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-12T00:36:35.000Z", "avg_line_length": 22.1919191919, "max_line_length": 85, "alphanum_fraction": 0.6058261265, "include": true, "reason": "import scipy,from scipy", "num_tokens": 695}
|
import json
import csv
import codecs
from collections import Counter, namedtuple
import numpy as np
from correlations import Correlations
#from analyzer import SortedThetas
# Result bundle for clustered thetas: raw theta vectors grouped by label,
# label centroids, per-label counts, and correlation records per theta.
SortedThetas = namedtuple('SortedThetas', 'thetas labels histogram correlations')
class ComplexDecoder(object):
    '''json object hook that turns the project's special complex-number
    encodings back into Python complex scalars / numpy complex arrays.

    Plain json lists stay lists (np.array(result) can rebuild matrices);
    only the tagged objects are converted.  Pass ``decode`` as the
    ``object_hook`` of ``json.loads``.'''

    def complex_number_decoder(self, obj):
        payload = obj["ComplexNumber"]
        return complex(payload["Re"], payload["Im"])

    def complex_array_decoder(self, obj):
        payload = obj["ComplexArray"]
        return np.array(payload["Re"]) + 1j * np.array(payload["Im"])

    def decode(self, obj):
        # Dispatch on the tag key; anything untagged passes through.
        for tag, decoder in (("ComplexNumber", self.complex_number_decoder),
                             ("ComplexArray", self.complex_array_decoder)):
            if tag in obj:
                return decoder(obj)
        return obj
class NumpyEncoder(json.JSONEncoder):
    '''Encodes numpy objects for json.dump: numpy ints/floats become
    Python numbers and real arrays become (nested) lists, whereas
    complex scalars and complex arrays use the special
    {"ComplexNumber": ...} / {"ComplexArray": ...} formats understood
    by ComplexDecoder.'''

    def complex_number_encoder(self, number):
        return {"ComplexNumber": {"Re": number.real, "Im": number.imag}}

    def complex_array_encoder(self, array):
        return {"ComplexArray": {"Re": list(array.real), "Im": list(array.imag)}}

    def default(self, obj):
        # Use the abstract scalar base classes instead of enumerating the
        # concrete sized types: this covers every width, and keeps working
        # on numpy >= 2.0 where the aliases np.float_ / np.complex_ (used
        # by the previous isinstance tuples) were removed.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.complexfloating):
            return self.complex_number_encoder(obj)
        if isinstance(obj, np.ndarray):
            if np.iscomplexobj(obj):
                return self.complex_array_encoder(obj)
            return obj.tolist()
        # Anything else is not serializable -> let the base class raise.
        return json.JSONEncoder.default(self, obj)
class BaseDAO(object):
    '''File persistence base class.

    Resolves file names against a default directory and delegates the
    actual (de)serialization to the _read/_write hooks that subclasses
    override.'''

    def __init__(self, path):
        self._path = path

    def read(self, file_name, path=None):
        '''Open ``path + file_name`` (default directory when ``path`` is
        None) as utf-8 text and return whatever ``_read`` produces.'''
        directory = self._path if path is None else path
        try:
            with codecs.open(directory + file_name, 'r', 'utf-8') as file_object:
                return self._read(file_object)
        except IOError:
            print('Can\'t read the file called {}'.format(directory + file_name))
            raise

    def _read(self, file_object):
        '''Default deserialization: the raw file contents.'''
        return file_object.read()

    def write(self, file_name, content, path=None):
        '''Serialize ``content`` into ``path + file_name`` via ``_write``.'''
        directory = self._path if path is None else path
        try:
            with open(directory + file_name, 'w', newline='') as file_object:
                self._write(file_object, content)
        except IOError:
            print('Can\'t write the file {}'.format(directory + file_name))
            raise

    def _write(self, file_object, content):
        '''Default serialization: write ``content`` verbatim.'''
        return file_object.write(content)
class ThetasDAO(BaseDAO):
    '''DAO for theta records serialized as json with complex-number
    support (NumpyEncoder on write, ComplexDecoder on read).'''

    def __init__(self, path):
        BaseDAO.__init__(self, path)

    def _read(self, file_object):
        return json.loads(file_object.read(),
                          object_hook=ComplexDecoder().decode)

    def _write(self, file_object, content):
        json.dump(content, file_object, ensure_ascii=False, cls=NumpyEncoder)
class SortedThetasDAO(BaseDAO):
    '''DAO for clustered thetas stored as csv: one row per theta vector
    (label, theta components, correlation fields) plus one 'average'
    row per label holding that label's centroid.'''

    def __init__(self, path):
        BaseDAO.__init__(self, path)

    def _to_histogram(self, groups):
        # Per-label member counts, as a Counter.
        return Counter({label: len(members) for label, members in groups.items()})

    def _get_headers(self, dim):
        return ("theta_" + str(index) for index in range(dim))

    def _read(self, file_object):
        rows = csv.reader(file_object, delimiter=',')
        header_row = next(rows)
        # Header layout: Label | theta_0..theta_{dim-1} | correlation fields.
        dim = len(header_row) - 1 - len(Correlations._fields)
        thetas = {}
        corrs = {}
        labels = []
        for row in rows:
            if not row:
                continue
            if row[0] == 'average':
                labels.append(np.array(row[1:dim + 1], dtype=np.float64))
            else:
                label_index = int(row[0])
                theta_vec = np.array(row[1:dim + 1], dtype=np.float64)
                thetas.setdefault(label_index, []).append(theta_vec)
                corr_rec = Correlations(*(float(item) for item in row[dim + 1:]))
                corrs.setdefault(label_index, []).append(corr_rec)
        return SortedThetas(thetas=thetas, labels=labels,
                            histogram=self._to_histogram(thetas),
                            correlations=corrs)

    def _write(self, file_object, sorted_thetas):
        dim = sorted_thetas.thetas[0][0].shape[0]
        fieldnames = (['Label'] + list(self._get_headers(dim))
                      + list(Correlations._fields))
        writer = csv.writer(file_object, delimiter=',',
                            quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(fieldnames)
        for label in sorted_thetas.thetas.keys():
            for theta, corr in zip(sorted_thetas.thetas[label],
                                   sorted_thetas.correlations[label]):
                writer.writerow([label] + theta.tolist()
                                + list(corr._asdict().values()))
            # Centroid row closes each label's group.
            writer.writerow(['average'] + sorted_thetas.labels[label].tolist())
class ThetaGroupsDAO(BaseDAO):
    '''Writes csv files of the analysis of the records and reads them'''
    def __init__(self, path):
        BaseDAO.__init__(self, path)
    def _read(self, file_object):
        """Parse rows of (label, dimension, variances...) into a list of
        dicts with 'dimension' and 'variances' keys."""
        reader = csv.reader(file_object, delimiter=',', quotechar='|')
        headers = next(reader)  # header row is discarded
        theta_groups = []
        for row in reader:
            if len(row) != 0:
                theta_group = {}
                theta_group["dimension"] = int(row[1])
                theta_group["variances"] = np.array(row[2:], dtype=np.float64)
                theta_groups.append(theta_group)
        return theta_groups
    def get_max_variances_dim(self, content):
        """Largest variance-vector length across all groups.

        NOTE(review): this unpacks each value as (W, var) and takes
        np.diag of the SECOND element, while _write below takes np.diag
        of element [0] -- the two disagree about the layout of
        ``content`` values; confirm which index is correct.
        """
        variances = [np.diag(var) for W, var in content.values()]
        var_amounts = map(len, variances)
        return max(var_amounts)
    def _write(self, file_object, content):
        """Write one csv row per label: label, dimension, then the
        diagonal variances (header width set by the widest group)."""
        max_var_dim = self.get_max_variances_dim(content)
        fieldnames = ['Label', 'Dimensions'] + ['var_'+str(index) for index in range(max_var_dim)]
        thetas_writer = csv.writer(file_object, delimiter=',',
                                   quotechar='"', quoting=csv.QUOTE_MINIMAL)
        thetas_writer.writerow(fieldnames)
        for label_index in content.keys():
            variances = np.diag(content[label_index][0])
            dim = len(variances)
            thetas_writer.writerow([label_index, dim] + variances.tolist())
|
{"hexsha": "6f876350811ec7c6ebdaf94080bc717bda476a1b", "size": 7457, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dao.py", "max_stars_repo_name": "DogeMajor/GDFT", "max_stars_repo_head_hexsha": "bd84a8cef8d68f88c3c80de9936ee65ce85fcb40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dao.py", "max_issues_repo_name": "DogeMajor/GDFT", "max_issues_repo_head_hexsha": "bd84a8cef8d68f88c3c80de9936ee65ce85fcb40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dao.py", "max_forks_repo_name": "DogeMajor/GDFT", "max_forks_repo_head_hexsha": "bd84a8cef8d68f88c3c80de9936ee65ce85fcb40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1990291262, "max_line_length": 98, "alphanum_fraction": 0.6123105807, "include": true, "reason": "import numpy", "num_tokens": 1671}
|
import time
import argparse
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
import networkx as nx
import dgl
from models import MLP, InteractionNet, PrepareLayer
from dataloader import MultiBodyGraphCollator, MultiBodyTrainDataset,\
MultiBodyValidDataset, MultiBodyTestDataset
from utils import make_video
def train(optimizer, loss_fn, reg_fn, model, prep, dataloader, lambda_reg, device):
    """Run one training epoch over ``dataloader``.

    For each batch: prepare node/edge features with ``prep``, run the
    model, and minimise the prediction loss plus penalties (weighted by
    ``lambda_reg``) on the relation outputs and on all model parameters.

    Returns:
        Mean *unregularised* prediction loss over the epoch, or 0.0 for
        an empty dataloader.
    """
    total_loss = 0.0
    num_batches = 0
    model.train()
    for graph_batch, data_batch, label_batch in dataloader:
        graph_batch = graph_batch.to(device)
        data_batch = data_batch.to(device)
        label_batch = label_batch.to(device)
        optimizer.zero_grad()
        node_feat, edge_feat = prep(graph_batch, data_batch)
        # The model expects relation/global inputs; this task has none,
        # so all-zero placeholders are fed instead.
        dummy_relation = torch.zeros(edge_feat.shape[0], 1).float().to(device)
        dummy_global = torch.zeros(node_feat.shape[0], 1).float().to(device)
        v_pred, out_e = model(graph_batch, node_feat[:, 3:5].float(
        ), edge_feat.float(), dummy_global, dummy_relation)
        loss = loss_fn(v_pred, label_batch)
        total_loss += float(loss)
        num_batches += 1
        # Penalise large relation outputs...
        zero_target = torch.zeros_like(out_e)
        loss = loss + lambda_reg * reg_fn(out_e, zero_target)
        # ...and large parameters (weight decay implemented via reg_fn).
        reg_loss = 0
        for param in model.parameters():
            reg_loss = reg_loss + lambda_reg * \
                reg_fn(param, torch.zeros_like(
                    param).float().to(device))
        loss = loss + reg_loss
        loss.backward()
        optimizer.step()
    # Guard against an empty dataloader: the original returned
    # total_loss/(i+1), which raised NameError when no batch was seen.
    return total_loss / num_batches if num_batches else 0.0
# One step evaluation
def eval(loss_fn, model, prep, dataloader, device):
    """Compute the mean per-batch data loss over ``dataloader``.

    Puts the model into eval mode and disables autograd, so weights are
    never updated.  (The name shadows the builtin ``eval``; it is kept
    because the training script below calls it by this name.)

    Args:
        loss_fn: data-term loss, e.g. ``nn.MSELoss``.
        model: network taking ``(graph, node_feat, edge_feat, global, relation)``
            and returning ``(v_pred, out_e)``.
        prep: layer mapping ``(graph, raw data)`` to ``(node_feat, edge_feat)``.
        dataloader: yields ``(graph_batch, data_batch, label_batch)`` tuples.
        device: torch device the batches are moved to.

    Returns:
        Mean loss over the batches (0.0 for an empty loader).
    """
    total_loss = 0.0
    n_batches = 0
    model.eval()
    # Inference only: torch.no_grad() saves memory/time without changing
    # any of the computed loss values.
    with torch.no_grad():
        for graph_batch, data_batch, label_batch in dataloader:
            graph_batch = graph_batch.to(device)
            data_batch = data_batch.to(device)
            label_batch = label_batch.to(device)
            node_feat, edge_feat = prep(graph_batch, data_batch)
            # Unused global/relation inputs are fed as zero placeholders.
            dummy_relation = torch.zeros(edge_feat.shape[0], 1).float().to(device)
            dummy_global = torch.zeros(node_feat.shape[0], 1).float().to(device)
            v_pred, _ = model(graph_batch, node_feat[:, 3:5].float(),
                              edge_feat.float(), dummy_global, dummy_relation)
            total_loss += float(loss_fn(v_pred, label_batch))
            n_batches += 1
    # Guard against an empty dataloader (the original raised NameError here).
    return total_loss / n_batches if n_batches else 0.0
# Rollout evaluation based on the initial state: integrate the model's
# velocity predictions forward with explicit Euler and render a video.
def eval_rollout(model, prep, initial_frame, n_object, device):
    """Roll the model out for 100 steps from ``initial_frame`` and make a video.

    Args:
        model: trained interaction network returning ``(v_pred, out_e)``.
        prep: layer mapping ``(graph, frame)`` to ``(node_feat, edge_feat)``.
        initial_frame: per-object state tensor; columns 1,2 are positions and
            3:5 velocities (column 0 presumably the mass -- TODO confirm).
        n_object: number of bodies (nodes of the fully connected graph).
        device: torch device for the rollout.

    Side effects:
        Writes 'video_model.mp4' via ``make_video``.
    """
    n_steps = 100
    dt = 0.001  # integration step; presumably matches the simulation dt -- confirm
    # Clone so the caller's tensor is not mutated: ``.to(device)`` returns
    # the *same* tensor when it already lives on ``device``, and the updates
    # below are in-place.
    current_frame = initial_frame.to(device).clone()
    graph = dgl.from_networkx(nx.complete_graph(n_object)).to(device)
    pos_buffer = []
    model.eval()
    with torch.no_grad():  # pure inference; no autograd graph needed
        for _ in range(n_steps):
            node_feats, edge_feats = prep(graph, current_frame)
            # Unused global/relation inputs are fed as zero placeholders.
            dummy_relation = torch.zeros(edge_feats.shape[0], 1).float().to(device)
            dummy_global = torch.zeros(node_feats.shape[0], 1).float().to(device)
            v_pred, _ = model(graph, node_feats[:, 3:5].float(),
                              edge_feats.float(), dummy_global, dummy_relation)
            # Explicit Euler: advance positions, then overwrite velocities.
            current_frame[:, [1, 2]] += v_pred * dt
            current_frame[:, 3:5] = v_pred
            pos_buffer.append(current_frame[:, [1, 2]].cpu().numpy())
    pos_buffer = np.vstack(pos_buffer).reshape(n_steps, n_object, -1)
    make_video(pos_buffer, 'video_model.mp4')
if __name__ == '__main__':
    # ----- Command-line interface -----
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--lr', type=float, default=0.001,
                           help='learning rate')
    argparser.add_argument('--epochs', type=int, default=40000,
                           help='Number of epochs in training')
    argparser.add_argument('--lambda_reg', type=float, default=0.001,
                           help='regularization weight')
    argparser.add_argument('--gpu', type=int, default=-1,
                           help='gpu device code, -1 means cpu')
    argparser.add_argument('--batch_size', type=int, default=100,
                           help='size of each mini batch')
    argparser.add_argument('--num_workers', type=int, default=0,
                           help='number of workers for dataloading')
    argparser.add_argument('--visualize', action='store_true', default=False,
                           help='Whether enable trajectory rollout mode for visualization')
    args = argparser.parse_args()
    # Select Device to be CPU or GPU
    if args.gpu != -1:
        device = torch.device('cuda:{}'.format(args.gpu))
    else:
        device = torch.device('cpu')
    # Datasets and dataloaders; the collator builds the interaction graphs
    # for each mini batch.
    train_data = MultiBodyTrainDataset()
    valid_data = MultiBodyValidDataset()
    test_data = MultiBodyTestDataset()
    collator = MultiBodyGraphCollator(train_data.n_particles)
    train_dataloader = DataLoader(
        train_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
    valid_dataloader = DataLoader(
        valid_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
    test_full_dataloader = DataLoader(
        test_data, args.batch_size, True, collate_fn=collator, num_workers=args.num_workers)
    # Per-node feature width; the stat printouts below suggest the channels
    # are weight, x, y, vx, vy -- TODO confirm against the dataset code.
    node_feats = 5
    # Normalization statistics computed on the training split.
    stat = {'median': torch.from_numpy(train_data.stat_median).to(device),
            'max': torch.from_numpy(train_data.stat_max).to(device),
            'min': torch.from_numpy(train_data.stat_min).to(device)}
    print("Weight: ", train_data.stat_median[0],
          train_data.stat_max[0], train_data.stat_min[0])
    print("Position: ", train_data.stat_median[[
        1, 2]], train_data.stat_max[[1, 2]], train_data.stat_min[[1, 2]])
    print("Velocity: ", train_data.stat_median[[
        3, 4]], train_data.stat_max[[3, 4]], train_data.stat_min[[3, 4]])
    prepare_layer = PrepareLayer(node_feats, stat).to(device)
    interaction_net = InteractionNet(node_feats, stat).to(device)
    print(interaction_net)
    optimizer = torch.optim.Adam(interaction_net.parameters(), lr=args.lr)
    # NOTE(review): state_dict is captured but never used afterwards.
    state_dict = interaction_net.state_dict()
    loss_fn = torch.nn.MSELoss()
    reg_fn = torch.nn.MSELoss(reduction='sum')
    try:
        # Training loop: one epoch of training, then evaluation on the
        # validation and full test splits every epoch.
        for e in range(args.epochs):
            last_t = time.time()
            loss = train(optimizer, loss_fn,reg_fn, interaction_net,
                         prepare_layer, train_dataloader, args.lambda_reg, device)
            print("Epoch time: ", time.time()-last_t)
            if e % 1 == 0:
                valid_loss = eval(loss_fn, interaction_net,
                                  prepare_layer, valid_dataloader, device)
                test_full_loss = eval(
                    loss_fn, interaction_net, prepare_layer, test_full_dataloader, device)
                print("Epoch: {}.Loss: Valid: {} Full: {}".format(
                    e, valid_loss, test_full_loss))
    except:
        # NOTE(review): bare except is presumably deliberate so that an
        # interrupted run (Ctrl-C) still reaches the visualization below.
        traceback.print_exc()
    finally:
        # Optionally roll out the trained model and dump the ground-truth video.
        if args.visualize:
            eval_rollout(interaction_net, prepare_layer,
                         test_data.first_frame, test_data.n_particles, device)
            make_video(test_data.test_traj[:100, :, [1, 2]], 'video_truth.mp4')
|
{"hexsha": "103aa2fe2e1217139be35631e990fa4d9d552cd5", "size": 7027, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pytorch/graphsim/train.py", "max_stars_repo_name": "ketyi/dgl", "max_stars_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9516, "max_stars_repo_stars_event_min_datetime": "2018-12-08T22:11:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:04:33.000Z", "max_issues_repo_path": "examples/pytorch/graphsim/train.py", "max_issues_repo_name": "ketyi/dgl", "max_issues_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2494, "max_issues_repo_issues_event_min_datetime": "2018-12-08T22:43:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:16:27.000Z", "max_forks_repo_path": "examples/pytorch/graphsim/train.py", "max_forks_repo_name": "ketyi/dgl", "max_forks_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2529, "max_forks_repo_forks_event_min_datetime": "2018-12-08T22:56:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:07:41.000Z", "avg_line_length": 42.5878787879, "max_line_length": 93, "alphanum_fraction": 0.6449409421, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1627}
|
# Activation normalization from Kingma & Dhariwal (2018)
# Author: Philipp Witte, pwitte3@gatech.edu
# Date: January 2020

using InvertibleNetworks, LinearAlgebra, Test

# Input dimensions: image width/height, channels, batch size
nx = 64
ny = 64
k = 10
batchsize = 4

# Input image: nx x ny x k x batchsize
X = randn(Float32, nx, ny, k, batchsize)
Y = randn(Float32, nx, ny, k, batchsize)   # random "target", used only to form a residual

# Activation normalization layer over k channels; logdet=true makes
# forward() also return the log-determinant of the Jacobian.
AN = ActNorm(k; logdet=true)

# Forward pass
Y_, logdet = AN.forward(X)
ΔY = Y_ - Y   # residual fed back as the output-side gradient

# Backpropagation: returns the input-side gradient ΔX and the
# reconstructed input X_ (the layer is invertible).
ΔX, X_ = AN.backward(ΔY, Y_)

# Test invertibility: X_ should match X to single precision.
# NOTE(review): the result of isapprox is discarded -- wrap it in @test
# (the Test package is already loaded) to make this an actual assertion.
isapprox(norm(X - X_)/norm(X), 0f0, atol=1f-6)
|
{"hexsha": "dd7f4f049d21c2b5c332a7a67bab3d73af26de9d", "size": 572, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/layers/layer_actnorm.jl", "max_stars_repo_name": "PetersBas/InvertibleNetworks.jl", "max_stars_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-05-26T02:30:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T12:58:09.000Z", "max_issues_repo_path": "examples/layers/layer_actnorm.jl", "max_issues_repo_name": "PetersBas/InvertibleNetworks.jl", "max_issues_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2021-02-16T20:52:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T03:45:18.000Z", "max_forks_repo_path": "examples/layers/layer_actnorm.jl", "max_forks_repo_name": "PetersBas/InvertibleNetworks.jl", "max_forks_repo_head_hexsha": "c53dacf426ecd1381f79f297f6954e6695c515b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-04-22T14:43:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T19:44:39.000Z", "avg_line_length": 19.724137931, "max_line_length": 56, "alphanum_fraction": 0.7062937063, "num_tokens": 206}
|
#!/usr/bin/env python3
# -*- coding: latin-1 -*-
# HEREHEREHERE
# MAINMAINMAIN
#############################################################################
# fits2psqlraw
#
# ls -1 *fits > input.txt # 7421 files
# fits2psqlraw --list input.txt -D wayne -t myfits -c
#
# /home/git/clones/NGSL/data/stis_xxx/fits2psqlraw
#
#emacs helpers
# (insert (buffer-file-name))
#
# (ediff-current-file)
# (wg-python-fix-pdbrc)
# (find-file-other-frame "./.pdbrc")
# (wg-python-fix-pdbrc) # PDB DASH DEBUG end-comments
#
# (setq mypdbcmd (concat (buffer-file-name) "<args...>"))
# (progn (wg-python-fix-pdbrc) (pdb mypdbcmd))
#
# (wg-astroconda-pdb) # IRAF27
# (wg-astroconda3-pdb) # CONDA Python3
#
# (set-background-color "light blue")
# (wg-python-toc)
#
#
#
# (iv (setq tmp (/ 7421.0 (+ (* 60.0 8) 11.847 )))) 15.088 records per second
#############################################################################
import optparse # we're flexible about our sources etc.
import os
import re # idiots put single quotes in comments
import sys # because sys!
from astropy.io import fits # open file, deal with the header
import json # json data
import collections # use an ordered dict, keep cards in order
import pprint
from .utils import * # cleanpath,cleantarget,s2r,s2d,pquote
# (wg-python-graphics)
__doc__ = """
fits2psqlraw [options] files...
ls -1 \*fits > table.idx
fits2psqlraw -D mydb -t mynewtable --list table.idx
./fits2psqlraw -D wayne -t rawngslheaders --target=TARGNAME --index \*fits > rawngslheaders.psql
Given a FITS files on on the command line or -T <filename> with fits
files, one per line with blank lines and sharp to end of line ignored,
dig out the header(s) and make a json structure of the headers. Produce
a 'raw' '.psql' file capable of being loaded as a data script to stdout.
It is 'raw' in the sense that it captures the image as-is. CREATE/SELECT/JOIN
and/or INSERT/SELECT/JOIN can transliterate these raw data into the
main collection.
Multiple FITS extensions are supported. No FITS data embedded,
Three main questions are reflected in the data record:
Where (ra,dec)
What (the target's name)
and the file's fully qualified original name.
A "-n, --basepath" switch gives a key directory name, default is
'Observations'. For the actual path to the file, say,
"/home/edwin/Observations/25Dec2012/focus/foc.0001.fits"
the fqpn will be "25Dec2012/focus/foc.0001.fits" with
NO leading slash.
The target name will have leading/trailing spaces
removed, intermediate spaces compressed into one
space.
Some 'Philosophy' is needed here.
The --ra <keyword> --dec <keyword> options take precedence and RA and
DEC keywords used by default if they exist, else NULL. (In some
reference images, there is no hint of the ra/dec only a target name).
The --target <keyword> takes precedence. If not given, the default OBJECT is
used if found, else NULL.
Support extended HDUs within the JSON structure
as { hdu0 : {header json} [, hdun : {header 1+ json} }.
This program sends output to stdout.
Command Line flags
==================
-b, --basepath str the part from / down to the base directory.
-c, --create bool create table and Q3C index
-t, --table str table name
-D, --database str database name
-v, --verbose bool be verbose about work
-w, --write str fqpath filename for a psql image
--list str file of files; one fqpathname per line
Keyword Fields
--------------
--ra str keyword of the RA field e.g.: TARGRA
--dec str keyword of the DEC field
--target str keyword of the Target's catalog name.
OK,,, a FITS header, ala AstroPY, is a 'card' consisting of a tuple of
3 things: a keyword, a value, and a comment. A Python dictionary is a
(key,value) pair. So we make an insert statement that carries a
PostgreSQL 'jsonb' datatype, with the 'key' from the fits files
together with a nested 'jsonb' structure consisting of 'value' and
'comment' for the card. The comment information carries important
data.
q3c_radial_query(tablera, tabledec, queryra, querydec, radiusdegrees)
Here are some sample queries:
.. code-block:: psql
:linenos:
select count(*) from myfits;
select ora,odec from myfits limit 10;
select fqpname,ora::numeric(7,5),odec::numeric(7,5) from myfits
where q3c_radial_query(ora, odec, 9.7419958, 48.3369, 0.1);
-- Acid test with timing examples
\\timing
select header->'OBJECT' ->> 'value' as "OBJECT",
fqpname,
ora::numeric(7,5) as "RA",
odec::numeric(7,5) as "Dec",
COALESCE(header->'FILTER' ->> 'value' as "Filter"), -- force NULL if json fails.
COALESCE(header->'DATE-OBS' ->> 'value' as "dateobs")
from myfits
where q3c_radial_query(ora, odec, 9.7419958, 48.3369, 0.1)
order by header->'FILTER'->> 'value',header->'DATE-OBS'->> 'value';
\\timing
.. csv-table:: "Query Example"
   :header: "OBJECT", "fqpname", "RA", "Dec", "Filter", "dateobs"
   :widths: 18,40,12,12,12,32
'NGC185' , elp1m008-kb74-20140525-0059-e90.fits , 9.74200 , 48.33700 , 'ip' , '2014-05-26T10:50:17.539'
'NGC185' , elp1m008-kb74-20140525-0060-e90.fits , 9.74200 , 48.33701 , 'ip' , '2014-05-26T10:52:14.272'
'NGC185' , elp1m008-kb74-20140624-0048-e90.fits , 9.74200 , 48.33700 , 'ip' , '2014-06-25T09:14:32.307'
'NGC185' , elp1m008-kb74-20140624-0049-e90.fits , 9.74199 , 48.33698 , 'ip' , '2014-06-25T09:16:28.901'
'NGC185' , elp1m008-kb74-20140919-0035-e90.fits , 9.74200 , 48.33699 , 'ip' , '2014-09-20T08:29:59.623'
'NGC185' , elp1m008-kb74-20140919-0036-e90.fits , 9.74200 , 48.33699 , 'ip' , '2014-09-20T08:31:56.280'
'NGC0185' , elp1m008-kb74-20141026-0084-e90.fits , 9.74199 , 48.33699 , 'ip' , '2014-10-27T01:59:41.961'
'NGC0185' , elp1m008-kb74-20141026-0085-e90.fits , 9.74198 , 48.33700 , 'ip' , '2014-10-27T02:01:39.030'
'NGC0185' , elp1m008-kb74-20141113-0148-e90.fits , 9.74201 , 48.33703 , 'ip' , '2014-11-14T08:04:45.533'
'NGC0185' , elp1m008-kb74-20141113-0149-e90.fits , 9.74199 , 48.33701 , 'ip' , '2014-11-14T08:06:41.930'
'NGC0185' , elp1m008-kb74-20141213-0212-e90.fits , 9.74200 , 48.33700 , 'ip' , '2014-12-14T06:05:21.695'
'NGC0185' , elp1m008-kb74-20141213-0213-e90.fits , 9.74199 , 48.33701 , 'ip' , '2014-12-14T06:07:18.217'
'NGC0185' , elp1m008-kb74-20150119-0112-e90.fits , 9.74199 , 48.33697 , 'ip' , '2015-01-20T03:35:00.511'
'NGC0185' , elp1m008-kb74-20150119-0113-e90.fits , 9.74199 , 48.33696 , 'ip' , '2015-01-20T03:36:57.501'
'NGC0185' , elp1m008-kb74-20141026-0086-e90.fits , 9.74200 , 48.33701 , 'rp' , '2014-10-27T02:03:46.954'
'NGC0185' , elp1m008-kb74-20141026-0087-e90.fits , 9.74201 , 48.33701 , 'rp' , '2014-10-27T02:05:43.386'
'NGC0185' , elp1m008-kb74-20141113-0150-e90.fits , 9.74200 , 48.33701 , 'rp' , '2014-11-14T08:08:50.869'
'NGC0185' , elp1m008-kb74-20141113-0151-e90.fits , 9.74200 , 48.33696 , 'rp' , '2014-11-14T08:10:47.421'
'NGC0185' , elp1m008-kb74-20141213-0214-e90.fits , 9.74200 , 48.33699 , 'rp' , '2014-12-14T06:09:26.973'
'NGC0185' , elp1m008-kb74-20141213-0215-e90.fits , 9.74201 , 48.33700 , 'rp' , '2014-12-14T06:11:24.220'
'NGC0185' , elp1m008-kb74-20150119-0114-e90.fits , 9.74201 , 48.33699 , 'rp' , '2015-01-20T03:39:05.758'
'NGC0185' , elp1m008-kb74-20150119-0115-e90.fits , 9.74200 , 48.33699 , 'rp' , '2015-01-20T03:41:03.008'
Time: 12.903 ms
Taking Sergey's advice, and re-timing the query:
.. code-block:: psql
:linenos:
\\timing
select header->'OBJECT' ->> 'value' as "OBJECT",
fqpname,
ora::numeric(7,5) as "RA",
odec::numeric(7,5) as "Dec",
header->'FILTER' ->> 'value' as "Filter",
header->'DATE-OBS' ->> 'value' as dateobs
from myfits
where q3c_join(9.7419958, 48.3369, ora, odec, 0.1)
order by header->'FILTER'->> 'value',header->'DATE-OBS'->> 'value';
\\timing
Time: 9.848 ms
% (iv (setq tmp (/ 12.903 9.848 ))) 1.31 speed up.
See https://github.com/segasai/q3c for details.
Notes
=====
The code to fix path names is here, just unused.
Target names are 'fixed' down to one space to stand the best chance
of a match with SIMBAD. Please use actual SIMBAD names.
The output is 'raw' for a reason, any changes to the filename,
or header values should come later. For example, a new header
may be created with campaign specific headers while the
old header's JSON structure may be retained.
"""
__author__ = 'Wayne Green'
__version__ = '1.0'
##############################################################################
# Main
# Regression Tests
##############################################################################
if __name__ == "__main__":
opts = optparse.OptionParser(usage="%prog "+__doc__)
# opts.add_option("-", "--", action="store", dest="",
# default=,
# help="<> .")
opts.add_option("-c", "--create", action="store_true", dest="create",
default=False,
help="<bool> create table.")
opts.add_option("-D", "--database", action="store", dest="database",
default='database',
help="<str> database name.")
opts.add_option("-i", "--index", action="store_true", dest="index",
default=False,
help="<bool> create table.")
opts.add_option("-s", "--schema", action="store", dest="schema",
default='public',
help="<str> schema name else public .")
opts.add_option("-t", "--table", action="store", dest="table",
default='table',
help="<str> table name.")
opts.add_option("-v", "--verbose", action="store_true", dest="verboseflag",
default=False,
help="<bool> be verbose about work.")
opts.add_option("-w", "--write", action="store", dest="write",
default=None,
help="<str> output file name for \\i <includepsqlfile>....")
opts.add_option("--basepath", action="store", dest="basepath",
default=None,
help="<str> remove the base from filename.")
opts.add_option("--list", action="store", dest="list",
default=None,
help="<str> file with 1 filename per line")
opts.add_option("--ra", action="store", dest="rakeyword",
default='RA',
help="<str> The keyword identifying the RA field")
opts.add_option("--dec", action="store", dest="deckeyword",
default='DEC',
help="<str> the keyword identifying the Dec field")
opts.add_option("--target", action="store", dest="targetkeyword",
default='OBJECT',
help="<str> the keyword identifying the target name field")
(options, args) = opts.parse_args()
###################################################################
# Load list of files (command line is only so big! Adds list
# to any found on the command line.
###################################################################
if(options.list is not None):
with open(options.list,'r') as f:
for l in f:
fname = l.split('#')[0].strip()
args.append(l.strip())
###################################################################
# Get the options local and formatted.
###################################################################
schema = options.schema
table = options.table # collec the options values.
database = options.database
rakeyword = options.rakeyword
deckeyword = options.deckeyword
targetkeyword = options.targetkeyword
basepath = options.basepath
###################################################################
# The create statement. Pretty simple.
# Make a raw table, then select from that into a working table
# if needed. Indexing at the bottom of this file.
###################################################################
createquery = """\\c {0}\n
CREATE SCHEMA IF NOT EXISTS {1};
DROP TABLE IF EXISTS {1}.{2} CASCADE;
DROP SEQUENCE IF EXISTS {1}.{2}_sequence;
CREATE SEQUENCE {1}.{2}_sequence START 100000;
CREATE TABLE {1}.{2} (
uniqueid integer PRIMARY KEY DEFAULT nextval('{2}_sequence'),
fqpname text, -- fully qualified path name
target text, -- the target name or NULL
ora double precision, -- raw ra (if we can find one)
odec double precision, -- raw dec (if we can find one)
nhdu integer, -- count of ndu in complex json field [0,1,...,n-1]
header jsonb -- json binary image of fits header
);
COMMENT ON TABLE {1}.{2} is 'Raw FITS file name and its header.';
COMMENT ON COLUMN {1}.{2}.fqpname is 'fully qualified path name';
COMMENT ON COLUMN {1}.{2}.target is 'Cleaned target name';
COMMENT ON COLUMN {1}.{2}.ora is 'ra [decimal degrees]';
COMMENT ON COLUMN {1}.{2}.odec is 'dec [decimal degrees]';
COMMENT ON COLUMN {1}.{2}.nhdu is 'number of HDUs';
COMMENT ON COLUMN {1}.{2}.header is 'PostgreSQL jsonb header';
""".format(database,schema,table) # database extablished with connection
insertstmt = "INSERT INTO {}.".format(schema) + "{} (ora, odec, target, fqpname, nhdu, header) values ( {}, {}, '{}', '{}', {}, '{}' );\n"
ofile = sys.stdout # output file as needed
if(options.write is not None): # default the output to stdout
ofile = open(options.write,'w')
print(createquery,file=ofile) # get the top part of psql file out
###################################################################
# Process each file.
# Assume only one HDU perfile, Ignore the data.
###################################################################
# MAINMAINMAIN
msgs = []
for filename in args: # PDB-DEBUG
try:
fqpn = os.path.abspath(filename) # determine a fqpn
if(basepath is not None):
parts = fqpn.split(basepath)
fqpn = '/'.join(parts[1:]) # all but the first one
if(fqpn[0] is '/'):
fqpn = fqpn[1:] # do not permit leading slash
records = collections.OrderedDict() # all the hdu's for this file.
tblvalues = []
if(options.verboseflag):
print("File {}".format(filename))
f = fits.open(filename)
raval = decval = target = 'NULL' # grab the first hint of a RA/DEC This file.
for hduidx,hdu in enumerate(f): # the list of hdus, get one.
h = hdu.header
history = [] # initialize structures for this file
comment = [] # aggregate history anc comments for file
myheader = collections.OrderedDict() # collect json bound header's values
################################################################
# Load up an ordered dictionary for the cards, aggregate the
# history and comments in order.
################################################################
for c in h.cards:
key = c.keyword
if(type(c.value) is type("") and "'" in c.value):
c.value = _requote.sub(",",c.value)
elif('Undefined' in "{}".format(type(c.value))):
c.value = 'NULL' # PDB-DEBUG
msgs.append("Warning File {:s}[{:d}] keyword {:s} undefined, set to NULL".format(filename,hduidx,c.keyword))
if(type(c.comment) is type("") and "'" in c.comment):
c.comment = _requote.sub("",c.comment)
if(options.verboseflag): print("|{}| |{}| ".format(key,c.value))
if(key == 'HISTORY'):
history.append(c.value)
if(options.verboseflag):
print("HISTORY keyword |{}| value |{}| c.comment |{}|".format(key, c.value, c.comment))
elif(key == 'COMMENT'):
history.append(c.value)
if(options.verboseflag):
print("COMMENT keyword |{}| value |{}| c.comment |{}|".format(key, c.value, c.comment))
elif(key != ''): # the tail end of a 'block', Astropy's interesting artifact!
myheader[key] = {'value' : pquote(c.value), 'comment' : pquote(c.comment)} # HEREHEREHERE
#myheader[key] = '{'+ "{},{}".format(pquote(c.value), pquote(c.comment)) + '}'
#myheader[key] = '{{"value" : {}, "comment" : {}}}'.format(c.value,c.comment) # examples
if(key == rakeyword):
if(type(c.value) == type('str')):
myheader[key] = s2r(c.value) # change to decimal if ra
else:
myheader[key] = c.value
elif(key == deckeyword):
if(type(c.value) == type('str')):
myheader[key] = s2d(c.value) # or dec
else:
myheader[key] = c.value
elif(key == targetkeyword):
if(type(c.value)):
myheader[targetkeyword] = c.value # or dec
if(len(history) != 0): # add any history statements
myheader['HISTORY'] = "HISTORY "+ ",HISTORY ".join(history)
if(len(comment) != 0): # add any comment statements
myheader['COMMENT'] = "COMMENT " + ",COMMENT ".join(comment)
# end of cards for one hdu element of a file.
if(rakeyword in myheader and raval == 'NULL'): # pick up the dfirst one
raval = "{:8.5f}".format(myheader[rakeyword])
if(deckeyword in myheader and decval == 'NULL'):
decval = "{:8.5f}".format(myheader[deckeyword]) # string as raw
if(targetkeyword in myheader and target == 'NULL'):
target = "{}".format(myheader[targetkeyword])
if(len(tblvalues) == 0):
tblvalues = [raval,decval,fqpn]
records['HDU[{}]'.format(hduidx)] = myheader
# records is an ordered dict, by 'HDU[{}]' arrays raval,decval,filename,myheader
hduidx += 1 # hduidx bump to cardinal number
jd = json.dumps(records) # make the json part.
insertquery = insertstmt.format(table,raval,decval,target,filename,hduidx,jd)
print(insertquery,file=ofile)
# end of this filename's header
except Exception as e:
print ("Exception with {:s}[{:d}] - {:s}".format(filename, hduidx, e.__str__()),file=sys.stderr)
print("Card {}".format(c),file=sys.stderr)
pprint.pprint(records,stream=sys.stderr,indent=3)
###################################################################
# Prepare the indexing.
###################################################################
q3cquery = """
CREATE INDEX ON {0} (public.q3c_ang2ipix(ora,odec)); -- {0}_q3c_ang2ipix_idx
CLUSTER {0}_q3c_ang2ipix_idx ON {0};
ANALYZE {0};
"""
if(options.index):
print(q3cquery.format(table),file=ofile)
print("commit",file=ofile)
ofile.close()
if(len(msgs) != 0):
print('\n'.join(msgs),file=sys.stderr)
"""
select target, r2s(ora) as "RA", d2s(odec) as "DEC",
quote_literal(header -> 'HDU[0]' -> 'GRATING' ->> 'value') as "Grating",
quote_literal(header -> 'HDU[0]' -> 'SPECTYPE' ->> 'value') as "SpType"
from rawngslheaders
where header -> 'HDU[0]' -> 'SPECTYPE' ->> 'value' = 'composite'
order by ora::integer/15,odec
;
select jsonb_object_keys(hdu) from
(select header -> 'HDU[0]' as "hdu"
from rawngslheaders
limit 1) xx
;
select jsonb_object_keys(hdu) from
(select header -> 'HDU[1]' as "hdu"
from rawngslheaders
limit 1) xx
;
-- Tie in the SIMBAD data with topcat
select ora, odec from rawngslheaders;
"""
|
{"hexsha": "3afcd69afafc11987da8a83ec26f5ae14e388198", "size": 20802, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/fits2psqlraw.py", "max_stars_repo_name": "The-SMTSci/NGSL", "max_stars_repo_head_hexsha": "dbdad80ef521811387eeaafd90821b07df2fe6b0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/fits2psqlraw.py", "max_issues_repo_name": "The-SMTSci/NGSL", "max_issues_repo_head_hexsha": "dbdad80ef521811387eeaafd90821b07df2fe6b0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/fits2psqlraw.py", "max_forks_repo_name": "The-SMTSci/NGSL", "max_forks_repo_head_hexsha": "dbdad80ef521811387eeaafd90821b07df2fe6b0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.979338843, "max_line_length": 144, "alphanum_fraction": 0.5499471205, "include": true, "reason": "from astropy", "num_tokens": 5791}
|
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pycpd import deformable_registration
import numpy as np
import time
def visualize(iteration, error, X, Y, ax):
    """Redraw the 3-D scatter of target (X, red) and source (Y, blue) clouds.

    Intended as a registration callback: clears the current axes, plots both
    point sets, overlays the iteration/error text, and pauses briefly so the
    figure refreshes.
    """
    plt.cla()  # wipe the previous iteration's drawing
    for cloud, colour, tag in ((X, 'red', 'Target'), (Y, 'blue', 'Source')):
        ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2], color=colour, label=tag)
    status = 'Iteration: {:d}\nError: {:06.4f}'.format(iteration, error)
    ax.text2D(0.87, 0.92, status,
              horizontalalignment='center', verticalalignment='center',
              transform=ax.transAxes, fontsize='x-large')
    ax.legend(loc='upper left', fontsize='x-large')
    plt.draw()
    plt.pause(0.001)  # give the GUI event loop time to render
def _with_z_column(points, z):
    """Return ``points`` with a constant column ``z`` appended.

    Lifts the 2-D fish point set into 3-D at height ``z`` (the original
    duplicated this construction four times inline).
    """
    lifted = np.full((points.shape[0], points.shape[1] + 1), float(z))
    lifted[:, :-1] = points
    return lifted


def main():
    """Register two stacked copies (z=0 and z=1) of the fish source cloud
    onto the target cloud with deformable CPD, visualizing each iteration.

    Reads ``data/fish_target.txt`` and ``data/fish_source.txt`` and blocks
    in ``plt.show()`` until the figure is closed.
    """
    fish_target = np.loadtxt('data/fish_target.txt')
    X = np.vstack((_with_z_column(fish_target, 0), _with_z_column(fish_target, 1)))

    fish_source = np.loadtxt('data/fish_source.txt')
    Y = np.vstack((_with_z_column(fish_source, 0), _with_z_column(fish_source, 1)))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)

    reg = deformable_registration(**{'X': X, 'Y': Y})
    reg.register(callback)
    plt.show()
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
|
{"hexsha": "0a5779d986326eef36e40101288e5afed58116e8", "size": 1457, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/fish_deformable_3D.py", "max_stars_repo_name": "KingDeng005/pycpd", "max_stars_repo_head_hexsha": "a2d383d5aee96b2e0fc4d5efa238efb57f8536c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-12T15:06:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-12T15:06:32.000Z", "max_issues_repo_path": "examples/fish_deformable_3D.py", "max_issues_repo_name": "KingDeng005/pycpd", "max_issues_repo_head_hexsha": "a2d383d5aee96b2e0fc4d5efa238efb57f8536c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/fish_deformable_3D.py", "max_forks_repo_name": "KingDeng005/pycpd", "max_forks_repo_head_hexsha": "a2d383d5aee96b2e0fc4d5efa238efb57f8536c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-29T04:46:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-29T04:46:22.000Z", "avg_line_length": 34.6904761905, "max_line_length": 188, "alphanum_fraction": 0.6403568977, "include": true, "reason": "import numpy", "num_tokens": 446}
|
!==============================================================================!
  subroutine Backup_Mod_Write_Variable(fh, disp, vc, var_name, var)
!------------------------------------------------------------------------------!
!   Writes a whole variable to backup file.                                    !
!------------------------------------------------------------------------------!
!----------------------------------[Modules]-----------------------------------!
  use Comm_Mod
  use Grid_Mod
  use Var_Mod
!------------------------------------------------------------------------------!
  implicit none
!---------------------------------[Arguments]----------------------------------!
  ! fh   - backup file handle
  ! disp - displacement (byte offset) in the backup file; presumably advanced
  !        by every Comm_Mod_Write_* call below -- confirm in Comm_Mod
  ! vc   - running count of variables written to the backup file
  integer          :: fh, disp, vc
  character(len=*) :: var_name
  type(Var_Type)   :: var
!-----------------------------------[Locals]-----------------------------------!
  character(len=80) :: vn  ! fixed-width copy of the variable name
  integer           :: vs  ! variable size
!==============================================================================!

  ! Only the first processor reports progress
  if(this_proc < 2) print *, '# Writing variable: ', trim(var_name)

  ! Increase variable count
  vc = vc + 1

  ! Vector without boundaries.
  ! The record is: name, total byte size, then four arrays.  The size uses
  ! nc_t/nb_t while the slices use nc_s/nb_s -- presumably total vs. this
  ! subdomain's cell and boundary-cell counts (from the used modules); confirm.
  vn = var_name; call Comm_Mod_Write_Text(fh, vn, disp)
  vs = (2*nc_t + 2*nb_t) * SIZE_REAL; call Comm_Mod_Write_Int (fh, vs, disp)

  ! var % n inside cells, var % n boundary cells, var % q boundary cells,
  ! var % o inside cells -- member semantics (new/old value, q) are defined
  ! in Var_Mod; confirm there.
  call Comm_Mod_Write_Cell_Real(fh, var % n(1:nc_s), disp)
  call Comm_Mod_Write_Bnd_Real (fh, var % n(-nb_s:-1), disp)
  call Comm_Mod_Write_Bnd_Real (fh, var % q(-nb_s:-1), disp)
  call Comm_Mod_Write_Cell_Real(fh, var % o(1:nc_s), disp)

  end subroutine
|
{"hexsha": "81d5d1d2a17b042e0848a5e432dd9a72831ca44b", "size": 1578, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/Process/Backup_Mod/Write_Variable.f90", "max_stars_repo_name": "MassimoBorrelliPhysicist/T-Flows", "max_stars_repo_head_hexsha": "48c330f1e9576bf0ef79edbee1b7bc09092e0706", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Sources/Process/Backup_Mod/Write_Variable.f90", "max_issues_repo_name": "MassimoBorrelliPhysicist/T-Flows", "max_issues_repo_head_hexsha": "48c330f1e9576bf0ef79edbee1b7bc09092e0706", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/Process/Backup_Mod/Write_Variable.f90", "max_forks_repo_name": "MassimoBorrelliPhysicist/T-Flows", "max_forks_repo_head_hexsha": "48c330f1e9576bf0ef79edbee1b7bc09092e0706", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8333333333, "max_line_length": 80, "alphanum_fraction": 0.3846641318, "num_tokens": 320}
|
from typing import Optional
import gym
import numpy as np
import pytest
from gym.spaces import Box, Dict, Discrete
from gym.utils.env_checker import check_env
class ActionDictTestEnv(gym.Env):
    """Minimal env with a Dict action space whose ``step()`` deliberately
    returns only three values (obs, reward, done) — no info dict — so the
    env checker can be exercised against a malformed step signature."""

    action_space = Dict({"position": Discrete(1), "velocity": Discrete(1)})
    observation_space = Box(low=-1.0, high=2.0, shape=(3,), dtype=np.float32)

    def step(self, action):
        # Intentionally omit the trailing `info` dict from the return tuple.
        obs = np.array([1.0, 1.5, 0.5])
        return obs, 1, True

    def reset(self, seed: Optional[int] = None):
        super().reset(seed=seed)
        return np.array([1.0, 1.5, 0.5])

    def render(self, mode="human"):
        pass
def test_check_env_dict_action():
    # `ActionDictTestEnv.step()` returns only obs, reward, done — no info —
    # so the env checker must reject it with exactly this AssertionError.
    env = ActionDictTestEnv()

    expected_message = (
        "The `step()` method must return four values: obs, reward, done, info"
    )
    with pytest.raises(AssertionError) as excinfo:
        check_env(env=env, warn=True)
    assert str(excinfo.value) == expected_message
|
{"hexsha": "32dd2f7b89c3e6f9a70cbd84209c9c886bf94507", "size": 1073, "ext": "py", "lang": "Python", "max_stars_repo_path": "libs/gym/tests/utils/test_env_checker.py", "max_stars_repo_name": "maxgold/icml22", "max_stars_repo_head_hexsha": "49f026dd2314091639b52f5b8364a29e8000b738", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/gym/tests/utils/test_env_checker.py", "max_issues_repo_name": "maxgold/icml22", "max_issues_repo_head_hexsha": "49f026dd2314091639b52f5b8364a29e8000b738", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/gym/tests/utils/test_env_checker.py", "max_forks_repo_name": "maxgold/icml22", "max_forks_repo_head_hexsha": "49f026dd2314091639b52f5b8364a29e8000b738", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5128205128, "max_line_length": 85, "alphanum_fraction": 0.6402609506, "include": true, "reason": "import numpy", "num_tokens": 283}
|
%%%%% CPLOP %%%%%
\section{\cploplong{}}\label{sec:background:cplop}
This section details the aspects of \cploplong{} (\cplop{}) relevant to \mstlong{} (\mst{}).
It explains the nature of \pyros{} and the \pyro{}ing process, including what segments of \ecoli{} \dna{} \cplop{} researchers use and how they collect the \isols{} used in the process.
Finally, it describes the steps necessary to properly compare two \ecoli{} \isols{} for strain identification and \mst{} and provides an overview of how \cplop{} stores the \pyro{} data to facilitate it.
%%%%% PYROPRINTS %%%%%
\subsection{\Pyros{}}\label{sec:pyroprints}
\Pyros{} are the core data structure in \cplop{} used to represent \ecoli{} \isols{}.
\index{\pyro{}}
Using an inexpensive \dna{} sequencing technique called pyrosequencing, we can build a fingerprint\footnote{hence, the portmanteau ``\pyro{}''} that allows us to effectively differentiate between \ecoli{} strains.
Building \pyros{} requires careful use of the pyrosequencing process.
Pyrosequencing is a \dna{} sequencing technique appropriate for sequencing short \dna{} fragments (up to around 150-200 base pairs) \cite{ronaghi1998sequencing}.
A machine dispenses a predefined series of nucleotides and carefully measures the light output of the reaction.
The amount of light emitted is directly proportional to the amount of the corresponding nucleotide present in the \dna{}.
The output of the machine is a time series graph depicting the measured light before and after each dispensation, called a pyrogram.
Previous work in \cite{montana2013algorithms, Shealy:SeniorProject} helped us determine the optimal dispensation order and length and exactly which portions of this graph to use.
Determining the optimal dispensation order and its length are crucial to effective sequencing: too few dispensations may not encode enough information, while too many may degrade the quality of the data.
A \textit{\pyro{}} is a vector representing the peak light values of the pyrosequencing of one of the \itsshort{} regions in the seven loci of the \ecoli{} genome.
\index{\pyro{}}
As explained in \autoref{sec:its}, \Ssixt{} and \Sfive{} offer keen insight into \ecoli{} strains, since random variation can occur in them without affecting the survivability of the \ecoli{} microbes.
We \pyro{} each \itsshort{} separately, building at least two \pyros{} for each \isol{}: one for \Ssixt{} and another for \Sfive{}.
%%%%% ITS %%%%%
\subsection{\ITSlong{}s}\label{sec:its}
Choosing the proper region of \dna{} to fingerprint is crucial to effective strain delineation.
Generally, when fingerprinting \fib{}, researchers avoid using regions that code for functional products and focus instead on non-coding regions of \dna{}, since variations within do not affect the survivability of the microbe.
When differentiating between \ecoli{} strains, researchers use the two \itsshort{} regions between three genes: \Gsixt{}, \Gtwen{}, \Gfive{}.
The \Gsixt{}, \Gtwen{}, and \Gfive{} \rrnalong{}s (\rrna{}) are genes that help code for proteins in \ecoli{} bacteria.
\index{\Gsixt{}}
\index{\Gtwen{}}
\index{\Gfive{}}
\index{\rrna{}}
\index{\rrnalong{}}
Any changes to these regions may affect the rate or nature of protein synthesis and thus affect the survivability of the bacteria.
As a result, we consider these regions to remain \textit{conserved} across \ecoli{} strains.
\index{conserved}
Using these segments directly to differentiate between strains would be fruitless, since even wildly different \ecoli{} strains will still have nearly identical copies of these three regions.
Between these three genes are two non-coding, and thus \textit{unconserved}, regions, each called an \itslong{} (\itsshort{}).
\index{unconserved}
\index{\itslong{}}
\index{\itsshort{}}
Since \itsshort{} do not code for functional products, random variations occur in \itsshort{} regions that do not affect the survivability or reproducibility of the bacteria.
Researchers frequently use \itsshort{} for strain delineation, due to their unconserved nature.
Importantly, any offspring of a microbe inherit the \Ssixt{} and \Sfive{} regions, allowing biologists to use them to differentiate strains \cite{SolimanDVMBNWKG12}.
The two \itsshort{} regions that bridge \Ssixtname{} and \Sfivename{} we respectively refer to as \Ssixt{} and \Sfive{}.
\index{\Ssixtname{}}
\index{\Sfivename{}}
\index{\Ssixt{}}
\index{\Sfive{}}
Amplifying the \itsshort{} regions of \dna{} becomes a straightforward and inexpensive process, due to the highly conserved regions flanking each \itsshort{}.
Primers can reliably attach to the \rrna{} immediately next to each \itsshort{}, because of their conserved nature, allowing for \pcrlong{} (\pcr{}) amplification of the \Ssixt{} and \Sfive{} regions.
\index{\pcrlong{}}
\index{\pcr{}}
\index{\pcr{} amplification}
Applying \pcr{} to an \ecoli{} \isol{} requires awareness of the following observation, crucially affecting how we can interpret a fingerprint:
%\begin{principle}[Repeated Loci\index{Repeated Loci Principle}]
\begin{quote}
The \itsshort{} regions of \ecoli{} and the \rrna{} --- referred to collectively as \textit{loci} --- repeat around the \ecoli{} genome seven times.
\index{loci}
\end{quote}
%\end{principle}
\begin{figure}
\centering
\input{figures/regions.tex}
\caption{A diagram of a simplified segment of \ecoli{} DNA, outlining the \Ssixt{} and \Sfive{} \itsshort{} regions, which repeat 7 times around the \ecoli{} genome.}
\label{fig:regionsdiagram}
\end{figure}
\autoref{fig:regionsdiagram} depicts these seven loci and the relative position of the \itsshort{} regions between the \rrna{}.
The primers used to attach to the \rrna{} attach to each of the seven instances, resulting in \pcr{} amplification of all seven copies of \Ssixt{} and \Sfive{}.
What results from \pcr{} is an amplified mixture of these seven unconserved \itsshort{} regions that we can use as a fingerprint for the \isol{}.
The repetition of the \itsshort{} regions makes \pyros{} different from traditional pyrosequencing.
Traditional pyrosequencing allows researchers to figure out the sequence of nucleotides that make up the segment of \dna{}, because they only pyrosequence the \pcr{} amplification of a single segment --- or multiple conserved segments --- at a time.
\Pyro{}ing considers the \pcr{} amplification of more than one unconserved segment of \dna{} at a time, encoding more information with a single \pyro{}.
As a result, \cplop{} researchers cannot use a \pyro{} to figure out the nucleotide sequence of the \pyro{}ed \itsshort{} region.
%%%%% ISOLATES %%%%%
\subsection{Obtaining \& Comparing \ecolilong{} \Isols{}}\label{sec:background:isolates}
\cp{} students collect \ecolilong{} (\ecoli{}) \isols{} from the fecal samples of a variety of different sources and compare them for a multitude of different studies.
Culturing and \ecoli{} extraction occurs in an introductory cell and molecular biology class.
Comparing two \isols{} requires separate consideration of each \itsshort{} region.
The data stored in \cplop{} are obtained as follows.
Biologists collect fecal samples from a host-subject of a known species.
They extract \ecoli{} and culture individual bacterial cells from the bacterial material contained in each sample.
A bacterial \textit{\isol{}} is an individual culture grown from a fecal sample, as shown in \autoref{fig:isolate:petri}.
\index{\isol{}}
Each \isol{} undergoes \pcr{} to amplify the \dna{} in the two \itsshort{} regions of \dna{}, after which the pyrosequencing of each region produces \pyros{} that are stored in the \cplop{} database.
\begin{figure}
\centering
\begin{tikzpicture}
%[remember picture,overlay, shift={(current page.center)}]
\node (petri) at (0,0)
{\includegraphics[width=5cm]{figures/petri}};
\node[above right of=petri] (isolate_text) [xshift=3cm, yshift=2cm]
{\Isol{}};
\path[->] (isolate_text) edge [bend left, line width=1pt] (.60,-0.04) ;
\end{tikzpicture}
\caption{In order to obtain an \isol{}, researchers streak fecal matter onto this dish and make dots from the streaks that they then culture.}
\label{fig:isolate:petri}
\end{figure}
Sources of \isols{} in \cplop{} are often animal species, but include many \isols{} cultured from environmental sources, like creeks and the ocean.
A large portion of \cplop{} consists of \isols{} derived from Cow and Human sources.
The disproportionate number of Cows in \cplop{} is due to a study investigating the strain demographics and transmission in cattle \cite{dillard2015demographics, dillard2013coli}.
Every year, \cp{} houses cattle from around the state starting in May for testing and vaccination before they leave for auction in September.
\cp{} researchers obtained fecal samples from every cow as they arrived and when they departed, comparing the \isols{} for similarity before and after cohabitation.
\Isols{} derived from Humans make up a large proportion of \cplop{} because \cp{} students investigated \ecoli{} strain characteristics in a variety of studies \cite{montana2012investigating, neal2012demographics, neal2013escherichia}.
Such disproportionate representation of \spec{} in library-based \mst{} is a common problem and \autoref{sec:results:krap} discusses the effect with respect to \cplop{}.
The nature of separately \pyroing{} the \Ssixt{} and \Sfive{} regions of an \isol{} requires adherence to the following principle:
\begin{principle}[Comparing \Isols{}\index{Principle of Comparing \Isols{}}]\label{principle:comparing-isolates}
The only valid comparison between \isols{} using a comparison metric \pcfunclabel{} is a separate comparison of the \Ssixt{} \pyros{} using \pcfunclabel{} and \Sfive{} \pyros{} using \pcfunclabel{}:
Given two isolates \isola{} and \isolb{}, where
\isola{} has \pyros{} \isolasixt{} and \isolafive{}
and
\isolb{} has \pyros{} \isolbsixt{} and \isolbfive{},
\pcisolsixt{}
and
\pcisolfive{}
are the only valid comparisons between the two \isols{}.
\end{principle}
\index{comparing \isols{}}
\index{\pearson{}}
\index{\pcfunclabel{}}
\noindent
In other words, in order to compare two \isols{}, one must consider the \pyros{} of each \itsshort{} region separately, because these \pyros{} represent different regions of \dna{}.
\autoref{fig:isolcompare} depicts the comparison process.
Comparing an \Ssixt{} \pyro{} to an \Sfive{} \pyro{} with \pcfunclabel{} is completely meaningless, since the two \pyros{} represent different segments of \dna{} in the \ecoli{}.
%%%%% PEARSON %%%%%
\subsection{\Pearson{}}
Strain delineation underlies the goals of \cplop{} --- the ability to encode the concept of strains of the \fib{} \ecoli{} and distinguish between different ones is fundamental to library-based \mst{}.
Towards that end, \cplop{} researchers need some way to compare \isols{} using the \pyros{} of their \itsshort{} regions that effectively distinguishes between different strains.
Picking the right comparison function to compare the \pyros{} of two \isols{} is crucial and we found that the \Pearson{} is an effective metric.
%%%%% DEFINITION
Given two \numdims{}-dimensional vectors, $\pcveca{}=\fullvec{\pca{}}{\numdims}$ and $\pcvecb{}=\fullvec{\pcb{}}{\numdims}$, the \pearson{}
%(\pcshort{})
\pcfunclabel{} is:
\begin{equation}\label{eq:pearson}
\pcfunc{\pca{}}{\pcb{}}
=
\pceric{\pca{}}{\pcb{}}
=
\frac{\veccov{\pca{}}{\pcb{}}}{\vecstddev{\pca{}}\cdot\vecstddev{\pcb}}
\end{equation}
where $\mu_{\pcveca{}}$, $\mu_{\pcvecb{}}$ are the means of $\pca{}_i$'s and $\pcb{}_i$'s respectively,
$\vecstddev{\pca{}}$, $\vecstddev{\pcb{}}$ are their standard deviations,
and \veccov{\pca{}}{\pcb{}} is the covariance between the two vectors.
\index{\pearson{}}
\index{\pcfunclabel{}}
\index{\pcshort{}}
The \pearson{} encodes a notion of similarity:
as the final portion of \autoref{eq:pearson} shows, \pcshort{} calculates the covariance of the two vectors, normalizing the value by the standard deviation of both.
The \textit{covariance} of two vectors measures the joint variability of the vectors, i.e. how much one vector varies with respect to another.
Given two \numdims{}-dimensional vectors, $\pcveca{}=\fullvec{\pca{}}{\numdims}$ and $\pcvecb{}=\fullvec{\pcb{}}{\numdims}$:
\begin{equation}
\veccov{\pca{}}{\pcb{}} = \covfull{\pca{}}{\pcb{}}
\end{equation}
\index{covariance}
Positive covariance between two vectors means the two behave similarly, negative means they behave in the opposite manner, and zero means they behave independently of one another.
The \textit{standard deviation} measures the amount of deviation a set of values has from its average.
Given a \numdims{}-dimensional vector $\pcveca{}=\fullvec{\pca{}}{\numdims}$, its standard deviation is:
\begin{equation}
\vecstddev{\pca{}} = \stddevfull{\pca{}}
\end{equation}
\index{standard deviation}
One can also define the standard deviation as the square root of the covariance:
\begin{equation}
\vecstddev{\pca{}} = \sqrt{\veccov{\pca{}}{\pca{}}}
\end{equation}
Using the \pearson{} as a measure of similarity is straightforward, due to it being a combination of the covariance and the standard deviation.
Since one can define the covariance of a vector with itself in terms of the standard deviation:
\begin{equation}
\veccov{\pca{}}{\pca{}} = \vecstddev{\pca{}}^2
\end{equation}
it is clear that given two vectors, \pcveca{} and $\pcveca{}\,'$, where $\pcveca{} = \pcvecaa{}$:
\begin{equation}
\pcfunclabel({\pcveca{}},{\pcvecaa{}})
= \frac{\cov{\pcveca{}}{\pcvecaa{}}}{\stddev{\pcveca{}}\stddev{\pcvecaa{}}}
= \frac{\cov{\pcveca{}}{\pcveca{}}}{\stddev{\pcveca{}}\stddev{\pcveca{}}}
= \frac{\stddev{\pcveca{}}^2}{\stddev{\pcveca{}}^2}
= 1
\end{equation}
Due to the normalizing effect of the $\sigma{}$'s, it is impossible for $\pcfunclabel{} > 1$.
Conversely, two very dissimilar vectors will have a \pcfunclabel{} substantially less than 1.
Specifically, \pearson{} measures a linear correlation between vectors, so two vectors with a $\pcfunclabel{} = 0$ means that the two vectors have no linear relationship\footnote{For completeness, if $\pcfunclabel{} \approx -1$, then there is an \textit{inverse correlation} between the two vectors. It is easy to see if
given $\pcveca{} = \fullvec{\pca{}}{\numdims{}}$
and $\pcvecaa{}=\fullvec{\pca{}'}{\numdims{}} = \fullvec{-\pca{}}{\numdims{}}$,
then $\cov{\pcveca{}}{\pcvecaa{}} = -\veccov{\pca{}}{\pca{}}\Rightarrow\pcfunclabel{(\pcveca{}, \pcvecaa{}) = -\pcfunc{\pca{}}{\pca{}}} = -1$. By similar reasoning, $-1 \leq \pcfunclabel{} \leq 1$
}.
Work done in \cite{Shealy:SeniorProject} determined that multiple \pyros{} of the same \isol{} obtain a $\pcfunclabel{} > 0.995$ and \cplop{} researchers use this value for quality control \cite{Black2014121, kent2014pyroprinting}.
It is from this measure of similarity \pcfunclabel{} that we define a comparison metric between \pyros{}.
The nature of this function works perfectly for what \pyros{} encode.
The values in a \pyro{} represent peak light intensity values from chemical reactions, the intensity of which is proportional to the nucleotide content of the \dna{} \pyro{}ed.
Peak intensity differences and noise from the machine are accounted for by \pearson{}, since it measures how the \pyro{} vector values change with respect to each other and machine variations will be similar between \pyro{}ings.
\begin{figure}
\centering
\input{figures/comparing}
\caption{Comparing \isols{} involves comparing the \pyros{} of each \isol{} using \pcfunclabel{}, the \pearson{}, with the stipulation that one can only compare \pyros{} from the same \itsshort{} in their respective \isol{}. The green bar plots represent a \pyro{} of \Ssixt{}, while gold bar plots represent a \pyro{} of \Sfive{}.}
\label{fig:isolcompare}
\end{figure}
Defining strains using the \pearson{} requires a similarity threshold, above which we may consider two \isols{} to be part of the same strain.
\index{defining strains}
Two \isols{} are considered to be of the same strain if the \pyros{} of both regions have a $\pcfunclabel{} > \a{}$, where $\a{} = 0.990$ \cite{Shealy:SeniorProject, Black2014121}.
\index{\a{}}
Work done in \cite{Shealy:SeniorProject} determined this \a{} threshold by simulating the \pyro{}ing process with tools from \cite{montana2013algorithms} on known \ecoli{} strains from the \ncbilong{} database.
Simulations performed in \cite{DBLP:conf/bibm/BrandtMSBGK12} further confirmed the usefulness of this \a{} value.
%%%%% DATABASE %%%%%
\subsection{Database}
There are three components that comprise \cplop{}: the physical cold storage of fecal samples and \isols{}, the back end data store, and the front end web interface.
Cold storage allows \cplop{} researchers to perform follow-up work on archived physical samples.
The back end data store holds the \pyros{} and metadata about their \spec{} and collection.
The web interface, shown in \autoref{fig:front-page}, allows researchers to perform queries and test whether \isols{} match.
\begin{figure}
\centering
\includegraphics[width=\frontendwidth]{figures/frontend/front-page}
\caption{Researchers use \cplop{} through a web-accessible frontend.}
\label{fig:front-page}
\end{figure}
Cold storage holds the collected fecal samples and the \isols{} cultured from them.
It allows \cplop{} researchers to culture additional \isols{} and re-\pyro{} existing \isols{}.
Often, researchers refer to the cold storage as the ``library'' in library-based \mst{}.
The data store for \cplop{} \pyros{} is a \mysql{} database.
It stores metadata for each collected sample, including who collected it and where and when they collected it.
Importantly, the name of the \spec{} and a unique designation for the \host{} marks the sample that the \pyros{} of an \isol{} came from.
For computationally intense procedures, researchers export the data and run it on a different machine.
The web front end, written in \php{}, allows \cplop{} to access the information in the database from the Internet to: perform queries for \isols{} and \pyros{}, browse \isol{} and \pyro{} datasets, and perform forensic matching.
The \isols{} in \cplop{} are visible from this interface (see \autoref{fig:browse}) as are the \pyros{}.
\input{figures/frontend/browse}
\Isols{} may have multiple \pyros{}, which the \cplop{} front end allows researchers to browse, which \autoref{fig:browse-isolate-pyroprints} shows.
\input{figures/frontend/browse-isolate-pyroprints}
Certain \isols{} come from particular collection runs, be they certain studies or classroom examples, and appear collectively as datasets on the website.
\input{figures/frontend/browse-isolate-datasets}
\cplop{} also provides the ability to view the \pyro{} histogram, as \autoref{fig:pyroprint-histogram} shows.
\input{figures/frontend/pyroprint-histogram}
Forensic matching (\autoref{fig:forensic}) is a crucial feature of \cplop{}, allowing researchers to query a dataset against the \cplop{} database to find matching \isols{}.
\input{figures/frontend/forensic}
\cp{} servers host the \cplop{} website at \url{http://cplop.cosam.calpoly.edu/}.
The servers are limited in computational ability, only containing \gb{4} of \ram{}.
Such limitations make it difficult to implement algorithms like \ohclust{} \cite{montana2013algorithms, montana2013ontological, montana2012investigating}, which require more than \gb{4} of \ram{} for efficient computation, or \cite{adams2016using}, which requires access to a cluster of computers for \mapreduce{} ability.
Future work will assess the feasibility of moving over to more dynamic systems, like \awslong{}.
\subsection{\cplop{} Makeup}
\autoref{fig:species} shows the distribution of \cplop{} \isols{} considered in this study among its 53 different \spec{}.
\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{figures/bs/species_hist.pdf}
\caption{
A histogram of the number of \isols{} of each species in our study, taken from \cplop{}.
There are 4,610 total \isols{} from 53 different \specs{}.}
\label{fig:species}
\end{figure}
\begin{longtable}{|c|c|}
\caption{The number of \isols{} per \spec{}.}
\endfirsthead
\hline
\bf \Spec{} & \bf Number of \Isols{} \\ \hline
\endhead
Cow & 1749 \\ \hline
Human & 1643 \\ \hline
Ground Squirrel & 196 \\ \hline
Pigeon & 196 \\ \hline
Dog & 179 \\ \hline
Sheep & 94 \\ \hline
Wild Turkey & 72 \\ \hline
Pig & 66 \\ \hline
Horse & 52 \\ \hline
Cat & 46 \\ \hline
Chicken & 44 \\ \hline
Bat & 37 \\ \hline
Mountain Lion & 32 \\ \hline
Cliff Sparrow & 28 \\ \hline
Deer & 20 \\ \hline
White Crowned Sparrow & 15 \\ \hline
Opossum & 12 \\ \hline
Seagull & 11 \\ \hline
Sea Otter & 10 \\ \hline
Pelican & 8 \\ \hline
Bear & 6 \\ \hline
Owl & 6 \\ \hline
California Sea Lion & 6 \\ \hline
Red Tailed Hawk & 5 \\ \hline
Grey Fox & 4 \\ \hline
Coyote & 4 \\ \hline
Red-shoulder Hawk & 4 \\ \hline
Rabbit & 4 \\ \hline
Elephant Seal & 4 \\ \hline
Bobcat & 4 \\ \hline
Common Loon & 4 \\ \hline
Great Horned Owl & 4 \\ \hline
Racoon & 4 \\ \hline
Golden Eagle & 3 \\ \hline
American Kestrel & 3 \\ \hline
Mallard Duck & 3 \\ \hline
Deer Mouse & 2 \\ \hline
Tree Swallow & 2 \\ \hline
Red Wind Blackbird & 2 \\ \hline
Eared Grebe & 2 \\ \hline
Crow & 2 \\ \hline
Clark Grebe & 2 \\ \hline
Red Shoulder Hawk & 2 \\ \hline
Surf Scoter & 2 \\ \hline
Red Throated Loon & 2 \\ \hline
Western Kingbird & 2 \\ \hline
Turkey Vulture & 2 \\ \hline
Red-Winged Blackbird & 2 \\ \hline
Sea Lion & 2 \\ \hline
Guinni & 2 \\ \hline
Common Murre & 2 \\ \hline
Cougar & 1 \\ \hline
Orangutan & 1 \\ \hline
\end{longtable}
There are a total of 4,610 \isols{} in our dataset\footnote{A simplified version of \cplop{} containing \isol{} IDs, \spec{}, and \zscore{}s can be found at \texttt{https://github.com/jmcgover/cplop-acm-bcb-2016}.}.
As seen from Figure \ref{fig:species}, the organic growth of \cplop{} yielded disproportionately many \ecoli{} isolates originating
from humans and cows (however, as shall be seen below, these isolates belong to a large number of strains). Each \isol{} is represented in \cplop{} with two \pyros{} --- one each for \its{1} and \its{2}
region.
We ignore species with fewer than 4 instances, since their representation in \cplop{} means they are reasonably unlikely to ever dominate a \knnlong{} list or cluster.
|
{"hexsha": "fd2335f0312c779c2914dc9d2f118109237d42e8", "size": 22319, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/background/cplop.tex", "max_stars_repo_name": "jmcgover/thesis", "max_stars_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/background/cplop.tex", "max_issues_repo_name": "jmcgover/thesis", "max_issues_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/background/cplop.tex", "max_forks_repo_name": "jmcgover/thesis", "max_forks_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.9512893983, "max_line_length": 336, "alphanum_fraction": 0.7373538241, "num_tokens": 6296}
|
[STATEMENT]
lemma compE1_eq_Call [simp]:
"compE1 Vs e = obj\<bullet>M(params) \<longleftrightarrow> (\<exists>obj' params'. e = obj'\<bullet>M(params') \<and> compE1 Vs obj' = obj \<and> compEs1 Vs params' = params)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (compE1 Vs e = obj\<bullet>M(params)) = (\<exists>obj' params'. e = obj'\<bullet>M(params') \<and> compE1 Vs obj' = obj \<and> compEs1 Vs params' = params)
[PROOF STEP]
by(cases e, auto)
|
{"llama_tokens": 182, "file": "JinjaThreads_Compiler_Compiler1", "length": 1}
|
import pandas as pd
import numpy as np
from tqdm import trange
from time import sleep
import glob
import os
import matplotlib.pyplot as plt
# Import module to get a current time and date used to name the files containing normalization information
from datetime import datetime
import csv
try:
    # Use gitpython to get a current revision number and use it in description of experimental data
    from git import Repo
except:
    # gitpython is optional; without it the revision number is simply unavailable.
    pass
try:
    from SI_Toolkit_ApplicationSpecificFiles.user_defined_normalization_correction import apply_user_defined_normalization_correction
except:
    # The application-specific package is created per project; warn instead of failing hard.
    print('SI_Toolkit_ApplicationSpecificFiles not created yet')
import yaml, os
# Paths are assembled from the application-specific YAML config.
config = yaml.load(open(os.path.join('SI_Toolkit_ApplicationSpecificFiles', 'config.yml'), 'r'), Loader=yaml.FullLoader)
# Folder where normalization-information files are written/read.
PATH_TO_NORMALIZATION_INFO = config["paths"]["PATH_TO_EXPERIMENT_RECORDINGS"] + config["paths"]["path_to_experiment"] + 'NormalizationInfo/'
# Number of decimal places used when rounding normalization statistics.
normalization_rounding_decimals = 5
def get_paths_to_datafiles(paths_to_data_information):
    """Resolve *paths_to_data_information* into a sorted list of csv datafile paths.

    There are three exclusive ways to specify the data:

    1. path to a normalization-information csv file --
       the datafiles listed in its comment header are returned;
    2. path to a folder --
       all csv files directly inside it are returned
       (nested folders are not searched);
    3. list of paths -- csv entries are taken as-is,
       other entries are treated as folders and expanded as in 2.

    Args:
        paths_to_data_information: str (csv file or folder path) or a list of such paths.

    Returns:
        Sorted list of paths to csv datafiles.

    Raises:
        TypeError: if the argument is neither a str nor a list.
    """
    list_of_paths_to_datafiles = []

    def list_of_paths_from_norminfo(norminfo_path):
        # The normalization file lists its source datafiles in a comment block;
        # collect the paths between the marker line and the next bare '#' line.
        # NOTE: lines read from the file keep their trailing '\n', so strip it
        # before comparing or storing — otherwise nothing ever matches.
        with open(norminfo_path, 'r') as cmt_file:  # open file
            reached_path_list = False
            for line in cmt_file:  # read each line
                stripped = line.rstrip('\n')
                if reached_path_list:
                    if stripped == '#':  # empty comment line means we reached end of path list
                        break
                    list_of_paths_to_datafiles.append(stripped[len('# '):])  # remove leading '# '
                # After this line paths are listed
                if stripped == '# Data files used to calculate normalization information:':
                    reached_path_list = True

    if isinstance(paths_to_data_information, list):
        for path in paths_to_data_information:
            if path[-4:] == '.csv':
                list_of_paths_to_datafiles.append(path)
            else:
                # Assume that a path to a folder was provided
                list_of_paths_to_datafiles += glob.glob(path + '*.csv')
    elif isinstance(paths_to_data_information, str):
        if paths_to_data_information[-4:] == '.csv':
            # Get list of paths from normalization information
            list_of_paths_from_norminfo(paths_to_data_information)
        else:
            # Assume that a path to a folder was provided
            list_of_paths_to_datafiles = glob.glob(paths_to_data_information + '*.csv')
    else:
        raise TypeError('Unsupported type of input argument to get_paths_to_datafiles')
    return sorted(list_of_paths_to_datafiles)
def load_data(list_of_paths_to_datafiles=None):
    """Read every csv file in *list_of_paths_to_datafiles* into a DataFrame.

    Numeric columns are downcast to float32; frames are returned separately
    (e.g. so normalization statistics can be derived from them afterwards).

    Args:
        list_of_paths_to_datafiles: list of paths to csv files.

    Returns:
        List of pandas DataFrames, one per input file, in input order.
    """
    print('Loading data files:')
    sleep(0.1)  # let the print flush before the progress bar starts drawing
    all_dfs = []  # frames kept separately to get normalization
    for idx in trange(len(list_of_paths_to_datafiles)):
        # FIXME: quick fix — non-numeric columns (e.g. the CartPole
        # 'measurement' column) are left untouched via errors='ignore'.
        frame = pd.read_csv(list_of_paths_to_datafiles[idx], comment='#')
        all_dfs.append(frame)
        # Change to float32 wherever the column is numeric.
        frame[frame.columns] = frame[frame.columns].apply(
            pd.to_numeric, errors='ignore', downcast='float')
    return all_dfs
# This function returns the saving interval of datafile
# Used to ensure that datafiles used for training save data with the same frequency
def get_sampling_interval_from_datafile(path_to_datafile):
    """Extract the saving ('# Saving: ') interval from a datafile's comment header.

    Returns the interval as a float, or None when no such line is present.
    """
    marker = '# Saving: '
    with open(path_to_datafile, 'r') as cmt_file:  # open file
        for line in cmt_file:  # read each line
            if line.startswith(marker):
                # Drop the marker prefix and the last two characters
                # (unit suffix / newline) before parsing the number.
                return float(line[len(marker):-2])
    return None
# This function returns the saving interval of datafile
# Used to ensure that datafiles used for training save data with the same frequency
def get_sampling_interval_from_normalization_info(path_to_normalization_info):
    """Extract the sampling interval recorded in a normalization-information file.

    Returns:
        The sampling interval as a float;
        None if the file records 'Not constant!' (normalization computed from
        data with varying sampling frequency) or if no sampling-interval line
        is present at all.
    """
    preceding_text = '# Sampling interval of data used to calculate normalization: '
    with open(path_to_normalization_info, 'r') as cmt_file:  # open file
        for line in cmt_file:  # read each line
            if line.startswith(preceding_text):
                # Strip the trailing newline: without this the
                # 'Not constant!' comparison can never succeed and the
                # float(...) below raises ValueError on that input.
                dt_information = line[len(preceding_text):].rstrip('\n')
                if dt_information == 'Not constant!':
                    print('The normalization information was calculated with data with varying sampling frequency.')
                    return None
                # Drop the trailing ' s' unit suffix before parsing.
                return float(dt_information[:-2])
    return None
def calculate_normalization_info(paths_to_data_information=None, plot_histograms=True, user_correction=True, path_to_norm_info=None):
    """
    This function creates csv file with information about dataset statistics which may be used for normalization.
    The statistics are calculated from provided datafiles
    BUT may include user corrections to account for prior knowledge about distribution (e.g. 0 mean)

    Args:
        paths_to_data_information: file/folder spec understood by get_paths_to_datafiles.
        plot_histograms: if True, show one histogram per feature as a sanity check.
        user_correction: if True, run apply_user_defined_normalization_correction.
        path_to_norm_info: target folder for the csv; defaults to PATH_TO_NORMALIZATION_INFO.

    Returns:
        (df_norm_info, csv_filepath): corrected+rounded statistics and the written file path.
    """
    list_of_paths_to_datafiles = get_paths_to_datafiles(paths_to_data_information)

    # print paths to datafiles
    print('')
    print('# Datafiles used to calculate normalization:')
    for path in list_of_paths_to_datafiles:
        print(' - ' + path)

    # region Check if all datafiles have the same sampling interval
    dts_save = []
    dt_save = None
    for path in list_of_paths_to_datafiles:
        dt_save = get_sampling_interval_from_datafile(path)
        dts_save.append(dt_save)
    # BUG FIX: np.array() was applied to dt_save (the LAST interval only), so
    # the constancy check below compared a scalar with itself and could never
    # detect mixed sampling intervals. Wrap the whole collected list instead.
    dts_save = np.array(dts_save)
    tol = 1.0e-6
    sampling_interval_str = '# Sampling interval of data used to calculate normalization: '
    try:
        if np.all(abs(dts_save - dt_save) < tol):
            sampling_interval_str += '{} s'.format(dt_save)
        else:
            sampling_interval_str += 'Not constant!'
    except TypeError:
        # At least one file carried no '# Saving: ' header (interval is None).
        print('Save interval unknown.')
    # endregion

    # region Load data
    df = load_data(list_of_paths_to_datafiles=list_of_paths_to_datafiles)
    # endregion

    # region Concatenate all data frames into one
    if type(df) is list:
        df_total = pd.concat(df)
    else:
        df_total = df
    del df
    # endregion

    # region Exclude time from normalization
    if 'time' in df_total.columns:
        df_total.drop('time', axis='columns', inplace=True)
    # endregion

    # region Calculate normalization values from data
    df_mean = df_total.mean(axis=0)
    df_std = df_total.std(axis=0)
    df_max = df_total.max(axis=0)
    df_min = df_total.min(axis=0)
    frame = {'mean': df_mean, 'std': df_std, 'max': df_max, 'min': df_min}
    df_norm_info = pd.DataFrame(frame).transpose()
    # endregion

    # region User correction to calculated normalization values
    # This way user can impose prior knowledge of the distribution and
    # e.g. impose 0 mean even if data used for normalization does not show it.
    # Keep a copy of the statistics as calculated, to record user changes.
    df_norm_info_from_data = df_norm_info.copy()
    # BUG FIX: 'modified' was only assigned inside the user_correction branch,
    # so writing the csv below raised NameError whenever user_correction=False.
    modified = 'No'
    if user_correction:
        df_norm_info = apply_user_defined_normalization_correction(df_norm_info)
        # print to file also original dataframe, so that anybody can check changes done by user
        if not df_norm_info.equals(df_norm_info_from_data):
            modified = 'Yes'
    df_norm_info = df_norm_info.round(normalization_rounding_decimals)
    # endregion

    # region Transform original dataframe to comment by adding "comment column" and "space columns"
    df_norm_info_from_data = df_norm_info_from_data.reindex(sorted(df_norm_info_from_data.columns), axis=1)
    df_norm_info_from_data = df_norm_info_from_data.round(normalization_rounding_decimals)
    df_index = df_norm_info_from_data.index
    df_norm_info_from_data.insert(0, " ", df_index, True)
    df_norm_info_from_data.insert(0, "#", 4 * ['#'], True)
    for i in range(len(df_norm_info_from_data.columns)):
        df_norm_info_from_data.insert(2 * i + 1, ' ', 4 * [' '], True)
    # endregion

    # region Make folder to save normalization info (if not yet existing)
    # NOTE(review): the folder is created from the module-level default even
    # when path_to_norm_info is provided -- presumably callers pass existing
    # folders; TODO confirm.
    try:
        os.makedirs(PATH_TO_NORMALIZATION_INFO[:-1])
    except FileExistsError:
        pass
    # endregion

    # region Write the .csv file
    date_now = datetime.now().strftime('%Y-%m-%d')
    time_now = datetime.now().strftime('%H-%M-%S')
    if path_to_norm_info is None:
        csv_filepath = PATH_TO_NORMALIZATION_INFO + 'NI_' + date_now + '_' + time_now + '.csv'
    else:
        csv_filepath = path_to_norm_info + 'NI_' + date_now + '_' + time_now + '.csv'
    with open(csv_filepath, "a") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(['# ' + 'This is normalization information calculated {} at time {}'
                        .format(date_now, time_now)])
        try:
            repo = Repo()
            git_revision = repo.head.object.hexsha
        except Exception:
            # Not inside a git repository (or git metadata unavailable).
            git_revision = 'unknown'
        writer.writerow(['# ' + 'Done with git-revision: {}'
                        .format(git_revision)])
        writer.writerow(['#'])
        writer.writerow([sampling_interval_str])
        writer.writerow(['#'])
        writer.writerow(['# Data files used to calculate normalization information:'])
        for path in list_of_paths_to_datafiles:
            writer.writerow(['# {}'.format(path)])
        writer.writerow(['#'])
        writer.writerow(['# Original (calculated from data) Normalization Information:'])
    df_norm_info_from_data.to_csv(csv_filepath, index=False, header=True, mode='a')  # Mode (a)ppend
    with open(csv_filepath, "a") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(['#'])
        writer.writerow(['# Does user modified normalization info calculated from data?: {}'.format(modified)])
        writer.writerow(['#'])
        writer.writerow(['# Normalization Information:'])
    df_norm_info = df_norm_info.reindex(sorted(df_norm_info.columns), axis=1)
    df_norm_info.to_csv(csv_filepath, index=True, header=True, mode='a')  # Mode (a)ppend
    # endregion

    # region Plot histograms of data used for normalization
    if plot_histograms:
        # Plot histograms to make the first check about gaussian assumption
        for feature in df_total.columns:
            plt.hist(df_total[feature].to_numpy(), 50, density=True, facecolor='g', alpha=0.75)
            plt.title(feature)
            plt.show()
    # endregion

    return df_norm_info, csv_filepath
def load_normalization_info(path_to_normalization_info):
    """Read a normalization-info csv (as written by calculate_normalization_info).

    Lines starting with '#' are treated as comments; the first column
    ('mean'/'std'/'max'/'min') becomes the index.
    """
    normalization_info = pd.read_csv(path_to_normalization_info,
                                     index_col=0, comment='#')
    return normalization_info
def normalize_feature(feature, normalization_info, normalization_type='minmax_sym', name=None):
    """Normalize one feature (e.g. a DataFrame column).

    The feature's own ``name`` attribute, when present, overrides the
    ``name`` argument. Features without an entry in ``normalization_info``
    are returned untouched. For a zero-spread column the scalar 0 is
    returned, mirroring the original behaviour.
    """
    if hasattr(feature, 'name'):
        name = feature.name
    if name not in normalization_info.columns:
        # Unknown feature: pass through unchanged.
        return feature
    if normalization_type == 'gaussian':
        col_mean = normalization_info.loc['mean', name]
        col_std = normalization_info.loc['std', name]
        return 0 if col_std == 0 else (feature - col_mean) / col_std
    if normalization_type == 'minmax_pos':
        col_min = normalization_info.loc['min', name]
        col_max = normalization_info.loc['max', name]
        spread = col_max - col_min
        return 0 if spread == 0 else (feature - col_min) / spread
    if normalization_type == 'minmax_sym':
        col_min = normalization_info.loc['min', name]
        col_max = normalization_info.loc['max', name]
        spread = col_max - col_min
        return 0 if spread == 0 else -1.0 + 2.0 * (feature - col_min) / spread
def normalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
    """Normalize a DataFrame, or a list of DataFrames, column by column.

    Lists are updated in place (and also returned); a single frame is
    returned as a new normalized frame.
    """
    def _normalize_one(frame):
        return frame.apply(normalize_feature, axis=0,
                           normalization_info=normalization_info,
                           normalization_type=normalization_type)

    if type(dfs) is list:
        for position in range(len(dfs)):
            dfs[position] = _normalize_one(dfs[position])
        return dfs
    return _normalize_one(dfs)
def denormalize_feature(feature, normalization_info, normalization_type='minmax_sym', name=None):
    """Invert normalize_feature for a single feature.

    The feature's own ``name`` attribute, when present, overrides ``name``.
    Unlike normalize_feature there is no pass-through for unknown names:
    a missing column raises KeyError, exactly as before.
    """
    if hasattr(feature, 'name'):
        name = feature.name
    if normalization_type == 'gaussian':
        col_mean = normalization_info.loc['mean', name]
        col_std = normalization_info.loc['std', name]
        return feature * col_std + col_mean
    if normalization_type == 'minmax_pos':
        col_min = normalization_info.loc['min', name]
        col_max = normalization_info.loc['max', name]
        return feature * (col_max - col_min) + col_min
    if normalization_type == 'minmax_sym':
        col_min = normalization_info.loc['min', name]
        col_max = normalization_info.loc['max', name]
        return (feature + 1.0) / 2.0 * (col_max - col_min) + col_min
def denormalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
    """Denormalize a DataFrame, or a list of DataFrames, column by column.

    Lists are updated in place (and also returned); a single frame is
    returned as a new denormalized frame.
    """
    def _denormalize_one(frame):
        return frame.apply(denormalize_feature, axis=0,
                           normalization_info=normalization_info,
                           normalization_type=normalization_type)

    if type(dfs) is list:
        for position in range(len(dfs)):
            dfs[position] = _denormalize_one(dfs[position])
        return dfs
    return _denormalize_one(dfs)
def denormalize_numpy_array(normalized_array,
                            features,
                            normalization_info,
                            normalization_type='minmax_sym'):
    """Map a normalized array back to physical units, feature by feature.

    The last axis of ``normalized_array`` must enumerate ``features`` in
    order; statistics are looked up per feature in ``normalization_info``.
    Returns a new array with the same shape and dtype as the input.
    """
    denormalized_array = np.zeros_like(normalized_array)
    for idx, feature in enumerate(features):
        column = normalized_array[..., idx]
        if normalization_type == 'gaussian':
            denormalized_array[..., idx] = (
                column * normalization_info.at['std', feature]
                + normalization_info.at['mean', feature]
            )
        elif normalization_type == 'minmax_pos':
            lo = normalization_info.at['min', feature]
            hi = normalization_info.at['max', feature]
            denormalized_array[..., idx] = column * (hi - lo) + lo
        elif normalization_type == 'minmax_sym':
            lo = normalization_info.at['min', feature]
            hi = normalization_info.at['max', feature]
            denormalized_array[..., idx] = (column + 1.0) / 2.0 * (hi - lo) + lo
    return denormalized_array
def normalize_numpy_array(denormalized_array,
                          features,
                          normalization_info,
                          normalization_type='minmax_sym',
                          normalized_array=None):  # optional preallocated output buffer
    """Normalize an array feature-wise along its last axis.

    When ``normalized_array`` is given, results are written into it (and it
    is returned); otherwise a fresh zeroed array of the same shape/dtype as
    the input is allocated.
    """
    if normalized_array is None:
        normalized_array = np.zeros_like(denormalized_array)
    for idx, feature in enumerate(features):
        column = denormalized_array[..., idx]
        if normalization_type == 'gaussian':
            normalized_array[..., idx] = (
                (column - normalization_info.at['mean', feature])
                / normalization_info.at['std', feature]
            )
        elif normalization_type == 'minmax_pos':
            lo = normalization_info.at['min', feature]
            hi = normalization_info.at['max', feature]
            normalized_array[..., idx] = (column - lo) / (hi - lo)
        elif normalization_type == 'minmax_sym':
            lo = normalization_info.at['min', feature]
            hi = normalization_info.at['max', feature]
            normalized_array[..., idx] = -1.0 + 2.0 * (column - lo) / (hi - lo)
    return normalized_array
if __name__ == '__main__':
    # Script entry point: recompute normalization info for the training
    # recordings of the currently configured experiment.
    # NOTE(review): `config` must be a module-level mapping loaded elsewhere
    # in this file (outside the visible chunk) -- TODO confirm its origin.
    folder_with_data_to_calculate_norm_info = config["paths"]["PATH_TO_EXPERIMENT_RECORDINGS"] + config["paths"]["path_to_experiment"] + "Recordings/Train/"
    calculate_normalization_info(folder_with_data_to_calculate_norm_info)
|
{"hexsha": "bef2e58abaff75959cd129834ffbca719868b106", "size": 19147, "ext": "py", "lang": "Python", "max_stars_repo_path": "SI_Toolkit/load_and_normalize.py", "max_stars_repo_name": "jhuebotter/CartpoleSNNdemo", "max_stars_repo_head_hexsha": "d18a85cbc45bff48295c46c9cd8c9fc00192318c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SI_Toolkit/load_and_normalize.py", "max_issues_repo_name": "jhuebotter/CartpoleSNNdemo", "max_issues_repo_head_hexsha": "d18a85cbc45bff48295c46c9cd8c9fc00192318c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SI_Toolkit/load_and_normalize.py", "max_forks_repo_name": "jhuebotter/CartpoleSNNdemo", "max_forks_repo_head_hexsha": "d18a85cbc45bff48295c46c9cd8c9fc00192318c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4799154334, "max_line_length": 156, "alphanum_fraction": 0.6370188541, "include": true, "reason": "import numpy", "num_tokens": 4063}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 12:25:18 2019
@author: liujinyang
"""
import zlib
import pandas as pd
import sys
import os
import tarfile
import glob
import multiprocessing as mp
import re
import json
import pickle
import time
import itertools
import shutil
import lzma
import gc
from io import StringIO
import gzip
from collections import defaultdict
from itertools import zip_longest
from itertools import islice
import pickle
import subprocess
import argparse
import numpy as np
from logparser import Drain
from matcher import treematch
def boolean_string(s):
    """Strict argparse-friendly bool parser: accepts exactly 'True'/'False'."""
    if s in ('False', 'True'):
        return s == 'True'
    raise ValueError('Not a valid boolean string')
# Pre-compiled tokenizer: the capturing group makes re.split keep the
# non-alphanumeric separators in the result.
split_regex = re.compile("([^a-zA-Z0-9]+)")

def split_item(astr):
    """Split a string into alternating alphanumeric / separator tokens."""
    return split_regex.split(astr)
def split_list(alist):
    """Tokenize every string in ``alist`` via split_item."""
    # print(alist)
    return [split_item(entry) for entry in alist]
def split_para(seires):
    """Tokenize a Series of parameter lists (each entry a list of strings).

    Note: the parameter name ``seires`` (sic) is kept for interface
    compatibility with existing callers.
    """
    return seires.map(split_list)
def split_normal(dataframe):
    """Tokenize each column of ``dataframe``.

    Returns one list per column, each containing the token-list of every cell.
    """
    tokenized = []
    for column in dataframe.columns:
        tokenized.append(dataframe[column].map(split_item).tolist())
    return tokenized
def baseN(num, b):
    """Render a non-negative integer in base ``b`` (2 <= b <= 64).

    Digits are 0-9, A-Z, a-z, '+', '='. Strings are converted with int()
    first; None yields '' and 0 yields '0'. Recursive, like the original.
    """
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+="
    if isinstance(num, str):
        num = int(num)
    if num is None:
        return ""
    if num == 0:
        return "0"
    # Recurse on the quotient; lstrip drops the "0" produced by the base case.
    return baseN(num // b, b).lstrip("0") + digits[num % b]
class Ziplog():
    """Pack a parsed log DataFrame into per-column files and compress them.

    Compression levels (interpreted in zip_file/__kernel_compress):
      1 -- store whole columns as-is;
      2 -- split out template parameters, no indexing;
      3 -- split out template parameters and replace each distinct value by a
           base-64 index (inverse mapping saved separately unless lossy).
    """
    def __init__(self, outdir, outname, kernel="gz", tmp_dir="", level=3, lossy=False, n_workers=1, compress_single=True):
        self.outdir = outdir  # destination folder of the final archive
        self.outname = outname  # base name of the final archive
        self.kernel = kernel  # "gz", "bz2" or "lzma"
        self.io_time = 0
        self.level = level
        self.lossy = lossy  # at level 3: drop parameter files/mapping (irreversible)
        self.tmp_dir = tmp_dir  # scratch dir for intermediate csv/json files
        self.n_workers = n_workers  # NOTE(review): stored but never read below -- TODO confirm
        self.compress_single =compress_single  # tar each file individually before the final tar
        self.splitting_time = 0  # seconds spent splitting (set by zip_file)
        self.packing_time = 0  # seconds spent compressing (set by zip_file)
    def compress_all(self):
        # Level 1: keep every non-bookkeeping column verbatim, one entry per column.
        self.file_all_column_dict = {}
        ignore_columns = ["LineId", "EventTemplate", "ParameterList", "EventId"]
        focus_columns = [col for col in self.para_df.columns if col not in ignore_columns]
        for column in focus_columns:
            filename = column+"_0"
            self.file_all_column_dict[filename] = self.para_df[column]
        del self.para_df
    def compress_normal(self):
        # Levels 2/3: split each header column (Date, Time, ...) on
        # non-alphanumerics and store every sub-token position separately.
        ignore_columns = ["LineId", "EventTemplate", "ParameterList", "EventId", "Content"]
        focus_columns = [col for col in self.para_df.columns if col not in ignore_columns]
        splited_columns = split_normal(self.para_df[focus_columns])
        self.para_df.drop(focus_columns, axis=1,inplace=True)
        self.file_normal_column_dict = {}
        for idx, colname in enumerate(focus_columns):
            columns_t = list(zip_longest(*splited_columns[idx], fillvalue=""))  # transpose: rows -> token positions
            for sub_idx, col in enumerate(columns_t):
                filename = f"{colname}_{sub_idx}"
                self.file_normal_column_dict[filename] = col
        self.file_normal_column_dict["EventId_0"] = self.para_df["EventId"]
    def __pack_params(self, dataframe):
        '''
        Input: dataframe with two columns [EventId, ParameterList]
        '''
        # Group rows by event id, then transpose twice so each file holds one
        # sub-token position of one parameter slot across all occurrences.
        self.file_para_dict = {}
        for eid in dataframe["EventId"].unique():
            paras = dataframe.loc[dataframe["EventId"]==eid, "ParameterList"]
            paracolumns = list(zip_longest(*paras, fillvalue=""))
            for para_idx, subparas in enumerate(paracolumns):
                subparas_columns = list(zip_longest(*subparas, fillvalue=""))
                for sub_para_idx, sub_subparas in enumerate(subparas_columns):
                    filename = f"{eid}_{para_idx}_{sub_para_idx}"
                    self.file_para_dict[filename] = sub_subparas
    def __build_para_index(self):
        # Level 3: replace every distinct parameter value with a compact
        # base-64 index; keep the inverse mapping for decompression.
        # NOTE(review): a value appearing in several files is re-indexed per
        # file; earlier files keep the stale index, which is then missing
        # from index_para_dict -- looks lossy, verify against decompressor.
        index = 0
        para_index_dict = {}
        for filename, paras in self.file_para_dict.items():
            para_set = set(paras)
            for upara in para_set:
                index += 1
                index_64 = baseN(index, 64)
                para_index_dict[upara] = index_64
            paras_mapped = [para_index_dict[para] for para in paras]
            self.file_para_dict[filename] = paras_mapped
        self.index_para_dict = {v:k for k,v in para_index_dict.items()}
    def compress_content(self):
        # Map event ids to their templates, then split the parameter lists of
        # every event whose template actually contains a "<*>" placeholder.
        self.template_mapping = dict(zip(self.para_df["EventId"],\
                                        self.para_df["EventTemplate"]))
        eids = [eid for eid in self.template_mapping \
                if "<*>" in self.template_mapping[eid]]
        print("{} events to be split.".format(len(eids)))
        # eids = eids[0:1]
        focus_index = self.para_df["EventId"].isin(eids)
        focus_df = self.para_df.loc[focus_index,\
                                    ["EventId","ParameterList"]]
        del self.para_df
        splitted_para = split_para(focus_df["ParameterList"])
        focus_df["ParameterList"] = splitted_para
        self.__pack_params(focus_df)
        if self.level == 3:
            self.__build_para_index()
        gc.collect()
    def __kernel_compress(self):
        '''
        Write the intermediate files and run the compression kernel.

        level1 : only normal
        [self.file_all_column_dict]
        ---
        level2 : parse without index
        [self.file_para_dict, self.file_normal_column_dict]
        ---
        level3: parse and index
        [self.file_para_dict, self.file_normal_column_dict]
        '''
        def output_dict(adict):
            # Dump each column list as a newline-joined .csv file in tmp_dir.
            for filename, content_list in adict.items():
                with open(os.path.join(self.tmp_dir, filename+".csv"), "w") as fw:
                    fw.writelines("\n".join(list(content_list)))
        def files_to_tar(filepaths):
            # Tar (gz/bz2) or lzma-compress every intermediate file individually.
            worker_id = os.getpid()
            print("Worker {} start taring {} files.".format(worker_id, len(filepaths)))
            for idx, filepath in enumerate(filepaths, 1):
                if len(filepaths) > 10 and idx % (len(filepaths)// 10) == 0:
                    print("Worker {}, {}/{}".format(worker_id, idx, len(filepaths)))
                if self.kernel == "gz" or self.kernel == "bz2":
                    tar = tarfile.open(filepath + ".tar.{}".format(self.kernel ),\
                                        "w:{}".format(self.kernel ))
                    tar.add(filepath, arcname=os.path.basename(filepath))
                    tar.close()
                elif self.kernel == "lzma":
                    # Shells out to the lzma binary; -k keeps the input file.
                    os.system('lzma -k {}'.format(filepath))
        ## output begin
        if self.level==3 and not self.lossy:
            with open(os.path.join(self.tmp_dir, "parameter_mapping.json"), "w") as fw:
                json.dump(self.index_para_dict, fw)
        if self.level > 1:
            with open(os.path.join(self.tmp_dir, "template_mapping.json"), "w") as fw:
                json.dump(self.template_mapping, fw)
        if self.level == 1:
            output_dict(self.file_all_column_dict)
        elif self.level == 2 or self.level == 3:
            if not self.lossy:
                output_dict(self.file_para_dict)
            output_dict(self.file_normal_column_dict)
        else:
            raise RuntimeError(f"The level {self.level} is illegal!")
        ## output end
        # self.compress_single
        ## compress begin
        if self.kernel in set(["gz", "bz2"]):
            raw_files = glob.glob(os.path.join(self.tmp_dir, "*.csv"))\
                        + glob.glob(os.path.join(self.tmp_dir, "*.json"))
            tarall = tarfile.open(os.path.join(self.outdir, \
                                "{}.tar.{}".format(self.outname, self.kernel)),\
                                "w:{}".format(self.kernel))
            if self.compress_single:
                # First tar each file on its own, then bundle the per-file tars.
                files_to_tar(raw_files)
                files = glob.glob(os.path.join(self.tmp_dir,\
                                            "*.tar.{}".format(self.kernel)))
            else:
                files = raw_files
            for idx, filepath in enumerate(files, 1):
                tarall.add(filepath, arcname=os.path.basename(filepath))
            tarall.close()
        ## compress end
    def zip_file(self, para_df=None, delete_tmp=True):
        # Entry point: split according to level, then compress. Per-phase
        # timings end up in splitting_time / packing_time.
        # NOTE(review): delete_tmp is accepted but never used -- TODO confirm.
        self.para_df = para_df.fillna("")
        t1 = time.time()
        if self.level == 1:
            self.compress_all()
        elif self.level == 2 or self.level==3:
            self.compress_normal()
            self.compress_content()
        t2 = time.time()
        self.__kernel_compress()
        t3 = time.time()
        self.splitting_time = t2 - t1
        self.packing_time = t3 - t2
def main():
    """CLI entry point: match a raw log against a template file, then pack
    the structured result with Ziplog. All knobs come from argparse flags."""
    args = None
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--file', type=str, default="../../logs/HDFS_2k.log")
        parser.add_argument('--log_format', type=str, default="<Date> <Time> <Pid> <Level> <Component>: <Content>")
        parser.add_argument('--template_file', type=str, default="")
        parser.add_argument('--tmp_dir', type=str, default="../../zip_out/tmp_dir")
        parser.add_argument('--out_dir', type=str, default="../../zip_out/")
        parser.add_argument('--compress_single', type=boolean_string, default=False)
        parser.add_argument('--n_workers', type=int, default=3)
        parser.add_argument('--level', type=int, default=3)
        parser.add_argument('--top_event', type=int, default=2000)
        parser.add_argument('--kernel', type=str, default="gz")
        parser.add_argument('--sample_ratio', type=float, default=0.01)
        parser.add_argument('--lossy', type=boolean_string, default=True)
        args = vars(parser.parse_args())
    except Exception as e:
        # NOTE(review): on a parse failure args remains None and every lookup
        # below raises TypeError; this handler only changes the error message.
        print(e)
        pass
    # Unpack flags into locals.
    filepath = args["file"]
    kernel = args["kernel"]
    log_format = args["log_format"]
    top_event = args["top_event"]  # used only by the commented-out NaiveParser path
    template_file = args["template_file"]
    compress_single = args["compress_single"]
    sample_ratio = args["sample_ratio"]  # used only by the commented-out sampling path
    n_workers = args["n_workers"]
    level = args["level"]
    tmp_dir = args["tmp_dir"]
    out_dir = args["out_dir"]
    lossy = args["lossy"]
    logname = os.path.basename(filepath)
    outname = logname + ".logzip"
    print("Tmp files are in {}".format(tmp_dir))
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # """
    # 0. sampling
    # """
    #
    # line_num = subprocess.check_output("wc -l {}".format(filepath), shell=True)
    # line_num = int(line_num.split()[0])
    # sample_num = 10000
    # sample_file_path = filepath + ".sample"
    # try:
    #     subprocess.check_output("gshuf -n{} {} > {}".format(sample_num, filepath,
    #                                                 sample_file_path), shell=True)
    # except:
    #     subprocess.check_output("shuf -n{} {} > {}".format(sample_num, filepath,
    #                                                sample_file_path), shell=True)
    #
    ##    subprocess.check_output("head -{} {} > {}".format(sample_num, filepath,
    ##                                                sample_file_path), shell=True)
    #
    # """
    # 1. get template file
    # """
    # st = 0.5  # Similarity threshold
    # depth = 4  # Depth of all leaf nodes
    # regex = [
    #         r'blk_(|-)[0-9]+' , # block id
    #         r'(/|)([0-9]+\.){3}[0-9]+(:[0-9]+|)(:|)', # IP
    #         r'(?<=[^A-Za-z0-9])(\-?\+?\d+)(?=[^A-Za-z0-9])|[0-9]+$', # Numbers
    #         ]
    #
    # parse_begin_time = time.time()
    # parser = Drain.LogParser(log_format, outdir=out_dir, depth=depth, st=st, rex=regex)
    # templates = parser.parse(sample_file_path)
    # os.remove(sample_file_path)
    # parse_end_time = time.time()
    # print("Parser cost [{:.3f}s]".format(parse_end_time-parse_begin_time))
    #
    # print(templates)
    # Match the raw log against the provided templates (one template per line).
    matcher_begin_time = time.time()
    with open(template_file) as fr:
        templates = [item.strip() for item in fr.readlines()]
    matcher = treematch.PatternMatch(tmp_dir=tmp_dir, outdir=out_dir, logformat=log_format)
    structured_log = matcher.match(filepath, templates)
    matcher_end_time = time.time()
    print("Matcher cost [{:.3f}s]".format(matcher_end_time-matcher_begin_time))
    # print(structured_log.head())
    # parse_begin_time = time.time()
    # parser = NaiveParser.LogParser(tmp_dira, out_dir,
    #                               log_format,
    #                               top_event=top_event)
    # structured_log = parser.parse(filepath, dump=False)
    # parse_end_time = time.time()
    # structured_log.to_csv(os.path.join(out_dir, "tmp.csv"))
    # Compress the structured log with the configured kernel/level.
    zipper_begin_time = time.time()
    zipper = Ziplog(outdir=out_dir,
                    outname=outname,
                    kernel=kernel,
                    tmp_dir=tmp_dir,
                    level=level,
                    lossy=lossy,
                    compress_single=compress_single,
                    n_workers=n_workers)
    zipper.zip_file(para_df=structured_log)
    zipper_end_time = time.time()
    print("Zipper cost [{:.3f}s]".format(zipper_end_time-zipper_begin_time))
    #
#
if __name__ == "__main__":
    # NaiveParser is referenced only by the commented-out parsing path inside
    # main(); the import is kept for compatibility -- TODO confirm still needed.
    import NaiveParser
    main()
|
{"hexsha": "0c45cf3fb9a9c5ca823cff1e8a22a1325241193d", "size": 13361, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/logzip/zipper_longgest.py", "max_stars_repo_name": "JinYang88/LogZip", "max_stars_repo_head_hexsha": "796bd632623d010989fb7dfa61f51c72ea6b68b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-06T07:31:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-06T07:31:14.000Z", "max_issues_repo_path": "src/logzip/zipper_longgest.py", "max_issues_repo_name": "JinYang88/LogZip", "max_issues_repo_head_hexsha": "796bd632623d010989fb7dfa61f51c72ea6b68b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/logzip/zipper_longgest.py", "max_forks_repo_name": "JinYang88/LogZip", "max_forks_repo_head_hexsha": "796bd632623d010989fb7dfa61f51c72ea6b68b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8203753351, "max_line_length": 122, "alphanum_fraction": 0.5766035476, "include": true, "reason": "import numpy", "num_tokens": 3077}
|
(*  Title:      HOL/Auth/KerberosV.thy
    Author:     Giampaolo Bella, Catania University
*)

section\<open>The Kerberos Protocol, Version V\<close>

theory KerberosV imports Public begin

text\<open>The "u" prefix indicates theorems referring to an updated version of the protocol. The "r" suffix indicates theorems where the confidentiality assumptions are relaxed by the corresponding arguments.\<close>

(* Role assignment: the trusted Server constant plays the Kerberos
   authentication server Kas, and Friend 0 is reserved as the
   ticket-granting server Tgs. *)
abbreviation
  Kas :: agent where
  "Kas == Server"

abbreviation
  Tgs :: agent where
  "Tgs == Friend 0"

axiomatization where
  Tgs_not_bad [iff]: "Tgs \<notin> bad"
  \<comment> \<open>Tgs is secure --- we already know that Kas is secure\<close>

definition
 (* authKeys are those contained in an authTicket *)
  authKeys :: "event list \<Rightarrow> key set" where
  "authKeys evs = {authK. \<exists>A Peer Ta.
      Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Peer, Ta\<rbrace>,
                  Crypt (shrK Peer) \<lbrace>Agent A, Agent Peer, Key authK, Ta\<rbrace>
                \<rbrace> \<in> set evs}"

definition
 (* A is the true creator of X if she has sent X and X never appeared on
    the trace before this event. Recall that traces grow from head. *)
  Issues :: "[agent, agent, msg, event list] \<Rightarrow> bool"
             ("_ Issues _ with _ on _") where
  "A Issues B with X on evs =
     (\<exists>Y. Says A B Y \<in> set evs \<and> X \<in> parts {Y} \<and>
       X \<notin> parts (spies (takeWhile (\<lambda>z. z \<noteq> Says A B Y) (rev evs))))"

(* The four lifetimes are left abstract; the specifications below impose
   only the lower bounds that the proofs need. *)
consts
  (*Duration of the authentication key*)
  authKlife :: nat

  (*Duration of the service key*)
  servKlife :: nat

  (*Duration of an authenticator*)
  authlife :: nat

  (*Upper bound on the time of reaction of a server*)
  replylife :: nat

specification (authKlife)
  authKlife_LB [iff]: "2 \<le> authKlife"
  by blast

specification (servKlife)
  servKlife_LB [iff]: "2 + authKlife \<le> servKlife"
  by blast

specification (authlife)
  authlife_LB [iff]: "Suc 0 \<le> authlife"
  by blast

specification (replylife)
  replylife_LB [iff]: "Suc 0 \<le> replylife"
  by blast

abbreviation
  (*The current time is just the length of the trace!*)
  CT :: "event list \<Rightarrow> nat" where
  "CT == length"

abbreviation
  expiredAK :: "[nat, event list] \<Rightarrow> bool" where
  "expiredAK T evs == authKlife + T < CT evs"

abbreviation
  expiredSK :: "[nat, event list] \<Rightarrow> bool" where
  "expiredSK T evs == servKlife + T < CT evs"

abbreviation
  expiredA :: "[nat, event list] \<Rightarrow> bool" where
  "expiredA T evs == authlife + T < CT evs"

abbreviation
  valid :: "[nat, nat] \<Rightarrow> bool" ("valid _ wrt _") where
  "valid T1 wrt T2 == T1 \<le> replylife + T2"

(*---------------------------------------------------------------------*)

(* Predicate formalising the association between authKeys and servKeys *)
definition AKcryptSK :: "[key, key, event list] \<Rightarrow> bool" where
  "AKcryptSK authK servK evs ==
     \<exists>A B tt.
       Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, tt\<rbrace>,
                   Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, tt\<rbrace> \<rbrace>
         \<in> set evs"
(* Formal protocol model: a trace belongs to kerbV iff it can be built by
   the rules below. Fake gives the spy full synthesis power over observed
   traffic; KV1--KV6 are the three Kerberos V exchanges (authentication,
   authorisation, service); the Oops rules model accidental leakage of
   session keys to the spy once they have expired. *)
inductive_set kerbV :: "event list set"
  where

   Nil:  "[] \<in> kerbV"

 | Fake: "\<lbrakk> evsf \<in> kerbV; X \<in> synth (analz (spies evsf)) \<rbrakk>
          \<Longrightarrow> Says Spy B X # evsf \<in> kerbV"

(*Authentication phase*)
 | KV1:  "\<lbrakk> evs1 \<in> kerbV \<rbrakk>
          \<Longrightarrow> Says A Kas \<lbrace>Agent A, Agent Tgs, Number (CT evs1)\<rbrace> # evs1
          \<in> kerbV"

   (*Unlike version IV, authTicket is not re-encrypted*)
 | KV2:  "\<lbrakk> evs2 \<in> kerbV; Key authK \<notin> used evs2; authK \<in> symKeys;
            Says A' Kas \<lbrace>Agent A, Agent Tgs, Number T1\<rbrace> \<in> set evs2 \<rbrakk>
          \<Longrightarrow> Says Kas A \<lbrace>
            Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number (CT evs2)\<rbrace>,
            Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number (CT evs2)\<rbrace>
          \<rbrace> # evs2 \<in> kerbV"

(* Authorisation phase *)
 | KV3:  "\<lbrakk> evs3 \<in> kerbV; A \<noteq> Kas; A \<noteq> Tgs;
            Says A Kas \<lbrace>Agent A, Agent Tgs, Number T1\<rbrace> \<in> set evs3;
            Says Kas' A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
                          authTicket\<rbrace> \<in> set evs3;
            valid Ta wrt T1
         \<rbrakk>
          \<Longrightarrow> Says A Tgs \<lbrace>authTicket,
                           (Crypt authK \<lbrace>Agent A, Number (CT evs3)\<rbrace>),
                           Agent B\<rbrace> # evs3 \<in> kerbV"

   (*Unlike version IV, servTicket is not re-encrypted*)
 | KV4:  "\<lbrakk> evs4 \<in> kerbV; Key servK \<notin> used evs4; servK \<in> symKeys;
            B \<noteq> Tgs; authK \<in> symKeys;
            Says A' Tgs \<lbrace>
              (Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK,
                                  Number Ta\<rbrace>),
              (Crypt authK \<lbrace>Agent A, Number T2\<rbrace>), Agent B\<rbrace>
              \<in> set evs4;
            \<not> expiredAK Ta evs4;
            \<not> expiredA T2 evs4;
            servKlife + (CT evs4) \<le> authKlife + Ta
         \<rbrakk>
          \<Longrightarrow> Says Tgs A \<lbrace>
             Crypt authK \<lbrace>Key servK, Agent B, Number (CT evs4)\<rbrace>,
             Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number (CT evs4)\<rbrace>
          \<rbrace> # evs4 \<in> kerbV"

(*Service phase*)
 | KV5:  "\<lbrakk> evs5 \<in> kerbV; authK \<in> symKeys; servK \<in> symKeys;
            A \<noteq> Kas; A \<noteq> Tgs;
            Says A Tgs
              \<lbrace>authTicket, Crypt authK \<lbrace>Agent A, Number T2\<rbrace>,
                Agent B\<rbrace>
              \<in> set evs5;
            Says Tgs' A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
                          servTicket\<rbrace>
              \<in> set evs5;
            valid Ts wrt T2 \<rbrakk>
          \<Longrightarrow> Says A B \<lbrace>servTicket,
                          Crypt servK \<lbrace>Agent A, Number (CT evs5)\<rbrace> \<rbrace>
              # evs5 \<in> kerbV"

 | KV6:  "\<lbrakk> evs6 \<in> kerbV; B \<noteq> Kas; B \<noteq> Tgs;
            Says A' B \<lbrace>
              (Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>),
              (Crypt servK \<lbrace>Agent A, Number T3\<rbrace>)\<rbrace>
            \<in> set evs6;
            \<not> expiredSK Ts evs6;
            \<not> expiredA T3 evs6
         \<rbrakk>
          \<Longrightarrow> Says B A (Crypt servK (Number Ta2))
              # evs6 \<in> kerbV"

(* Leaking an authK... *)
 | Oops1:"\<lbrakk> evsO1 \<in> kerbV; A \<noteq> Spy;
            Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
                         authTicket\<rbrace> \<in> set evsO1;
            expiredAK Ta evsO1 \<rbrakk>
          \<Longrightarrow> Notes Spy \<lbrace>Agent A, Agent Tgs, Number Ta, Key authK\<rbrace>
              # evsO1 \<in> kerbV"

(*Leaking a servK... *)
 | Oops2: "\<lbrakk> evsO2 \<in> kerbV; A \<noteq> Spy;
             Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
                          servTicket\<rbrace> \<in> set evsO2;
             expiredSK Ts evsO2 \<rbrakk>
          \<Longrightarrow> Notes Spy \<lbrace>Agent A, Agent B, Number Ts, Key servK\<rbrace>
              # evsO2 \<in> kerbV"

(* Default destruction rules used pervasively by the proofs below. *)
declare Says_imp_knows_Spy [THEN parts.Inj, dest]
declare parts.Body [dest]
declare analz_into_parts [dest]
declare Fake_parts_insert_in_Un [dest]
subsection\<open>Lemmas about lists, for reasoning about Issues\<close>
lemma spies_Says_rev: "spies (evs @ [Says A B X]) = insert X (spies evs)"
apply (induct_tac "evs")
apply (rename_tac [2] a b)
apply (induct_tac [2] "a", auto)
done
lemma spies_Gets_rev: "spies (evs @ [Gets A X]) = spies evs"
apply (induct_tac "evs")
apply (rename_tac [2] a b)
apply (induct_tac [2] "a", auto)
done
lemma spies_Notes_rev: "spies (evs @ [Notes A X]) =
(if A\<in>bad then insert X (spies evs) else spies evs)"
apply (induct_tac "evs")
apply (rename_tac [2] a b)
apply (induct_tac [2] "a", auto)
done
lemma spies_evs_rev: "spies evs = spies (rev evs)"
apply (induct_tac "evs")
apply (rename_tac [2] a b)
apply (induct_tac [2] "a")
apply (simp_all (no_asm_simp) add: spies_Says_rev spies_Gets_rev spies_Notes_rev)
done
lemmas parts_spies_evs_revD2 = spies_evs_rev [THEN equalityD2, THEN parts_mono]
lemma spies_takeWhile: "spies (takeWhile P evs) \<subseteq> spies evs"
apply (induct_tac "evs")
apply (rename_tac [2] a b)
apply (induct_tac [2] "a", auto)
txt\<open>Resembles \<open>used_subset_append\<close> in theory Event.\<close>
done
lemmas parts_spies_takeWhile_mono = spies_takeWhile [THEN parts_mono]
subsection\<open>Lemmas about \<^term>\<open>authKeys\<close>\<close>
(* The empty trace contains no authentication keys. *)
lemma authKeys_empty: "authKeys [] = {}"
by (simp add: authKeys_def)
(* Events that are not a Kas message K2 leave authKeys unchanged. *)
lemma authKeys_not_insert:
"(\<forall>A Ta akey Peer.
ev \<noteq> Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>akey, Agent Peer, Ta\<rbrace>,
Crypt (shrK Peer) \<lbrace>Agent A, Agent Peer, akey, Ta\<rbrace> \<rbrace>)
\<Longrightarrow> authKeys (ev # evs) = authKeys evs"
by (auto simp add: authKeys_def)
(* A Kas message K2 adds exactly its session key to authKeys. *)
lemma authKeys_insert:
"authKeys
(Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Peer, Number Ta\<rbrace>,
Crypt (shrK Peer) \<lbrace>Agent A, Agent Peer, Key K, Number Ta\<rbrace> \<rbrace> # evs)
= insert K (authKeys evs)"
by (auto simp add: authKeys_def)
(* Membership in authKeys after a K2 event: either the new key or an old one. *)
lemma authKeys_simp:
"K \<in> authKeys
(Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key K', Agent Peer, Number Ta\<rbrace>,
Crypt (shrK Peer) \<lbrace>Agent A, Agent Peer, Key K', Number Ta\<rbrace> \<rbrace> # evs)
\<Longrightarrow> K = K' | K \<in> authKeys evs"
by (auto simp add: authKeys_def)
(* Introduction rule: a key sent by Kas in message K2 is an authKey. *)
lemma authKeysI:
"Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key K, Number Ta\<rbrace> \<rbrace> \<in> set evs
\<Longrightarrow> K \<in> authKeys evs"
by (auto simp add: authKeys_def)
(* Every authKey has appeared on the trace, hence is used. *)
lemma authKeys_used: "K \<in> authKeys evs \<Longrightarrow> Key K \<in> used evs"
by (auto simp add: authKeys_def)
subsection\<open>Forwarding Lemmas\<close>
(* Any ticket sent alongside a session-key certificate is visible in parts. *)
lemma Says_ticket_parts:
"Says S A \<lbrace>Crypt K \<lbrace>SesKey, B, TimeStamp\<rbrace>, Ticket\<rbrace>
\<in> set evs \<Longrightarrow> Ticket \<in> parts (spies evs)"
by blast
(* Stronger: the ticket is even in analz, since it travels unencrypted
   as the second component of the pair. *)
lemma Says_ticket_analz:
"Says S A \<lbrace>Crypt K \<lbrace>SesKey, B, TimeStamp\<rbrace>, Ticket\<rbrace>
\<in> set evs \<Longrightarrow> Ticket \<in> analz (spies evs)"
by (blast dest: Says_imp_knows_Spy [THEN analz.Inj, THEN analz.Snd])
(* Keys issued by Kas are session keys: outside range shrK and symmetric. *)
lemma Oops_range_spies1:
"\<lbrakk> Says Kas A \<lbrace>Crypt KeyA \<lbrace>Key authK, Peer, Ta\<rbrace>, authTicket\<rbrace>
\<in> set evs ;
evs \<in> kerbV \<rbrakk> \<Longrightarrow> authK \<notin> range shrK \<and> authK \<in> symKeys"
apply (erule rev_mp)
apply (erule kerbV.induct, auto)
done
(* Keys issued by Tgs are likewise session keys. *)
lemma Oops_range_spies2:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs ;
evs \<in> kerbV \<rbrakk> \<Longrightarrow> servK \<notin> range shrK \<and> servK \<in> symKeys"
apply (erule rev_mp)
apply (erule kerbV.induct, auto)
done
(*Spy never sees another agent's shared key! (unless it's lost at start)*)
lemma Spy_see_shrK [simp]:
"evs \<in> kerbV \<Longrightarrow> (Key (shrK A) \<in> parts (spies evs)) = (A \<in> bad)"
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
apply (blast+)
done
(* Same equivalence for analz, by analz \<subseteq> parts. *)
lemma Spy_analz_shrK [simp]:
"evs \<in> kerbV \<Longrightarrow> (Key (shrK A) \<in> analz (spies evs)) = (A \<in> bad)"
by auto
lemma Spy_see_shrK_D [dest!]:
"\<lbrakk> Key (shrK A) \<in> parts (spies evs); evs \<in> kerbV \<rbrakk> \<Longrightarrow> A\<in>bad"
by (blast dest: Spy_see_shrK)
lemmas Spy_analz_shrK_D = analz_subset_parts [THEN subsetD, THEN Spy_see_shrK_D, dest!]
text\<open>Nobody can have used non-existent keys!\<close>
lemma new_keys_not_used [simp]:
"\<lbrakk>Key K \<notin> used evs; K \<in> symKeys; evs \<in> kerbV\<rbrakk>
\<Longrightarrow> K \<notin> keysFor (parts (spies evs))"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
txt\<open>Fake\<close>
apply (force dest!: keysFor_parts_insert)
txt\<open>Others\<close>
apply (force dest!: analz_shrK_Decrypt)+
done
(*Earlier, all protocol proofs declared this theorem.
But few of them actually need it! (Another is Yahalom) *)
lemma new_keys_not_analzd:
"\<lbrakk>evs \<in> kerbV; K \<in> symKeys; Key K \<notin> used evs\<rbrakk>
\<Longrightarrow> K \<notin> keysFor (analz (spies evs))"
by (blast dest: new_keys_not_used intro: keysFor_mono [THEN subsetD])
subsection\<open>Regularity Lemmas\<close>
text\<open>These concern the form of items passed in messages\<close>
text\<open>Describes the form of all components sent by Kas\<close>
(* Inverts a Kas K2 event: fixes the ticket's shape, the encrypting key,
   the peer (always Tgs), and classifies authK as a fresh session key. *)
lemma Says_Kas_message_form:
"\<lbrakk> Says Kas A \<lbrace>Crypt K \<lbrace>Key authK, Agent Peer, Ta\<rbrace>, authTicket\<rbrace>
\<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> authK \<notin> range shrK \<and> authK \<in> authKeys evs \<and> authK \<in> symKeys \<and>
authTicket = (Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Ta\<rbrace>) \<and>
K = shrK A \<and> Peer = Tgs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (simp_all (no_asm) add: authKeys_def authKeys_insert)
apply blast+
done
(*This lemma is essential for proving Says_Tgs_message_form:
the session key authK
supplied by Kas in the authentication ticket
cannot be a long-term key!
Generalised to any session keys (both authK and servK).
*)
lemma SesKey_is_session_key:
"\<lbrakk> Crypt (shrK Tgs_B) \<lbrace>Agent A, Agent Tgs_B, Key SesKey, Number T\<rbrace>
\<in> parts (spies evs); Tgs_B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> SesKey \<notin> range shrK"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast)
done
(* An authTicket visible in traffic must literally have originated with Kas. *)
lemma authTicket_authentic:
"\<lbrakk> Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Ta\<rbrace>
\<in> parts (spies evs);
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Ta\<rbrace>\<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
txt\<open>Fake, K4\<close>
apply (blast+)
done
(* Consequence: the key inside any visible authTicket is an authKey. *)
lemma authTicket_crypt_authK:
"\<lbrakk> Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace>
\<in> parts (spies evs);
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> authK \<in> authKeys evs"
by (metis authKeysI authTicket_authentic)
text\<open>Describes the form of servK, servTicket and authK sent by Tgs\<close>
(* Inverts a Tgs K4 event: servK is a fresh session key distinct from all
   authKeys, the servTicket has the canonical shape, and authK is a proper
   authentication key. *)
lemma Says_Tgs_message_form:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> B \<noteq> Tgs \<and>
servK \<notin> range shrK \<and> servK \<notin> authKeys evs \<and> servK \<in> symKeys \<and>
servTicket = (Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Ts\<rbrace>) \<and>
authK \<notin> range shrK \<and> authK \<in> authKeys evs \<and> authK \<in> symKeys"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (simp_all add: authKeys_insert authKeys_not_insert authKeys_empty authKeys_simp, blast, auto)
txt\<open>Three subcases of Message 4\<close>
apply (blast dest!: authKeys_used Says_Kas_message_form)
apply (blast dest!: SesKey_is_session_key)
apply (blast dest: authTicket_crypt_authK)
done
(*
lemma authTicket_form:
lemma servTicket_form:
lemma Says_kas_message_form:
lemma Says_tgs_message_form:
cannot be proved for version V, but a new proof strategy can be used in their
place. The new strategy merely says that both the authTicket and the servTicket
are in parts and in analz as soon as they appear, using lemmas Says_ticket_parts and Says_ticket_analz.
The new strategy always lets the simplifier solve cases K3 and K5, saving on
long dedicated analyses, which seemed unavoidable. For this reason, lemma
servK_notin_authKeysD is no longer needed.
*)
subsection\<open>Authenticity theorems: confirm origin of sensitive messages\<close>
(* A certificate encrypted with an uncompromised agent's shared key must
   come from Kas; the accompanying ticket is existentially quantified,
   which is characteristic of version V. *)
lemma authK_authentic:
"\<lbrakk> Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace>
\<in> parts (spies evs);
A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists> AT. Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace>, AT\<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
apply blast+
done
text\<open>If a certain encrypted message appears then it originated with Tgs\<close>
(* Requires secrecy of authK; the sender A and ticket ST are existential. *)
lemma servK_authentic:
"\<lbrakk> Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs);
authK \<notin> range shrK;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>A ST. Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>, ST\<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
apply blast+
done
(* Variant of servK_authentic replacing "authK \<notin> range shrK" by "B \<noteq> Tgs". *)
lemma servK_authentic_bis:
"\<lbrakk> Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs);
B \<noteq> Tgs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>A ST. Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>, ST\<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast+)
done
text\<open>Authenticity of servK for B\<close>
(* A visible servTicket for an honest B (other than Tgs) originated with Tgs. *)
lemma servTicket_authentic_Tgs:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Ts\<rbrace>
\<in> parts (spies evs); B \<noteq> Tgs; B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>authK.
Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Ts\<rbrace>\<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast+)
done
text\<open>Anticipated here from next subsection\<close>
(* Every Tgs message K4 was preceded by the matching Kas message K2. *)
lemma K4_imp_K2:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs; evs \<in> kerbV\<rbrakk>
\<Longrightarrow> \<exists>Ta. Says Kas A
\<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace> \<rbrace>
\<in> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, auto)
apply (metis MPair_analz Says_imp_analz_Spy analz_conj_parts authTicket_authentic)
done
text\<open>Anticipated here from next subsection\<close>
(* Timed ("u_") version of K4_imp_K2: additionally relates the two
   timestamps via the key lifetimes. *)
lemma u_K4_imp_K2:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace> \<in> set evs; evs \<in> kerbV\<rbrakk>
\<Longrightarrow> \<exists>Ta. Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace> \<rbrace>
\<in> set evs
\<and> servKlife + Ts \<le> authKlife + Ta"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, auto)
apply (blast dest!: Says_imp_spies [THEN parts.Inj, THEN parts.Fst, THEN authTicket_authentic])
done
(* Chains servTicket_authentic_Tgs with K4_imp_K2 back to Kas. *)
lemma servTicket_authentic_Kas:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs); B \<noteq> Tgs; B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>authK Ta.
Says Kas A
\<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace> \<rbrace>
\<in> set evs"
by (metis K4_imp_K2 servTicket_authentic_Tgs)
(* Timed version of the previous lemma. *)
lemma u_servTicket_authentic_Kas:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs); B \<noteq> Tgs; B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>authK Ta.
Says Kas A
\<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace> \<rbrace>
\<in> set evs \<and>
servKlife + Ts \<le> authKlife + Ta"
by (metis servTicket_authentic_Tgs u_K4_imp_K2)
(* Full provenance of a visible servTicket: both the Kas and Tgs events. *)
lemma servTicket_authentic:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs); B \<noteq> Tgs; B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>Ta authK.
Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace> \<rbrace> \<in> set evs
\<and> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>\<rbrace>
\<in> set evs"
by (metis K4_imp_K2 servTicket_authentic_Tgs)
(* Timed version of servTicket_authentic. *)
lemma u_servTicket_authentic:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs); B \<noteq> Tgs; B \<notin> bad;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>Ta authK.
Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace>\<rbrace> \<in> set evs
\<and> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>\<rbrace>
\<in> set evs
\<and> servKlife + Ts \<le> authKlife + Ta"
by (metis servTicket_authentic_Tgs u_K4_imp_K2)
(* Arithmetic bridge: an unexpired servK implies an unexpired authK. *)
lemma u_NotexpiredSK_NotexpiredAK:
"\<lbrakk> \<not> expiredSK Ts evs; servKlife + Ts \<le> authKlife + Ta \<rbrakk>
\<Longrightarrow> \<not> expiredAK Ta evs"
by (metis order_le_less_trans)
subsection\<open>Reliability: friendly agents send something if something else happened\<close>
(* An honest A sends message K3 only after receiving a K2-shaped
   certificate from Kas. *)
lemma K3_imp_K2:
"\<lbrakk> Says A Tgs
\<lbrace>authTicket, Crypt authK \<lbrace>Agent A, Number T2\<rbrace>, Agent B\<rbrace>
\<in> set evs;
A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>Ta AT. Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Ta\<rbrace>,
AT\<rbrace> \<in> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast, blast)
apply (blast dest: Says_imp_spies [THEN parts.Inj, THEN parts.Fst, THEN authK_authentic])
done
text\<open>Anticipated here from next subsection. An authK is encrypted by one and only one Shared key. A servK is encrypted by one and only one authK.\<close>
(* Unicity: a secure session key pins down the encrypting key, peer and
   timestamp of its certificate. *)
lemma Key_unique_SesKey:
"\<lbrakk> Crypt K \<lbrace>Key SesKey, Agent B, T\<rbrace>
\<in> parts (spies evs);
Crypt K' \<lbrace>Key SesKey, Agent B', T'\<rbrace>
\<in> parts (spies evs); Key SesKey \<notin> analz (spies evs);
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> K=K' \<and> B=B' \<and> T=T'"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
txt\<open>Fake, K2, K4\<close>
apply (blast+)
done
text\<open>This inevitably has an existential form in version V\<close>
(* A visible authenticator under a secure servK was sent by A in step K5. *)
lemma Says_K5:
"\<lbrakk> Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<in> parts (spies evs);
Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
servTicket\<rbrace> \<in> set evs;
Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists> ST. Says A B \<lbrace>ST, Crypt servK \<lbrace>Agent A, Number T3\<rbrace>\<rbrace> \<in> set evs"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [5] Says_ticket_parts)
apply (frule_tac [7] Says_ticket_parts)
apply (simp_all (no_asm_simp) add: all_conj_distrib)
apply blast
txt\<open>K3\<close>
apply (blast dest: authK_authentic Says_Kas_message_form Says_Tgs_message_form)
txt\<open>K4\<close>
apply (force dest!: Crypt_imp_keysFor)
txt\<open>K5\<close>
apply (blast dest: Key_unique_SesKey)
done
text\<open>Anticipated here from next subsection\<close>
(* Unicity for tickets: a secure session key determines the agents and
   timestamp of any ticket containing it. *)
lemma unique_CryptKey:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key SesKey, T\<rbrace>
\<in> parts (spies evs);
Crypt (shrK B') \<lbrace>Agent A', Agent B', Key SesKey, T'\<rbrace>
\<in> parts (spies evs); Key SesKey \<notin> analz (spies evs);
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A=A' \<and> B=B' \<and> T=T'"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
txt\<open>Fake, K2, K4\<close>
apply (blast+)
done
(* A visible K6 reply under a secure servK really was sent by B to A. *)
lemma Says_K6:
"\<lbrakk> Crypt servK (Number T3) \<in> parts (spies evs);
Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
servTicket\<rbrace> \<in> set evs;
Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says B A (Crypt servK (Number T3)) \<in> set evs"
apply (frule Says_Tgs_message_form, assumption, clarify)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts)
apply simp_all
txt\<open>fake\<close>
apply blast
txt\<open>K4\<close>
apply (force dest!: Crypt_imp_keysFor)
txt\<open>K6\<close>
apply (metis MPair_parts Says_imp_parts_knows_Spy unique_CryptKey)
done
text\<open>Needs a unicity theorem, hence moved here\<close>
(* Strengthens servK_authentic: with the matching Kas event in hand,
   the Tgs event is fully determined, including the servTicket. *)
lemma servK_authentic_ter:
"\<lbrakk> Says Kas A
\<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Ta\<rbrace>, authTicket\<rbrace> \<in> set evs;
Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs);
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Ts\<rbrace> \<rbrace>
\<in> set evs"
apply (frule Says_Kas_message_form, assumption)
apply clarify
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast)
txt\<open>K2 and K4 remain\<close>
apply (blast dest!: servK_authentic Says_Tgs_message_form authKeys_used)
apply (blast dest!: unique_CryptKey)
done
subsection\<open>Unicity Theorems\<close>
text\<open>The session key, if secure, uniquely identifies the Ticket
whether authTicket or servTicket. As a matter of fact, one can read
also Tgs in the place of B.\<close>
(* Kas never issues the same authK twice: two K2 events sharing authK agree
   on every other component. *)
lemma unique_authKeys:
"\<lbrakk> Says Kas A
\<lbrace>Crypt Ka \<lbrace>Key authK, Agent Tgs, Ta\<rbrace>, X\<rbrace> \<in> set evs;
Says Kas A'
\<lbrace>Crypt Ka' \<lbrace>Key authK, Agent Tgs, Ta'\<rbrace>, X'\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk> \<Longrightarrow> A=A' \<and> Ka=Ka' \<and> Ta=Ta' \<and> X=X'"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
apply blast+
done
text\<open>servK uniquely identifies the message from Tgs\<close>
(* Likewise Tgs never issues the same servK twice. *)
lemma unique_servKeys:
"\<lbrakk> Says Tgs A
\<lbrace>Crypt K \<lbrace>Key servK, Agent B, Ts\<rbrace>, X\<rbrace> \<in> set evs;
Says Tgs A'
\<lbrace>Crypt K' \<lbrace>Key servK, Agent B', Ts'\<rbrace>, X'\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk> \<Longrightarrow> A=A' \<and> B=B' \<and> K=K' \<and> Ts=Ts' \<and> X=X'"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
apply blast+
done
subsection\<open>Lemmas About the Predicate \<^term>\<open>AKcryptSK\<close>\<close>
(* AKcryptSK authK servK evs: Tgs used authK to encrypt servK on evs. *)
lemma not_AKcryptSK_Nil [iff]: "\<not> AKcryptSK authK servK []"
apply (simp add: AKcryptSK_def)
done
(* Introduction rule, derived from the shape of Tgs's K4 message. *)
lemma AKcryptSKI:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, tt\<rbrace>, X \<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk> \<Longrightarrow> AKcryptSK authK servK evs"
by (metis AKcryptSK_def Says_Tgs_message_form)
(* Unfolding over a Says event: either this event is the K4 message or the
   relation already held. *)
lemma AKcryptSK_Says [simp]:
"AKcryptSK authK servK (Says S A X # evs) =
(S = Tgs \<and>
(\<exists>B tt. X = \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, tt\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, tt\<rbrace> \<rbrace>)
| AKcryptSK authK servK evs)"
by (auto simp add: AKcryptSK_def)
(* Notes events never affect the relation. *)
lemma AKcryptSK_Notes [simp]:
"AKcryptSK authK servK (Notes A X # evs) =
AKcryptSK authK servK evs"
by (auto simp add: AKcryptSK_def)
(*A fresh authK cannot be associated with any other
(with respect to a given trace). *)
lemma Auth_fresh_not_AKcryptSK:
"\<lbrakk> Key authK \<notin> used evs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK authK servK evs"
unfolding AKcryptSK_def
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast)
done
(*A fresh servK cannot be associated with any other
(with respect to a given trace). *)
lemma Serv_fresh_not_AKcryptSK:
"Key servK \<notin> used evs \<Longrightarrow> \<not> AKcryptSK authK servK evs"
by (auto simp add: AKcryptSK_def)
(* A key appearing inside an authTicket is never itself encrypted as a
   service key. *)
lemma authK_not_AKcryptSK:
"\<lbrakk> Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, tk\<rbrace>
\<in> parts (spies evs); evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK K authK evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all)
txt\<open>Fake,K2,K4\<close>
apply (auto simp add: AKcryptSK_def)
done
text\<open>A secure serverkey cannot have been used to encrypt others\<close>
lemma servK_not_AKcryptSK:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key SK, tt\<rbrace> \<in> parts (spies evs);
Key SK \<notin> analz (spies evs); SK \<in> symKeys;
B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK SK K evs"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, simp_all, blast)
txt\<open>K4\<close>
apply (metis Auth_fresh_not_AKcryptSK MPair_parts Says_imp_parts_knows_Spy authKeys_used authTicket_crypt_authK unique_CryptKey)
done
text\<open>Long term keys are not issued as servKeys\<close>
lemma shrK_not_AKcryptSK:
"evs \<in> kerbV \<Longrightarrow> \<not> AKcryptSK K (shrK A) evs"
unfolding AKcryptSK_def
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts, auto)
done
text\<open>The Tgs message associates servK with authK and therefore not with any
other key authK.\<close>
(* Follows from unique_servKeys: servK has exactly one encrypting authK. *)
lemma Says_Tgs_AKcryptSK:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, tt\<rbrace>, X \<rbrace>
\<in> set evs;
authK' \<noteq> authK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK authK' servK evs"
by (metis AKcryptSK_def unique_servKeys)
(* The relation is not transitive-chainable: a servK never encrypts keys. *)
lemma AKcryptSK_not_AKcryptSK:
"\<lbrakk> AKcryptSK authK servK evs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK servK K evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts)
apply (simp_all, safe)
txt\<open>K4 splits into subcases\<close>
prefer 4 apply (blast dest!: authK_not_AKcryptSK)
txt\<open>servK is fresh and so could not have been used, by
\<open>new_keys_not_used\<close>\<close>
prefer 2
apply (force dest!: Crypt_imp_invKey_keysFor simp add: AKcryptSK_def)
txt\<open>Others by freshness\<close>
apply (blast+)
done
(* A servK determines its encrypting authK uniquely and is symmetric. *)
lemma not_different_AKcryptSK:
"\<lbrakk> AKcryptSK authK servK evs;
authK' \<noteq> authK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK authK' servK evs \<and> servK \<in> symKeys"
apply (simp add: AKcryptSK_def)
apply (blast dest: unique_servKeys Says_Tgs_message_form)
done
text\<open>The only session keys that can be found with the help of session keys are
those sent by Tgs in step K4.\<close>
text\<open>We take some pains to express the property
as a logical equivalence so that the simplifier can apply it.\<close>
lemma Key_analz_image_Key_lemma:
"P \<longrightarrow> (Key K \<in> analz (Key`KK \<union> H)) \<longrightarrow> (K\<in>KK \<or> Key K \<in> analz H)
\<Longrightarrow>
P \<longrightarrow> (Key K \<in> analz (Key`KK \<union> H)) = (K\<in>KK \<or> Key K \<in> analz H)"
by (blast intro: analz_mono [THEN subsetD])
(* If K encrypted K', then adding K to the spy's knowledge yields K'. *)
lemma AKcryptSK_analz_insert:
"\<lbrakk> AKcryptSK K K' evs; K \<in> symKeys; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key K' \<in> analz (insert (Key K) (spies evs))"
apply (simp add: AKcryptSK_def, clarify)
apply (drule Says_imp_spies [THEN analz.Inj, THEN analz_insertI], auto)
done
(* authKeys and long-term keys are never in the servK position. *)
lemma authKeys_are_not_AKcryptSK:
"\<lbrakk> K \<in> authKeys evs \<union> range shrK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall>SK. \<not> AKcryptSK SK K evs \<and> K \<in> symKeys"
apply (simp add: authKeys_def AKcryptSK_def)
apply (blast dest: Says_Kas_message_form Says_Tgs_message_form)
done
(* Only authKeys can occupy the encrypting position of the relation. *)
lemma not_authKeys_not_AKcryptSK:
"\<lbrakk> K \<notin> authKeys evs;
K \<notin> range shrK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall>SK. \<not> AKcryptSK K SK evs"
apply (simp add: AKcryptSK_def)
apply (blast dest: Says_Tgs_message_form)
done
subsection\<open>Secrecy Theorems\<close>
text\<open>For the Oops2 case of the next theorem\<close>
(* A servK issued by Tgs never itself encrypts another session key. *)
lemma Oops2_not_AKcryptSK:
"\<lbrakk> evs \<in> kerbV;
Says Tgs A \<lbrace>Crypt authK
\<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs \<rbrakk>
\<Longrightarrow> \<not> AKcryptSK servK SK evs"
by (blast dest: AKcryptSKI AKcryptSK_not_AKcryptSK)
text\<open>Big simplification law for keys SK that are not crypted by keys in KK
It helps prove three, otherwise hard, facts about keys. These facts are
exploited as simplification laws for analz, and also "limit the damage"
in case of loss of a key to the spy. See ESORICS98.\<close>
lemma Key_analz_image_Key [rule_format (no_asm)]:
"evs \<in> kerbV \<Longrightarrow>
(\<forall>SK KK. SK \<in> symKeys \<and> KK \<subseteq> -(range shrK) \<longrightarrow>
(\<forall>K \<in> KK. \<not> AKcryptSK K SK evs) \<longrightarrow>
(Key SK \<in> analz (Key`KK \<union> (spies evs))) =
(SK \<in> KK | Key SK \<in> analz (spies evs)))"
apply (erule kerbV.induct)
apply (frule_tac [10] Oops_range_spies2)
apply (frule_tac [9] Oops_range_spies1)
(*Used to apply Says_tgs_message form, which is no longer available.
Instead\<dots>*)
apply (drule_tac [7] Says_ticket_analz)
(*Used to apply Says_kas_message form, which is no longer available.
Instead\<dots>*)
apply (drule_tac [5] Says_ticket_analz)
apply (safe del: impI intro!: Key_analz_image_Key_lemma [THEN impI])
txt\<open>Case-splits for Oops1 and message 5: the negated case simplifies using
the induction hypothesis\<close>
apply (case_tac [9] "AKcryptSK authK SK evsO1")
apply (case_tac [7] "AKcryptSK servK SK evs5")
apply (simp_all del: image_insert
add: analz_image_freshK_simps AKcryptSK_Says shrK_not_AKcryptSK
Oops2_not_AKcryptSK Auth_fresh_not_AKcryptSK
Serv_fresh_not_AKcryptSK Says_Tgs_AKcryptSK Spy_analz_shrK)
txt\<open>Fake\<close>
apply spy_analz
txt\<open>K2\<close>
apply blast
txt\<open>Cases K3 and K5 solved by the simplifier thanks to the ticket being in
analz - this strategy is new wrt version IV\<close>
txt\<open>K4\<close>
apply (blast dest!: authK_not_AKcryptSK)
txt\<open>Oops1\<close>
apply (metis AKcryptSK_analz_insert insert_Key_singleton)
done
text\<open>First simplification law for analz: no session keys encrypt
authentication keys or shared keys.\<close>
lemma analz_insert_freshK1:
"\<lbrakk> evs \<in> kerbV; K \<in> authKeys evs \<union> range shrK;
SesKey \<notin> range shrK \<rbrakk>
\<Longrightarrow> (Key K \<in> analz (insert (Key SesKey) (spies evs))) =
(K = SesKey | Key K \<in> analz (spies evs))"
apply (frule authKeys_are_not_AKcryptSK, assumption)
apply (simp del: image_insert
add: analz_image_freshK_simps add: Key_analz_image_Key)
done
text\<open>Second simplification law for analz: no service keys encrypt any other keys.\<close>
lemma analz_insert_freshK2:
"\<lbrakk> evs \<in> kerbV; servK \<notin> (authKeys evs); servK \<notin> range shrK;
K \<in> symKeys \<rbrakk>
\<Longrightarrow> (Key K \<in> analz (insert (Key servK) (spies evs))) =
(K = servK | Key K \<in> analz (spies evs))"
apply (frule not_authKeys_not_AKcryptSK, assumption, assumption)
apply (simp del: image_insert
add: analz_image_freshK_simps add: Key_analz_image_Key)
done
text\<open>Third simplification law for analz: only one authentication key encrypts a certain service key.\<close>
lemma analz_insert_freshK3:
"\<lbrakk> AKcryptSK authK servK evs;
authK' \<noteq> authK; authK' \<notin> range shrK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> (Key servK \<in> analz (insert (Key authK') (spies evs))) =
(servK = authK' | Key servK \<in> analz (spies evs))"
apply (drule_tac authK' = authK' in not_different_AKcryptSK, blast, assumption)
apply (simp del: image_insert
add: analz_image_freshK_simps add: Key_analz_image_Key)
done
(* Variant of the third law phrased on the Tgs event rather than AKcryptSK. *)
lemma analz_insert_freshK3_bis:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs;
authK \<noteq> authK'; authK' \<notin> range shrK; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> (Key servK \<in> analz (insert (Key authK') (spies evs))) =
(servK = authK' | Key servK \<in> analz (spies evs))"
apply (frule AKcryptSKI, assumption)
apply (simp add: analz_insert_freshK3)
done
text\<open>a weakness of the protocol\<close>
(* Loss of an authK immediately compromises the servK it encrypted. *)
lemma authK_compromises_servK:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs; authK \<in> symKeys;
Key authK \<in> analz (spies evs); evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<in> analz (spies evs)"
by (metis Says_imp_analz_Spy analz.Fst analz_Decrypt')
text\<open>lemma \<open>servK_notin_authKeysD\<close> not needed in version V\<close>
text\<open>If Spy sees the Authentication Key sent in msg K2, then
the Key has expired.\<close>
lemma Confidentiality_Kas_lemma [rule_format]:
"\<lbrakk> authK \<in> symKeys; A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says Kas A
\<lbrace>Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>,
Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key authK, Number Ta\<rbrace>\<rbrace>
\<in> set evs \<longrightarrow>
Key authK \<in> analz (spies evs) \<longrightarrow>
expiredAK Ta evs"
apply (erule kerbV.induct)
apply (frule_tac [10] Oops_range_spies2)
apply (frule_tac [9] Oops_range_spies1)
apply (frule_tac [7] Says_ticket_analz)
apply (frule_tac [5] Says_ticket_analz)
apply (safe del: impI conjI impCE)
apply (simp_all (no_asm_simp) add: Says_Kas_message_form less_SucI analz_insert_eq not_parts_not_analz analz_insert_freshK1 pushes)
txt\<open>Fake\<close>
apply spy_analz
txt\<open>K2\<close>
apply blast
txt\<open>K4\<close>
apply blast
txt\<open>Oops1\<close>
apply (blast dest!: unique_authKeys intro: less_SucI)
txt\<open>Oops2\<close>
apply (blast dest: Says_Tgs_message_form Says_Kas_message_form)
done
(* Contrapositive packaging: an unexpired authK stays out of analz. *)
lemma Confidentiality_Kas:
"\<lbrakk> Says Kas A
\<lbrace>Crypt Ka \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>, authTicket\<rbrace>
\<in> set evs;
\<not> expiredAK Ta evs;
A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key authK \<notin> analz (spies evs)"
apply (blast dest: Says_Kas_message_form Confidentiality_Kas_lemma)
done
text\<open>If Spy sees the Service Key sent in msg K4, then
the Key has expired.\<close>
lemma Confidentiality_lemma [rule_format]:
"\<lbrakk> Says Tgs A
\<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>\<rbrace>
\<in> set evs;
Key authK \<notin> analz (spies evs);
servK \<in> symKeys;
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<in> analz (spies evs) \<longrightarrow>
expiredSK Ts evs"
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (rule_tac [9] impI)+
\<comment> \<open>The Oops1 case is unusual: must simplify
\<^term>\<open>Authkey \<notin> analz (spies (ev#evs))\<close>, not letting
\<open>analz_mono_contra\<close> weaken it to
\<^term>\<open>Authkey \<notin> analz (spies evs)\<close>,
for we then conclude \<^term>\<open>authK \<noteq> authKa\<close>.\<close>
apply analz_mono_contra
apply (frule_tac [10] Oops_range_spies2)
apply (frule_tac [9] Oops_range_spies1)
apply (frule_tac [7] Says_ticket_analz)
apply (frule_tac [5] Says_ticket_analz)
apply (safe del: impI conjI impCE)
apply (simp_all add: less_SucI new_keys_not_analzd Says_Kas_message_form Says_Tgs_message_form analz_insert_eq not_parts_not_analz analz_insert_freshK1 analz_insert_freshK2 analz_insert_freshK3_bis pushes)
txt\<open>Fake\<close>
apply spy_analz
txt\<open>K2\<close>
apply (blast intro: parts_insertI less_SucI)
txt\<open>K4\<close>
apply (blast dest: authTicket_authentic Confidentiality_Kas)
txt\<open>Oops1\<close>
apply (blast dest: Says_Kas_message_form Says_Tgs_message_form intro: less_SucI)
txt\<open>Oops2\<close>
apply (metis Suc_le_eq linorder_linear linorder_not_le msg.simps(2) unique_servKeys)
done
text\<open>In the real world Tgs can't check whether authK is secure!\<close>
lemma Confidentiality_Tgs:
"\<lbrakk> Says Tgs A
\<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs;
Key authK \<notin> analz (spies evs);
\<not> expiredSK Ts evs;
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<notin> analz (spies evs)"
by (blast dest: Says_Tgs_message_form Confidentiality_lemma)
text\<open>In the real world Tgs CAN check what Kas sends!\<close>
lemma Confidentiality_Tgs_bis:
"\<lbrakk> Says Kas A
\<lbrace>Crypt Ka \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>, authTicket\<rbrace>
\<in> set evs;
Says Tgs A
\<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs;
\<not> expiredAK Ta evs; \<not> expiredSK Ts evs;
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<notin> analz (spies evs)"
by (blast dest!: Confidentiality_Kas Confidentiality_Tgs)
text\<open>Most general form\<close>
lemmas Confidentiality_Tgs_ter = authTicket_authentic [THEN Confidentiality_Tgs_bis]
lemmas Confidentiality_Auth_A = authK_authentic [THEN exE, THEN Confidentiality_Kas]
text\<open>Needs a confidentiality guarantee, hence moved here.
Authenticity of servK for A\<close>
lemma servK_authentic_bis_r:
"\<lbrakk> Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
\<not> expiredAK Ta evs; A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>,
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace> \<rbrace>
\<in> set evs"
by (metis Confidentiality_Kas authK_authentic servK_authentic_ter)
lemma Confidentiality_Serv_A:
"\<lbrakk> Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
\<not> expiredAK Ta evs; \<not> expiredSK Ts evs;
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<notin> analz (spies evs)"
apply (drule authK_authentic, assumption, assumption)
apply (blast dest: Confidentiality_Kas Says_Kas_message_form servK_authentic_ter Confidentiality_Tgs_bis)
done
lemma Confidentiality_B:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
\<not> expiredSK Ts evs; \<not> expiredAK Ta evs;
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<notin> analz (spies evs)"
apply (frule authK_authentic)
apply (erule_tac [3] exE)
apply (frule_tac [3] Confidentiality_Kas)
apply (frule_tac [6] servTicket_authentic, auto)
apply (blast dest!: Confidentiality_Tgs_bis dest: Says_Kas_message_form servK_authentic unique_servKeys unique_authKeys)
done
lemma u_Confidentiality_B:
"\<lbrakk> Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
\<not> expiredSK Ts evs;
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Key servK \<notin> analz (spies evs)"
by (blast dest: u_servTicket_authentic u_NotexpiredSK_NotexpiredAK Confidentiality_Tgs_bis)
subsection\<open>Authentication\<close>
text\<open>Each party verifies "the identity of
another party who generated some data" (quoted from Neuman and Ts'o).\<close>
text\<open>These guarantees don't assess whether two parties agree on
the same session key: sending a message containing a key
doesn't a priori state knowledge of the key.\<close>
text\<open>These didn't have existential form in version IV\<close>
lemma B_authenticates_A:
"\<lbrakk> Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<in> parts (spies evs);
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists> ST. Says A B \<lbrace>ST, Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<rbrace> \<in> set evs"
by (blast dest: servTicket_authentic_Tgs intro: Says_K5)
text\<open>The second assumption tells B what kind of key servK is.\<close>
lemma B_authenticates_A_r:
"\<lbrakk> Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<in> parts (spies evs);
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
\<not> expiredSK Ts evs; \<not> expiredAK Ta evs;
B \<noteq> Tgs; A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists> ST. Says A B \<lbrace>ST, Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<rbrace> \<in> set evs"
by (blast intro: Says_K5 dest: Confidentiality_B servTicket_authentic_Tgs)
text\<open>\<open>u_B_authenticates_A\<close> would be the same as \<open>B_authenticates_A\<close> because the
servK confidentiality assumption is yet unrelaxed\<close>
lemma u_B_authenticates_A_r:
"\<lbrakk> Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<in> parts (spies evs);
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
\<not> expiredSK Ts evs;
B \<noteq> Tgs; A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists> ST. Says A B \<lbrace>ST, Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<rbrace> \<in> set evs"
by (blast intro: Says_K5 dest: u_Confidentiality_B servTicket_authentic_Tgs)
lemma A_authenticates_B:
"\<lbrakk> Crypt servK (Number T3) \<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs); Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says B A (Crypt servK (Number T3)) \<in> set evs"
by (metis authK_authentic Oops_range_spies1 Says_K6 servK_authentic u_K4_imp_K2 unique_authKeys)
lemma A_authenticates_B_r:
"\<lbrakk> Crypt servK (Number T3) \<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
\<not> expiredAK Ta evs; \<not> expiredSK Ts evs;
A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says B A (Crypt servK (Number T3)) \<in> set evs"
apply (frule authK_authentic)
apply (erule_tac [3] exE)
apply (frule_tac [3] Says_Kas_message_form)
apply (frule_tac [4] Confidentiality_Kas)
apply (frule_tac [7] servK_authentic)
apply auto
apply (metis Confidentiality_Tgs K4_imp_K2 Says_K6 unique_authKeys)
done
subsection\<open>Parties' knowledge of session keys\<close>
text\<open>An agent knows a session key if he used it to issue a cipher. These
guarantees can be interpreted both in terms of key distribution
and of non-injective agreement on the session key.\<close>
lemma Kas_Issues_A:
"\<lbrakk> Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace>, authTicket\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Kas Issues A with (Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace>)
on evs"
unfolding Issues_def
apply (rule exI)
apply (rule conjI, assumption)
apply (simp (no_asm))
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (frule_tac [5] Says_ticket_parts)
apply (frule_tac [7] Says_ticket_parts)
apply (simp_all (no_asm_simp) add: all_conj_distrib)
txt\<open>K2\<close>
apply (simp add: takeWhile_tail)
apply (metis MPair_parts parts.Body parts_idem parts_spies_takeWhile_mono parts_trans spies_evs_rev usedI)
done
lemma A_authenticates_and_keydist_to_Kas:
"\<lbrakk> Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace> \<in> parts (spies evs);
A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Kas Issues A with (Crypt (shrK A) \<lbrace>Key authK, Peer, Ta\<rbrace>)
on evs"
by (blast dest!: authK_authentic Kas_Issues_A)
lemma Tgs_Issues_A:
"\<lbrakk> Says Tgs A \<lbrace>Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>, servTicket\<rbrace>
\<in> set evs;
Key authK \<notin> analz (spies evs); evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Tgs Issues A with
(Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>) on evs"
unfolding Issues_def
apply (rule exI)
apply (rule conjI, assumption)
apply (simp (no_asm))
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [5] Says_ticket_parts)
apply (frule_tac [7] Says_ticket_parts)
apply (simp_all (no_asm_simp) add: all_conj_distrib)
apply (simp add: takeWhile_tail)
(*Last two thms installed only to derive authK \<notin> range shrK*)
apply (blast dest: servK_authentic parts_spies_takeWhile_mono [THEN subsetD]
parts_spies_evs_revD2 [THEN subsetD] authTicket_authentic
Says_Kas_message_form)
done
lemma A_authenticates_and_keydist_to_Tgs:
"\<lbrakk> Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs); B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<exists>A. Tgs Issues A with
(Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>) on evs"
by (blast dest: Tgs_Issues_A servK_authentic_bis)
lemma B_Issues_A:
"\<lbrakk> Says B A (Crypt servK (Number T3)) \<in> set evs;
Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> B Issues A with (Crypt servK (Number T3)) on evs"
unfolding Issues_def
apply (rule exI)
apply (rule conjI, assumption)
apply (simp (no_asm))
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (simp_all (no_asm_simp) add: all_conj_distrib)
apply blast
txt\<open>K6 requires numerous lemmas\<close>
apply (simp add: takeWhile_tail)
apply (blast intro: Says_K6 dest: servTicket_authentic
parts_spies_takeWhile_mono [THEN subsetD]
parts_spies_evs_revD2 [THEN subsetD])
done
lemma A_authenticates_and_keydist_to_B:
"\<lbrakk> Crypt servK (Number T3) \<in> parts (spies evs);
Crypt authK \<lbrace>Key servK, Agent B, Number Ts\<rbrace>
\<in> parts (spies evs);
Crypt (shrK A) \<lbrace>Key authK, Agent Tgs, Number Ta\<rbrace>
\<in> parts (spies evs);
Key authK \<notin> analz (spies evs); Key servK \<notin> analz (spies evs);
A \<notin> bad; B \<notin> bad; B \<noteq> Tgs; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> B Issues A with (Crypt servK (Number T3)) on evs"
by (blast dest!: A_authenticates_B B_Issues_A)
(*Must use \<le> rather than =, otherwise it cannot be proved inductively!*)
(*This is too strong for version V but would hold for version IV if only B
in K6 said a fresh timestamp.
lemma honest_never_says_newer_timestamp:
"\<lbrakk> (CT evs) \<le> T ; Number T \<in> parts {X}; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall> A B. A \<noteq> Spy \<longrightarrow> Says A B X \<notin> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply (simp_all)
apply force
apply force
txt{*clarifying case K3*}
apply (rule impI)
apply (rule impI)
apply (frule Suc_leD)
apply (clarify)
txt{*cannot solve K3 or K5 because the spy might send CT evs as authTicket
or servTicket, which the honest agent would forward*}
prefer 2 apply force
prefer 4 apply force
prefer 4 apply force
txt{*cannot solve K6 unless B updates the timestamp - rather than bouncing T3*}
oops
*)
text\<open>But we can prove a less general fact concerning only authenticators!\<close>
lemma honest_never_says_newer_timestamp_in_auth:
"\<lbrakk> (CT evs) \<le> T; Number T \<in> parts {X}; A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says A B \<lbrace>Y, X\<rbrace> \<notin> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct)
apply auto
done
lemma honest_never_says_current_timestamp_in_auth:
"\<lbrakk> (CT evs) = T; Number T \<in> parts {X}; A \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Says A B \<lbrace>Y, X\<rbrace> \<notin> set evs"
by (metis honest_never_says_newer_timestamp_in_auth le_refl)
lemma A_Issues_B:
"\<lbrakk> Says A B \<lbrace>ST, Crypt servK \<lbrace>Agent A, Number T3\<rbrace>\<rbrace> \<in> set evs;
Key servK \<notin> analz (spies evs);
B \<noteq> Tgs; A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A Issues B with (Crypt servK \<lbrace>Agent A, Number T3\<rbrace>) on evs"
unfolding Issues_def
apply (rule exI)
apply (rule conjI, assumption)
apply (simp (no_asm))
apply (erule rev_mp)
apply (erule rev_mp)
apply (erule kerbV.induct, analz_mono_contra)
apply (frule_tac [7] Says_ticket_parts)
apply (frule_tac [5] Says_ticket_parts)
apply (simp_all (no_asm_simp))
txt\<open>K5\<close>
apply auto
apply (simp add: takeWhile_tail)
txt\<open>Level 15: case study necessary because the assumption doesn't state
the form of servTicket. The guarantee becomes stronger.\<close>
prefer 2 apply (simp add: takeWhile_tail)
(**This single command of version IV...
apply (blast dest: Says_imp_spies [THEN analz.Inj, THEN analz_Decrypt']
K3_imp_K2 K4_trustworthy'
parts_spies_takeWhile_mono [THEN subsetD]
parts_spies_evs_revD2 [THEN subsetD]
intro: Says_Auth)
...expands as follows - including extra exE because of new form of lemmas*)
apply (frule K3_imp_K2, assumption, assumption, erule exE, erule exE)
apply (case_tac "Key authK \<in> analz (spies evs5)")
apply (metis Says_imp_analz_Spy analz.Fst analz_Decrypt')
apply (frule K3_imp_K2, assumption, assumption, erule exE, erule exE)
apply (drule Says_imp_knows_Spy [THEN parts.Inj, THEN parts.Fst])
apply (frule servK_authentic_ter, blast, assumption+)
apply (drule parts_spies_takeWhile_mono [THEN subsetD])
apply (drule parts_spies_evs_revD2 [THEN subsetD])
txt\<open>\<^term>\<open>Says_K5\<close> closes the proof in version IV because it is clear which
servTicket an authenticator appears with in msg 5. In version V an authenticator can appear with any item that the spy could replace the servTicket with\<close>
apply (frule Says_K5, blast)
txt\<open>We need to state that an honest agent wouldn't send the wrong timestamp
within an authenticator, whatever it is paired with\<close>
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
lemma B_authenticates_and_keydist_to_A:
"\<lbrakk> Crypt servK \<lbrace>Agent A, Number T3\<rbrace> \<in> parts (spies evs);
Crypt (shrK B) \<lbrace>Agent A, Agent B, Key servK, Number Ts\<rbrace>
\<in> parts (spies evs);
Key servK \<notin> analz (spies evs);
B \<noteq> Tgs; A \<notin> bad; B \<notin> bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A Issues B with (Crypt servK \<lbrace>Agent A, Number T3\<rbrace>) on evs"
by (blast dest: B_authenticates_A A_Issues_B)
subsection\<open>Novel guarantees, never studied before\<close>
text\<open> Because honest agents always say
the right timestamp in authenticators, we can prove unicity guarantees based
exactly on timestamps. Classical unicity guarantees are based on nonces.
Of course assuming the agent to be different from the Spy, rather than not in
bad, would suffice below. Similar guarantees must also hold of
Kerberos IV.\<close>
text\<open>Notice that an honest agent can send the same timestamp on two
different traces of the same length, but not on the same trace!\<close>
lemma unique_timestamp_authenticator1:
"\<lbrakk> Says A Kas \<lbrace>Agent A, Agent Tgs, Number T1\<rbrace> \<in> set evs;
Says A Kas' \<lbrace>Agent A, Agent Tgs', Number T1\<rbrace> \<in> set evs;
A \<notin>bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Kas=Kas' \<and> Tgs=Tgs'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
lemma unique_timestamp_authenticator2:
"\<lbrakk> Says A Tgs \<lbrace>AT, Crypt AK \<lbrace>Agent A, Number T2\<rbrace>, Agent B\<rbrace> \<in> set evs;
Says A Tgs' \<lbrace>AT', Crypt AK' \<lbrace>Agent A, Number T2\<rbrace>, Agent B'\<rbrace> \<in> set evs;
A \<notin>bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> Tgs=Tgs' \<and> AT=AT' \<and> AK=AK' \<and> B=B'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
lemma unique_timestamp_authenticator3:
"\<lbrakk> Says A B \<lbrace>ST, Crypt SK \<lbrace>Agent A, Number T\<rbrace>\<rbrace> \<in> set evs;
Says A B' \<lbrace>ST', Crypt SK' \<lbrace>Agent A, Number T\<rbrace>\<rbrace> \<in> set evs;
A \<notin>bad; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> B=B' \<and> ST=ST' \<and> SK=SK'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
text\<open>The second part of the message is treated as an authenticator by the last
simplification step, even if it is not an authenticator!\<close>
lemma unique_timestamp_authticket:
"\<lbrakk> Says Kas A \<lbrace>X, Crypt (shrK Tgs) \<lbrace>Agent A, Agent Tgs, Key AK, T\<rbrace>\<rbrace> \<in> set evs;
Says Kas A' \<lbrace>X', Crypt (shrK Tgs') \<lbrace>Agent A', Agent Tgs', Key AK', T\<rbrace>\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A=A' \<and> X=X' \<and> Tgs=Tgs' \<and> AK=AK'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
text\<open>The second part of the message is treated as an authenticator by the last
simplification step, even if it is not an authenticator!\<close>
lemma unique_timestamp_servticket:
"\<lbrakk> Says Tgs A \<lbrace>X, Crypt (shrK B) \<lbrace>Agent A, Agent B, Key SK, T\<rbrace>\<rbrace> \<in> set evs;
Says Tgs A' \<lbrace>X', Crypt (shrK B') \<lbrace>Agent A', Agent B', Key SK', T\<rbrace>\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A=A' \<and> X=X' \<and> B=B' \<and> SK=SK'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: honest_never_says_current_timestamp_in_auth)
done
(*Uses K6's assumption that B \<noteq> Kas; otherwise B should say a
fresh timestamp*)
lemma Kas_never_says_newer_timestamp:
"\<lbrakk> (CT evs) \<le> T; Number T \<in> parts {X}; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall> A. Says Kas A X \<notin> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct, auto)
done
lemma Kas_never_says_current_timestamp:
"\<lbrakk> (CT evs) = T; Number T \<in> parts {X}; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall> A. Says Kas A X \<notin> set evs"
by (metis Kas_never_says_newer_timestamp eq_imp_le)
lemma unique_timestamp_msg2:
"\<lbrakk> Says Kas A \<lbrace>Crypt (shrK A) \<lbrace>Key AK, Agent Tgs, T\<rbrace>, AT\<rbrace> \<in> set evs;
Says Kas A' \<lbrace>Crypt (shrK A') \<lbrace>Key AK', Agent Tgs', T\<rbrace>, AT'\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A=A' \<and> AK=AK' \<and> Tgs=Tgs' \<and> AT=AT'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: Kas_never_says_current_timestamp)
done
(*Uses K6's assumption that B \<noteq> Tgs; otherwise B should say a
fresh timestamp*)
lemma Tgs_never_says_newer_timestamp:
"\<lbrakk> (CT evs) \<le> T; Number T \<in> parts {X}; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall> A. Says Tgs A X \<notin> set evs"
apply (erule rev_mp)
apply (erule kerbV.induct, auto)
done
lemma Tgs_never_says_current_timestamp:
"\<lbrakk> (CT evs) = T; Number T \<in> parts {X}; evs \<in> kerbV \<rbrakk>
\<Longrightarrow> \<forall> A. Says Tgs A X \<notin> set evs"
by (metis Tgs_never_says_newer_timestamp eq_imp_le)
lemma unique_timestamp_msg4:
"\<lbrakk> Says Tgs A \<lbrace>Crypt (shrK A) \<lbrace>Key SK, Agent B, T\<rbrace>, ST\<rbrace> \<in> set evs;
Says Tgs A' \<lbrace>Crypt (shrK A') \<lbrace>Key SK', Agent B', T\<rbrace>, ST'\<rbrace> \<in> set evs;
evs \<in> kerbV \<rbrakk>
\<Longrightarrow> A=A' \<and> SK=SK' \<and> B=B' \<and> ST=ST'"
apply (erule rev_mp, erule rev_mp)
apply (erule kerbV.induct)
apply (auto simp add: Tgs_never_says_current_timestamp)
done
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Auth/KerberosV.thy"}
|
"""
Collection of generic numpy array functions
"""
import math
import warnings
import numpy as np
from margrie_libs.margrie_libs.signal_processing.exceptions import BadRandomError, PeakDetectionError
def _get_decimate_new_n_pnts(trace, window_width, end_method):
methods = ("drop", "strict", "pad")
n_remaining_points = trace.size % window_width
if end_method == 'strict' and n_remaining_points != 0:
raise ValueError(
"The decimation factor does not create an exact point numbers and you have selected 'strict'")
elif end_method == 'drop':
n_samples_last_window = 0
elif end_method == 'pad':
n_samples_last_window = n_remaining_points if n_remaining_points <= 2 else 2 # TODO: find better name for 'pad'
else:
raise ValueError("end_method should be one of {}, got {}".
format(methods, end_method))
n_complete_windows = trace.size // window_width
new_n_pnts = n_complete_windows * 2 + n_samples_last_window
return new_n_pnts
def decimate(trace, decimation_factor=10, end_method="drop"):
    """
    Decimate (reduce the number of points) of the source trace to plot the trace.

    To preserve the visual aspect of the trace, the algorithm takes the min and max on a sliding window defined by
    decimation_factor.

    .. important:
        This function is intended for plotting only. For other uses, see more appropriate downsampling methods.

    :param trace: The trace to decimate
    :param int decimation_factor: the number X such that trace.size = X * out.size
    :param string end_method: How to deal with the last points ("drop", "strict" or "pad")
    :return: A decimated copy of the trace
    :raises TypeError: if decimation_factor is not an integer
    :raises ValueError: if decimation_factor < 1 or end_method is invalid
    """
    if not isinstance(decimation_factor, int):
        raise TypeError("Decimation factor should be an integer number. Got {}.".format(decimation_factor))
    if decimation_factor < 1:
        raise ValueError("Decimation factor needs to be at least 1 to get a window of 2. Got {}.".
                         format(decimation_factor))
    window_width = decimation_factor * 2
    new_n_pnts = _get_decimate_new_n_pnts(trace, window_width, end_method)
    out = np.zeros(new_n_pnts)
    for i, j in enumerate(range(0, new_n_pnts, 2)):  # by 2 because 1 min and 1 max for each window
        window_start_p = i * window_width
        if window_start_p >= trace.size:  # >=: a window starting at trace.size would be empty
            raise RuntimeError("Array {}, of size {}, iteration {}, from {} to {} ({} points)".
                               format(trace, trace.shape, i, window_start_p,
                                      window_start_p + window_width, window_width))
        # BUG FIX: the previous code set window_end_p = -1 when the window ran
        # past the end, which silently dropped the LAST sample of the trace
        # (trace[a:-1] excludes it).  Clamp to trace.size instead.
        window_end_p = min(window_start_p + window_width, trace.size)
        segment = trace[window_start_p:window_end_p]
        out[j] = segment.min()
        try:
            out[j + 1] = segment.max()
        except IndexError:  # If trace.size % window_width == 1 (odd output length)
            break
    return out
def decimate_x(x_trace, decimation_factor=10, end_method="drop"):
    """
    Build the x axis matching the output of decimate() called with the same
    parameters: an evenly spaced vector spanning the range of ``x_trace``.
    """
    win = decimation_factor * 2
    n_out = _get_decimate_new_n_pnts(x_trace, win, end_method)
    # FIXME: adjust not exactly x_trace[-1] because of drop
    return np.linspace(x_trace[0], x_trace[-1], n_out)
def find_sine_peaks_ranges(sine_trace):
    """
    Boolean mask of the samples whose absolute value exceeds 90% of the
    trace maximum (both lobes, since the absolute value is used).

    Sine has to be zero centered.
    """
    threshold = sine_trace.max() * 0.9
    return np.abs(sine_trace) > threshold
def find_sine_peaks(sine_trace):
    """
    Returns the indices (points) of the peaks.

    A "peak" is a contiguous run where |sine_trace| > 0.9 * max (see
    find_sine_peaks_ranges); within each run the position of the maximum is
    returned (the middle maximum if several samples tie).

    Sine has to be zero centered.

    :param sine_trace: 1-D zero-centered sine-like array
    :return: list of integer peak positions (absolute indices into sine_trace)
    :raises PeakDetectionError: if a detected peak contains a zero sample
    """
    peak_ranges = find_sine_peaks_ranges(sine_trace)
    with warnings.catch_warnings():
        # np.diff on a boolean array used to emit a DeprecationWarning
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        boundaries = np.diff(peak_ranges)
    # True where the mask toggles; even entries are rising edges (peak starts),
    # odd entries falling edges (peak ends).
    # NOTE(review): this pairing assumes the trace starts below the threshold
    # (first toggle is a rising edge) -- confirm for traces starting on a peak.
    boundaries_indices = (np.where(boundaries == True))[0]
    peak_starts = boundaries_indices[::2]
    peak_starts += 1  # add 1 because of diff
    peak_ends = boundaries_indices[1::2]
    peak_ends += 1  # add 1 because of diff
    peaks_pos = []
    for peak_start, peak_end in zip(peak_starts, peak_ends):
        peak = abs(sine_trace[peak_start:peak_end])  # abs because positive and negative peaks
        peak_pos = np.where(peak == peak.max())[0]  # because there may be several points at max
        if 0 in peak:
            raise PeakDetectionError('There should be no 0 in the peak, found {}'.format(peak))
        # Pick the middle of the tied maxima (biased one left for even counts).
        middle = int(math.floor(peak_pos.size / 2))
        if peak_pos.size % 2 == 0:
            middle -= 1
        peak_pos = peak_pos[middle]
        peak_pos += peak_start  # absolute position
        peaks_pos.append(peak_pos)
    return peaks_pos
def cut_and_avg_sine(sine_trace, trace, scaling=1):
    """
    Cut ``trace`` into one-period segments based on the peaks of
    ``sine_trace`` (see cut_and_get_multiple) and average the segments
    point-wise.

    sine_trace and trace must have the same number of points.
    """
    stacked = np.array(cut_and_get_multiple(sine_trace, trace, scaling=scaling),
                       dtype=np.float64)
    return stacked.mean(0)
def cut_and_sum_sine(sine_trace, trace, scaling=1):
    """
    Cut ``trace`` into one-period segments based on the peaks of
    ``sine_trace`` (see cut_and_get_multiple) and sum the segments
    point-wise.
    """
    stacked = np.array(cut_and_get_multiple(sine_trace, trace, scaling=scaling),
                       dtype=np.float64)
    return stacked.sum(0)
def cut_in_half(trace):
    """
    Split ``trace`` into two equal-length halves.

    For an odd-length trace the final sample is discarded so that both
    halves have exactly ``trace.size // 2`` points.

    :return: (first_half, second_half) views into ``trace``
    """
    half = trace.size // 2
    first_half = trace[:half]
    second_half = trace[half:2 * half]  # 2*half == size-1 for odd sizes: drops last sample
    assert first_half.size == second_half.size, "Length of first half and second half differ: {} and {}".format(first_half.size, second_half.size)
    return first_half, second_half
def cut_and_avg_halves(trace):
    """Average the first and second halves of ``trace`` point-wise."""
    first, second = cut_in_half(trace)
    return np.array((first, second), dtype=np.float64).mean(0)
def cut_and_get_multiple(sine_trace, trace, scaling=1):
    """
    sineTrace and trace must have same number of points.
    Cut trace based on peaks of sine to extract one period, and return all
    corresponding segments (each spanning two consecutive same-sign peaks,
    i.e. peak i to peak i+2).

    :param sine_trace: zero-centered sine used to locate the cut points
    :param trace: the trace to cut (indexed with the scaled peak positions)
    :param scaling: multiplier applied to the peak indices before slicing
        trace -- presumably because trace may be sampled at a multiple of
        sine_trace's rate; TODO confirm against callers
    :return: list of equal-length segments (all truncated to the shortest)

    .. warning:
        If the number of clockwise and counterclockwise segments differs, will only return the first N segments of each
        kind such that N = min(nClockWise, nCounterClockWise).

    NOTE(review): if fewer than 3 peaks are found, ``lengths`` stays empty and
    ``min(lengths)`` raises a bare ValueError -- confirm whether callers
    guarantee enough periods.
    """
    peaks_locs = np.array(find_sine_peaks(sine_trace), dtype=np.int64)  # full peaks ==> not the ramp peak
    peaks_locs *= scaling
    segments = []
    lengths = []
    # Step by 2 so each segment runs between same-phase peaks (one full period).
    for i in range(0, (peaks_locs.size - 1), 2):
        start_p = peaks_locs[i]
        try:
            end_p = peaks_locs[i+2]
        except IndexError:  # no peak two positions ahead: last complete period reached
            break
        segment = trace[start_p:end_p]
        lengths.append(segment.size)
        segments.append(segment)
    min_length = min(lengths)  # TODO: put criterion on max number of points diff
    segments = [s[:min_length] for s in segments]
    return segments
def avg(mat):
    """
    Collapse ``mat`` to a 1-D vector by repeatedly averaging over axis 1
    (so a 3-D stack is averaged across its 2nd then 3rd dimensions).

    Assumes the matrix is fully filled (no NaN, since this is an average of
    averages).
    """
    while mat.ndim > 1:
        mat = np.average(mat, axis=1)
    return mat
def avg_waves(waves):
    """
    Stack the input list into a numpy array and return the point-wise
    average across the first dimension.

    :param list waves: list of equally shaped waves
    :return: the mean wave
    """
    stacked = np.array(waves)
    return stacked.mean(0)  # TODO: check dimension
def sd(mat):
    """
    Return the standard deviation of ``mat`` along its 2nd dimension.

    A 3-D stack is first flattened by concatenating its planes along axis 1;
    a 1-D input is returned unchanged. Assumes the matrix is fully filled
    (no NaN).
    """
    if mat.ndim > 2:
        planes = [mat[:, :, k] for k in range(mat.shape[2])]
        return sd(np.concatenate(planes, axis=1))
    if mat.ndim == 2:
        return np.std(mat, axis=1)
    return mat
def out_of_place_shuffle(src_array):
    """
    A function to clean the numpy shuffle function and not modify in place.

    Shuffles along the first axis only (np.random.shuffle semantics).

    :param src_array: the array to shuffle
    :return: a shuffled copy of ``src_array`` (the ORIGINAL object, not a
        copy, if the array is all zeros)
    :raises BadRandomError: if the shuffle happens to return the elements in
        the original order
    """
    # All-zero arrays are returned untouched: a shuffle would be a no-op and
    # would always trip the identity check below.
    if (src_array == 0).all():
        return src_array
    tmp = src_array.copy()
    np.random.shuffle(tmp)
    # NOTE(review): a size-1 array (or any array of identical non-zero values)
    # always compares equal after shuffling and therefore always raises here
    # -- confirm callers never pass such input.
    if (tmp == src_array).all():
        raise BadRandomError('srcArray: {}\ntmpArray: {}'.format(src_array, tmp))
    return tmp
def shuffle(mat):
    """
    Returns the randomly shuffled version of the input array.
    The shuffle is performed on a first-dimension basis: for 2-D and 3-D
    input, each column (resp. each (y, z) lane) is shuffled independently
    along axis 0.

    :param mat: 1-, 2- or 3-D array (all-zero arrays are returned unchanged)
    :return: a shuffled copy of ``mat``
    :raises BadRandomError: if the overall result equals the input
    :raises NotImplementedError: for arrays of more than 3 dimensions
    """
    if (mat == 0).all():
        return mat
    out = mat.copy()
    if mat.ndim == 1:
        out[:] = np.nan  # For safety
        # NOTE(review): the NaN fill is immediately overwritten by the
        # rebinding below, so it is a dead store in the 1-D branch.
        out = out_of_place_shuffle(mat)
    elif mat.ndim == 2:
        out[:, :] = np.nan
        # Shuffle each column independently along the first axis.
        for y in range(mat.shape[1]):
            out[:, y] = out_of_place_shuffle(mat[:, y])
    elif mat.ndim == 3:
        out[:, :, :] = np.nan
        for z in range(mat.shape[2]):
            for y in range(mat.shape[1]):
                out[:, y, z] = out_of_place_shuffle(mat[:, y, z])
    else:
        raise NotImplementedError('Number of dimension {} is not implemented'.format(mat.ndim))
    # Defensive check that at least one element actually moved.
    if (mat == out).all():
        raise BadRandomError('srcArray: {}\ntmp: {}'.format(mat, out))
    return out
def linearise(array):
    """Return ``array`` flattened to one dimension (view when possible)."""
    return array.reshape(-1)
def get_uniques(items):
    """Return the unique elements of ``items`` as a sorted list."""
    return sorted(set(items))
def make_uniques(values):
    """
    Return the unique elements of ``values`` as a sorted list.

    This was a byte-for-byte duplicate of get_uniques(); it now delegates to
    it so the two cannot drift apart (kept for backward compatibility).
    """
    return get_uniques(values)
def flip_odd_rows(mat):
    """
    Return a copy of ``mat`` with every odd-indexed column reversed along
    axis 0; the input is left untouched.

    NOTE(review): despite the name, this flips odd *columns* (it iterates
    ``mat.shape[1]``) -- confirm against callers (original carried the same
    "TEST: check that rows/col correct" doubt).
    """
    out = mat.copy()
    out[:, 1::2] = mat[::-1, 1::2]
    return out
def make_mask_from_indices(size, idx_true=None):
    """
    Create a boolean mask from a list of indices.

    :param int size: the total size of the mask
    :param list idx_true: indices to be returned as True in the mask
        (defaults to all of them); out-of-range indices are silently ignored
    :return: 1-D numpy boolean array of length `size`
    """
    # TODO: make work for n dimensional? is this something the np.ma module could do better?
    if idx_true is None:
        idx_true = range(size)
    # Set gives O(1) membership tests instead of O(len(idx_true)) per element.
    wanted = set(idx_true)
    return np.array([i in wanted for i in range(size)])
|
{"hexsha": "e0485bec65d56b984c8b4e363e80c4ab4a1d4cbe", "size": 10436, "ext": "py", "lang": "Python", "max_stars_repo_path": "margrie_libs/margrie_libs/signal_processing/mat_utils.py", "max_stars_repo_name": "Sepidak/spikeGUI", "max_stars_repo_head_hexsha": "25ae60160308c0a34e7180f3e39a1c4dc6aad708", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "margrie_libs/margrie_libs/signal_processing/mat_utils.py", "max_issues_repo_name": "Sepidak/spikeGUI", "max_issues_repo_head_hexsha": "25ae60160308c0a34e7180f3e39a1c4dc6aad708", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-09T21:51:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T21:51:45.000Z", "max_forks_repo_path": "margrie_libs/margrie_libs/signal_processing/mat_utils.py", "max_forks_repo_name": "Sepidak/spikeGUI", "max_forks_repo_head_hexsha": "25ae60160308c0a34e7180f3e39a1c4dc6aad708", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-10-16T14:07:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-16T17:09:03.000Z", "avg_line_length": 34.7866666667, "max_line_length": 147, "alphanum_fraction": 0.6340551936, "include": true, "reason": "import numpy", "num_tokens": 2505}
|
%==============================================================================
% This code is part of the Matlab-based toolbox
% LagLDDDM - A Lagrangian Gauss--Newton--Krylov Solver for Mass- and
% Intensity-Preserving Diffeomorphic Image Registration
%
% For details and license info see
% - https://github.com/C4IR/FAIR.m/tree/master/add-ons/LagLDDMM
%
% function [Sc,dS,d2S] = diffusionST(vc,omega,m,varargin)
%
% Matrix-free spatio-temporal diffusion regularization energy for vc, where
% vc is cell-centered
%
% S(v) = 0.5 * \int_{\omega} alpha(1)*v(x)'*A*v(x)+ alpha(2)*v(x)'*B*v(x) dx,
%
% where A is the spatial gradient operator and B the time derivative operator.
%
% Input:
%
% vc instationary velocity field (cell-centered)
% omega spatial domain
% m number of discretization points
% varargin optional parameters (see below)
%
% Optional Input:
%
% tspan time span (default: [0 1])
% nt number of time points for velocity (default: computed from input)
%
%
% Output:
%
% Sc current value (0.5 * hd * vc'* A *vc + 0.5*dt*vc'*B*vc)
% dS derivative (hd * vc'*A )
% d2S Hessian, struct A
% ==================================================================================
function [Sc,dS,d2S] = mfDiffusionST(vc,omega,m,varargin)
% Matrix-free spatio-temporal diffusion regularizer (see header above):
% returns energy Sc, gradient dS and a struct d2S describing the Hessian.
if nargin == 0
    help(mfilename);
    return;
end
% Parameter-query mode used by the FAIR framework: vc == 'para'.
if strcmp(vc,'para')
    Sc = 'cell-centered'; % grid
    dS = 1; % matrixFree
    d2S = @spectralPrecondPCG; % solver
    return;
end
alpha = [1 1e-3];
tspan = [0 1];
nt = [];
for k=1:2:length(varargin) % overwrites default parameter
    eval([varargin{k},'=varargin{',int2str(k+1),'};']);
end;
dim = numel(omega)/2;
if isempty(nt) % roughly estimate nt
    nt = round(numel(vc)/(prod(m)*dim))-1;
end
% NOTE(review): `regularizer` is only defined when passed via varargin
% (assigned through the eval above) -- confirm callers always supply it.
d2S.regularizer = regularizer;
d2S.alpha = alpha;
d2S.B = @(omega,m) getDiffusionMatrixST(omega,tspan,m,nt,alpha);
d2S.d2S = @(u,omega,m) diffusionOperatorST(u,omega,tspan,m,nt,alpha);
d2S.diag = @(omega,m) diag(omega,tspan,m,nt,alpha);
d2S.solver = @spectralPrecondPCG;
d2S.res = vc;
% S(v) = 0.5*v'*A*v  =>  dS = (A*vc)' and Sc = 0.5*dS*vc.
dS = d2S.d2S(vc,omega,m)';
Sc = .5*dS*vc;
function B = getDiffusionMatrixST(omega,tspan,m,nt,alpha)
% Assemble the sparse spatio-temporal regularization matrix B so that
% S(v) = 0.5*||B*v||^2: spatial gradients weighted by alpha(1), the time
% derivative weighted by alpha(2).
h = (omega(2:2:end)-omega(1:2:end))./m;
hd = prod(h);
% compute time-stepsize
dt = abs(tspan(2)-tspan(1))/nt;
% build gradient matrix for one transformation
Bx = getSpaceGradientMatrix(omega,m);
% a = sqrt(alpha(1).*hd.*dt)*ones(nt+1,1);
% trapezoidal time weights: half weight on the first and last time point
a = sqrt(alpha(1).*hd.*dt*[1/2;ones(nt-1,1);1/2]);
% apply spatial regularization to all transformations and sum up in
% time
Bx = kron(sdiag(a(:)),Bx);
% get time regularization matrix
b = sqrt(alpha(2)*hd.*dt);
Bt = b * getTimeDiffusionMatrix(nt,dt,prod(m),length(omega)/2);
% build B
B = [Bx;Bt];
function D = sdiag(v)
% Sparse diagonal matrix with the entries of v on its main diagonal.
n = numel(v);
D = spdiags(v(:),0,n,n);
% get diagonal of d2S (interesting in matrix free mode)
function D = diag(omega,tspan,m,nt,alpha)
% Diagonal of the Hessian d2S (used for preconditioning in matrix-free
% mode).  NOTE: deliberately shadows the builtin `diag`; it is only ever
% referenced through the function handle d2S.diag in mfDiffusionST.
dim = numel(omega)/2;
one = @(i,j) One(omega,m,i,j);
hd = prod((omega(2:2:end)-omega(1:2:end))./m);
% compute time stepsize
dt = abs(tspan(1)-tspan(2))/nt;
if dim == 2
    Dx = [ one(1,1) + one(1,2);
        one(2,1) + one(2,2) ];
    Dx = kron(hd*dt*ones(nt+1,1),Dx);
    Dt = [1/2;ones(nt-1,1);1/2]/dt^2;
    Dt = kron(hd*dt*Dt , ones(prod(m)*dim,1));
    D = alpha(1)*Dx + alpha(2)*Dt;
else
    Dx = [ ...
        one(1,1)+one(1,2)+one(1,3);
        one(2,2)+one(2,1)+one(2,3);
        one(3,3)+one(3,1)+one(3,2)];
    % Fixed: this branch previously referenced undefined variables
    % (dts, mTime, mSpace); it now mirrors the 2D branch above.
    Dx = kron(hd*dt*ones(nt+1,1),Dx);
    Dt = [1/2;ones(nt-1,1);1/2]/dt^2;
    Dt = kron(hd*dt*Dt, ones(prod(m)*dim,1));
    D = alpha(1)*Dx + alpha(2)*Dt;
end;
% helper for computation of diag(d2S)
function o = One(omega,m,i,j)
% Per-cell diagonal contribution of Dj'*Dj for direction j: 1/h(j)^2
% everywhere, doubled at interior points along dimension j.
% (Parameter i is kept for interface compatibility.)
h = (omega(2:2:end)-omega(1:2:end))./m;
o = ones(m)/h(j)^2;
if j == 1
    o(2:end-1,:,:) = 2*o(2:end-1,:,:);
elseif j == 2
    o(:,2:end-1,:) = 2*o(:,2:end-1,:);
elseif j == 3
    o(:,:,2:end-1) = 2*o(:,:,2:end-1);
end
o = o(:);
% matrix free implementation of spatio-temporal diffusion operator
function Ay = diffusionOperatorST(vc,omega,tspan,m,nt,alpha)
% Matrix-free application of the spatio-temporal diffusion Hessian:
% Ay = (alpha(1)*Dx'*Dx [time-weighted] + alpha(2)*Dt'*Dt) * vc.
dim = numel(omega)/2;
h = (omega(2:2:end)-omega(1:2:end))./m;
hd = prod(h(1:dim));
dt = abs(tspan(1)-tspan(2))/nt;
% trapezoidal quadrature weights over the nt+1 time points
w = dt*[1/2;ones(nt-1,1);1/2];
switch dim
    case 2
        % forward differences and their adjoints, applied matrix-free
        d1 = @(Y) (Y(2:end,:)-Y(1:end-1,:))/h(1);
        d2 = @(Y) (Y(:,2:end)-Y(:,1:end-1))/h(2);
        d1T = @(Y) reshape([-Y(1,:);Y(1:end-1,:)-Y(2:end,:);Y(end,:)],[],1)/h(1);
        d2T = @(Y) reshape([-Y(:,1),Y(:,1:end-1)-Y(:,2:end),Y(:,end)],[],1)/h(2);
        % one column per time point
        vc = reshape(vc,[],nt+1);
        Ay = zeros(prod(m)*dim,nt+1);
        % spatial diffusion
        for k=1:nt+1
            vct = reshape(vc(:,k),[m dim]);
            Ay(:,k) = w(k) * hd* alpha(1) * ....
                [ (d1T(d1(vct(:,:,1))) + d2T(d2(vct(:,:,1)))); ...
                (d1T(d1(vct(:,:,2))) + d2T(d2(vct(:,:,2))))];
        end
        Ay = Ay(:);
    case 3
        d1 = @(Y) (Y(2:end,:,:)-Y(1:end-1,:,:))/h(1);
        d2 = @(Y) (Y(:,2:end,:)-Y(:,1:end-1,:))/h(2);
        d3 = @(Y) (Y(:,:,2:end)-Y(:,:,1:end-1))/h(3);
        % adjoints delegate to the d1t/d2t/d3t helpers below
        d1T = @(Y) reshape(d1t(Y),[],1)/h(1);
        d2T = @(Y) reshape(d2t(Y),[],1)/h(2);
        d3T = @(Y) reshape(d3t(Y),[],1)/h(3);
        vc = reshape(vc,[],nt+1);
        Ay = zeros(prod(m)*dim,nt+1);
        % spatial diffusion
        for k=1:nt+1
            vct = reshape(vc(:,k),[m dim]);
            Ay(:,k) = w(k) * hd* alpha(1) * ....
                [ ...
                (d1T(d1(vct(:,:,:,1))) + d2T(d2(vct(:,:,:,1))) + d3T(d3(vct(:,:,:,1)))); ...
                (d1T(d1(vct(:,:,:,2))) + d2T(d2(vct(:,:,:,2))) + d3T(d3(vct(:,:,:,2)))); ...
                (d1T(d1(vct(:,:,:,3))) + d2T(d2(vct(:,:,:,3))) + d3T(d3(vct(:,:,:,3)))); ...
                ];
        end
        Ay = Ay(:);
    otherwise
        error('%s - dimension %d not supported.',mfilename,dim);
end
% time diffusion
if alpha(2)>0
    Dt = @(Y) (Y(:,2:end)-Y(:,1:end-1))/dt;
    DtT = @(Y) ([-Y(:,1),Y(:,1:end-1)-Y(:,2:end),Y(:,end)])/dt;
    At = DtT(Dt(vc));
    Ay = Ay + (alpha(2)* hd*dt)*At(:);
end
% partial derivative operator for x1 (mf)
function y = d1t(Y)
% Adjoint of the forward difference along dimension 1 (matrix-free).
sz = size(Y);
y = zeros(sz+[1,0,0]);
y(1,:,:) = -Y(1,:,:);
y(2:end-1,:,:) = -diff(Y,1,1);
y(end,:,:) = Y(end,:,:);
% partial derivative operator for x2 (mf)
function y = d2t(Y)
% Adjoint of the forward difference along dimension 2 (matrix-free).
sz = size(Y);
y = zeros(sz+[0,1,0]);
y(:,1,:) = -Y(:,1,:);
y(:,2:end-1,:) = -diff(Y,1,2);
y(:,end,:) = Y(:,end,:);
% partial derivative operator for x3 (mf)
function y = d3t(Y)
% Adjoint of the forward difference along dimension 3 (matrix-free);
% pads a singleton third dimension when Y happens to be 2-D.
sz = size(Y); if length(sz) == 2, sz = [sz,1]; end;
y = zeros(sz+[0,0,1]);
y(:,:,1) = -Y(:,:,1);
y(:,:,2:end-1) = -diff(Y,1,3);
y(:,:,end) = Y(:,:,end);
function A = getSpaceGradientMatrix(omega,m)
% Sparse discrete gradient operator on a cell-centered grid, replicated
% (via kron at the end) for each of the dim velocity components.
dim = length(omega)/2;
h = (omega(2:2:end)- omega(1:2:end)) ./m ;
I = @(i) speye(m(i));
% setup regularizer for y
%
% S(y) = INT |Dy(.,t)|^2 + INT |d/dt y(x,.)|^2 dx
switch dim
    case 2
        % build discrete derivative operators
        D1=spdiags(ones(m(1),1)*[-1 1],0:1,m(1)-1,m(1)); D1=D1/(h(1));
        D2=spdiags(ones(m(2),1)*[-1 1],0:1,m(2)-1,m(2)); D2=D2/(h(2));
        % spatial regularization
        A = [kron(I(2),D1); kron(D2,I(1))];
    case 3
        % build discrete derivative operators
        D1=spdiags(ones(m(1),1)*[-1 1],0:1,m(1)-1,m(1)); D1=D1/(h(1));
        D2=spdiags(ones(m(2),1)*[-1 1],0:1,m(2)-1,m(2)); D2=D2/(h(2));
        D3=spdiags(ones(m(3),1)*[-1 1],0:1,m(3)-1,m(3)); D3=D3/(h(3));
        % build gradient (scalar)
        A = [kron(I(3),kron(I(2),D1)); ...
            kron(I(3),kron(D2,I(1))); ...
            kron(D3,kron(I(2),I(1)))];
end
A = kron(speye(dim), A); % nD gradient
function A = getTimeDiffusionMatrix(nt,dt,n,dim)
% Forward time-difference operator (nt x nt+1), replicated over all
% n*dim spatial unknowns.
e = ones(nt+1,1);
Dt = spdiags([-e e]/dt,0:1,nt,nt+1);
A = kron(Dt, speye(n*dim)); % first-order time derivative for all components
|
{"author": "C4IR", "repo": "FAIR.m", "sha": "975edebd37b833ae76696792870de5c05efcb9cb", "save_path": "github-repos/MATLAB/C4IR-FAIR.m", "path": "github-repos/MATLAB/C4IR-FAIR.m/FAIR.m-975edebd37b833ae76696792870de5c05efcb9cb/add-ons/LagLDDMM/mfDiffusionST.m"}
|
# importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib
import numpy as np
import cv2
from warnings import warn
from collections import deque
from sklearn.cluster import KMeans as ClusterFinder
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: display the result with plt.imshow(gray, cmap='gray').
    Use cv2.COLOR_BGR2GRAY instead if the image was read with cv2.imread().
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth img with a square Gaussian kernel of the given size."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Apply a polygonal image mask.

    Only the region inside the polygon formed from `vertices` is kept;
    the rest of the image is set to black.
    """
    # fillPoly expects a batch dimension on the vertex array.
    if vertices.ndim == 2:
        vertices = vertices[np.newaxis, ...]
    mask = np.zeros_like(img)
    # Fill value matches the channel count of the input image.
    if img.ndim > 2:
        fill = (255,) * img.shape[2]
    else:
        fill = 255
    cv2.fillPoly(mask, vertices, fill)
    # Keep the image only where the mask pixels are nonzero.
    return cv2.bitwise_and(img, mask)
def draw_lines(img, xyxyVecs, color=[255, 0, 0], thickness=2):
    """
    Draw each [x1, y1, x2, y2] segment from xyxyVecs onto img.

    Lines are drawn in place (mutates the image); color is RGB.  Combine
    with weighted_img() to make the lines semi-transparent.  This is the
    natural starting point for averaging/extrapolating segments (e.g. by
    slope sign) into full left/right lane lines.
    """
    for seg in xyxyVecs:
        x1, y1, x2, y2 = seg.astype(int)
        cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, minLineLen, maxLineGap):
    """
    Run a probabilistic Hough transform on a Canny edge image.

    Returns segments as [x1, y1, x2, y2] arrays (an empty array when
    nothing is detected).
    """
    segments = cv2.HoughLinesP(
        img, rho, theta, threshold, np.array([]),
        minLineLength=minLineLen, maxLineGap=maxLineGap
    )
    return segments if segments is not None else np.empty((0, 1, 4))
def draw_hough_lines(xyxyVecs, rows, cols, **kwargs):
    """Render the given segments onto a fresh black RGB canvas."""
    canvas = np.zeros((rows, cols, 3), dtype=np.uint8)
    draw_lines(canvas, xyxyVecs, **kwargs)
    return canvas
def fake_color(bw, color=[1., 1., 1.]):
    """
    Colorize a single-channel image by scaling it into three RGB channels.

    Fixed a channel-ordering bug: the stack order was (r, b, g), swapping
    the green and blue scale factors; it is now (r, g, b) as the
    unpacking order implies.
    """
    r, g, b = color
    return np.dstack((r * bw, g * bw, b * bw)).astype(bw.dtype)
def to_color_if_bw(bw, color=[1., 1., 1.]):
    """Promote a grayscale image to 3 channels; pass color images through."""
    needs_color = bw.ndim == 2 or bw.shape[2] == 1
    return fake_color(bw) if needs_color else bw
def weighted_img(img, initial_img, alpha=0.8, beta=1., gamma=0.):
    """
    Blend a (typically line-overlay) image onto the original:

        result = initial_img * beta + img * alpha + gamma

    Both inputs are promoted to 3 channels first; shapes must match.
    """
    return cv2.addWeighted(
        to_color_if_bw(initial_img), beta,
        to_color_if_bw(img), alpha,
        gamma,
    )
def draw_polygon(vertices, ax=None, **kwargs):
    """
    Draw `vertices` as a closed, dashed polygon patch on `ax`
    (default: the current axes).  Styling defaults can be overridden
    through kwargs.
    """
    defaults = dict(edgecolor='blue', lw=4, facecolor='none', linestyle='--')
    for k, v in defaults.items():
        kwargs.setdefault(k, v)
    if ax is None:
        ax = plt.gca()
    from matplotlib.patches import Polygon
    # `closed` is passed by keyword: positional use after `xy` is
    # deprecated in Matplotlib.  (Unused PatchCollection import removed.)
    return ax.add_patch(Polygon(vertices, closed=True, **kwargs))
def pixelwise_max(*imgs):
    """Element-wise maximum over any number of same-shaped images."""
    return np.max(np.stack(imgs), axis=0)
class Pipeline(object):
    '''Pipeline for lane-line-finding.'''

    def __init__(self,
                 horizon=.6, horizonRadius=None, hoodClearance=0,
                 gaussianRadius=5,
                 lowCannyThreshold=50, highCannyThreshold=150,
                 rho=20, theta=np.pi/120, houghThreshold=64, minLineLen=50, maxLineGap=20,
                 minAbsSlope=.5, maxAbsSlope=2,
                 ):
        '''
        Parameters
        ----------
        horizon : float, optional
            How far down on the image the top edge of the search region trapezoid
            should be. Specified as a number in (0.0, 1.0),
            where 0.0 is at the top of the image and 1.0, the bottom.
        horizonRadius : float, optional
            Half-width of the top edge of the search region trapezoid,
            as a fraction of the image width (see the `vertices` property).
        hoodClearance : float, optional
            Fraction of the image height cut off the bottom edge of the
            search region (e.g. to skip the car hood).
        gaussianRadius : int, optional
            Diameter of Gaussian pre-smoothing.
        lowCannyThreshold : int, optional
        highCannyThreshold : int, optional
            Hysteresis thresholds for Canny edge detection.
        rho : float, optional
            Radius increment for assembling the Hough accumulator array. In units of pixels.
        theta : float, optional
            Angle increment for assembling the Hough accumulator array. In units of radians.
        houghThreshold : int, optional
            Minimum number of accumulated Hough votes for inclusion.
        minLineLen : float, optional
            "Minimum length of line. Line segments shorter than this are rejected."
        maxLineGap : float, optional
            "Maximum allowed gap between line segments to treat them as single line."
        minAbsSlope : float, optional
            Lower threshold on line slope for filtering Hough candidates
        maxAbsSlope : float, optional
            Upper threshold on line slope for filtering Hough candidates
        '''
        self.horizon = horizon
        self.horizonRadius = horizonRadius
        self.hoodClearance = hoodClearance
        self.gaussianRadius = gaussianRadius
        self.lowCannyThreshold = lowCannyThreshold
        self.highCannyThreshold = highCannyThreshold
        self.rho = rho
        self.theta = theta
        self.houghThreshold = houghThreshold
        self.minLineLen = minLineLen
        self.maxLineGap = maxLineGap
        self.minAbsSlope = minAbsSlope
        self.maxAbsSlope = maxAbsSlope
        # Debug-rendering settings; see __call__.
        self.debug = False
        self.debugThickness = 4
        self.debugColor = [0, 255, 0]

    @property
    def vertices(self):
        '''Trapezoidal search region in pixel coordinates (int32).

        Requires self.rows/self.cols, which are set by prepare().
        '''
        if self.horizonRadius is None:
            calibration = .04
            # Calibrate for a horizonRadius of `calibration` at horizon=.6
            x1 = (1 - self.horizon) * (.5 - calibration) / .4
            self.horizonRadius = .5 - x1
        x = self.cols
        y = self.rows
        return np.array([
            (0, y * (1. - self.hoodClearance)),
            (x * (.5 - self.horizonRadius), y * self.horizon),
            (x * (.5 + self.horizonRadius), y * self.horizon),
            (x, y * (1. - self.hoodClearance))
        ], dtype=np.int32)

    def prepare(self, image):
        '''Blur and edge-detect each RGB channel, merge, and mask to the search region.'''
        self.rows, self.cols = image.shape[:2]
        rgb = []
        for k in range(3):
            out = image[:, :, k]
            out = gaussian_blur(out, self.gaussianRadius)
            out = canny(out, self.lowCannyThreshold, self.highCannyThreshold)
            rgb.append(out)
        out = pixelwise_max(*rgb)
        out = region_of_interest(out, self.vertices)
        return out

    def find_lines(self, preparedImage):
        '''Run the Hough transform and wrap each detection in a LineSegment.'''
        return [
            LineSegment(l)
            for l in hough_lines(
                preparedImage, self.rho, self.theta, self.houghThreshold,
                self.minLineLen, self.maxLineGap
            )
        ]

    def check_line(self, l):
        '''True when the line's slope magnitude is finite and in the accepted band.'''
        return (
            np.abs(l.m) >= self.minAbsSlope
            and np.abs(l.m) <= self.maxAbsSlope
            and np.abs(l.m) != np.inf
        )

    def deduplicate_lines(self, lines):
        '''Remove near-duplicates from a collection of lines.'''
        if len(lines) > 1:
            # Work in (slope, intercept) space, keeping plausible lines only.
            points = [
                (l.m, l.b)
                for l in lines
                if self.check_line(l)
            ]
            if len(points) <= 1:
                return self.deduplicate_lines([LineSegment(mb=pt) for pt in points])
            # Two clusters: one per lane line; cluster centers become the lines.
            clusterFinder = ClusterFinder(n_clusters=2, n_jobs=1)
            clusterFinder.fit(np.vstack(points))
            deduplicatedLines = [
                LineSegment(xyxy=None, mb=mb)
                for mb in clusterFinder.cluster_centers_
            ]
        else:
            deduplicatedLines = [l.copy() for l in lines]
        # Ensure that the up-sloping line is always second.
        if len(deduplicatedLines) == 2:
            if deduplicatedLines[0].m > deduplicatedLines[1].m:
                deduplicatedLines = deduplicatedLines[::-1]
        return deduplicatedLines

    def smooth_history(self, lines, nsmooth):
        '''Average (m, b) over the line pairs of the last `nsmooth` frames.'''
        if not hasattr(self, 'lineHistory'):
            self.lineHistory = deque(maxlen=nsmooth)
        # Only complete (left, right) pairs contribute to the history.
        if len(lines) == 2:
            self.lineHistory.append(lines)
        mbs = [
            [
                (linePair[k].m, linePair[k].b)
                for linePair in self.lineHistory
            ]
            for k in range(2)
        ]
        ms = [np.mean([mb[0] for mb in mbsk]) for mbsk in mbs]
        bs = [np.mean([mb[1] for mb in mbsk]) for mbsk in mbs]
        lines = [
            LineSegment(mb=(m, b))
            for (m, b) in zip(ms, bs)
        ]
        return lines

    def bake_lines(self, originalImage, lines, thickness=12, color=(232, 119, 34), alpha=1):
        '''Rasterize `lines` and alpha-blend them onto `originalImage`.'''
        return weighted_img(
            draw_hough_lines([l.xyxy for l in lines], self.rows, self.cols, thickness=thickness, color=color),
            originalImage,
            alpha=alpha
        )

    def __call__(self, image, smoothing=False, nsmooth=12):
        '''Process one frame: detect, deduplicate, optionally smooth, and draw lane lines.'''
        # Preprocess the image.
        preparedImage = self.prepare(np.copy(image))
        # Find the lines.
        allLines = self.find_lines(preparedImage)
        lines = self.deduplicate_lines(allLines)
        if smoothing:
            lines = self.smooth_history(lines, nsmooth)
        self.recut_lines(lines)
        # Draw the lines.
        if self.debug:
            # Debug mode: draw all filtered candidates over the edge image.
            baked = np.copy(preparedImage)
            filteredLines = [
                l for l in allLines
                if self.check_line(l)
            ]
            baked = self.bake_lines(baked, filteredLines, color=self.debugColor, thickness=self.debugThickness)
            # Put some info in the top left corner.
            infoImg = pad_image(self.make_mb_distribution_plot_image(filteredLines, lines)[:, :, :3], baked.shape)
            baked = weighted_img(infoImg, baked, alpha=1)
        else:
            baked = np.copy(image)
            baked = self.bake_lines(baked, lines, alpha=.5)
        return baked

    def recut_lines(self, lines):
        '''Stretch each line (in place) to span the search region vertically.'''
        vy = self.vertices[:, 1]
        for l in lines:
            l.extend_to_horizontal_borders(top=max(vy), bottom=min(vy))
        return lines

    def pretty_show(self, image):
        '''Make a nicer annotated figure.'''
        fig, ax = plt.subplots()
        preparedImage = self.prepare(image)
        allLines = self.find_lines(preparedImage)
        lines = self.recut_lines(self.deduplicate_lines(allLines))
        ax.imshow(image)
        for l in lines:
            l.plot_line(ax, lw=8, linestyle='-', color='orange', alpha=.9)
        for l in allLines:
            l.plot_line(ax, lw=1, linestyle='-', color='magenta', alpha=.5)
        v = self.vertices.squeeze()
        draw_polygon(v, ax, alpha=.1, edgecolor='black', facecolor='orange')
        ax.grid(True)
        ax.set_ylim((image.shape[0], 0))
        ax.set_xlim((0, image.shape[1]));
        ax.set_xticks([])
        ax.set_yticks([])
        return fig, ax

    def process_video(self, inpath, outpath, audio=False, subsection=None, show=True, smoothing=True, nsmooth=12):
        '''Run the pipeline over a video file; optionally return an HTML player.'''
        from moviepy.editor import VideoFileClip
        inclip = VideoFileClip(inpath)
        if subsection is not None:
            inclip = inclip.subclip(*subsection)
        outclip = inclip.fl_image(lambda img: self(img, smoothing=smoothing, nsmooth=nsmooth))
        outclip.write_videofile(outpath, audio=audio)
        if show:
            from IPython.display import HTML
            return HTML("""
            <video width="960" height="540" controls loop autoplay>
            <source src="{0}">
            </video>
            """.format(outpath))

    def make_mb_distribution_plot_image(self, lines, linesdedup):
        '''Make a plot with some extra information to put in the corner of the video.'''
        import matplotlib as mpl
        for k in 'xtick', 'ytick', 'axes':
            mpl.rc(k, labelsize=10)
        # Map a list of lines to an array of (m, b) points, filtered.
        l2p = lambda ls: np.stack([
            (l.m, l.b)
            for l in ls
            if self.check_line(l)
        ])
        fig, ax = plt.subplots(figsize=(4, 3))
        ax.scatter(l2p(lines)[:, 0], l2p(lines)[:, 1], color='yellow', s=24)
        ax.scatter(l2p(linesdedup)[:, 0], l2p(linesdedup)[:, 1], color='red', s=128, marker='*')
        ax.set_xlabel('slope $m$', color='white'); ax.set_ylabel('intercept $b$', color='white')
        ax.set_xlim(-self.maxAbsSlope, self.maxAbsSlope)
        ax.set_ylim(-2000, 2000)
        ax.patch.set_facecolor('black')
        fig.patch.set_facecolor('black')
        fig.tight_layout()
        out = fig2data(fig)
        plt.close(fig)
        return out
def pad_image(small, targetShape, padValue=0):
    """Embed `small` (promoted to 3 channels) in the top-left corner of a
    `targetShape` canvas filled with `padValue`."""
    if len(targetShape) == 2:
        targetShape = list(targetShape) + [3]
    out = np.full(targetShape, padValue, dtype=small.dtype)
    out[:small.shape[0], :small.shape[1], :] = to_color_if_bw(small)
    return out
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values, shaped (height, width, 4)
    """
    # Draw the renderer so the canvas buffer is populated.
    fig.canvas.draw()
    # get_width_height() returns (width, height); the raster buffer is laid
    # out rows-first, so the array shape is (height, width, 4).  The old
    # code had the variable names crossed (h held the width).
    w, h = fig.canvas.get_width_height()
    # np.frombuffer replaces the long-deprecated np.fromstring; copy so the
    # result is writable and independent of the canvas buffer.
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8).copy()
    buf.shape = (h, w, 4)
    # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll(buf, 3, axis=2)
    return buf
def save_image(data, path):
    """Rescale `data` into the 0-255 uint8 range and save it as an image."""
    from PIL import Image
    scaled = (255.0 / data.max() * (data - data.min())).astype(np.uint8)
    Image.fromarray(scaled).save(path)
class LineSegment(object):
    '''Math and plotting methods for lines (y = m*x + b) stored as 2-point segments.'''

    def __init__(self, xyxy=None, mb=None):
        """
        Construct from endpoints `xyxy` = [x1, y1, x2, y2] or from
        slope/intercept `mb` = (m, b).  Exactly one must be given.

        Raises
        ------
        ValueError
            If neither `xyxy` nor `mb` is provided.
        """
        if xyxy is not None:
            self.xyxy = np.asarray(xyxy).ravel()
            x1, y1, x2, y2 = self.xyxy
            # Vertical segments yield m = +/-inf via numpy division.
            self.m = (y2 - y1) / (x2 - x1)
            self.b = y1 - self.m * x1
        elif mb is not None:
            m, b = mb
            assert m != np.inf, 'TODO: Need to do an r-theta parameterization.'
            self.m = m
            self.b = b
            # Materialize an arbitrary two-point segment on the line.
            x1 = 1
            x2 = 2
            y1 = self.y(x1)
            y2 = self.y(x2)
            self.xyxy = np.array([x1, y1, x2, y2])
        else:
            # Fixed: removed a leftover `bk()` debugger call that dropped
            # into an interactive debugger before this exception was raised.
            raise ValueError('Need to pass xyxy or mb')
        self._lineup()

    def copy(self):
        """Return a new LineSegment with the same endpoints."""
        return LineSegment(self.xyxy)

    def _lineup(self):
        '''Ensure line segment starts with its lower point.'''
        xyxy = np.asarray(self.xyxy).ravel()
        x1, y1, x2, y2 = xyxy
        if y1 > y2:
            xyxy = np.array([x2, y2, x1, y1]).reshape(np.asarray(xyxy).shape)
        self.xyxy = xyxy

    def y(self, x):
        """Evaluate the line at abscissa x."""
        return self.m * x + self.b

    def x(self, y):
        """Invert the line: abscissa at which the line reaches ordinate y."""
        return (y - self.b) / self.m

    def extend_to_horizontal_borders(self, top, bottom):
        """Stretch the segment (in place) so its endpoints lie on y=bottom and y=top."""
        if self.m == 0:
            return  # Can't extend up or down!
        self.xyxy[0] = self.x(bottom)
        self.xyxy[1] = bottom
        self.xyxy[2] = self.x(top)
        self.xyxy[3] = top

    def plot_line(self, ax, extraxy=None, **kwargs):
        '''Pretty-plot a line on an axis.'''
        l = self.xyxy
        x = [l[0], l[2]]
        y = [l[1], l[3]]
        if extraxy is not None:
            x.append(extraxy[0])
            y.append(extraxy[1])
        return ax.plot(x, y, **kwargs)
def bk():
    '''Breakpoint helper: drop into the IPython debugger at the call site.'''
    from IPython.core import debugger
    debugger.set_trace()
|
{"hexsha": "a69fe4fa0b1f8bd0d0c577eebbb2844bf6887405", "size": 17506, "ext": "py", "lang": "Python", "max_stars_repo_path": "laneLines.py", "max_stars_repo_name": "tsbertalan/CarND-LaneLines-P1", "max_stars_repo_head_hexsha": "ba7bb0e1f60bb5c84425021b6bdb0d7162171fa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "laneLines.py", "max_issues_repo_name": "tsbertalan/CarND-LaneLines-P1", "max_issues_repo_head_hexsha": "ba7bb0e1f60bb5c84425021b6bdb0d7162171fa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "laneLines.py", "max_forks_repo_name": "tsbertalan/CarND-LaneLines-P1", "max_forks_repo_head_hexsha": "ba7bb0e1f60bb5c84425021b6bdb0d7162171fa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8031809145, "max_line_length": 114, "alphanum_fraction": 0.5963669599, "include": true, "reason": "import numpy", "num_tokens": 4443}
|
#!/usr/bin/env julia
# Unit test for the HelloWorld module: make the current directory
# loadable, then check the greeting output.
push!(LOAD_PATH, ".")
using HelloWorld
using Test
@test HelloWorld.greet("John") == "Hello, John"
|
{"hexsha": "c01fac05c52b2f113d92cc358d3234ac43c75d37", "size": 120, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/hello_world/test.jl", "max_stars_repo_name": "vtavernier/jomw", "max_stars_repo_head_hexsha": "3de1c99579381be4485825f2942fb555da45be4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/hello_world/test.jl", "max_issues_repo_name": "vtavernier/jomw", "max_issues_repo_head_hexsha": "3de1c99579381be4485825f2942fb555da45be4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/hello_world/test.jl", "max_forks_repo_name": "vtavernier/jomw", "max_forks_repo_head_hexsha": "3de1c99579381be4485825f2942fb555da45be4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.1428571429, "max_line_length": 47, "alphanum_fraction": 0.6916666667, "num_tokens": 33}
|
import logging
import numpy as np
import xobjects as xo
import xtrack.linear_normal_form as lnf
import xpart as xp # To get the right Particles class depending on pyheatail interface state
logger = logging.getLogger(__name__)
def _check_lengths(**kwargs):
length = None
for nn, xx in kwargs.items():
if hasattr(xx, "__iter__"):
if hasattr(xx, 'shape') and len(xx.shape) == 0:
continue
if length is None:
length = len(xx)
else:
if length != len(xx):
raise ValueError(f"invalid length len({nn})={len(xx)}")
if 'num_particles' in kwargs.keys():
num_particles = kwargs['num_particles']
if num_particles is not None and length is None:
length = num_particles
if num_particles is not None and length != num_particles:
raise ValueError(
f"num_particles={num_particles} is inconsistent with array length")
if length is None:
length = 1
return length
def build_particles(_context=None, _buffer=None, _offset=None, _capacity=None,
mode=None,
particle_ref=None,
num_particles=None,
x=None, px=None, y=None, py=None, zeta=None, delta=None,
x_norm=None, px_norm=None, y_norm=None, py_norm=None,
tracker=None,
at_element=None,
match_at_s=None,
particle_on_co=None,
R_matrix=None,
scale_with_transverse_norm_emitt=None,
weight=None,
particles_class=None,
co_search_settings=None,
steps_r_matrix=None,
matrix_responsiveness_tol=None,
matrix_stability_tol=None,
symplectify=False,
):
"""
Function to create particle objects from arrays containing physical or
normalized coordinates.
Arguments:
- mode: choose between:
- `set`: reference quantities including mass0, q0, p0c, gamma0,
etc. are taken from the provided reference particle. Particles
coordinates are set according to the provided input x, px, y, py,
zeta, delta (zero is assumed as default for these variables).
- `shift`: reference quantities including mass0, q0, p0c, gamma0,
etc. are taken from the provided reference particle. Particles
coordinates are set from the reference particles and shifted
according to the provided input x, px, y, py, zeta, delta (zero
is assumed as default for these variables).
- `normalized_transverse`: reference quantities including mass0,
q0, p0c, gamma0, etc. are taken from the provided reference
particle. The longitudinal coordinates are set according to the
provided input `zeta`, `delta` (zero is assumed as default value
for these variable`. The transverse coordinates are computed from
normalized values `x_norm`, `px_norm`, `y_norm`, `py_norm` using
the closed-orbit information and the linear transfer map obtained
from the `tracker` or provided by the user.
The default mode is `set`. `normalized_transverse` is used if any
of x_norm, px_norm, y_norm, pynorm is provided.
- particle_ref: particle object defining the reference quantities
(mass0, 0, p0c, gamma0, etc.). Its coordinates (x, py, y, py, zeta,
delta) are ignored unless `mode`='shift' is selected.
- num_particles: Number of particles to be generated (used if provided
coordinates are all scalar)
- x: x coordinate of the particles (default is 0).
- px: px coordinate of the particles (default is 0).
- y: y coordinate of the particles (default is 0).
- py: py coordinate of the particles (default is 0).
- zeta: zeta coordinate of the particles (default is 0).
- delta: delta coordinate of the particles (default is 0).
- x_norm: transverse normalized coordinate x (in sigmas) used in
combination with the one turn matrix R_matrix and with the
transverse emittances provided in the argument
`scale_with_transverse_norm_emitt` to generate x, px, y, py (x, px,
y, py cannot be provided if x_norm, px_norm, y_norm, py_norm are
provided).
- x_norm: transverse normalized coordinate x (in sigmas).
- px_norm: transverse normalized coordinate px (in sigmas).
- y_norm: transverse normalized coordinate y (in sigmas).
- py_norm: transverse normalized coordinate py (in sigmas).
- tracker: tracker object used to find the closed orbit and the
one-turn matrix.
- particle_on_co: Particle on closed orbit
- R_matrix: 6x6 matrix defining the linearized one-turn map to be used
for the transformation of the normalized coordinates into physical
space.
- scale_with_transverse_norm_emitt: Tuple of two elements defining the
transverse normalized emittances used to rescale the provided
transverse normalized coordinates (x, px, y, py).
- weight: weights to be assigned to the particles.
- at_element: location within the line at which particles are generated.
It can be an index or an element name. It can be given only if
`at_tracker` is provided and `transverse_mode` is "normalized".
- match_at_s: s coordinate of a location in the drifts downstream the
specified `at_element` at which the particles are generated before
being backdrifted to the location specified by `at_element`.
No active element can be present in between.
- _context: xobjects context in which the particle object is allocated.
"""
assert mode in [None, 'set', 'shift', 'normalized_transverse']
Particles = xp.Particles # To get the right Particles class depending on pyheatail interface state
if particles_class is not None:
raise NotImplementedError
if (particle_ref is not None and particle_on_co is not None):
raise ValueError("`particle_ref` and `particle_on_co`"
" cannot be provided at the same time")
if particle_on_co is None and particle_ref is None:
if tracker is not None:
particle_ref = tracker.particle_ref
if particle_ref is None:
assert particle_on_co is not None, (
"`particle_ref` or `particle_on_co` must be provided!")
particle_ref = particle_on_co
if not isinstance(particle_ref._buffer.context, xo.ContextCpu):
particle_ref = particle_ref.copy(_context=xo.ContextCpu())
# Move other input parameters to cpu if needed
# Generated by:
# for nn in 'x px y py zeta delta x_norm px_norm y_norm py_norm'.split():
# print(f'{nn} = ({nn}.get() if hasattr({nn}, "get") else {nn})')
x = (x.get() if hasattr(x, "get") else x)
px = (px.get() if hasattr(px, "get") else px)
y = (y.get() if hasattr(y, "get") else y)
py = (py.get() if hasattr(py, "get") else py)
zeta = (zeta.get() if hasattr(zeta, "get") else zeta)
delta = (delta.get() if hasattr(delta, "get") else delta)
x_norm = (x_norm.get() if hasattr(x_norm, "get") else x_norm)
px_norm = (px_norm.get() if hasattr(px_norm, "get") else px_norm)
y_norm = (y_norm.get() if hasattr(y_norm, "get") else y_norm)
py_norm = (py_norm.get() if hasattr(py_norm, "get") else py_norm)
if tracker is not None and tracker.iscollective:
logger.warning('Ignoring collective elements in particles generation.')
tracker = tracker._supertracker
if tracker is not None:
if matrix_responsiveness_tol is None:
matrix_responsiveness_tol = tracker.matrix_responsiveness_tol
if matrix_stability_tol is None:
matrix_stability_tol = tracker.matrix_stability_tol
if matrix_responsiveness_tol is None:
matrix_responsiveness_tol=lnf.DEFAULT_MATRIX_RESPONSIVENESS_TOL
if matrix_stability_tol is None:
matrix_stability_tol=lnf.DEFAULT_MATRIX_STABILITY_TOL
if zeta is None:
zeta = 0
if delta is None:
delta = 0
if (x_norm is not None or px_norm is not None
or y_norm is not None or py_norm is not None):
assert (x is None and px is None
and y is None and py is None)
if mode is None:
mode = 'normalized_transverse'
else:
assert mode == 'normalized_transverse'
if mode is None:
mode = 'set'
if mode == 'normalized_transverse':
if x_norm is None: x_norm = 0
if px_norm is None: px_norm = 0
if y_norm is None: y_norm = 0
if py_norm is None: py_norm = 0
else:
if x is None: x = 0
if px is None: px = 0
if y is None: y = 0
if py is None: py = 0
assert particle_ref._capacity == 1
ref_dict = {
'q0': particle_ref.q0,
'mass0': particle_ref.mass0,
'p0c': particle_ref.p0c[0],
'gamma0': particle_ref.gamma0[0],
'beta0': particle_ref.beta0[0],
}
part_dict = ref_dict.copy()
if at_element is not None or match_at_s is not None:
# Only this case is covered if not starting at element 0
assert tracker is not None
assert mode == 'normalized_transverse'
if isinstance(at_element, str):
at_element = tracker.line.element_names.index(at_element)
if match_at_s is not None:
import xtrack as xt
assert at_element is not None, (
'If `match_at_s` is provided, `at_element` needs to be provided and'
'needs to correspond to the corresponding element in the sequence'
)
# Match at a position where there is no marker and backtrack to the previous marker
expected_at_element = np.where(np.array(
tracker.line.get_s_elements())<=match_at_s)[0][-1]
assert at_element == expected_at_element or (
at_element < expected_at_element and
all([isinstance(tracker.line.element_dict[nn], xt.Drift)
for nn in tracker.line.element_names[at_element:expected_at_element]])), (
"`match_at_s` can only be placed in the drifts upstream of the "
"specified `at_element`. No active element can be present in between."
)
(tracker_rmat, _
) = xt.twiss_from_tracker._build_auxiliary_tracker_with_extra_markers(
tracker=tracker, at_s=[match_at_s], marker_prefix='xpart_rmat_')
at_element_tracker_rmat = tracker_rmat.line.element_names.index(
'xpart_rmat_0')
else:
tracker_rmat = tracker
at_element_tracker_rmat = at_element
if mode == 'normalized_transverse':
if particle_on_co is None:
assert tracker is not None
particle_on_co = tracker.find_closed_orbit(
particle_co_guess=Particles(
x=0, px=0, y=0, py=0, zeta=0, delta=0.,
**ref_dict),
co_search_settings=co_search_settings)
else:
assert particle_on_co._capacity == 1
if not isinstance(particle_on_co._buffer.context, xo.ContextCpu):
particle_on_co = particle_on_co.copy(_context=xo.ContextCpu())
assert particle_on_co.at_element[0] == 0
assert particle_on_co.s[0] == 0
assert particle_on_co.state[0] == 1
if at_element_tracker_rmat is not None:
# Match in a different position of the line
assert at_element_tracker_rmat > 0
part_co_ctx = particle_on_co.copy(_context=tracker_rmat._buffer.context)
tracker_rmat.track(part_co_ctx, num_elements=at_element_tracker_rmat)
particle_on_co = part_co_ctx.copy(_context=xo.ContextCpu())
if R_matrix is None:
# R matrix at location defined by particle_on_co.at_element
R_matrix = tracker_rmat.compute_one_turn_matrix_finite_differences(
particle_on_co=particle_on_co, steps_r_matrix=steps_r_matrix)
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x_norm=x_norm, px_norm=px_norm,
y_norm=y_norm, py_norm=py_norm)
if scale_with_transverse_norm_emitt is not None:
assert len(scale_with_transverse_norm_emitt) == 2
nemitt_x = scale_with_transverse_norm_emitt[0]
nemitt_y = scale_with_transverse_norm_emitt[1]
gemitt_x = nemitt_x/particle_ref.beta0/particle_ref.gamma0
gemitt_y = nemitt_y/particle_ref.beta0/particle_ref.gamma0
x_norm_scaled = np.sqrt(gemitt_x) * x_norm
px_norm_scaled = np.sqrt(gemitt_x) * px_norm
y_norm_scaled = np.sqrt(gemitt_y) * y_norm
py_norm_scaled = np.sqrt(gemitt_y) * py_norm
else:
x_norm_scaled = x_norm
px_norm_scaled = px_norm
y_norm_scaled = y_norm
py_norm_scaled = py_norm
WW, WWinv, Rot = lnf.compute_linear_normal_form(R_matrix,
symplectify=symplectify,
responsiveness_tol=matrix_responsiveness_tol,
stability_tol=matrix_stability_tol)
# Transform long. coordinates to normalized space
XX_long = np.zeros(shape=(6, num_particles), dtype=np.float64)
XX_long[4, :] = zeta - particle_on_co.zeta
XX_long[5, :] = delta - particle_on_co.delta
XX_norm_scaled = np.dot(WWinv, XX_long)
XX_norm_scaled[0, :] = x_norm_scaled
XX_norm_scaled[1, :] = px_norm_scaled
XX_norm_scaled[2, :] = y_norm_scaled
XX_norm_scaled[3, :] = py_norm_scaled
# Transform to physical coordinates
XX = np.dot(WW, XX_norm_scaled)
XX[0, :] += particle_on_co.x
XX[1, :] += particle_on_co.px
XX[2, :] += particle_on_co.y
XX[3, :] += particle_on_co.py
XX[4, :] += particle_on_co.zeta
XX[5, :] += particle_on_co.delta
elif mode == 'set':
if R_matrix is not None:
logger.warning('R_matrix provided but not used in this mode!')
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x=x, px=px,
y=y, py=py)
XX = np.zeros(shape=(6, num_particles), dtype=np.float64)
XX[0, :] = x
XX[1, :] = px
XX[2, :] = y
XX[3, :] = py
XX[4, :] = zeta
XX[5, :] = delta
elif mode == "shift":
if R_matrix is not None:
logger.warning('R_matrix provided but not used in this mode!')
num_particles = _check_lengths(num_particles=num_particles,
zeta=zeta, delta=delta, x=x, px=px,
y=y, py=py)
XX = np.zeros(shape=(6, num_particles), dtype=np.float64)
XX[0, :] = x + particle_ref.x
XX[1, :] = px + particle_ref.px
XX[2, :] = y + particle_ref.y
XX[3, :] = py + particle_ref.py
XX[4, :] = zeta + particle_ref.zeta
XX[5, :] = delta + particle_ref.delta
else:
raise ValueError('What?!')
part_dict['x'] = XX[0, :]
part_dict['px'] = XX[1, :]
part_dict['y'] = XX[2, :]
part_dict['py'] = XX[3, :]
part_dict['zeta'] = XX[4, :]
part_dict['delta'] = XX[5, :]
part_dict['weight'] = np.zeros(num_particles, dtype=np.int64)
if _context is None and _buffer is None and tracker is not None:
_context = tracker._buffer.context
particles = Particles(_context=_context, _buffer=_buffer, _offset=_offset,
_capacity=_capacity,**part_dict)
particles.particle_id = particles._buffer.context.nparray_to_context_array(
np.arange(0, num_particles, dtype=np.int64))
if weight is not None:
particles.weight[:num_particles] = weight
if match_at_s is not None:
# Backtrack to at_element
length_aux_drift = -match_at_s + tracker.line.get_s_position(at_element)
assert length_aux_drift <= 0
auxdrift = xt.Drift(length=length_aux_drift,
_context=tracker._buffer.context)
auxdrift.track(particles)
if at_element is not None:
if match_at_s is not None:
particles.s[:num_particles] = particle_on_co.s[0] + length_aux_drift
else:
assert particle_on_co.at_element[0] == at_element
particles.s[:num_particles] = particle_on_co.s[0]
particles.at_element[:num_particles] = at_element
particles.start_tracking_at_element = at_element
return particles
|
{"hexsha": "61205360fe00af0d87992b8aa534023f062d2ee3", "size": 17110, "ext": "py", "lang": "Python", "max_stars_repo_path": "xpart/build_particles.py", "max_stars_repo_name": "pkicsiny/xpart", "max_stars_repo_head_hexsha": "cddf3eb65ffc198c22dd37204139ce3177a9bd96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xpart/build_particles.py", "max_issues_repo_name": "pkicsiny/xpart", "max_issues_repo_head_hexsha": "cddf3eb65ffc198c22dd37204139ce3177a9bd96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xpart/build_particles.py", "max_forks_repo_name": "pkicsiny/xpart", "max_forks_repo_head_hexsha": "cddf3eb65ffc198c22dd37204139ce3177a9bd96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2469135802, "max_line_length": 102, "alphanum_fraction": 0.6219170076, "include": true, "reason": "import numpy", "num_tokens": 4014}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.