| text | meta |
|---|---|
from forse.tools.nn_tools import *
from forse.tools.img_tools import *
from forse.tools.mix_tools import *
from keras.models import Sequential, Model, load_model
from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization
from keras.layers import Reshape, Dense, Input
from keras.layers import LeakyReLU, Dropout, Flatten, ZeroPadding2D
from keras.optimizers import Adam
from keras import losses
import numpy as np
import os
from keras import backend as K
# Modified from:
# https://github.com/eriklindernoren/Keras-GAN.git
class DCGAN:
def __init__(self, output_directory, img_size):
self.img_size = img_size
self.channels = 1
self.kernel_size = 5
self.output_directory = output_directory
def smooth_accuracy(self, y_true, y_pred):
return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
def build_generator(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
model = Sequential()
model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) # 64x64x64
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same", strides=2)) #32x32x128
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same", strides=2)) #16x16x256
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same"))
model.add(Activation("tanh"))
img_in = Input(shape=img_shape)
img_out = model(img_in)
return Model(img_in, img_out)
def build_discriminator(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
model = Sequential()
model.add(Conv2D(64, kernel_size=self.kernel_size, strides=1, input_shape=img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(256, kernel_size=self.kernel_size, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Flatten())
model.add(Dropout(0.25))
model.add(Dense(1, activation='sigmoid'))
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
def build_gan(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
optimizer = Adam(0.0002, 0.5)
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.generator = self.build_generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
z = Input(shape=img_shape)
img = self.generator(z)
self.discriminator.trainable = False
valid = self.discriminator(img)
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def train(self, epochs, patches_file, batch_size=32, save_interval=100, seed=4324):
self.build_gan()
X_train, X_test, Y_train, Y_test = load_training_set(patches_file, seed=seed)
print("Training Data Shape: ", X_train.shape)
half_batch = batch_size // 2
accs = []
self.discriminator.summary()
for epoch in range(epochs):
ind_batch = np.random.randint(0, X_train.shape[0], batch_size)
g_loss = self.combined.train_on_batch(X_train[ind_batch], np.ones((batch_size, 1)))
target_real = np.ones((half_batch, 1))
target_fake = np.zeros((half_batch, 1))
idxX = np.random.randint(0, X_train.shape[0], half_batch)
idxY = np.random.randint(0, X_train.shape[0], half_batch)
imgs = Y_train[idxY]
gen_imgs = self.generator.predict(X_train[idxX])
d_loss_real = self.discriminator.train_on_batch(imgs, target_real)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, target_fake)
if epoch % (save_interval) == 0:
print(epoch)
gen_imgs_test = self.generator.predict(X_test)
save_path = self.output_directory + "/models"
if not os.path.exists(save_path):
os.makedirs(save_path)
self.discriminator.save(save_path + '/discrim_'+str(epoch)+'.h5')
self.generator.save(save_path + '/generat_'+str(epoch)+'.h5')
self.discriminator.save(save_path + '/discrim_'+str(epoch)+'.h5')
self.generator.save(save_path + '/generat_'+str(epoch)+'.h5')
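# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal, hedged example of driving the class above. The output directory
# and "my_patches.npy" file are hypothetical placeholders; load_training_set()
# (imported from forse.tools above) is assumed to accept that file.
if __name__ == "__main__":
    dcgan = DCGAN(output_directory="./output", img_size=(64, 64))
    # trains on 64x64 patches; checkpoints land in ./output/models every 100 epochs
    dcgan.train(epochs=1000, patches_file="my_patches.npy",
                batch_size=32, save_interval=100)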
|
{"hexsha": "fd07f0162854861fa2803bb8d173efcca8b7848a", "size": 5530, "ext": "py", "lang": "Python", "max_stars_repo_path": "forse/networks/dcgan.py", "max_stars_repo_name": "ai4cmb/ForSE", "max_stars_repo_head_hexsha": "8ceab3b2e47f077b9d5dbaee879a5385c3a76073", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "forse/networks/dcgan.py", "max_issues_repo_name": "ai4cmb/ForSE", "max_issues_repo_head_hexsha": "8ceab3b2e47f077b9d5dbaee879a5385c3a76073", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "forse/networks/dcgan.py", "max_forks_repo_name": "ai4cmb/ForSE", "max_forks_repo_head_hexsha": "8ceab3b2e47f077b9d5dbaee879a5385c3a76073", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.5087719298, "max_line_length": 110, "alphanum_fraction": 0.6430379747, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
#!/usr/bin/env python
"""
Create timeseries averages for the NOAA water vapour data.
"""
from datetime import datetime
from pathlib import Path
import numpy
import h5py
import pandas
from wagl.geobox import GriddedGeoBox
from wagl.hdf5.compression import H5CompressionFilter
from wagl.hdf5 import read_h5_table, write_h5_image
def build_index(indir):
"""
Read the INDEX table for each file and build a full history
index.
The records are sorted in ascending time (earliest to most recent)
"""
    frames = []
    for fname in Path(indir).glob("pr_wtr.eatm.*.h5"):
        with h5py.File(str(fname), 'r') as fid:
            tmp_df = read_h5_table(fid, 'INDEX')
            tmp_df['filename'] = fid.filename
            frames.append(tmp_df)
    # DataFrame.append was removed in pandas 2.0; concatenate the pieces once instead
    df = pandas.concat(frames, ignore_index=True)
df.sort_values('timestamp', inplace=True)
df.set_index('timestamp', inplace=True)
return df
def calculate_average(dataframe):
"""
Given a dataframe with the columns:
* filename
* band_name
Calculate the 3D/timeseries average from all input records.
Each 2D dataset has dimensions (73y, 144x), and type float32.
"""
dims = (dataframe.shape[0], 73, 144)
data = numpy.zeros(dims, dtype="float32")
# load all data into 3D array (dims are small so just read all)
for i, rec in enumerate(dataframe.iterrows()):
row = rec[1]
with h5py.File(row.filename, "r") as fid:
ds = fid[row.band_name]
ds.read_direct(data[i])
no_data = float(ds.attrs['missing_value'])
# check for nodata and convert to nan
# do this for each dataset in case the nodata value changes
data[i][data[i] == no_data] = numpy.nan
# get the geobox, chunks
with h5py.File(row.filename, "r") as fid:
        ds = fid[row.band_name]
geobox = GriddedGeoBox.from_dataset(ds)
chunks = ds.chunks
mean = numpy.nanmean(data, axis=0)
return mean, geobox, chunks
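# Hedged illustration of the averaging step above (helper name and values are
# ours, not part of the pipeline; it is never called by this module).
def _nanmean_illustration():
    """
    Show the nodata -> NaN substitution and nanmean reduction performed by
    calculate_average, on a tiny hand-made stack instead of (n, 73, 144) data.
    """
    stack = numpy.array([[[1.0, -999.0]],
                         [[3.0, 4.0]]], dtype="float32")
    # mask the nodata cell so it is ignored by the mean
    stack[stack == -999.0] = numpy.nan
    # nanmean over the time axis: result is [[2.0, 4.0]]
    return numpy.nanmean(stack, axis=0)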
def prwtr_average(indir, outdir, compression=H5CompressionFilter.LZF,
filter_opts=None):
"""
Take the 4 hourly daily average from all files.
"""
df = build_index(indir)
# grouping
groups = df.groupby([df.index.month, df.index.day, df.index.hour])
# create directories as needed
out_fname = Path(outdir).joinpath("pr_wtr_average.h5")
if not out_fname.parent.exists():
out_fname.parent.mkdir(parents=True)
# create output file
with h5py.File(str(out_fname), 'w') as fid:
# the data is ordered so we can safely use BAND-1 = Jan-1
for band_index, item in enumerate(groups):
grp_name, grp_df = item
# synthesised leap year timestamp (use year 2000)
fmt = "2000 {:02d} {:02d} {:02d}"
dtime = datetime.strptime(fmt.format(*grp_name), "%Y %m %d %H")
# mean
mean, geobox, chunks = calculate_average(grp_df)
# dataset name format "%B-%d/%H%M" eg FEBRUARY-06/1800 for Feb 6th 1800 hrs
dname = "AVERAGE/{}".format(dtime.strftime("%B-%d/%H%M").upper())
# dataset description
description = ("Average data for {year_month} {hour}00 hours, "
"over the timeperiod {dt_min} to {dt_max}")
description = description.format(
year_month=dtime.strftime("%B-%d"),
hour=dtime.strftime("%H"),
dt_min=grp_df.index.min(),
dt_max=grp_df.index.max()
)
# dataset attributes
attrs = {
"description": description,
"timestamp": dtime,
"date_format": "2000 %B-%d/%H%M",
"band_name": "BAND-{}".format(band_index +1),
"geotransform": geobox.transform.to_gdal(),
"crs_wkt": geobox.crs.ExportToWkt()
}
# create empty or copy the user supplied filter options
if not filter_opts:
f_opts = dict()
else:
f_opts = filter_opts.copy()
# use original chunks if none are provided
if 'chunks' not in f_opts:
f_opts['chunks'] = chunks
# write
write_h5_image(mean, dname, fid, attrs=attrs,
compression=compression, filter_opts=f_opts)
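# --- Usage sketch (illustrative only) ---------------------------------------
# Hypothetical paths; assumes the input directory holds pr_wtr.eatm.*.h5 files
# containing the INDEX table read by build_index() above.
if __name__ == "__main__":
    prwtr_average(indir="/data/water-vapour/noaa",
                  outdir="/data/water-vapour/averages")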
|
{"hexsha": "141bd2c1ab3e5eae94262dfdddbbdf92d17d05e7", "size": 4420, "ext": "py", "lang": "Python", "max_stars_repo_path": "average_water_vapour.py", "max_stars_repo_name": "ASVincent/swfo", "max_stars_repo_head_hexsha": "17ef3c32047a5069c4db04fa04368a9268f19d93", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "average_water_vapour.py", "max_issues_repo_name": "ASVincent/swfo", "max_issues_repo_head_hexsha": "17ef3c32047a5069c4db04fa04368a9268f19d93", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "average_water_vapour.py", "max_forks_repo_name": "ASVincent/swfo", "max_forks_repo_head_hexsha": "17ef3c32047a5069c4db04fa04368a9268f19d93", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2627737226, "max_line_length": 87, "alphanum_fraction": 0.592081448, "include": true, "reason": "import numpy", "num_tokens": 1081}
|
from covariant_compositional_networks_tf2.CCN_Model import CCN_Model
import tensorflow as tf
from functools import reduce
from operator import mul
from ordered_set import OrderedSet
import numpy as np
from sklearn.metrics import accuracy_score
from graphColoring import randomNPGraph, checkIfGraphConnected
channels_in = 2
feature_vector_shape=[1]
k=2
model = CCN_Model(lr =2e-4, lr_decay_rate=0.95, lr_min=3e-6, loss = tf.losses.logcosh, nonlinearity=tf.nn.tanh,
feature_vector_shape=feature_vector_shape, num_layers=5, k=k, channels_in=[channels_in,4,5,4,3,1])
helperVar = reduce(mul, [channels_in] + feature_vector_shape)
inp = [[tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + 1, [channels_in] + [1] * k + feature_vector_shape )),
tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + helperVar + 1, [channels_in] + [1] * k + feature_vector_shape)),
tf.Variable(tf.reshape(tf.range(helperVar, dtype=tf.float32) + helperVar * 2 + 1, [channels_in]+ [1] * k + feature_vector_shape))],
# 2 feature vectors
# for k =2 its [ [[[1,2,3]]], [[[5,6,7]]] ]
np.array([[1, 1, 0],
[1, 1, 1],
[0, 1, 1]]), # adjacency matrix of DIRECTED graph - node[0] will gather inputs from [0] and [1]
# and node[1] only from [1]
[OrderedSet([0]), OrderedSet([1]), OrderedSet([2])]] # parts - P(0) = {0}, and P(1) = {1} - cummulative receptive field
def complete_graph(nodes):
node_features = [tf.Variable(tf.reshape(tf.ones(helperVar, dtype=tf.float32), [channels_in] + [1] * k + feature_vector_shape ))] * nodes
adjM = np.ones((nodes, nodes))
parts = [OrderedSet([i]) for i in range(nodes)]
return [node_features, adjM, parts]
def uncomplete_graph(nodes):
node_features = [tf.Variable(tf.reshape(tf.ones(helperVar, dtype=tf.float32), [channels_in] + [1] * k + feature_vector_shape ))] * nodes
while True:
adjM = randomNPGraph(nodes, 0.5, diagonal = True, undirected = True)
if checkIfGraphConnected(adjM) and not np.array_equal(adjM, np.ones(adjM.shape)):
break
parts = [OrderedSet([i]) for i in range(nodes)]
return [node_features, adjM, parts]
def gen_graphs(num, nodes, p=.5):
graphs = []
for _ in range(num):
if np.random.rand()>p:
G, y = complete_graph(nodes), tf.constant([1.])
graphs.append(G + [y])
else:
G, y = uncomplete_graph(nodes), tf.constant([0.])
graphs.append(G + [y])
return graphs
def train_test_split(graphs, train_fraction=0.8):
split_idx = int(np.round(train_fraction*len(graphs), decimals=0))
g_train, g_test = graphs[:split_idx], graphs[split_idx:]
return g_train, g_test
def data_preparation(n = 200, nodes = 4):
graphs = gen_graphs(n, nodes)
    idxs = np.arange(len(graphs), dtype=int)  # np.int was removed in NumPy 1.24
return train_test_split(graphs, train_fraction=0.8)
#result = model.predict(inp[0], inp[1], inp[2])
#resultSum = np.sum(result)
#y = tf.constant([2.0])
#print(y)
#print(result)
def train_and_test(model, data, epochs=100):
g_train, g_test = data
for epoch in range(epochs):
        for i in range(len(g_train)):
features, adjM, parts, y = g_train[i]
model.fit(features, y, adjM, parts)
predicted = []
target = []
for i in range(len(g_test)):
        features, adjM, parts, y = g_test[i]
target.append(y)
predicted.append(model.predict(features, adjM, parts))
print("Accuracy of the model is {}".format(accuracy_score(y_test, predicted)))
if __name__=='__main__':
train_and_test(model, data_preparation())
# for i in range(100):
#
# #model.fit(inp[0], y, inp[1], inp[2])
# for i in range(len(y)):
#
# result = model.predict(inp[0], inp[1], inp[2])
# print(result)
#list(reversed(inp[0]))
# inpSwap = [inp[0][1], inp[0][2], inp[0][0]]
# adjMOther = np.array([[1, 1, 1], [1, 1, 1], [1,1,1]])
# result = model.predict(inp[0], adjMOther, inp[2])
# print(result)
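# --- Extra sanity check (illustrative only) ----------------------------------
# Hedged sketch: compares the adjacency matrix of a complete graph with that of
# a randomly generated connected one, matching the labelling used in gen_graphs
# (complete -> 1.0, non-complete -> 0.0). Relies only on the helpers above.
if __name__ == '__main__':
    demo_complete = complete_graph(4)
    demo_random = uncomplete_graph(4)
    print("complete graph adjacency:\n", demo_complete[1])
    print("random connected graph adjacency:\n", demo_random[1])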
|
{"hexsha": "587fce2a9fa2bd4c8aa54c3be218882bc8cf144c", "size": 4050, "ext": "py", "lang": "Python", "max_stars_repo_path": "covariant_compositional_networks_tf2/tests/testModel_2.py", "max_stars_repo_name": "PiotrKaszuba/Covariant_Compositional_Networks_Tf2", "max_stars_repo_head_hexsha": "dcd287a5f063bf2c3a37120e3247a5ea91ac5aea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "covariant_compositional_networks_tf2/tests/testModel_2.py", "max_issues_repo_name": "PiotrKaszuba/Covariant_Compositional_Networks_Tf2", "max_issues_repo_head_hexsha": "dcd287a5f063bf2c3a37120e3247a5ea91ac5aea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-09-25T20:57:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:38:52.000Z", "max_forks_repo_path": "covariant_compositional_networks_tf2/tests/testModel_2.py", "max_forks_repo_name": "PiotrKaszuba/Covariant_Compositional_Networks_Tf2", "max_forks_repo_head_hexsha": "dcd287a5f063bf2c3a37120e3247a5ea91ac5aea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5263157895, "max_line_length": 140, "alphanum_fraction": 0.6414814815, "include": true, "reason": "import numpy", "num_tokens": 1168}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
# set to True to enable training
# if set to False, training is skipped,
# and the weights are loaded from the session storage.
# this way, you can train the network once, and then
# set train = False, and experiment with it without
# training it again
train = True
# how many characters to predict at once when training
seq_length = 100
hidden_size = 250
num_layers = 1
# training 10 epochs took me about 15 minutes with GPU acceleration
n_epochs = 10
lr = 0.01
# path in session storage to save state to
PATH = './shakespeare_net_pth'
# use GPU if available, else use CPU
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
print(device)
class CustomDataset(Dataset):
def __init__(self, data_file: str):
self.data = open(data_file, 'r').read()
vocab = sorted(set(self.data))
self.vocab_size = len(vocab)
self.char2idx = {ch: idx for idx, ch in enumerate(vocab)}
self.idx2char = {idx: ch for idx, ch in enumerate(vocab)}
def __len__(self):
return len(self.data)
def __getitem__(self, i):
x = self.char2idx[self.data[i]]
x = torch.tensor([x])
x = F.one_hot(x, num_classes=self.vocab_size)
# FloatTensor because the input needs to be type Float
x = x.type(torch.FloatTensor)
# return next character, or current character if there is no
# next character
t = self.char2idx[self.data[i + (i < (self.__len__() - 1))]]
t = torch.tensor([t])
return (x.to(device), t.to(device))
class Model(nn.Module):
def __init__(self, vocab_size, hidden_size, num_layers = 1):
super(Model, self).__init__()
self.n_layers = num_layers
self.vocab_size = vocab_size
# input shape: (seq_length, 1, vocab_size)
# output shape: (seq_length, 1, hidden_size)
self.lstm = nn.LSTM(self.vocab_size,
hidden_size,
num_layers,
batch_first=False
)
# input shape: (N, *, hidden_size)
# output shape: (N, *, vocab_size)
self.linear = nn.Linear(hidden_size, vocab_size, bias=True)
def forward(self, input, states_0=None):
output, (hn, cn) = self.lstm(input, states_0)
scores = self.linear(output)
return scores, (hn, cn)
def generate_sample(self, x, length=500):
x = x.view(1, 1, self.vocab_size)
h = torch.zeros(self.n_layers, 1, hidden_size).to(device)
c = torch.zeros(self.n_layers, 1, hidden_size).to(device)
text = ''
for i in range(length):
scores, (h, c) = self.forward(x, (h, c))
probs = F.softmax(scores, dim=2).view(self.vocab_size)
pred = torch.tensor(list(WeightedRandomSampler(probs, 1, replacement=True)))
x = F.one_hot(pred, num_classes=self.vocab_size)
x = x.view(1, 1, self.vocab_size).type(torch.FloatTensor).to(device)
next_character = idx2char[pred.item()]
text += next_character
return text
def init_state(self):
return (
torch.zeros(num_layers, 1, hidden_size).to(device),
torch.zeros(num_layers, 1, hidden_size).to(device)
)
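# Hedged mini-example of the sampling trick used in generate_sample() above:
# WeightedRandomSampler draws an index with probability proportional to the
# softmax scores, so generation is stochastic rather than always arg-max.
# The values below are made up and the computation is negligible.
_demo_probs = F.softmax(torch.tensor([1.0, 3.0, 0.5]), dim=0)
_demo_idx = list(WeightedRandomSampler(_demo_probs, 1, replacement=True))[0]
# _demo_idx is 0, 1 or 2, with index 1 the most likely outcome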
dataset = CustomDataset(data_file='drive/MyDrive/colab/shakespeare.txt')
char2idx = dataset.char2idx
idx2char = dataset.idx2char
vocab_size = dataset.vocab_size
loader = DataLoader(dataset=dataset,
# we are not actually batching the data,
# we are just doing one training example at a
# time. this is just a trick
                    # so we can use DataLoader to cut up
# the data nicely for us so we don't have
# to do it ourselves
batch_size=seq_length,
# data shuffling can be useful in many instances,
                    # but in this case it would break the training
# since each example is a single character (or 100)
# because of the 'batching', and we need to preserve
# context
shuffle=False
)
model = Model(vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers
).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
# TRAINING
sample_input = None
n_batches = len(dataset) // seq_length
print(f'n_batches: {n_batches}')
if train:
for epoch in range(n_epochs):
state = model.init_state()
i = 0
for char, next_char in loader:
#x = x.view(seq_length, 1, vocab_size).to(device)
pred, (h, c) = model(char, state)
loss = criterion(pred.squeeze(dim=1), next_char.squeeze(dim=1))
loss.backward()
optimizer.step()
optimizer.zero_grad()
state = (h.detach(), c.detach())
i += 1
if i % 100 == 0:
print(f'Epoch: {epoch+1} / {n_epochs} batch {i+1} / {n_batches} Loss: {loss.item()}')
if i % 500 == 0:
sample_input = char[0]
sample = model.generate_sample(sample_input, length=500)
print(sample)
torch.save(model.state_dict(), PATH)
print(f'finished training and saved state to {PATH}')
# load state
model = Model(vocab_size=vocab_size,
hidden_size=hidden_size,
num_layers=num_layers
).to(device)
model.load_state_dict(torch.load(PATH))
# seed generation from the first character of the dataset so this also works
# when train = False and no training-loop variables exist in this session
seed_input, _ = dataset[0]
print(model.generate_sample(seed_input, length=500))
# generate a bunch of text and save it to output.txt
with open('output.txt', 'w') as file:
    for i in range(100):
        sample = model.generate_sample(seed_input, length=1000)
        file.write(sample)
        file.write('\n---------------------------------------\n')
|
{"hexsha": "1ca682958c7d5bca43cf5c045fdaaaa899a0ff90", "size": 6129, "ext": "py", "lang": "Python", "max_stars_repo_path": "shakespeare.py", "max_stars_repo_name": "christofferaakre/shakespeare", "max_stars_repo_head_hexsha": "c2563d19232465edbda2edaeebb7b93f491512d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-28T04:05:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T04:05:50.000Z", "max_issues_repo_path": "shakespeare.py", "max_issues_repo_name": "christofferaakre/shakespeare", "max_issues_repo_head_hexsha": "c2563d19232465edbda2edaeebb7b93f491512d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shakespeare.py", "max_forks_repo_name": "christofferaakre/shakespeare", "max_forks_repo_head_hexsha": "c2563d19232465edbda2edaeebb7b93f491512d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.05, "max_line_length": 101, "alphanum_fraction": 0.5916136401, "include": true, "reason": "import numpy", "num_tokens": 1435}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/test/unit_test.hpp>
#include "ignite/common/utils.h"
#include "ignite/ignite.h"
#include "ignite/ignition.h"
#include "ignite/test_utils.h"
using namespace ignite;
using namespace boost::unit_test;
/*
* Test setup fixture.
*/
struct CacheStoreTestSuiteFixture
{
/* Nodes started during the test. */
Ignite node1;
/*
* Constructor.
*/
CacheStoreTestSuiteFixture() :
#ifdef IGNITE_TESTS_32
node1(ignite_test::StartNode("cache-store-32.xml", "node1"))
#else
node1(ignite_test::StartNode("cache-store.xml", "node1"))
#endif
{
// No-op.
}
/*
* Destructor.
*/
~CacheStoreTestSuiteFixture()
{
GetCache().RemoveAll();
Ignition::StopAll(true);
}
/**
* Cache accessor.
*/
cache::Cache<int64_t, std::string> GetCache()
{
return node1.GetOrCreateCache<int64_t, std::string>("cache1");
}
};
void FillStore(cache::Cache<int64_t, std::string>& cache, int64_t n)
{
for (int64_t i = 0; i < n; ++i)
cache.Put(i, common::LexicalCast<std::string>(i));
cache.Clear();
}
BOOST_FIXTURE_TEST_SUITE(CacheStoreTestSuite, CacheStoreTestSuiteFixture)
BOOST_AUTO_TEST_CASE(LoadCacheSingleNodeNoPredicate)
{
const int64_t entriesNum = 100;
cache::Cache<int64_t, std::string> cache = GetCache();
BOOST_CHECK(cache.IsEmpty());
FillStore(cache, entriesNum);
BOOST_CHECK(cache.IsEmpty());
cache.LoadCache();
BOOST_CHECK(!cache.IsEmpty());
BOOST_CHECK_EQUAL(cache.Size(cache::CachePeekMode::PRIMARY), entriesNum);
std::string val42 = cache.Get(42);
BOOST_CHECK_EQUAL(val42, "42");
}
BOOST_AUTO_TEST_CASE(LoadCacheSeveralNodesNoPredicate)
{
BOOST_TEST_CHECKPOINT("Starting additional node");
#ifdef IGNITE_TESTS_32
Ignite node2 = ignite_test::StartNode("cache-store-32.xml", "node2");
#else
Ignite node2 = ignite_test::StartNode("cache-store.xml", "node2");
#endif
const int64_t entriesNum = 100;
cache::Cache<int64_t, std::string> cache = GetCache();
BOOST_CHECK(cache.IsEmpty());
FillStore(cache, entriesNum);
BOOST_CHECK(cache.IsEmpty());
cache.LoadCache();
BOOST_CHECK(!cache.IsEmpty());
BOOST_CHECK_EQUAL(cache.Size(cache::CachePeekMode::PRIMARY), entriesNum);
std::string val42 = cache.Get(42);
BOOST_CHECK_EQUAL(val42, "42");
}
BOOST_AUTO_TEST_CASE(LocalLoadCacheSingleNodeNoPredicate)
{
const int64_t entriesNum = 100;
cache::Cache<int64_t, std::string> cache = GetCache();
BOOST_CHECK(cache.IsEmpty());
FillStore(cache, entriesNum);
BOOST_CHECK(cache.IsEmpty());
cache.LocalLoadCache();
BOOST_CHECK(!cache.IsEmpty());
BOOST_CHECK_EQUAL(cache.Size(cache::CachePeekMode::PRIMARY), entriesNum);
std::string val42 = cache.Get(42);
BOOST_CHECK_EQUAL(val42, "42");
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "bff260c5b8de395b17e6b3d2c4ba12a33d6b7877", "size": 3710, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "modules/platforms/cpp/core-test/src/cache_store_test.cpp", "max_stars_repo_name": "geertjanw/ignite", "max_stars_repo_head_hexsha": "521149998a76d78a72628cf49d1ffad162ed5a01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4339.0, "max_stars_repo_stars_event_min_datetime": "2015-08-21T21:13:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:56:44.000Z", "max_issues_repo_path": "modules/platforms/cpp/core-test/src/cache_store_test.cpp", "max_issues_repo_name": "geertjanw/ignite", "max_issues_repo_head_hexsha": "521149998a76d78a72628cf49d1ffad162ed5a01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1933.0, "max_issues_repo_issues_event_min_datetime": "2015-08-24T11:37:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T08:37:08.000Z", "max_forks_repo_path": "modules/platforms/cpp/core-test/src/cache_store_test.cpp", "max_forks_repo_name": "geertjanw/ignite", "max_forks_repo_head_hexsha": "521149998a76d78a72628cf49d1ffad162ed5a01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2140.0, "max_forks_repo_forks_event_min_datetime": "2015-08-21T22:09:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:57:34.000Z", "avg_line_length": 23.6305732484, "max_line_length": 77, "alphanum_fraction": 0.6913746631, "num_tokens": 890}
|
[STATEMENT]
lemma split_list_first_unique:
assumes "u\<^sub>1 @ [a] @ u\<^sub>2 = v\<^sub>1 @ [a] @ v\<^sub>2" "a \<notin> set u\<^sub>1" "a \<notin> set v\<^sub>1"
shows "u\<^sub>1 = v\<^sub>1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. u\<^sub>1 = v\<^sub>1
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. u\<^sub>1 = v\<^sub>1
[PROOF STEP]
obtain w where "u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or>
u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>w. u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or> u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms(1) append_eq_append_conv2
[PROOF STATE]
proof (prove)
using this:
u\<^sub>1 @ [a] @ u\<^sub>2 = v\<^sub>1 @ [a] @ v\<^sub>2
(?xs @ ?ys = ?zs @ ?ts) = (\<exists>us. ?xs = ?zs @ us \<and> us @ ?ys = ?ts \<or> ?xs @ us = ?zs \<and> ?ys = us @ ?ts)
goal (1 subgoal):
1. (\<And>w. u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or> u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or> u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2
goal (1 subgoal):
1. u\<^sub>1 = v\<^sub>1
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or> u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2
goal (1 subgoal):
1. u\<^sub>1 = v\<^sub>1
[PROOF STEP]
using assms(2, 3)
[PROOF STATE]
proof (prove)
using this:
u\<^sub>1 = v\<^sub>1 @ w \<and> w @ [a] @ u\<^sub>2 = [a] @ v\<^sub>2 \<or> u\<^sub>1 @ w = v\<^sub>1 \<and> [a] @ u\<^sub>2 = w @ [a] @ v\<^sub>2
a \<notin> set u\<^sub>1
a \<notin> set v\<^sub>1
goal (1 subgoal):
1. u\<^sub>1 = v\<^sub>1
[PROOF STEP]
by (auto) (metis hd_append2 list.sel(1) list.set_sel(1))+
[PROOF STATE]
proof (state)
this:
u\<^sub>1 = v\<^sub>1
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1116, "file": "Partial_Order_Reduction_Extensions_List_Extensions", "length": 8}
|
/*
* Copyright (c) 2020-2022 The reone project contributors
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <boost/test/unit_test.hpp>
#include "../../src/game/script/routines.h"
#include "../../src/script/expressiontree.h"
using namespace std;
using namespace reone;
using namespace reone::game;
using namespace reone::script;
BOOST_AUTO_TEST_SUITE(expression_tree)
BOOST_AUTO_TEST_CASE(should_decompile_program__minimal) {
// given
auto program = ScriptProgram("");
program.add(Instruction(InstructionType::RETN));
auto routines = Routines();
routines.initForKotOR();
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
BOOST_CHECK_EQUAL(0ll, globals.size());
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(1ll, functions.size());
auto &startFunc = functions[0];
BOOST_CHECK_EQUAL("_start", startFunc->name);
BOOST_CHECK_EQUAL(0ll, startFunc->inputs.size());
BOOST_CHECK_EQUAL(0ll, startFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(startFunc->returnType));
BOOST_CHECK_EQUAL(1ll, startFunc->block->expressions.size());
}
BOOST_AUTO_TEST_CASE(should_decompile_program__starting_conditional_without_globals) {
// given
auto routines = Routines();
routines.initForKotOR();
auto program = ScriptProgram("");
program.add(Instruction(InstructionType::RSADDI));
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction::newCONSTI(1));
program.add(Instruction::newCPDOWNSP(-8, 4));
program.add(Instruction::newMOVSP(-4));
program.add(Instruction(InstructionType::RETN));
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
BOOST_CHECK_EQUAL(0ll, globals.size());
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(2ll, functions.size());
auto startingConditionalFunc = functions[0];
BOOST_CHECK_EQUAL("StartingConditional", startingConditionalFunc->name);
BOOST_CHECK_EQUAL(0ll, startingConditionalFunc->inputs.size());
BOOST_CHECK_EQUAL(1ll, startingConditionalFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Int), static_cast<int>(startingConditionalFunc->outputs[0].type));
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(startingConditionalFunc->returnType));
auto startFunc = functions[1];
BOOST_CHECK_EQUAL("_start", startFunc->name);
}
BOOST_AUTO_TEST_CASE(should_decompile_program__main_with_globals) {
// given
auto routines = Routines();
routines.initForKotOR();
auto program = ScriptProgram("");
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction(InstructionType::RSADDI));
program.add(Instruction::newCONSTI(1));
program.add(Instruction::newCPDOWNSP(-8, 4));
program.add(Instruction::newMOVSP(-4));
program.add(Instruction(InstructionType::SAVEBP));
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RESTOREBP));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction(InstructionType::RETN));
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
auto globalsVec = vector<const ExpressionTree::ParameterExpression *>(globals.begin(), globals.end());
BOOST_CHECK_EQUAL(1ll, globalsVec.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Int), static_cast<int>(globalsVec[0]->variableType));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionTree::ParameterLocality::Global), static_cast<int>(globalsVec[0]->locality));
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(3ll, functions.size());
auto mainFunc = functions[0];
BOOST_CHECK_EQUAL("main", mainFunc->name);
BOOST_CHECK_EQUAL(0ll, mainFunc->inputs.size());
BOOST_CHECK_EQUAL(0ll, mainFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(mainFunc->returnType));
auto globalsFunc = functions[1];
BOOST_CHECK_EQUAL("_globals", globalsFunc->name);
auto startFunc = functions[2];
BOOST_CHECK_EQUAL("_start", startFunc->name);
}
BOOST_AUTO_TEST_CASE(should_decompile_program__conditionals) {
// given
auto routines = Routines();
routines.initForKotOR();
auto program = ScriptProgram("");
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction::newCONSTI(2));
program.add(Instruction::newCONSTI(1));
program.add(Instruction::newCPTOPSP(-8, 4));
program.add(Instruction::newJZ(18));
program.add(Instruction(InstructionType::DIVII));
program.add(Instruction::newJNZ(8));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction(InstructionType::RETN));
program.add(Instruction::newMOVSP(-8));
program.add(Instruction(InstructionType::RETN));
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
BOOST_CHECK_EQUAL(0ll, globals.size());
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(2ll, functions.size());
auto mainFunc = functions[0];
BOOST_CHECK_EQUAL("main", mainFunc->name);
BOOST_CHECK_EQUAL(0ll, mainFunc->inputs.size());
BOOST_CHECK_EQUAL(0ll, mainFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(mainFunc->returnType));
BOOST_CHECK_EQUAL(11, mainFunc->block->expressions.size());
// loc_jnz:
// return;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Label), static_cast<int>(mainFunc->block->expressions[7]->type));
auto jnzLabelExpr = static_cast<ExpressionTree::LabelExpression *>(mainFunc->block->expressions[7]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Return), static_cast<int>(mainFunc->block->expressions[8]->type));
// loc_jz:
// return;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Label), static_cast<int>(mainFunc->block->expressions[9]->type));
auto jzLabelExpr = static_cast<ExpressionTree::LabelExpression *>(mainFunc->block->expressions[9]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Return), static_cast<int>(mainFunc->block->expressions[10]->type));
// int a = 2;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[0]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[0])->left->type));
auto aExpr = static_cast<ExpressionTree::ParameterExpression *>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[0])->left);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Constant), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[0])->right->type));
// int b = 1;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[1]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[1])->left->type));
auto bExpr = static_cast<ExpressionTree::ParameterExpression *>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[1])->left);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Constant), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[1])->right->type));
// int c = a;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[2]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[2])->left->type));
auto cExpr = static_cast<ExpressionTree::ParameterExpression *>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[2])->left);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[2])->right->type));
BOOST_CHECK_EQUAL(aExpr, static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[2])->right);
// if(c == 0) { goto loc_jz; }
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Conditional), static_cast<int>(mainFunc->block->expressions[3]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Equal), static_cast<int>(static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[3])->test->type));
auto jzIfTrue = static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[3])->ifTrue;
BOOST_CHECK(jzIfTrue);
BOOST_CHECK_EQUAL(1ll, jzIfTrue->expressions.size());
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Goto), static_cast<int>(jzIfTrue->expressions[0]->type));
BOOST_CHECK_EQUAL(jzLabelExpr, static_cast<ExpressionTree::GotoExpression *>(jzIfTrue->expressions[0])->label);
auto jzIfFalse = static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[3])->ifFalse;
BOOST_CHECK(!jzIfFalse);
// int d = a / b;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[4]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[4])->left->type));
auto dExpr = static_cast<ExpressionTree::ParameterExpression *>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[4])->left);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Divide), static_cast<int>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[4])->right->type));
auto aDivBExpr = static_cast<ExpressionTree::BinaryExpression *>(static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[4])->right);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(aDivBExpr->left->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Parameter), static_cast<int>(aDivBExpr->right->type));
BOOST_CHECK_EQUAL(aExpr, aDivBExpr->left);
BOOST_CHECK_EQUAL(bExpr, aDivBExpr->right);
// if(d != 0) { goto loc_jnz; }
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Conditional), static_cast<int>(mainFunc->block->expressions[5]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::NotEqual), static_cast<int>(static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[5])->test->type));
auto jnzIfTrue = static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[5])->ifTrue;
BOOST_CHECK(jnzIfTrue);
BOOST_CHECK_EQUAL(1ll, jnzIfTrue->expressions.size());
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Goto), static_cast<int>(jnzIfTrue->expressions[0]->type));
BOOST_CHECK_EQUAL(jnzLabelExpr, static_cast<ExpressionTree::GotoExpression *>(jnzIfTrue->expressions[0])->label);
auto jnzIfFalse = static_cast<ExpressionTree::ConditionalExpression *>(mainFunc->block->expressions[5])->ifFalse;
BOOST_CHECK(!jnzIfFalse);
// return;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Return), static_cast<int>(mainFunc->block->expressions[6]->type));
auto startFunc = functions[1];
BOOST_CHECK_EQUAL("_start", startFunc->name);
}
BOOST_AUTO_TEST_CASE(should_decompile_program__loop) {
// given
auto routines = Routines();
routines.initForKotOR();
auto program = ScriptProgram("");
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RETN));
// int a = 0;
program.add(Instruction(InstructionType::RSADDI));
program.add(Instruction::newCONSTI(0));
program.add(Instruction::newCPDOWNSP(-8, 4));
program.add(Instruction::newMOVSP(-4));
// loc_loop:
// a++;
program.add(Instruction::newINCISP(-4));
// if(a < 10) { goto loc_loop; }
program.add(Instruction::newCONSTI(10));
program.add(Instruction::newCPTOPSP(-8, 4));
program.add(Instruction(InstructionType::LTII));
program.add(Instruction::newJNZ(-22));
// return;
program.add(Instruction::newMOVSP(-4));
program.add(Instruction(InstructionType::RETN));
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
BOOST_CHECK_EQUAL(0ll, globals.size());
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(2ll, functions.size());
auto mainFunc = functions[0];
BOOST_CHECK_EQUAL("main", mainFunc->name);
BOOST_CHECK_EQUAL(0ll, mainFunc->inputs.size());
BOOST_CHECK_EQUAL(0ll, mainFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(mainFunc->returnType));
auto startFunc = functions[1];
BOOST_CHECK_EQUAL("_start", startFunc->name);
}
BOOST_AUTO_TEST_CASE(should_decompile_program__vectors) {
// given
auto routines = Routines();
routines.initForKotOR();
auto program = ScriptProgram("");
program.add(Instruction::newJSR(8));
program.add(Instruction(InstructionType::RETN));
// vector v1 = Vector(1.0f, 2.0f, 3.0f);
program.add(Instruction::newCONSTF(3.0f));
program.add(Instruction::newCONSTF(2.0f));
program.add(Instruction::newCONSTF(1.0f));
program.add(Instruction::newACTION(142, 3));
// vector v2 = Vector(-3.0f, -2.0f, -1.0f);
program.add(Instruction::newCONSTF(-1.0f));
program.add(Instruction::newCONSTF(-2.0f));
program.add(Instruction::newCONSTF(-3.0f));
program.add(Instruction::newACTION(142, 3));
// vector v3 = v1 + v2;
program.add(Instruction::newCPTOPSP(-24, 24));
program.add(Instruction(InstructionType::ADDVV));
program.add(Instruction::newMOVSP(-12));
// vector v4 = v1 - v2;
program.add(Instruction::newCPTOPSP(-24, 24));
program.add(Instruction(InstructionType::SUBVV));
program.add(Instruction::newMOVSP(-12));
// vector v5 = 2.0f * v1;
program.add(Instruction::newCONSTF(2.0f));
program.add(Instruction::newCPTOPSP(-24, 12));
program.add(Instruction(InstructionType::MULFV));
program.add(Instruction::newMOVSP(-12));
// vector v6 = 2.0f / v1;
program.add(Instruction::newCONSTF(2.0f));
program.add(Instruction::newCPTOPSP(-24, 12));
program.add(Instruction(InstructionType::DIVFV));
program.add(Instruction::newMOVSP(-12));
// vector v7 = v1 * 2.0f;
program.add(Instruction::newCPTOPSP(-24, 12));
program.add(Instruction::newCONSTF(2.0f));
program.add(Instruction(InstructionType::MULVF));
program.add(Instruction::newMOVSP(-12));
// vector v8 = v1 / 2.0f;
program.add(Instruction::newCPTOPSP(-24, 12));
program.add(Instruction::newCONSTF(2.0f));
program.add(Instruction(InstructionType::DIVVF));
program.add(Instruction::newMOVSP(-12));
// float f = VectorToAngle(v1);
program.add(Instruction::newCPTOPSP(-24, 12));
program.add(Instruction::newACTION(145, 1));
program.add(Instruction::newMOVSP(-4));
// return;
program.add(Instruction(InstructionType::RETN));
// when
auto tree = ExpressionTree::fromProgram(program, routines);
// then
auto &globals = tree.globals();
BOOST_CHECK_EQUAL(0ll, globals.size());
auto &functions = tree.functions();
BOOST_CHECK_EQUAL(2ll, functions.size());
auto mainFunc = functions[0];
BOOST_CHECK_EQUAL("main", mainFunc->name);
BOOST_CHECK_EQUAL(0ll, mainFunc->inputs.size());
BOOST_CHECK_EQUAL(0ll, mainFunc->outputs.size());
BOOST_CHECK_EQUAL(static_cast<int>(VariableType::Void), static_cast<int>(mainFunc->returnType));
BOOST_CHECK_EQUAL(71ll, mainFunc->block->expressions.size());
// vector v1 = Vector(1.0f, 2.0f, 3.0f);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[0]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[1]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[2]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[3]->type));
auto v1Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[3]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Action), static_cast<int>(v1Assign->right->type));
auto v1Action = static_cast<ExpressionTree::ActionExpression *>(v1Assign->right);
BOOST_CHECK_EQUAL(142, v1Action->action);
BOOST_CHECK_EQUAL(3ll, v1Action->arguments.size());
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[4]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[5]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[6]->type));
// vector v2 = Vector(-3.0f, -2.0f, -1.0f);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[7]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[8]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[9]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[10]->type));
auto v2Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[10]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Action), static_cast<int>(v2Assign->right->type));
auto v2Action = static_cast<ExpressionTree::ActionExpression *>(v2Assign->right);
BOOST_CHECK_EQUAL(142, v2Action->action);
BOOST_CHECK_EQUAL(3ll, v2Action->arguments.size());
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[11]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[12]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[13]->type));
// vector v3 = v1 + v2;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[14]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[15]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[16]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[17]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[18]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[19]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[20]->type));
auto v3Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[20]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Add), static_cast<int>(v3Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[21]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[22]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[23]->type));
// vector v4 = v1 - v2;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[24]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[25]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[26]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[27]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[28]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[29]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[30]->type));
auto v4Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[30]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Subtract), static_cast<int>(v4Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[31]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[32]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[33]->type));
// vector v5 = 2.0f * v1;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[34]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[35]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[36]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[37]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[38]->type));
auto v5Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[38]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Multiply), static_cast<int>(v5Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[39]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[40]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[41]->type));
// vector v6 = 2.0f / v1;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[42]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[43]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[44]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[45]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[46]->type));
auto v6Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[46]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Divide), static_cast<int>(v6Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[47]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[48]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[49]->type));
// vector v7 = v1 * 2.0f;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[50]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[51]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[52]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[53]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[54]->type));
auto v7Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[54]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Multiply), static_cast<int>(v7Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[55]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[56]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[57]->type));
// vector v8 = v1 / 2.0f;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[58]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[59]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[60]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[61]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[62]->type));
auto v8Assign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[62]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Divide), static_cast<int>(v8Assign->right->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[63]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[64]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[65]->type));
// float f = VectorToAngle(v1);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[66]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[67]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[68]->type));
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Assign), static_cast<int>(mainFunc->block->expressions[69]->type));
auto fAssign = static_cast<ExpressionTree::BinaryExpression *>(mainFunc->block->expressions[69]);
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Action), static_cast<int>(fAssign->right->type));
auto fAction = static_cast<ExpressionTree::ActionExpression *>(fAssign->right);
BOOST_CHECK_EQUAL(145, fAction->action);
BOOST_CHECK_EQUAL(1ll, fAction->arguments.size());
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Vector), static_cast<int>(fAction->arguments[0]->type));
// return;
BOOST_CHECK_EQUAL(static_cast<int>(ExpressionType::Return), static_cast<int>(mainFunc->block->expressions[70]->type));
auto startFunc = functions[1];
BOOST_CHECK_EQUAL("_start", startFunc->name);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "0bda89a102dc37ba06bf28be2e2988b0b2093121", "size": 28553, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/script/expressiontree.cpp", "max_stars_repo_name": "seedhartha/revan", "max_stars_repo_head_hexsha": "b9a98007ca2f510b42894ecd09fb623571b433dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37.0, "max_stars_repo_stars_event_min_datetime": "2020-06-27T18:50:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-02T14:13:51.000Z", "max_issues_repo_path": "test/script/expressiontree.cpp", "max_issues_repo_name": "seedhartha/revan", "max_issues_repo_head_hexsha": "b9a98007ca2f510b42894ecd09fb623571b433dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/script/expressiontree.cpp", "max_forks_repo_name": "seedhartha/revan", "max_forks_repo_head_hexsha": "b9a98007ca2f510b42894ecd09fb623571b433dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-07-14T13:32:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-14T13:32:15.000Z", "avg_line_length": 59.3617463617, "max_line_length": 183, "alphanum_fraction": 0.7401323854, "num_tokens": 7034}
|
[STATEMENT]
lemma in_atlas_order_le: "c \<in> c_manifold.atlas charts l" if "l \<le> k" "c \<in> atlas"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<in> c_manifold.atlas charts l
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. c \<in> c_manifold.atlas charts l
[PROOF STEP]
interpret l: c_manifold charts l
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_manifold charts l
[PROOF STEP]
using \<open>l \<le> k\<close>
[PROOF STATE]
proof (prove)
using this:
l \<le> k
goal (1 subgoal):
1. c_manifold charts l
[PROOF STEP]
by (rule c_manifold_order_le)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. c \<in> l.atlas
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<in> l.atlas
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
l \<le> k
c \<in> atlas
goal (1 subgoal):
1. c \<in> l.atlas
[PROOF STEP]
by (auto simp: l.atlas_def atlas_def smooth_compat_le[OF _ \<open>l \<le> k\<close>])
[PROOF STATE]
proof (state)
this:
c \<in> l.atlas
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 494, "file": "Smooth_Manifolds_Differentiable_Manifold", "length": 8}
|
# -*- coding: UTF-8 -*-
"""
## ==========================================================================
##
## author : Liang He, heliang@mail.tsinghua.edu.cn
## Xianhong Chen, chenxianhong@mail.tsinghua.edu.cn
## description : sre10 demo
## comparison of LDA and LPLDA
## LDA: linear discriminant analysis
## LPLDA: local pairwise linear discriminant analysis
## created : 20180612
## last revised :
##
## Liang He, +86-13426228839, heliang@mail.tsinghua.edu.cn
## Aurora Lab, Department of Electronic Engineering, Tsinghua University
## ==========================================================================
"""
import numpy as np
import eval_ndx_score
import misc_function
import LDA
import LPLDA
import sys
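# --- hedged helper sketch (not part of the original demo) ---
# The Test* functions below all repeat the same length-normalisation plus
# cosine-scoring step on i-vectors; a minimal standalone version of that step
# could look like this (the function name is ours, for illustration only).
def cosine_score_matrix(model_ivec, test_ivec):
    # length-normalise each i-vector to unit L2 norm
    model_ivec = model_ivec / np.sqrt(np.sum(model_ivec ** 2, axis=1))[:, np.newaxis]
    test_ivec = test_ivec / np.sqrt(np.sum(test_ivec ** 2, axis=1))[:, np.newaxis]
    # cosine similarity of every model/test pair, flattened to one score per trial
    return np.dot(model_ivec, test_ivec.T).reshape(-1, 1)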
def TestPrepare():
data_path = './data/'
misc_function.generate_lambda_label(
data_path + "sre050608_swb_male_lambda_ivec.csv",
data_path + "sre050608_swb_male_lambda.ndx",
data_path + "sre050608_swb_male_lambda.ndx.label")
misc_function.generate_trial_mask(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx',
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort',
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.mask')
misc_function.generate_trial_label(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort',
data_path + 'nist_sre10_trial_coreext_coreext_key.ndx',
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.label')
def TestCosine():
print("begin test cosine ...")
data_path = './data/'
## load ivec
model_ids, model_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_train_ivec.csv')
test_ids, test_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_test_ivec.csv')
## load mask and label
trial_mask = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.mask')
trial_label = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.label')
## length norm
model_ivec /= np.sqrt(np.sum(model_ivec ** 2, axis=1))[:, np.newaxis]
test_ivec /= np.sqrt(np.sum(test_ivec ** 2, axis=1))[:, np.newaxis]
## cosine
score_matrix = np.dot(np.asarray(model_ivec), np.asarray(test_ivec.T))
score = np.asarray(score_matrix.reshape(-1,1))
## trial score
trial_score = score[trial_mask == 1]
## eval
[eer, mindcf_sre08, mindcf_sre10, mindcf_sre12, mindcf_sre14,
mindcf_sre16] = eval_ndx_score.eval_ndx_score_label(
trial_score, trial_label)
with open('./results/sre10_demo_result.txt','a+') as f:
print("------ Aurora Lab ------", file = f)
print("eer = {0:.2f} %".format(100 * eer), file = f)
print("mindcf_sre08 = {0:.4f}".format(mindcf_sre08), file = f)
print("mindcf_sre10 = {0:.4f}".format(mindcf_sre10), file = f)
print("mindcf_sre12 = {0:.4f}".format(mindcf_sre12), file = f)
print("mindcf_sre14 = {0:.4f}".format(mindcf_sre14), file = f)
print("mindcf_sre16 = {0:.4f}".format(mindcf_sre16), file = f)
print("comment : cosine", file = f)
print("{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}, {4:.4f}, {5:.4f}".format(
100 * eer, mindcf_sre08, mindcf_sre10, mindcf_sre12,
mindcf_sre14, mindcf_sre16), file = f)
def TestLDA():
print("begin test LDA ...")
data_path = './data/'
lda_dim = 150
## load ivec
dev_ids, dev_ivec = misc_function.load_ivectors(
data_path+'sre050608_swb_male_lambda_ivec.csv')
model_ids, model_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_train_ivec.csv')
test_ids, test_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_test_ivec.csv')
## load mask and label
lambda_label = np.loadtxt(
data_path + 'sre050608_swb_male_lambda.ndx.label')
trial_mask = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.mask')
trial_label = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.label')
dev_ivec = dev_ivec[lambda_label > -1]
lambda_label = lambda_label[lambda_label > -1]
# remove mean
m = np.mean(dev_ivec, axis=0)
dev_ivec = dev_ivec - m
model_ivec = model_ivec - m
test_ivec = test_ivec - m
## LDA
lda = LDA.LinearDiscriminantAnalysis(n_components=lda_dim)
lda.fit(np.asarray(dev_ivec), np.asarray(lambda_label))
model_ivec = lda.transform(np.asarray(model_ivec))
test_ivec = lda.transform(np.asarray(test_ivec))
## length norm
model_ivec /= np.sqrt(np.sum(model_ivec ** 2, axis=1))[:, np.newaxis]
test_ivec /= np.sqrt(np.sum(test_ivec ** 2, axis=1))[:, np.newaxis]
## cosine
score_matrix = np.dot(np.asarray(model_ivec), np.asarray(test_ivec.T))
score = np.asarray(score_matrix.reshape(-1,1))
## trial score
trial_score = score[trial_mask == 1]
## eval
[eer, mindcf_sre08, mindcf_sre10, mindcf_sre12, mindcf_sre14,
mindcf_sre16] = eval_ndx_score.eval_ndx_score_label(
trial_score, trial_label)
with open('./results/sre10_demo_result.txt','a+') as f:
print("------ Aurora Lab ------", file = f)
print("eer = {0:.2f} %".format(100 * eer), file = f)
print("mindcf_sre08 = {0:.4f}".format(mindcf_sre08), file = f)
print("mindcf_sre10 = {0:.4f}".format(mindcf_sre10), file = f)
print("mindcf_sre12 = {0:.4f}".format(mindcf_sre12), file = f)
print("mindcf_sre14 = {0:.4f}".format(mindcf_sre14), file = f)
print("mindcf_sre16 = {0:.4f}".format(mindcf_sre16), file = f)
print("comment : LDA", file = f)
print("{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}, {4:.4f}, {5:.4f}".format(
100 * eer, mindcf_sre08, mindcf_sre10, mindcf_sre12,
mindcf_sre14, mindcf_sre16), file = f)
def TestLPLDA():
print("begin test LPLDA ...")
data_path = './data/'
lda_dim = 150
## load ivec
dev_ids, dev_ivec = misc_function.load_ivectors(
data_path+'sre050608_swb_male_lambda_ivec.csv')
model_ids, model_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_train_ivec.csv')
test_ids, test_ivec = misc_function.load_ivectors(
data_path + 'nist_sre10_c5_coreext_male_test_ivec.csv')
## load mask and label
lambda_label = np.loadtxt(
data_path + 'sre050608_swb_male_lambda.ndx.label')
trial_mask = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.mask')
trial_label = np.loadtxt(
data_path + 'nist_sre10_trial_coreext_coreext_c5_male.ndx.sort.label')
dev_ivec = dev_ivec[lambda_label > -1]
lambda_label = lambda_label[lambda_label > -1]
# remove mean
m = np.mean(dev_ivec, axis=0)
dev_ivec = dev_ivec - m
model_ivec = model_ivec - m
test_ivec = test_ivec - m
## LPLDA
lda = LPLDA.LocalPairwiseLinearDiscriminantAnalysis(
n_components=lda_dim)
lda.fit(np.asarray(dev_ivec), np.asarray(lambda_label))
model_ivec = lda.transform(np.asarray(model_ivec))
test_ivec = lda.transform(np.asarray(test_ivec))
## length norm
model_ivec /= np.sqrt(np.sum(model_ivec ** 2, axis=1))[:, np.newaxis]
test_ivec /= np.sqrt(np.sum(test_ivec ** 2, axis=1))[:, np.newaxis]
## cosine
score_matrix = np.dot(np.asarray(model_ivec), np.asarray(test_ivec.T))
score = np.asarray(score_matrix.reshape(-1,1))
## trial score
trial_score = score[trial_mask == 1]
## eval
[eer, mindcf_sre08, mindcf_sre10, mindcf_sre12, mindcf_sre14,
mindcf_sre16] = eval_ndx_score.eval_ndx_score_label(
trial_score, trial_label)
with open('./results/sre10_demo_result.txt','a+') as f:
print("------ Aurora Lab ------", file = f)
print("eer = {0:.2f} %".format(100 * eer), file = f)
print("mindcf_sre08 = {0:.4f}".format(mindcf_sre08), file = f)
print("mindcf_sre10 = {0:.4f}".format(mindcf_sre10), file = f)
print("mindcf_sre12 = {0:.4f}".format(mindcf_sre12), file = f)
print("mindcf_sre14 = {0:.4f}".format(mindcf_sre14), file = f)
print("mindcf_sre16 = {0:.4f}".format(mindcf_sre16), file = f)
print("comment : LPLDA", file = f)
print("{0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}, {4:.4f}, {5:.4f}".format(
100 * eer, mindcf_sre08, mindcf_sre10, mindcf_sre12,
mindcf_sre14, mindcf_sre16), file = f)
if __name__=='__main__':
TestPrepare()
TestCosine()
TestLDA()
TestLPLDA()
print('done')
|
{"hexsha": "4b611c88f3daa29e7fb63feb960dc18a11700f53", "size": 9026, "ext": "py", "lang": "Python", "max_stars_repo_path": "sre10_demo.py", "max_stars_repo_name": "sanphiee/LPLDA", "max_stars_repo_head_hexsha": "95941de0a84010dc8c8bdd12e39a276331c0d286", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-07-24T01:51:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T04:01:56.000Z", "max_issues_repo_path": "sre10_demo.py", "max_issues_repo_name": "sanphiee/LPLDA", "max_issues_repo_head_hexsha": "95941de0a84010dc8c8bdd12e39a276331c0d286", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-25T13:06:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-02T08:18:34.000Z", "max_forks_repo_path": "sre10_demo.py", "max_forks_repo_name": "sanphiee/LPLDA", "max_forks_repo_head_hexsha": "95941de0a84010dc8c8bdd12e39a276331c0d286", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-07-25T01:50:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-06T06:04:23.000Z", "avg_line_length": 38.2457627119, "max_line_length": 82, "alphanum_fraction": 0.6230888544, "include": true, "reason": "import numpy", "num_tokens": 2675}
|
import numpy as np
def int_type(img):
if img.max() > 255:
dtype = np.uint16
else:
dtype = np.uint8
return dtype
def normalize(img, maxval=255, pmin=0, pmax=100):
img = img.astype(np.float32)
mn, mx = [np.percentile(img, p) for p in [pmin, pmax]]
img = np.clip((img - mn) / (mx - mn), 0, 1) * maxval
return img
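# --- hedged usage sketch (synthetic data, not part of the original module) ---
if __name__ == "__main__":
    img = np.random.randint(0, 4096, size=(64, 64))
    scaled = normalize(img, maxval=255, pmin=1, pmax=99)  # rescale to [0, 255]
    print(scaled.astype(int_type(scaled)).dtype)          # uint8, because max <= 255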
|
{"hexsha": "64f5ef014f453035f157c0c817ad5a7cd3367d9e", "size": 358, "ext": "py", "lang": "Python", "max_stars_repo_path": "care_batch/utils.py", "max_stars_repo_name": "amedyukhina/care_batch", "max_stars_repo_head_hexsha": "7670eb7bbd9339dcc580cf8686c79900253392eb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "care_batch/utils.py", "max_issues_repo_name": "amedyukhina/care_batch", "max_issues_repo_head_hexsha": "7670eb7bbd9339dcc580cf8686c79900253392eb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "care_batch/utils.py", "max_forks_repo_name": "amedyukhina/care_batch", "max_forks_repo_head_hexsha": "7670eb7bbd9339dcc580cf8686c79900253392eb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0588235294, "max_line_length": 58, "alphanum_fraction": 0.5754189944, "include": true, "reason": "import numpy", "num_tokens": 121}
|
import pandas as pd
import numpy as np
#import os
from Clean_function import clean_note
from collections import OrderedDict
from progressbar import Percentage, ProgressBar,Bar,ETA
from sklearn.model_selection import train_test_split
import tensorflow as tf
import pickle
maxlen=2500
min_word_frequency=5
from tensorflow import keras
#Here we will put the text and label Token "updater"
import pandas as pd
from nltk.tokenize import word_tokenize
from progressbar import Percentage, ProgressBar,Bar,ETA
from collections import OrderedDict
#Writing to TF records
#We write the inputs and labels as integer sequences, which has a bunch of advantages over writing them as multi-hot encodings.
from progressbar import Percentage, ProgressBar,Bar,ETA
import tensorflow as tf
import pickle
import os.path
def write_tf_records(writer,text_list,label_array,maxlen):
'''
Args:
1. writer: a tf data "writer object"; we will use this to "write" the tfrecords to disk
2. text_list: A list of Medical Notes integer coded
3. label_array: A List of Integer coded ICD 9 codes
4. maxlen: the maximum length of a note we want to deal with. Longer notes are truncated
'''
pbar=ProgressBar(widgets=[Bar('=', '[', ']'), ' ', Percentage(), ' ', ETA()],
maxval=len(text_list)).start()
for j in range(0,len(text_list)):
review=text_list[j]
target=label_array[j]
ex = tf.train.SequenceExample()
label_indexes = ex.feature_lists.feature_list['label_indexes']
for token_index in target:
label_indexes.feature.add().int64_list.value.append(token_index)
token_indexes = ex.feature_lists.feature_list['token_indexes']
for token_index in review:
token_indexes.feature.add().int64_list.value.append(token_index)
writer.write(ex.SerializeToString())
pbar.update(j)
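# --- hedged illustration (not part of the original pipeline) ---
# A minimal TF 1.x-style sketch of how the SequenceExamples written by
# write_tf_records could be parsed back; the feature names match the ones above,
# the function name is ours.
def parse_sequence_example(serialized):
    sequence_features = {
        'token_indexes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
        'label_indexes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
    }
    _, sequences = tf.parse_single_sequence_example(
        serialized, sequence_features=sequence_features)
    return sequences['token_indexes'], sequences['label_indexes']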
def initial_tokenizer_text(ICD_file_en='/data/ICD9/icd9.txt'
,tokenizer_name='/tokenizer/Text_Tokenizer.pkl'
,min_word_frequency=5):
#We set up an initial tokenizer on the text descriptions (Italian and English)
#Get English Descriptions
all_names=pd.read_table(ICD_file_en,encoding = "ISO-8859-1")
ICD9_des_en=all_names['LONG DESCRIPTION'].tolist()+all_names['SHORT DESCRIPTION'].tolist()
ICD9_des_en=[clean_note(tx) for tx in ICD9_des_en]
init_text=ICD9_des_en
print('Initial Set Up Tokenizer')
extra_words=[word_tokenize(review) for review in init_text]
extra_words = [word for review in extra_words for word in review]
extra_words=list(set(extra_words))
vocabulary = extra_words
#i + 1 because 0 will represent the padding!
word2idx=OrderedDict(((x,i+1) for i,x in enumerate(vocabulary)))
t=keras.preprocessing.text.Tokenizer(oov_token='UNK__TO_123')
t.word_index = word2idx
t.word_index['UNK__TO_123']=len(vocabulary)+2
# Write word2index (tokenizer) to disk
pickle.dump(t, open(tokenizer_name, 'wb'))
def initial_tokenizer_label(tokenizer_name='/tokenizer/Label_Tokenizer.pkl'):
#we use the keras tokenizer, it is basically a wrapper for the dictionary with some nice added functionality.
t=keras.preprocessing.text.Tokenizer()
t.word_index={}
# Finally we save the tokenizer.
#we leave 0 empty and start the real tokens from 1
pickle.dump(t, open(tokenizer_name, 'wb'))
def Update_Tokenizer_Text(initial_tokenizer,new_text,min_word_frequency=5):
vocabulary_old=list(initial_tokenizer.word_index.keys())
#tokenize the new texts, check for most common words.
reviews = [word_tokenize(review) for review in new_text]
len_reviews = [len(review) for review in reviews]
# Flatten nested list
reviews = [word for review in reviews for word in review]
# Compute the frequency of each word
word_frequency = pd.value_counts(reviews)
# Keep only words with frequency higher than minimum
potential_vocabulary = word_frequency[word_frequency>=min_word_frequency].index.tolist()
#we add words to the vocabulary that appear at least min_word_frequency times in the new docs and which we don't
#already have.
vocab_add=[item for item in potential_vocabulary if item not in vocabulary_old]
print('Words added:')
print(len(vocab_add))
#only add if there is something to add.
if len(vocab_add)>0:
#then we add them, while preserving order
vocab_new=vocabulary_old+vocab_add
word2idx_new=OrderedDict(((x,i+1) for i,x in enumerate(vocab_new)))
initial_tokenizer.word_index = word2idx_new
return initial_tokenizer
def Update_Tokenizer_Labels(initial_tokenizer,labels):
if len(initial_tokenizer.word_index)>0:
vocabulary_old=list(initial_tokenizer.word_index.keys())
potential_vocabulary = [word for review in labels for word in review]
#remove duplicates
potential_vocabulary=list(set(potential_vocabulary))
#only add labels we haven't seen yet
vocab_add=[item for item in potential_vocabulary if item not in vocabulary_old]
vocab_add=[word for word in vocab_add if type(word)==str ]
print('Labels added:')
print(len(vocab_add))
#add it IF THERE IS SOMETHING TO ADD
if len(vocab_add)>0:
vocab_new=vocabulary_old+vocab_add
#use ordered dict to keep original order
word2idx_new=OrderedDict(((x,i+1) for i,x in enumerate(vocab_new)))
#modify the word index with the new index
initial_tokenizer.word_index = word2idx_new
#return tokenizer with new index.
else:
print('Empty dictionary: Initial Tokenizer ? ')
potential_vocabulary = [word for review in labels for word in review]
#remove duplicates
potential_vocabulary=list(set(potential_vocabulary))
vocab_add=[word for word in potential_vocabulary if type(word)==str ]
vocab_new=vocab_add
print('Labels added:')
print(len(vocab_add))
#use ordered dict to keep original order
word2idx_new=OrderedDict(((x,i+1) for i,x in enumerate(vocab_new)))
#modify the word index with the new index
initial_tokenizer.word_index = word2idx_new
#return tokenizer with new index.
return initial_tokenizer
def update_write(tokenizer_text_path,tokenizer_label_path,In_Text,labels,output_name):
#First we check for the existence of the tf records file; if it exists we stop and require a new name
if os.path.isfile(output_name+'_train.tfrecords')==True:
return print("File already exists please choose different name.")
print('Cleaning Notes')
In_Text=[clean_note(tx) for tx in In_Text]
print('Update Text Tokenizer')
initial_tokenizer = pickle.load(open(tokenizer_text_path, 'rb'))
new_tokenizer=Update_Tokenizer_Text(initial_tokenizer=initial_tokenizer,min_word_frequency=min_word_frequency,new_text=In_Text)
pickle.dump(new_tokenizer, open(tokenizer_text_path, 'wb'))
#initial Label tokenizer
print('Update Label Tokenizer')
initial_tokenizer_label = pickle.load(open(tokenizer_label_path, 'rb'))
new_tokenizer_label=Update_Tokenizer_Labels(initial_tokenizer_label,labels)
#Then tokenization, words, labels.
pickle.dump(new_tokenizer_label, open(tokenizer_label_path, 'wb'))
#initial Tokenizer might be empty, we include the handling here.
print('Encode the Data')
In_Text,labels=new_tokenizer.texts_to_sequences(In_Text),new_tokenizer_label.texts_to_sequences(labels)
#Shorten texts to max length (they are already tokenized; don't do this on the raw strings)
In_Text=[x[-maxlen:] for x in In_Text]
#splitting into train test etc.
train_text,val_text,train_labels,val_labels = train_test_split(In_Text, labels, test_size=0.1, random_state=1)
train_text,test_text,train_labels,test_labels = train_test_split(train_text, train_labels, test_size=0.1, random_state=1)
#define the 3 writers
writer_train = tf.python_io.TFRecordWriter( output_name+ '_train.tfrecords')
writer_validation = tf.python_io.TFRecordWriter( output_name+ '_validation.tfrecords')
writer_test = tf.python_io.TFRecordWriter( output_name+ '_test.tfrecords')
#Finally write each TF records file; don't forget to close it!
print('Writing Tf Records Train')
write_tf_records(writer_train,train_text,train_labels,maxlen)
writer_train.close()
print('Writing Tf Records Validation')
write_tf_records(writer_validation,val_text,val_labels,maxlen)
writer_validation.close()
print('Writing Tf Records Test')
write_tf_records(writer_test,test_text,test_labels,maxlen)
writer_test.close()
|
{"hexsha": "0d3c4949c6abfb53eecd2750caee270923ab9564", "size": 8863, "ext": "py", "lang": "Python", "max_stars_repo_path": "update_and_write.py", "max_stars_repo_name": "PiSchool/icd9-labelling", "max_stars_repo_head_hexsha": "1007643c7b96b9ba72d73678a75ffc68e5a2d882", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "update_and_write.py", "max_issues_repo_name": "PiSchool/icd9-labelling", "max_issues_repo_head_hexsha": "1007643c7b96b9ba72d73678a75ffc68e5a2d882", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "update_and_write.py", "max_forks_repo_name": "PiSchool/icd9-labelling", "max_forks_repo_head_hexsha": "1007643c7b96b9ba72d73678a75ffc68e5a2d882", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8066037736, "max_line_length": 131, "alphanum_fraction": 0.7186054383, "include": true, "reason": "import numpy", "num_tokens": 2043}
|
SUBROUTINE TAVISPAK3X(EA,FCN,A,B,C,LUERR,IERR)
IMPLICIT REAL*8 (A-H,O-Z)
C
C THIS ROUTINE SOLVES FOR THE A,B,C COEFFICIENTS OF THE FUNCTION
C
C FCN = A*COS(EA) + B*SIN(EA) - C
C
C WHICH IS USED TO APPROXIMATE THE RAM AND SHADOW FUNCTIONS. THREE
C PAIRS OF ECCENTRIC ANOMALY AND FUNCTION VALUES ARE INPUT AND THE
C CURVE PASSES THROUGH THEM.
C
C VARIABLE DIM TYPE I/O DESCRIPTION
C -------- --- ---- --- -----------
C
C EA 3 R*8 I EA(I) IS THE ECCENTRIC ANOMALY VALUE ASSOCIATED
C WITH THE RAM OR SHADOW FUNCTION VALUE FCN(I).
C
C FCN 3 R*8 I FCN(I) IS THE RAM OR SHADOW FUNCTION VALUE. THE
C THREE (EA(I),FCN(I)) PAIRS ARE USED TO DEFINE
C THE A*COS(EA) + B*SIN(EA) - C CURVE.
C
C A 1 R*8 O THE 'A' COEFFICIENT IN THE EXPRESSION
C
C B 1 R*8 O THE 'B' COEFFICIENT IN THE EXPRESSION
C
C C 1 R*8 O THE 'C' COEFFICIENT IN THE EXPRESSION
C
C IERR 1 I*4 O = 0, NO ERRORS.
C = OTHERWISE, NUMERICAL ERROR. NO A,B,C RETURNED.
C
C***********************************************************************
C
C CODED BY C PETRUZZO GSFC/742 1/86
C MODIFIED....
C
C***********************************************************************
C
REAL*8 EA(3),FCN(3)
REAL*8 A13/-1.D0/, A23/-1.D0/, A33/-1.D0/
REAL*8 DEGRAD / 57.29577951308232D0 /
C
DET(A11,A21,A31,A12,A22,A32) =
* A11*A22*A33 + A12*A23*A31 + A13*A21*A32
* -A31*A22*A13 - A21*A12*A33 - A11*A32*A23
C
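C THE SYSTEM BEING SOLVED IS, FOR I = 1,2,3:
C    A*COS(EA(I)) + B*SIN(EA(I)) - C = FCN(I)
C SO THE THIRD COLUMN OF THE COEFFICIENT MATRIX IS ALWAYS -1, WHICH IS WHY
C A13, A23 AND A33 ARE FIXED AT -1.D0 ABOVE. A AND B ARE FOUND BY CRAMER'S
C RULE USING THE DET FUNCTION; C THEN FOLLOWS FROM THE FIRST EQUATION.
C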
IBUG = 0
LUBUG = 19
C
IF(IBUG.NE.0) WRITE(LUBUG,8501) (EA(I)*DEGRAD,I=1,3),FCN
8501 FORMAT(' TAVISPAK3X DEBUG. INPUT='/,
* ' EA=',3G16.8/,
* ' FCN=',3G16.8)
C
C THE SHADOW OR RAM FUNCTION DEFINES WHEN A TARGET SATISFIES THE
C AVAILABILITY REQUIREMENT. WHEN THE FUNCTION IS POSITIVE, IT IS
C AVAILABLE. WHEN NEGATIVE, IT IS NOT AVAILABLE. WE APPROXIMATE THE
C FUNCTION BY FCN = A*COS(EA) + B*SIN(EA) - C. WE HAVE THREE (EA,FCN)
C PAIRS, SO WE CAN SOLVE FOR A, B, AND C.
C
C
C INITIALIZE
C
IERR = 0
COS1 = DCOS(EA(1))
COS2 = DCOS(EA(2))
COS3 = DCOS(EA(3))
SIN1 = DSIN(EA(1))
SIN2 = DSIN(EA(2))
SIN3 = DSIN(EA(3))
C
C SOLVE FOR THE A, B, AND C CONSTANTS.
C
D = DET( COS1, COS2, COS3, SIN1, SIN2, SIN3 )
IF(DABS(D).LT.1.D-15) THEN
IERR = 9
IF(IBUG.NE.0) WRITE(LUBUG,8607) D
8607 FORMAT(/,' TAVISPAK3X. BAD D-VALUE. VALUE=',G23.16)
GO TO 9999
END IF
AN = DET( FCN(1), FCN(2), FCN(3), SIN1, SIN2, SIN3 )
BN = DET( COS1, COS2, COS3, FCN(1), FCN(2), FCN(3) )
A = AN/D
B = BN/D
C = A * COS1 + B * SIN1 - FCN(1)
IF(IBUG.NE.0) THEN
TEST1 = A*COS1 + B*SIN1 - C - FCN(1)
TEST2 = A*COS2 + B*SIN2 - C - FCN(2)
TEST3 = A*COS3 + B*SIN3 - C - FCN(3)
WRITE(LUBUG,8602) D,AN,BN,A,B,C,TEST1,TEST2,TEST3
8602 FORMAT(' TAVISPAK3X DEBUG 8602.'/,
* ' D,AN,BN=',3G16.8/,
* ' A,B,C= ',3G16.8/,
* ' TEST1,TEST2,TEST3=',3G16.8)
END IF
C
IF(IBUG.NE.0) WRITE(LUBUG,8606) A,B,C
8606 FORMAT(' TAVISPAK3X RETURNING. A,B,C=',3G15.7)
C
9999 CONTINUE
RETURN
END
|
{"hexsha": "7a7291d853467b0101291399b1e873c1fc3ff0da", "size": 3298, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "gsc-13083/tavispak3x.for", "max_stars_repo_name": "SteveDoyle2/nasa-cosmic", "max_stars_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2015-03-14T07:26:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T12:23:17.000Z", "max_issues_repo_path": "gsc-13083/tavispak3x.for", "max_issues_repo_name": "SteveDoyle2/nasa-cosmic", "max_issues_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gsc-13083/tavispak3x.for", "max_forks_repo_name": "SteveDoyle2/nasa-cosmic", "max_forks_repo_head_hexsha": "c8015a9851a04f0483b978d92c2cbaee31c81fe3", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-02-12T22:18:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-08T17:46:54.000Z", "avg_line_length": 32.3333333333, "max_line_length": 72, "alphanum_fraction": 0.5269860522, "num_tokens": 1284}
|
import inspect
import random
import re
import statistics
from bisect import bisect_left
from functools import lru_cache
from typing import Dict, List, Set, Tuple
import numpy as np
import pandas as pd
class _StringSpans:
__slots__ = ("string", "spans")
def __init__(self, string, spans):
self.string = string
self.spans = spans
def __getitem__(self, i):
return self.string[self.spans[2 * i] : self.spans[2 * i + 1]]
def __iter__(self):
return iter(self[i] for i in range(len(self.spans) // 2))
class TokenizedString:
def __init__(
self,
string: str,
spans: np.ndarray,
omitted_tokens: Set[int] = None,
default_delimiter=None,
):
assert spans[0] == 0 and spans[-1] == len(string)
self.string = string
self.spans = spans
self.omitted_tokens = set() if omitted_tokens is None else omitted_tokens
self.default_delimiter = default_delimiter
self._num_tokens = len(self.spans) // 2 - 1
self.tokens = _StringSpans(string, spans[1:])
self.delimiters = _StringSpans(string, spans)
if self.default_delimiter is None:
if self._num_tokens >= 2:
self.default_delimiter = statistics.mode([self.delimiters[i] for i in range(1, self._num_tokens)])
else:
self.default_delimiter = " "
def save(self):
assert not self.omitted_tokens
return {"string": self.string, "spans": self.spans.tolist(), "default_delimiter": self.default_delimiter}
@classmethod
def load(cls, d):
return cls(d["string"], np.array(d["spans"], dtype=int), default_delimiter=d["default_delimiter"])
def _update(self, string=None, spans=None, omitted_tokens=None, default_delimiter=None):
return TokenizedString(
string=string if string is not None else self.string,
spans=spans if spans is not None else self.spans,
omitted_tokens=omitted_tokens if omitted_tokens is not None else self.omitted_tokens,
default_delimiter=default_delimiter if default_delimiter is not None else self.default_delimiter,
)
@staticmethod
def tokenize(s: str, token_regexes: str = "[^ ]+", default_delimiter=None) -> "TokenizedString":
if isinstance(token_regexes, str):
token_regexes = [token_regexes]
pattern = "|".join(f"(?:{r})" for r in token_regexes)
spans = [0]
for m in re.finditer(pattern, s):
spans.append(m.start())
spans.append(m.end())
spans.append(len(s))
return TokenizedString(
s,
np.array(spans, dtype=int),
default_delimiter=default_delimiter,
)
@staticmethod
def from_str(s: str) -> "TokenizedString":
return TokenizedString(s, np.array([0, 0, len(s), len(s)], dtype=int))
@staticmethod
def from_tokens_and_delimiters(tokens: List[str], delimiters: List[str] = None) -> "TokenizedString":
if delimiters is None:
delimiters = [" "] * (len(tokens) + 1)
s = [delimiters[0]]
offset = len(delimiters[0])
spans = [0, offset]
for t, d in zip(tokens, delimiters[1:]):
s.append(t)
offset += len(t)
spans.append(offset)
s.append(d)
offset += len(d)
spans.append(offset)
return TokenizedString(string="".join(s), spans=np.array(spans, dtype=int))
@staticmethod
def empty(default_delimiter=" ") -> "TokenizedString":
return TokenizedString("", np.array([0, 0], dtype=int), default_delimiter=default_delimiter)
def insert(self, i, token, *, before_delimiter=None, delimiters=None):
assert 0 <= i <= self._num_tokens
if i < self._num_tokens and i in self.omitted_tokens:
cur_start = self.spans[2 * i + 1]
cur_end = self.spans[2 * i + 2]
string = self.string[:cur_start] + token + self.string[cur_end:]
spans = self.spans.copy()
index_change = (cur_start + len(token)) - cur_end
spans[2 * i + 2 :] += index_change
omitted_tokens = self.omitted_tokens - {i}
return self._update(string, spans, omitted_tokens)
if before_delimiter is None:
before_delimiter = i != self._num_tokens
if delimiters is None:
if i == 0 and before_delimiter:
if self._num_tokens > 0:
delimiters = ["", self.default_delimiter if self.delimiters[0] == "" else self.delimiters[0]]
else:
delimiters = ["", self.delimiters[0]]
elif i == self._num_tokens and not before_delimiter:
if self._num_tokens > 0:
delimiters = [self.default_delimiter if self.delimiters[-1] == "" else self.delimiters[-1], ""]
else:
delimiters = [self.delimiters[-1], ""]
elif before_delimiter:
if self.delimiters[i - 1] == self.delimiters[i]:
delimiters = [self.delimiters[i - 1], self.delimiters[i]]
else:
delimiters = [self.default_delimiter, self.default_delimiter if i < self._num_tokens else ""]
elif not before_delimiter:
if self.delimiters[i] == self.delimiters[i + 1]:
delimiters = [self.delimiters[i], self.delimiters[i + 1]]
else:
delimiters = [self.default_delimiter if i > 0 else "", self.default_delimiter]
else:
raise AssertionError
string = "".join(
(
self.string[: self.spans[2 * i]],
delimiters[0],
token,
delimiters[1],
self.string[self.spans[2 * i + 1] :],
)
)
token_start = self.spans[2 * i] + len(delimiters[0])
token_end = token_start + len(token)
second_delimiter_end = token_end + len(delimiters[1])
spans = np.empty(self.spans.size + 2, dtype=int)
spans[: 2 * i + 1] = self.spans[: 2 * i + 1]
spans[2 * i + 1] = token_start
spans[2 * i + 2] = token_end
spans[2 * i + 3] = second_delimiter_end
if i < self._num_tokens:
index_change = second_delimiter_end - self.spans[2 * i + 1]
spans[2 * i + 4 :] = self.spans[2 * i + 2 :] + index_change
return self._update(string, spans)
def omit(self, start, end=None):
if not end:
end = start + 1
assert 0 <= start < self._num_tokens
assert 0 < end <= self._num_tokens
return self._update(omitted_tokens=self.omitted_tokens | set(range(start, end)))
def merge(self, start, end):
assert 0 <= start < self._num_tokens
assert 0 < end <= self._num_tokens
spans = np.concatenate(
(
self.spans[: 2 * start + 2],
self.spans[2 * end :],
)
)
return self._update(spans=spans)
@lru_cache(maxsize=1)
def untokenize(self):
if not self.omitted_tokens:
return self.string
s = []
prev_i = -1
new_delimiter = self.delimiters[0]
for i in sorted(self.omitted_tokens):
if i > 0 and prev_i < i - 1:
s.append(new_delimiter)
s.append(self.string[self.spans[2 * prev_i + 2] : self.spans[2 * i]])
new_delimiter = self.delimiters[i]
if new_delimiter != self.delimiters[i + 1]:
if i == 0 or i == self._num_tokens - 1:
new_delimiter = ""
else:
new_delimiter = self.default_delimiter
prev_i = i
if new_delimiter is not None:
s.append(new_delimiter)
if prev_i + 1 < self._num_tokens:
s.append(self.string[self.spans[2 * prev_i + 3] :])
return "".join(s)
def __len__(self):
return self._num_tokens
def __getitem__(self, item):
return self.tokens[item]
def __bool__(self):
return bool(len(self))
def __str__(self):
return self.untokenize()
def __repr__(self):
return f"<TokenizedString string='{self.string}' spans={self.spans.tolist()} omitted_tokens={self.omitted_tokens} default_delimiter='{self.default_delimiter}'>"
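def _tokenized_string_demo():
    # --- hedged illustration with a made-up sentence; not part of the original module ---
    ts = TokenizedString.tokenize("new york city")
    assert ts.omit(1).untokenize() == "new city"                    # drop the middle token
    assert ts.insert(3, "usa").untokenize() == "new york city usa"  # append a new token
    return ts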
def _fast_choice(population, weights, k, std_random):
weights = weights.copy()
samples = []
for _ in range(k):
cum_weights = weights.cumsum()
i = bisect_left(cum_weights, cum_weights[-1] * std_random.random())
weights[i] = 0
samples.append(population[i])
return samples
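def _fast_choice_demo():
    # --- hedged illustration; the population and weights are made up, not part of the original module ---
    # Draws two distinct items without replacement, favouring the heavier weights.
    rng = random.Random(0)
    return _fast_choice(["a", "b", "c"], np.array([0.7, 0.2, 0.1]), k=2, std_random=rng)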
def _materialize_representation(repr):
repr = {p: v.untokenize() if isinstance(v, TokenizedString) else v for p, v in repr.items()}
attrs = {(source, attr): v for (source, attr, attr_or_val), v in repr.items() if attr_or_val == "attr"}
vals = {(source, attr): v for (source, attr, attr_or_val), v in repr.items() if attr_or_val == "val"}
return attrs, vals
def perturb_record_pair(
record_pair,
perturbations,
string_representation=None,
random_state: np.random.Generator = None,
num_injection_sampling: int = None,
injection_only_append_to_same_attr: bool = False,
) -> Tuple[pd.DataFrame, List[Dict[Tuple[str, str], str]], List[int]]:
if string_representation is None:
string_representation = {}
representation = string_representation.copy()
for source, attr in record_pair.columns.to_list():
if (source, attr, "attr") not in representation:
representation[(source, attr, "attr")] = attr
if (source, attr, "val") not in representation:
representation[(source, attr, "val")] = record_pair[source, attr].iloc[0]
if random_state is None:
random_state = np.random.default_rng()
std_random = random.Random(random_state.integers(1e9))
relevant_target_attrs = {}
for source, attr in record_pair.columns.to_list():
injection_type = record_pair.dtypes[source, attr]
target_source = "a" if source == "b" else "b"
relevant_target_attrs[(source, attr, "attr")] = list(c for c in record_pair[target_source].columns if c != attr)
relevant_target_attrs[(source, attr, "val")] = [
target_attr
for target_attr in list(record_pair[target_source].columns)
if str(injection_type) == str(record_pair.dtypes[target_source, target_attr])
or pd.api.types.is_string_dtype(record_pair.dtypes[target_source, target_attr])
]
relevant_targets = {}
for (source, attr, attr_or_val), target_attrs in relevant_target_attrs.items():
targets = []
target_weights = []
target_source = "a" if source == "b" else "b"
if injection_only_append_to_same_attr:
target_weights.append(1.0)
targets.append((target_source, attr, attr_or_val, len(representation[(target_source, attr, attr_or_val)])))
else:
for target_attr in target_attrs:
target_pp = (target_source, target_attr, attr_or_val)
if isinstance(representation[target_pp], TokenizedString):
num_target_j = len(representation[target_pp]) + 1
else:
num_target_j = 1
target_attr_weight = max(1, len(target_attrs) - 1) if attr == target_attr else 1
target_weights.extend([target_attr_weight / num_target_j for _ in range(num_target_j)])
targets.extend(
[(target_source, target_attr, attr_or_val, target_j) for target_j in range(num_target_j)]
)
target_weights = np.array(target_weights)
target_weights = target_weights / target_weights.sum() if target_weights.size else target_weights
relevant_targets[(source, attr, attr_or_val)] = (targets, target_weights)
perturbed_representations = []
groups = []
for group_i, (exclusions, injections) in enumerate(perturbations):
with_exclusions = representation.copy()
token_exclusions = [p for p in exclusions if p[3] is not None]
value_exclusions = [p for p in exclusions if p[3] is None]
for source, attr, attr_or_val, j in token_exclusions:
with_exclusions[(source, attr, attr_or_val)] = with_exclusions[(source, attr, attr_or_val)].omit(j)
for source, attr, attr_or_val, _ in value_exclusions:
existing_value = with_exclusions[(source, attr, attr_or_val)]
if isinstance(existing_value, TokenizedString):
new_value = TokenizedString.empty(default_delimiter=existing_value.default_delimiter)
elif isinstance(existing_value, str):
new_value = ""
else:
new_value = None
with_exclusions[(source, attr, attr_or_val)] = new_value
if not injections or not injection_only_append_to_same_attr:
perturbed_representations.append(with_exclusions)
groups.append(group_i)
if injections:
max_injection_sampling = 0
for (source, attr, attr_or_val, j) in injections:
target_source = "a" if source == "b" else "b"
target_attrs = relevant_target_attrs[(source, attr, attr_or_val)]
suggested_injection_sampling = 0
for target_attr in target_attrs:
target_value = with_exclusions[(target_source, target_attr, attr_or_val)]
if isinstance(target_value, TokenizedString):
suggested_injection_sampling += min(3, len(target_value))
else:
suggested_injection_sampling += 1
suggested_injection_sampling = min(10, suggested_injection_sampling)
max_injection_sampling = max(max_injection_sampling, suggested_injection_sampling)
if num_injection_sampling is None:
num_injection_sampling = max_injection_sampling
if injection_only_append_to_same_attr:
num_injection_sampling = 1
injection_targets_used = {p: set() for p in injections}
sampled_targets = {}
for p in injections:
targets, target_weights = relevant_targets[p[:3]]
sampled_targets[p] = []
while len(sampled_targets[p]) < num_injection_sampling and targets:
sampled_targets[p].extend(
_fast_choice(
targets,
weights=target_weights,
k=min(len(targets), num_injection_sampling - len(sampled_targets[p])),
std_random=std_random,
)
)
for sampling_i in range(num_injection_sampling):
perturbed = with_exclusions.copy()
if injection_only_append_to_same_attr:
injections = sorted(injections, reverse=True, key=lambda inj: inj[3])
else:
std_random.shuffle(injections)
for p in injections:
source, attr, attr_or_val, j = p
pp = p[:3]
if isinstance(representation[pp], TokenizedString):
injection_value = representation[pp][j]
else:
injection_value = representation[pp]
if pd.isna(injection_value):
continue
if not sampled_targets[p]:
continue
target = sampled_targets[p][sampling_i]
injection_targets_used[p].add(target)
target_pp = target[:3]
if isinstance(perturbed[target_pp], TokenizedString):
perturbed[target_pp] = perturbed[target_pp].insert(
target[3], str(injection_value), before_delimiter=std_random.random() < 0.5
)
elif isinstance(perturbed[target_pp], str):
perturbed[target_pp] = TokenizedString.from_str(str(injection_value))
elif pd.isna(perturbed[target_pp]):
if isinstance(injection_value, str):
perturbed[target_pp] = TokenizedString.from_str(str(injection_value))
else:
perturbed[target_pp] = injection_value
else:
perturbed[target_pp] = injection_value
perturbed_representations.append(perturbed)
groups.append(group_i)
all_attrs, all_vals = zip(*[_materialize_representation(repr) for repr in perturbed_representations])
record_pairs = pd.DataFrame(all_vals, columns=record_pair.columns).astype(record_pair.dtypes)
return record_pairs, all_attrs, groups
def get_predictions_scores_for_perturbed_record_pairs(
record_pairs, attr_strings, groups, predict_proba, show_progress
) -> np.ndarray:
# Avoid running prediction on duplicates
num_groups = groups[-1] + 1
dtypes = record_pairs.dtypes
record_pairs = record_pairs.assign(attr_strings=[str(x) for x in attr_strings], group=groups)
record_pairs = (
record_pairs.groupby(by=record_pairs.columns[:-1].to_list(), as_index=False, dropna=False)
.agg(list)
.astype(dtypes)
)
groups_per_unique_pair = record_pairs["group"]
record_pairs = record_pairs[record_pairs.columns[:-2]]
records_a = record_pairs["a"].rename_axis(index="rid")
records_b = record_pairs["b"].rename_axis(index="rid")
record_id_pairs = pd.DataFrame({"a.rid": range(len(record_pairs)), "b.rid": range(len(record_pairs))}).rename_axis(
index="pid"
)
predict_proba_kwargs = {}
if "show_progress" in inspect.signature(predict_proba).parameters:
predict_proba_kwargs["show_progress"] = show_progress
if "attr_strings" in inspect.signature(predict_proba).parameters:
predict_proba_kwargs["attr_strings"] = attr_strings
all_predictions = np.array(predict_proba(records_a, records_b, record_id_pairs, **predict_proba_kwargs))
predictions = [float("-inf")] * num_groups
for p, groups_for_pair in zip(all_predictions, groups_per_unique_pair):
for g in groups_for_pair:
predictions[g] = max(predictions[g], p)
predictions = np.array(predictions)
return predictions
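def _constant_predict_proba(records_a, records_b, record_id_pairs, **kwargs):
    # --- hedged stub (not part of the original module) ---
    # Illustrates the calling convention expected by
    # get_predictions_scores_for_perturbed_record_pairs: one score per record pair.
    return np.full(len(record_id_pairs), 0.5)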
|
{"hexsha": "630c99857f6d521043b8c6fb2af9896f87dff57a", "size": 18769, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lemon/_lemon_utils.py", "max_stars_repo_name": "NilsBarlaug/lemon", "max_stars_repo_head_hexsha": "ee82f20253c50eb5a958fc5507b0df8ca51fa317", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-10-04T06:58:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T12:47:43.000Z", "max_issues_repo_path": "src/lemon/_lemon_utils.py", "max_issues_repo_name": "NilsBarlaug/lemon", "max_issues_repo_head_hexsha": "ee82f20253c50eb5a958fc5507b0df8ca51fa317", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lemon/_lemon_utils.py", "max_forks_repo_name": "NilsBarlaug/lemon", "max_forks_repo_head_hexsha": "ee82f20253c50eb5a958fc5507b0df8ca51fa317", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4326710817, "max_line_length": 168, "alphanum_fraction": 0.5973680004, "include": true, "reason": "import numpy", "num_tokens": 4180}
|
"""
Distance functions to define how "far" apart two vectors are.
"""
import numpy as np
from squidward.utils import exactly_1d
np.seterr(over="raise")
# ---------------------------------------------------------------------------------------------------------------------
# Radial Basis Function
# ---------------------------------------------------------------------------------------------------------------------
class RBF(object):
"""Class for radial basis fucntion distance measure."""
def __init__(self, lengthscale, var_k):
"""
Description
----------
Radial basis function (rbf) distance measure between vectors/arrays.
k_SE(x, x') = σ² exp(-(x - x')² / (2ℓ²))
Parameters
----------
lengthscale: Float
The lengthscale of the rbf function that determines the radius around
which the value of an observation impacts other observations.
var_k: Float
The kernel variance or amplitude. This can be thought of as the maximum
value that the rbf function can take.
Returns
----------
distance object
"""
self.lengthscale = lengthscale
self.var_k = var_k
if lengthscale <= 0.0:
raise Exception("Lengthscale parameter must be greater than zero.")
if var_k <= 0.0:
raise Exception("Kernel variance parameter must be greater than zero.")
def __call__(self, alpha, beta):
"""
Description
----------
Calls the kernel object.
Parameters
----------
alpha: array_like
The first vector to compare.
beta: array_like
The second vector to compare.
Returns
----------
An array representing the covariance between points in
the vectors alpha and beta.
"""
alpha, beta = exactly_1d(alpha), exactly_1d(beta)
distance = np.sum((alpha - beta)**2)
amp = -0.5/self.lengthscale**2
return self.var_k*np.exp(amp*distance)
# ---------------------------------------------------------------------------------------------------------------------
# Linear Kernel
# --------------------------------------------------------------------------------------------------------------------
class Linear(object):
"""Class for radial basis fucntion distance measure."""
def __init__(self, c, var_b, var_k):
"""
Description
----------
Linear distance measure between vectors/arrays.
k_Lin(x, x') = σ_b² + σ_v² (x - c)(x' - c)
Parameters
----------
c: Float
The kernel offset.
var_b: Float
The constant variance.
var_k: Float
The kernel variance.
Returns
----------
distance object
"""
self.c = c
self.var_b = var_b
self.var_k = var_k
assert self.var_b > 0.0, "Constant variance parameter must be greater than zero."
assert self.var_k > 0.0, "Kernel variance parameter must be greater than zero."
def __call__(self, alpha, beta):
"""
Description
----------
Calls the kernel object.
Parameters
----------
alpha: array_like
The first vector to compare.
beta: array_like
The second vector to compare.
Returns
----------
An array representing the covariance between points in
the vectors alpha and beta.
"""
alpha, beta = exactly_1d(alpha), exactly_1d(beta)
return self.var_b + self.var_k*(alpha - self.c)*(beta - self.c)
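# --- hedged usage sketch (illustrative only; the toy vectors are not from the original code) ---
if __name__ == "__main__":
    x = np.array([0.0, 0.0])
    y = np.array([1.0, 1.0])
    rbf = RBF(lengthscale=1.0, var_k=1.0)
    lin = Linear(c=0.0, var_b=1.0, var_k=1.0)
    # RBF: var_k * exp(-0.5 * ||x - y||^2 / lengthscale^2) = exp(-1) ~= 0.368
    print(rbf(x, y))
    # Linear: var_b + var_k * (x - c) * (y - c), evaluated elementwise
    print(lin(x, y))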
|
{"hexsha": "27d06e12d208fe9ab0fb15bb0434c5ac914173ea", "size": 3620, "ext": "py", "lang": "Python", "max_stars_repo_path": "squidward/kernels/distance.py", "max_stars_repo_name": "looyclark/Gaussian_Processes", "max_stars_repo_head_hexsha": "f02aa64bfbca8b3086e403a81178e6ae4702b48a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "squidward/kernels/distance.py", "max_issues_repo_name": "looyclark/Gaussian_Processes", "max_issues_repo_head_hexsha": "f02aa64bfbca8b3086e403a81178e6ae4702b48a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "squidward/kernels/distance.py", "max_forks_repo_name": "looyclark/Gaussian_Processes", "max_forks_repo_head_hexsha": "f02aa64bfbca8b3086e403a81178e6ae4702b48a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1935483871, "max_line_length": 119, "alphanum_fraction": 0.4784530387, "include": true, "reason": "import numpy", "num_tokens": 704}
|
import os
import numpy as np
import pandas as pd
import matplotlib.mlab as ml
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import networkx
import matplotlib.pyplot as plt
import shapely.geometry.linestring as shapely
from shapely.geometry.point import Point
from shapely.ops import split
from shapely.wkt import loads
import copy
from geopandas import GeoDataFrame, sjoin
from gtfspy.routing.journey_data import JourneyDataManager, DiffDataManager
from gtfspy.routing.journey_data_analyzer import JourneyDataAnalyzer
from scripts.all_to_all_settings import *
from scripts.all_to_all_analyzer import AllToAllDifferenceAnalyzer
from gtfspy.mapviz import *
from gtfspy.colormaps import *
from gtfspy.util import makedirs
from gtfspy.gtfs import GTFS
from gtfspy.shapes import get_shape_between_stops
from gtfspy.route_types import *
from gtfspy.stats import get_section_stats
from gtfspy.networks import route_to_route_network
"""
1. SELECT the longest trip (in time) for each route that is active at the desired time
2. cluster stops by name and location
3. split shapes by stop section
4. select representative shape for each stop-to-stop section
5. deal with skip-stop service
6. route ranking for ordering; this could be done by looping through all route sections route by route and assigning the
route code to each stop. Direction is determined by the section's stop ids, assuming smaller to larger;
if the order is the opposite, the new routes will be added to the left of the queue
7. offset routes
"""
if True:
gtfs_name = "lm_daily"
else:
gtfs_name = "old_daily"
class RouteMapMaker:
def __init__(self, gtfs_name):
if isinstance(gtfs_name, str):
self.gtfs = GTFS(FEED_DICT[gtfs_name]["gtfs_dir"])
else:
self.gtfs = gtfs_name
self.bunching_value = 99
self.line_spacing = 0.0001
self.shapes = False
self.crs_wgs = {'init': 'epsg:4326'}
self.crs_eurefin = {'init': 'epsg:3067'}  # metric CRS, still needed by cluster_stops
def cluster_shapes(self):
"""
:return:
"""
# get unique stop-to-stop shapes, with trips aggregated
# split by nearby stops
# match splitted, and aggregate trips
# identify branches: large overlap but crosses buffer, insert pseudo stop at branch
# split everything again
# match splitted
#this query returns shapes for of the maximum trips, both directions
df = self.gtfs.execute_custom_query_pandas(
"""WITH
a AS (
SELECT routes.name AS name, shape_id, route_I, trip_I, routes.type, direction_id,
max(end_time_ds-start_time_ds) AS trip_duration, count(*) AS n_trips
FROM trips
LEFT JOIN routes
USING(route_I)
WHERE start_time_ds >= 7*3600 AND start_time_ds < 8*3600
GROUP BY routes.route_I, direction_id
),
b AS(
SELECT q1.trip_I AS trip_I, q1.stop_I AS from_stop_I, q2.stop_I AS to_stop_I, q1.seq AS seq,
q1.shape_break AS from_shape_break, q2.shape_break AS to_shape_break FROM
(SELECT stop_I, trip_I, shape_break, seq FROM stop_times) q1,
(SELECT stop_I, trip_I, shape_break, seq AS seq FROM stop_times) q2
WHERE q1.seq=q2.seq-1 AND q1.trip_I=q2.trip_I AND q1.trip_I IN (SELECT trip_I FROM a)
),
c AS(
SELECT b.*, name, direction_id, route_I, a.shape_id, group_concat(lat) AS lats,
group_concat(lon) AS lons, count(*) AS n_coords FROM b, a, shapes
WHERE b.trip_I = a.trip_I AND shapes.shape_id=a.shape_id
AND b.from_shape_break <= shapes.seq AND b.to_shape_break >= shapes.seq
GROUP BY route_I, direction_id, b.seq
ORDER BY route_I, b.seq
)
SELECT from_stop_I, to_stop_I, group_concat(trip_I) AS trip_ids,
group_concat(direction_id) AS direction_ids, lats, lons FROM c
WHERE n_coords > 1
GROUP BY from_stop_I, to_stop_I
ORDER BY count(*) DESC""")
df["geometry"] = df.apply(lambda row:
shapely.LineString([(float(lon), float(lat)) for lon, lat in
zip(row["lons"].split(","), row["lats"].split(","))]), axis=1)
gdf = GeoDataFrame(df, crs=self.crs_wgs, geometry=df["geometry"])
#gdf = gdf.to_crs(self.crs_eurefin)
gdf = gdf.to_crs(self.crs_wgs)
gdf = gdf.drop(["lats", "lons"], axis=1)
stops_set = set(gdf["from_stop_I"]) | set(gdf["to_stop_I"])
gdf["orig_parent_stops"] = list(zip(gdf['from_stop_I'], gdf['to_stop_I']))
clustered_stops = self.cluster_stops(stops_set)
cluster_dict = clustered_stops[["new_stop_I", "stop_I", "geometry"]].set_index('stop_I').T.to_dict('list')
geom_dict = clustered_stops[["new_stop_I", "geometry"]].set_index("new_stop_I").T.to_dict('list')
gdf["to_stop_I"] = gdf.apply(lambda row: cluster_dict[row["to_stop_I"]][0], axis=1)
gdf["from_stop_I"] = gdf.apply(lambda row: cluster_dict[row["from_stop_I"]][0], axis=1)
# to/from_stop_I: cluster id
# orig_parent_stops: old id
# child_stop_I: cluster id
splitted_gdf = self.split_shapes_by_nearby_stops(clustered_stops, gdf)
splitted_gdf['child_stop_I'] = splitted_gdf.apply(lambda row: ",".join([str(int(x)) for x in row.child_stop_I]), axis=1)
splitted_gdf_grouped = splitted_gdf.groupby(['child_stop_I'])
splitted_gdf_grouped = splitted_gdf_grouped.agg({'orig_parent_stops': lambda x: tuple(x),
'geometry': lambda x: x.iloc[0]}, axis=1)
splitted_gdf = splitted_gdf_grouped.reset_index()
splitted_gdf['value'] = splitted_gdf.apply(lambda row: 1, axis=1)
#splitted_gdf = splitted_gdf.set_geometry(splitted_gdf["geometry"], crs=self.crs_eurefin)
splitted_gdf = self.match_shapes(splitted_gdf)
splitted_gdf["rand"] = np.random.randint(1, 10, splitted_gdf.shape[0])
print(splitted_gdf)
self.plot_geopandas(splitted_gdf, alpha=0.3)
def split_shapes_by_nearby_stops(self, stops, shapes, buffer=0.01):
"""
Splits shapes by stops, within buffer
:param stops: GeoDataFrame
:param shapes:
:return:
"""
# stops within buffer
# splitter
# retain the "parent" stop section
#stops['geometry'] = stops.apply(lambda row: str(row.geometry), axis=1)
#stops = stops.groupby(['new_stop_I', 'geometry'])['stop_I'].apply(list).reset_index()
#stops["geometry"] = stops.apply(lambda row: loads(row.geometry), axis=1)
stops_grouped = stops.groupby(['new_stop_I'])
stops_grouped = stops_grouped.agg({'stop_I': lambda x: tuple(x),
'geometry': lambda x: x.iloc[0]}, axis=1)
stops = stops_grouped.reset_index()
#stops = stops.set_geometry(stops["geometry"], crs=self.crs_eurefin)
stops["point_geom"] = stops["geometry"]
shapes["buffer"] = shapes["geometry"].buffer(buffer)
shapes["line_geom"] = shapes["geometry"]
shapes = shapes.set_geometry(shapes["buffer"])
gdf_joined = sjoin(shapes, stops, how="left", op='intersects')
gdf_joined = gdf_joined.set_geometry(gdf_joined["line_geom"])
gdf_joined = gdf_joined.drop(["buffer", "line_geom"], axis=1)
#gdf_joined['geometry'] = gdf_joined.apply(lambda row: str(row.geometry), axis=1)
gdf_grouped = gdf_joined.groupby(["orig_parent_stops", 'from_stop_I', 'to_stop_I'])
gdf_grouped = gdf_grouped.agg({'point_geom': lambda x: tuple(x),
'new_stop_I': lambda x: tuple(x),
'geometry': lambda x: x.iloc[0]}, axis=1)
gdf_joined = gdf_grouped.reset_index()
gdf_joined = gdf_joined.apply(lambda row: self.split_shape_by_points(row), axis=1)
new_list = []
for row in gdf_joined.to_dict('records'):
for shape, stop_tuple in zip(row['shape_parts'], row['child_stop_Is']):
new_row = copy.deepcopy(row)
new_row["shape_part"] = shape
new_row["child_stop_I"] = stop_tuple
new_list.append(new_row)
gdf_joined = pd.DataFrame(new_list)
gdf_joined = gdf_joined.set_geometry(gdf_joined["shape_part"])
return gdf_joined[['child_stop_I', 'orig_parent_stops', 'geometry']]
def check_shape_orientation(self, shape, from_stop_point, to_stop_point):
"""
Checks that the shape goes from the from stop to the to stop and not the opposite direction
:param shape:
:param from_stop_point:
:param to_stop_point:
:return:
"""
# def split_shape_by_points(self, shape, shape_parents, points, point_ids):
def split_shape_by_points(self, row):
"""
:param shape:
:param shape_parents:
:param points:
:param point_ids:
:return:
"""
shape = row["geometry"]
shape_parents = [row["from_stop_I"], row["to_stop_I"]]
points = row["point_geom"]
point_ids = row["new_stop_I"]
# TODO: change this to also output the cluster point ids for the end points so that matching is possible directly
if not isinstance(points[0], shapely.Point):
row["shape_parts"] = [shape]
row["child_stop_Is"] = [shape_parents]
return row
# finds the distance on the shape that corresponds to the closest distance to the point
distance_dict = {shape.project(point): {"point": point, "id": id} for point, id in zip(points, point_ids)}
shape_parts = []
stop_sections = []
rest_of_shape = copy.deepcopy(shape)
previous_stop = shape_parents[0]
# loops through the points in the order they are compared to the shape
if len(distance_dict) >= 3:
for key in sorted(distance_dict)[1:-1]:
if distance_dict[key]["id"] not in shape_parents:
new_point = shape.interpolate(key)
# TODO: this step only works with a modified version of split(), replace with a custom function
geometries = split(rest_of_shape, new_point)
stop_sections.append((int(previous_stop), int(distance_dict[key]["id"])))
previous_stop = distance_dict[key]["id"]
if len(geometries) == 2:
rest_of_shape = geometries[1]
shape_parts.append(geometries[0])
else:
rest_of_shape = geometries[0]
shape_parts.append(rest_of_shape)
stop_sections.append((previous_stop, shape_parents[1]))
#if len(shape_parts) > 1:
# assert not all(x == shape_parts[0] for x in shape_parts)
#shape_parts = row["new_stop_I"]
#stop_sections = row["new_stop_I"]
row["shape_parts"] = shape_parts
row["child_stop_Is"] = stop_sections
return row
# return (shape_parts, stop_sections)
def match_shapes(self, shapes, buffer=0.01):
"""
checks if shapes are completely within each other's buffers, and aggregates routes for these
:return:
"""
# buffer for spatial self join
first_points = shapes["geometry"].apply(lambda x: Point(x.coords[0]))
last_points = shapes["geometry"].apply(lambda x: Point(x.coords[-1]))
points = pd.concat([first_points, last_points])
point_df = points.to_frame(name='geometry')
#point_df = point_df.set_geometry(point_df["geometry"], crs=self.crs_eurefin)
point_df = point_df.set_geometry(point_df["geometry"], crs=self.crs_wgs)
#buffer = point_df.buffer(30)
#buffer = GeoDataFrame(crs=self.crs_eurefin, geometry=point_df.buffer(buffer))
buffer = GeoDataFrame(crs=self.crs_wgs, geometry=point_df.buffer(buffer))
buffer["everything"] = 1
gdf_poly = buffer.dissolve(by="everything")
polygons = None
for geoms in gdf_poly["geometry"]:
polygons = [polygon for polygon in geoms]
#single_parts = GeoDataFrame(crs=self.crs_eurefin, geometry=polygons)
single_parts = GeoDataFrame(crs=self.crs_wgs, geometry=polygons)
single_parts['new_stop_I'] = single_parts.index
gdf_joined = sjoin(shapes, single_parts, how="left", op='within')
return gdf_joined
def identify_branches(self, shapes, buffer=0.01):
"""
Checks for other shapes that exit the buffer of another shape. In these cases a pseudo stop is created,
for further splitting of shapes
:param shapes:
:param buffer:
:return:
"""
def get_linestrings_for_stop_section(self, stop_tuple, trip_id, from_shape_brake, to_shape_brake):
try:
assert self.shapes
shapedict = get_shape_between_stops(self.gtfs.conn.cursor(),
trip_id,
stop_tuple[0],
stop_tuple[1],
(from_shape_brake, to_shape_brake))
assert not len(set(shapedict["lat"])) <= 1
assert not len(set(shapedict["lon"])) <= 1
return shapely.LineString([(lon, lat) for lat, lon in zip(shapedict["lat"], shapedict["lon"])])
except (ValueError, AssertionError):
lat0, lon0 = self.gtfs.get_stop_coordinates(stop_tuple[0])
lat1, lon1 = self.gtfs.get_stop_coordinates(stop_tuple[1])
if lat0 == lat1 and lon0 == lon1:
return
else:
return shapely.LineString([(lon0, lat0), (lon1, lat1)])
def route_parallels(self, line, route, all_routes, bunching_value=5, line_spacing=0.0001):
n_parallels = len(all_routes)
line_routes = []
if not line:
return
if n_parallels < bunching_value:
offsets = np.linspace(-1 * ((n_parallels - 1) * line_spacing) / 2,
((n_parallels - 1) * line_spacing) / 2, n_parallels)
try:
return line.parallel_offset(abs(offsets[all_routes.index(route)]), "left" if offsets[all_routes.index(route)] < 0 else "right")
            except Exception:
print(line, offsets[all_routes.index(route)])
else:
return line
def get_route_ranking(self, df):
route_order_for_stop_sections = {}
stop_section_shapes = {}
for row in df.itertuples():
section_tuple = (row.from_stop_I, row.to_stop_I)
alt_section_tuple = (row.to_stop_I, row.from_stop_I)
if not section_tuple in route_order_for_stop_sections and not alt_section_tuple in route_order_for_stop_sections:
route_order_for_stop_sections[section_tuple] = [row.route_I]
stop_section_shapes[section_tuple] = (row.trip_I, row.from_shape_break, row.to_shape_break)
elif section_tuple in route_order_for_stop_sections:
route_order_for_stop_sections[section_tuple].append(row.route_I)
elif alt_section_tuple in route_order_for_stop_sections:
route_order_for_stop_sections[alt_section_tuple].insert(0, row.route_I)
return route_order_for_stop_sections, stop_section_shapes
def get_geometry(self, stop_tuple, route, all_routes, cluster_dict):
#line = get_linestrings_for_stop_section(stop_tuple, trip_id, from_shape_break, to_shape_break)
#print(stop_tuple, cluster_dict[stop_tuple[0]], cluster_dict[stop_tuple[1]])
line = shapely.LineString([cluster_dict[stop_tuple[0]][0], cluster_dict[stop_tuple[1]][0]])
if stop_tuple[0] == stop_tuple[1]:
return
else:
return self.route_parallels(line, route, all_routes, bunching_value=self.bunching_value, line_spacing=self.line_spacing)
def cluster_stops(self, stops_set, distance=100):
"""
        merges stops that are within `distance` of each other into one stop
:param stops_set: iterable that lists stop_I's
:param distance: int, distance to merge, meters
:return:
"""
df = self.gtfs.execute_custom_query_pandas("""SELECT * FROM stops
WHERE stop_I IN ({stops_set})""".format(stops_set=",".join([str(x) for x in stops_set])))
df["geometry"] = df.apply(lambda row: Point((row["lon"], row["lat"])), axis=1)
gdf = GeoDataFrame(df, crs=self.crs_wgs, geometry=df["geometry"])
gdf = gdf.to_crs(self.crs_eurefin)
gdf_poly = gdf.copy()
gdf_poly["geometry"] = gdf_poly["geometry"].buffer(distance/2)
gdf_poly["everything"] = 1
gdf_poly = gdf_poly.dissolve(by="everything")
polygons = None
for geoms in gdf_poly["geometry"]:
polygons = [polygon for polygon in geoms]
single_parts = GeoDataFrame(crs=self.crs_eurefin, geometry=polygons)
single_parts['new_stop_I'] = single_parts.index
gdf_joined = sjoin(gdf, single_parts, how="left", op='within')
single_parts["geometry"] = single_parts.centroid
        gdf_joined = gdf_joined.drop('geometry', axis=1)
centroid_stops = single_parts.merge(gdf_joined, on="new_stop_I")
return centroid_stops
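        # Example (hypothetical usage sketch; assumes a RouteMapMaker instance `rmm`
        # backed by a GTFS database):
        # clustered = rmm.cluster_stops({101, 102, 103}, distance=100)
        # clustered[["new_stop_I", "stop_I"]]  # mapping from original stops to merged clusters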
"""
change projection for accurate buffer distance
merge polygons,
select single parts
calculate centroids
"""
def plot_geopandas(self, gdf, **kwargs):
fig, ax = plt.subplots()
gdf.plot(column="rand", **kwargs)
plt.show()
def main():
"""
:return:
"""
RMM = RouteMapMaker(gtfs_name)
RMM.cluster_shapes()
if __name__ == "__main__":
main()
|
{"hexsha": "533918d205e73429774ab8b93fcbd2405fa0cea2", "size": 18317, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/routemap_cluster_the_shapes.py", "max_stars_repo_name": "jweckstr/westmetro_scripts", "max_stars_repo_head_hexsha": "a16385b00ac8d80f0068f348226ed89e2d0425a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/routemap_cluster_the_shapes.py", "max_issues_repo_name": "jweckstr/westmetro_scripts", "max_issues_repo_head_hexsha": "a16385b00ac8d80f0068f348226ed89e2d0425a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/routemap_cluster_the_shapes.py", "max_forks_repo_name": "jweckstr/westmetro_scripts", "max_forks_repo_head_hexsha": "a16385b00ac8d80f0068f348226ed89e2d0425a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4587378641, "max_line_length": 143, "alphanum_fraction": 0.6247747994, "include": true, "reason": "import numpy,import networkx", "num_tokens": 4348}
|
using LinearAlgebra, ForwardDiff, NLPModels
export Bisseccao, Newton_rc_bissec
function Bisseccao(g, a, b, max_bissec; λ = 0)
ϵ = 1e-4
status= :resolvido
iter = 0
while abs(g(λ)) > ϵ
if g(a)*g(b)==0 && g(b)==0
λ=b
else
λ=a
end
if g(a) * g(b) < 0
while abs(b-a) > ϵ
λ = (b + a) / 2
if g(λ) * g(a) < 0
b = λ
elseif g(λ) * g(b) < 0
a = λ
end
end
end
if g(a) * g(b) > 0
if b < 1000
b = b*5+1e-3
elseif a < 8000
a = a + 40
else
status= :bisseccao_falhou
break
end
end
if iter > max_bissec
            println("Maximum number of bisections reached.")
break
end
iter +=1
end
return λ, status
end
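# Example (illustrative sketch): find a root of g(λ) = λ^2 - 2 bracketed by [0, 2]
# λ, status = Bisseccao(λ -> λ^2 - 2, 0.0, 2.0, 100)
# status == :resolvido, with λ ≈ sqrt(2) up to the internal tolerance of 1e-4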
function Newton_rc_bissec(
nlp::AbstractNLPModel;
η1::Float64 = 1e-2,
η2::Float64 = 0.75,
Δ::Float64 = 5.0,
max_bissec = 1000,
a::Float64 = 0.0,
b::Float64 = 1.0,
atol::Real = 1e-6,
rtol::Real = 1e-6,
max_eval::Int = 1000,
max_iter::Int = 0,
max_time::Float64 = 10.0
)
if !unconstrained(nlp)
error("Problem is not unconstrained")
end
x = copy(nlp.meta.x0)
n = nlp.meta.nvar
f(x) = obj(nlp, x)
∇f(x) = grad(nlp, x)
H(x) = hess(nlp, x)
fx = f(x)
∇fx = ∇f(x)
Hx = Matrix(H(x))
g(λ) = norm(inv(Hx + λ * I) * ∇f(x)) - Δ
d = - Hx \ ∇fx
ϵ = atol + rtol * norm(∇fx)
t₀ = time()
iter = 0
Δt = time() - t₀
solved = norm(∇fx) < ϵ # First order stationary
    tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations
status = :unknown
@info log_header(
[:iter, :fx, :ngx, :nf, :Δt],
[Int, Float64, Float64, Int, Float64],
hdr_override=Dict(:fx => "f(x)", :ngx => "‖∇f(x)‖", :nf => "#f")
)
@info log_row(
Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
)
while !(solved || tired)
if norm(d) < Δ
d = - Hx \ ∇f(x)
else
λ, stat = Bisseccao(g, a, b, max_bissec)
if stat == :bisseccao_falhou
                @warn("Bisection failed")
break
end
d = - (Hx + λ * I) \ ∇f(x)
end
Ared = f(x) - f(x + d)
Pred = f(x) - (f(x) + dot(d, ∇f(x)) + dot(d, Hx * d) / 2)
ρ = Ared / Pred
if ρ < η1
Δ = Δ / 2
elseif ρ < η2
x = x + d
else
x = x + d
Δ = 2Δ
end
fx = f(x)
∇fx = ∇f(x)
Hx = Matrix(H(x))
iter += 1
Δt = time() - t₀
solved = norm(∇fx) < ϵ # First order stationary
        tired = neval_obj(nlp) ≥ max_eval > 0 || iter ≥ max_iter > 0 || Δt ≥ max_time > 0 # Excess time, iteration, evaluations
@info log_row(
Any[iter, fx, norm(∇fx), neval_obj(nlp), Δt]
)
end
if solved
status = :first_order
elseif tired
if neval_obj(nlp) ≥ max_eval > 0
status = :max_eval
elseif iter ≥ max_iter > 0
status = :max_iter
elseif Δt ≥ max_time > 0
status = :max_time
end
end
return GenericExecutionStats(
status,
nlp,
solution=x,
objective=f(x),
dual_feas=norm(∇fx),
elapsed_time=Δt,
iter=iter
)
end
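# Example (hypothetical usage sketch; assumes ADNLPModels.jl for the test problem):
# using ADNLPModels
# nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0])
# stats = Newton_rc_bissec(nlp, max_iter = 100)
# stats.status, stats.solution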
|
{"hexsha": "87deea35c473f1911df10821588679bc0386d3f2", "size": 3648, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Newton_rc_bissec.jl", "max_stars_repo_name": "RogerioOMDS/JSOSolverTemplate.jl", "max_stars_repo_head_hexsha": "fd57f4de0d9253003f15566fc502cca37abd890e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Newton_rc_bissec.jl", "max_issues_repo_name": "RogerioOMDS/JSOSolverTemplate.jl", "max_issues_repo_head_hexsha": "fd57f4de0d9253003f15566fc502cca37abd890e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Newton_rc_bissec.jl", "max_forks_repo_name": "RogerioOMDS/JSOSolverTemplate.jl", "max_forks_repo_head_hexsha": "fd57f4de0d9253003f15566fc502cca37abd890e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0886075949, "max_line_length": 126, "alphanum_fraction": 0.4320175439, "num_tokens": 1260}
|
import os
import imageio
import numpy as np
from elf.io import open_file
from elf.util import normalize_index
from ..data import ConcatDataset, ImageCollectionDataset, SegmentationDataset
from .util import get_trainer, get_normalizer
from .prediction import predict_with_halo
try:
import napari
except ImportError:
napari = None
# TODO implement prefab metrics
class SampleGenerator:
def __init__(self, trainer, max_samples, need_gt, n_threads):
self.need_gt = need_gt
self.n_threads = n_threads
dataset = trainer.val_loader.dataset
self.ndim = dataset.ndim
(n_samples, load_2d_from_3d, rois,
raw_paths, raw_key,
label_paths, label_key) = self.paths_from_ds(dataset)
if max_samples is None:
self.n_samples = n_samples
else:
self.n_samples = min(max_samples, n_samples)
self.load_2d_from_3d = load_2d_from_3d
self.rois = rois
self.raw_paths, self.raw_key = raw_paths, raw_key
self.label_paths, self.label_key = label_paths, label_key
if self.load_2d_from_3d:
shapes = [
open_file(rp, 'r')[self.raw_key].shape if roi is None else tuple(r.stop - r.start for r in roi)
for rp, roi in zip(self.raw_paths, self.rois)
]
lens = [shape[0] for shape in shapes]
self.offsets = np.cumsum(lens)
def paths_from_ds(self, dataset):
if isinstance(dataset, ConcatDataset):
datasets = dataset.datasets
(n_samples, load_2d_from_3d, rois,
raw_paths, raw_key,
label_paths, label_key) = self.paths_from_ds(datasets[0])
for ds in datasets[1:]:
ns, l2d3d, bb, rp, rk, lp, lk = self.paths_from_ds(ds)
assert rk == raw_key
assert lk == label_key
assert l2d3d == load_2d_from_3d
raw_paths.extend(rp)
label_paths.extend(lp)
rois.append(bb)
n_samples += ns
elif isinstance(dataset, ImageCollectionDataset):
raw_paths, label_paths = dataset.raw_images, dataset.label_images
raw_key, label_key = None, None
n_samples = len(raw_paths)
load_2d_from_3d = False
rois = [None] * n_samples
elif isinstance(dataset, SegmentationDataset):
raw_paths, label_paths = [dataset.raw_path], [dataset.label_path]
raw_key, label_key = dataset.raw_key, dataset.label_key
shape = open_file(raw_paths[0], 'r')[raw_key].shape
roi = getattr(dataset, 'roi', None)
if roi is not None:
roi = normalize_index(roi, shape)
shape = tuple(r.stop - r.start for r in roi)
rois = [roi]
if self.ndim == len(shape):
n_samples = len(raw_paths)
load_2d_from_3d = False
elif self.ndim == 2 and len(shape) == 3:
n_samples = shape[0]
load_2d_from_3d = True
else:
raise RuntimeError
else:
raise RuntimeError(f"No support for dataset of type {type(dataset)}")
return (n_samples, load_2d_from_3d, rois,
raw_paths, raw_key, label_paths, label_key)
def load_data(self, path, key, roi, z):
if key is None:
assert roi is None and z is None
return imageio.imread(path)
        # np.s_ produces a tuple of slices, which is immutable; build the bounding
        # box as a list so the z-slice can be set, and index with a tuple.
        bb = list(np.s_[:, :, :]) if roi is None else list(roi)
        if z is not None:
            bb[0] = z if roi is None else roi[0].start + z
        with open_file(path, 'r') as f:
            ds = f[key]
            ds.n_threads = self.n_threads
            data = ds[tuple(bb)]
        return data
def load_sample(self, sample_id):
if self.load_2d_from_3d:
ds_id = 0
while True:
if sample_id < self.offsets[ds_id]:
break
ds_id += 1
offset = self.offsets[ds_id - 1] if ds_id > 0 else 0
z = sample_id - offset
else:
ds_id = sample_id
z = None
roi = self.rois[ds_id]
raw = self.load_data(self.raw_paths[ds_id], self.raw_key, roi, z)
if not self.need_gt:
return raw
gt = self.load_data(self.label_paths[ds_id], self.label_key, roi, z)
return raw, gt
def __iter__(self):
for sample_id in range(self.n_samples):
sample = self.load_sample(sample_id)
yield sample
def _predict(model, raw, trainer, gpu_ids, save_path, sample_id):
save_key = f"sample{sample_id}"
if save_path is not None and os.path.exists(save_path):
with open_file(save_path, 'r') as f:
if save_key in f:
print("Loading predictions for sample", sample_id, "from file")
ds = f[save_key]
ds.n_threads = 8
return ds[:]
normalizer = get_normalizer(trainer)
dataset = trainer.val_loader.dataset
ndim = dataset.ndim
if isinstance(dataset, ConcatDataset):
patch_shape = dataset.datasets[0].patch_shape
else:
patch_shape = dataset.patch_shape
if ndim == 2 and len(patch_shape) == 3:
patch_shape = patch_shape[1:]
assert len(patch_shape) == ndim
# choose a small halo and set the correct block shape
halo = (32, 32) if ndim == 2 else (8, 16, 16)
block_shape = tuple(psh - 2 * ha for psh, ha in zip(patch_shape, halo))
if save_path is None:
output = None
else:
f = open_file(save_path, 'a')
out_shape = (trainer.model.out_channels,) + raw.shape
chunks = (1,) + block_shape
output = f.create_dataset(save_key, shape=out_shape, chunks=chunks,
compression='gzip', dtype='float32')
gpu_ids = [int(gpu) if gpu != 'cpu' else gpu for gpu in gpu_ids]
pred = predict_with_halo(
raw, model, gpu_ids, block_shape, halo,
preprocess=normalizer,
output=output
)
if output is not None:
f.close()
return pred
def _visualize(raw, prediction, ground_truth):
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(raw)
viewer.add_image(prediction)
if ground_truth is not None:
viewer.add_labels(ground_truth)
def validate_checkpoint(
checkpoint,
gpu_ids,
save_path=None,
samples=None,
max_samples=None,
visualize=True,
metrics=None,
n_threads=None
):
"""Validate model for the given checkpoint visually and/or via metrics.
"""
if visualize and napari is None:
        raise RuntimeError("napari is required for visualize=True but could not be imported")
trainer = get_trainer(checkpoint, device='cpu')
n_threads = trainer.train_loader.num_workers if n_threads is None else n_threads
model = trainer.model
model.eval()
need_gt = metrics is not None
if samples is None:
samples = SampleGenerator(trainer, max_samples, need_gt, n_threads)
else:
assert isinstance(samples, (list, tuple))
if need_gt:
            assert all(len(sample) == 2 for sample in samples)
else:
assert all(isinstance(sample, np.ndarray) for sample in samples)
results = []
for sample_id, sample in enumerate(samples):
        raw, gt = sample if need_gt else (sample, None)
pred = _predict(model, raw, trainer, gpu_ids, save_path, sample_id)
if visualize:
_visualize(raw, pred, gt)
if metrics is not None:
res = metrics(gt, pred)
results.append(res)
return results
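# Example (hypothetical usage sketch; the checkpoint path, GPU ids and save path
# are placeholders):
# results = validate_checkpoint("./checkpoints/my-model", gpu_ids=["0"],
#                               save_path="./predictions.h5", max_samples=2)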
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', required=True,
help="Path to the checkpoint")
parser.add_argument('-g', '--gpus', type=str, nargs='+', required=True)
parser.add_argument('-n', '--max_samples', type=int, default=None)
parser.add_argument('-d', '--data', default=None)
parser.add_argument('-s', '--save_path', default=None)
parser.add_argument('-k', '--key', default=None)
parser.add_argument('-t', '--n_threads', type=int, default=None)
args = parser.parse_args()
# TODO implement loading data
assert args.data is None
validate_checkpoint(args.path, args.gpus, args.save_path,
max_samples=args.max_samples,
n_threads=args.n_threads)
|
{"hexsha": "31d1db6b1e2088ef15d7b0131c9ebd75e0651fc0", "size": 8516, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_em/util/validation.py", "max_stars_repo_name": "JonasHell/torch-em", "max_stars_repo_head_hexsha": "2e008e0cd2f0ea6681581374fce4f9f47b986d55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-03-09T21:31:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T05:24:26.000Z", "max_issues_repo_path": "torch_em/util/validation.py", "max_issues_repo_name": "JonasHell/torch-em", "max_issues_repo_head_hexsha": "2e008e0cd2f0ea6681581374fce4f9f47b986d55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-03-02T23:19:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T19:43:41.000Z", "max_forks_repo_path": "torch_em/util/validation.py", "max_forks_repo_name": "JonasHell/torch-em", "max_forks_repo_head_hexsha": "2e008e0cd2f0ea6681581374fce4f9f47b986d55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-05-18T08:29:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T12:16:20.000Z", "avg_line_length": 33.1361867704, "max_line_length": 111, "alphanum_fraction": 0.5969938938, "include": true, "reason": "import numpy", "num_tokens": 2021}
|
import sys
import matplotlib.pyplot as plt
from pathlib import Path
from loguru import logger
import numpy as np
sys.path.append("./")
from fcutils.plot.figure import clean_axes
from myterial import blue_grey
from analysis.visuals import plot_probe_electrodes
"""
Running this script will save a figure with the number of units on each recording
from all available recordings.
Figures saved at: D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis\all_units
"""
def plot_n_units_per_channel(rname, units, rsites, TARGETS):
"""
Plot the number of units on each channel from a single recording, highlighting
    channels in specific regions
"""
logger.info(f"Plotting n units per channel for {rname}")
f, axes = plt.subplots(figsize=(12, 12), ncols=2, sharey=True)
f.suptitle(rname)
    f._save_name = "activity_units_per_channel"
# draw probe
plot_probe_electrodes(rsites, axes[0], TARGETS)
# draw barplot of # units per channel
counts = units.groupby("site_id").count()["name"]
_colors = [
rsites.loc[rsites.site_id == n]["color"].iloc[0] for n in counts.index
]
_regions = [
rsites.loc[rsites.site_id == n]["brain_region"].iloc[0]
for n in counts.index
]
colors = [
c if r in TARGETS else ("k" if r in ("unknown", "OUT") else blue_grey)
for c, r in zip(_colors, _regions)
]
probe_coords = [
rsites.loc[rsites.site_id == n]["probe_coordinates"].iloc[0]
for n in counts.index
]
axes[1].scatter(
counts.values + np.random.normal(0, 0.02, size=len(counts.values)),
probe_coords,
color=colors,
s=100,
lw=1,
ec="k",
)
for x, y in zip(counts.values, probe_coords):
axes[1].plot([0, x], [y, y], color=[0.2, 0.2, 0.2], lw=2, zorder=-1)
# cleanup and save
axes[0].set(
ylabel="Probe position (um)",
xticks=[],
xlim=[0.5, 1.5],
ylim=[0, 8000],
)
axes[1].set(xlabel="# units per channel", ylim=[0, 8000])
clean_axes(f)
return f
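# Example (hypothetical usage sketch; `units` and `rsites` are the DataFrames fetched
# from db_tables as in the __main__ block below, and the recording name is a placeholder):
# f = plot_n_units_per_channel("example_recording", units, rsites, ["CUN", "PRNc", "PRNr", "GRN"])
# f.savefig("example_recording_units_probe_position.png")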
if __name__ == "__main__":
import sys
sys.path.append("./")
from data.dbase import db_tables
import pandas as pd
save_fld = Path(
r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis\all_units"
)
recordings = (db_tables.Recording).fetch(as_dict=True)
for recording in recordings:
cf = recording["recording_probe_configuration"]
logger.info("Fetching ephys data")
units = db_tables.Unit.get_session_units(
recording["name"],
cf,
spikes=True,
firing_rate=False,
frate_window=100,
)
units["probe_configuration"] = [cf] * len(units)
rsites = pd.DataFrame(
(
db_tables.Probe.RecordingSite
& recording
& f'probe_configuration="{cf}"'
).fetch()
)
logger.info(f"Found {len(units)} units")
if not len(units):
continue
f = plot_n_units_per_channel(
recording["name"], units, rsites, ["CUN", "PRNc", "PRNr", "GRN"]
)
f.savefig(save_fld / f'{recording["name"]}_units_probe_position.png')
plt.close(f)
# plt.show()
|
{"hexsha": "655a18219a03f0f212253cdbabbdbb6d9e02cd00", "size": 3314, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/ephys/probe_n_units_per_channel.py", "max_stars_repo_name": "FedeClaudi/LocomotionControl", "max_stars_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/ephys/probe_n_units_per_channel.py", "max_issues_repo_name": "FedeClaudi/LocomotionControl", "max_issues_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/ephys/probe_n_units_per_channel.py", "max_forks_repo_name": "FedeClaudi/LocomotionControl", "max_forks_repo_head_hexsha": "1281f7894825096ad212407351463a2105c5152a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6166666667, "max_line_length": 86, "alphanum_fraction": 0.6001810501, "include": true, "reason": "import numpy", "num_tokens": 871}
|
One of the many hotels in Davis. Amenities include Wi-Fi hotspots, wireless Internet, and cable TV.
University B&B is closed June 29, 2006.
|
{"hexsha": "d7980641e841efea31047973a2718ff3a537478d", "size": 141, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/University_Bed_and_Breakfast.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/University_Bed_and_Breakfast.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/University_Bed_and_Breakfast.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.1428571429, "max_line_length": 96, "alphanum_fraction": 0.7659574468, "num_tokens": 36}
|
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
import math
from utils.tools import make_positions
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
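        # Frequencies follow omega_i = 10000^(-i / (half_dim - 1)); each position t is
        # embedded as the concatenation of sin(t * omega_i) and cos(t * omega_i).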
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=1e-12)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class LinearNorm(nn.Module):
""" LinearNorm Projection """
def __init__(self, in_features, out_features, bias=False):
super(LinearNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(self.linear.weight)
if bias:
nn.init.constant_(self.linear.bias, 0.0)
def forward(self, x):
x = self.linear(x)
return x
class ConvBlock(nn.Module):
""" Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout, activation=nn.ReLU()):
super(ConvBlock, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
),
nn.BatchNorm1d(out_channels),
activation
)
self.dropout = dropout
self.layer_norm = nn.LayerNorm(out_channels)
def forward(self, enc_input, mask=None):
enc_output = enc_input.contiguous().transpose(1, 2)
enc_output = F.dropout(self.conv_layer(enc_output), self.dropout, self.training)
enc_output = self.layer_norm(enc_output.contiguous().transpose(1, 2))
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output
class ConvNorm(nn.Module):
""" 1D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
add_bias_kv=False, add_zero_attn=False, self_attention=False,
encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, "Self-attention requires query, key and " \
"value to be of the same size"
if self.qkv_same_dim:
self.in_proj_weight = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim))
else:
self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.vdim))
self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim))
if bias:
self.in_proj_bias = nn.Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = nn.Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = nn.Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.enable_torch_version = False
if hasattr(F, "multi_head_attention_forward"):
self.enable_torch_version = True
else:
self.enable_torch_version = False
self.last_attn_probs = None
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
else:
nn.init.xavier_uniform_(self.k_proj_weight)
nn.init.xavier_uniform_(self.v_proj_weight)
nn.init.xavier_uniform_(self.q_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query, key, value,
key_padding_mask=None,
incremental_state=None,
need_weights=True,
static_kv=False,
attn_mask=None,
before_softmax=False,
need_head_weights=False,
enc_dec_attn_constraint_mask=None,
reset_attn_weight=None
):
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
if self.qkv_same_dim:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
self.in_proj_weight,
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask)
else:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
torch.empty([0]),
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
if incremental_state is not None:
            raise NotImplementedError("incremental_state is not supported in this implementation")
else:
saved_state = None
if self.self_attention:
# self-attention
q, k, v = self.in_proj_qkv(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k = self.in_proj_k(key)
v = self.in_proj_v(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if saved_state is not None:
            raise NotImplementedError("saved_state is not supported in this implementation")
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
if len(attn_mask.shape) == 2:
attn_mask = attn_mask.unsqueeze(0)
elif len(attn_mask.shape) == 3:
attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
bsz * self.num_heads, tgt_len, src_len)
attn_weights = attn_weights + attn_mask
if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
-1e9,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-1e9,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
        # `utils` is not imported in this module; use the functional softmax instead
        attn_weights_float = F.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
if reset_attn_weight is not None:
if reset_attn_weight:
self.last_attn_probs = attn_probs.detach()
else:
assert self.last_attn_probs is not None
attn_probs = self.last_attn_probs
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
else:
attn_weights = None
return attn, (attn_weights, attn_logits)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_q(self, query):
if self.qkv_same_dim:
return self._in_proj(query, end=self.embed_dim)
else:
bias = self.in_proj_bias
if bias is not None:
bias = bias[:self.embed_dim]
return F.linear(query, self.q_proj_weight, bias)
def in_proj_k(self, key):
if self.qkv_same_dim:
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
else:
weight = self.k_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[self.embed_dim:2 * self.embed_dim]
return F.linear(key, weight, bias)
def in_proj_v(self, value):
if self.qkv_same_dim:
return self._in_proj(value, start=2 * self.embed_dim)
else:
weight = self.v_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[2 * self.embed_dim:]
return F.linear(value, weight, bias)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
return attn_weights
class Swish(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
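        # Derivative of swish f(x) = x * sigmoid(x):
        # f'(x) = sigmoid(x) * (1 + x * (1 - sigmoid(x)))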
        i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class CustomSwish(nn.Module):
def forward(self, input_tensor):
return Swish.apply(input_tensor)
class TransformerFFNLayer(nn.Module):
def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act="gelu"):
super().__init__()
self.kernel_size = kernel_size
self.dropout = dropout
self.act = act
if padding == "SAME":
self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
elif padding == "LEFT":
self.ffn_1 = nn.Sequential(
nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
nn.Conv1d(hidden_size, filter_size, kernel_size)
)
self.ffn_2 = Linear(filter_size, hidden_size)
if self.act == "swish":
self.swish_fn = CustomSwish()
def forward(self, x, incremental_state=None):
# x: T x B x C
if incremental_state is not None:
            raise NotImplementedError("Nar-generation does not allow incremental_state.")
x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
x = x * self.kernel_size ** -0.5
if incremental_state is not None:
x = x[-1:]
if self.act == "gelu":
x = F.gelu(x)
if self.act == "relu":
x = F.relu(x)
if self.act == "swish":
x = self.swish_fn(x)
x = F.dropout(x, self.dropout, training=self.training)
x = self.ffn_2(x)
return x
class BatchNorm1dTBC(nn.Module):
def __init__(self, c):
super(BatchNorm1dTBC, self).__init__()
self.bn = nn.BatchNorm1d(c)
def forward(self, x):
"""
:param x: [T, B, C]
:return: [T, B, C]
"""
x = x.permute(1, 2, 0) # [B, C, T]
x = self.bn(x) # [B, C, T]
x = x.permute(2, 0, 1) # [T, B, C]
return x
class EncSALayer(nn.Module):
def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
relu_dropout=0.1, kernel_size=9, padding="SAME", norm="ln", act="gelu"):
super().__init__()
self.c = c
self.dropout = dropout
self.num_heads = num_heads
if num_heads > 0:
if norm == "ln":
self.layer_norm1 = LayerNorm(c)
elif norm == "bn":
self.layer_norm1 = BatchNorm1dTBC(c)
self.self_attn = MultiheadAttention(
self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
)
if norm == "ln":
self.layer_norm2 = LayerNorm(c)
elif norm == "bn":
self.layer_norm2 = BatchNorm1dTBC(c)
self.ffn = TransformerFFNLayer(
c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
def forward(self, x, encoder_padding_mask=None, **kwargs):
layer_norm_training = kwargs.get("layer_norm_training", None)
if layer_norm_training is not None:
self.layer_norm1.training = layer_norm_training
self.layer_norm2.training = layer_norm_training
if self.num_heads > 0:
residual = x
x = self.layer_norm1(x)
x, _, = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask
)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
residual = x
x = self.layer_norm2(x)
x = self.ffn(x)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
return x
class Mish(nn.Module):
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class DiffusionEmbedding(nn.Module):
""" Diffusion Step Embedding """
def __init__(self, d_denoiser):
super(DiffusionEmbedding, self).__init__()
self.dim = d_denoiser
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class ResidualBlock(nn.Module):
""" Residual Block """
def __init__(self, d_encoder, residual_channels, dropout):
super(ResidualBlock, self).__init__()
self.conv_layer = ConvNorm(
residual_channels,
2 * residual_channels,
kernel_size=3,
stride=1,
padding=int((3 - 1) / 2),
dilation=1,
)
self.diffusion_projection = LinearNorm(residual_channels, residual_channels)
self.conditioner_projection = ConvNorm(
d_encoder, 2 * residual_channels, kernel_size=1
)
self.output_projection = ConvNorm(
residual_channels, 2 * residual_channels, kernel_size=1
)
def forward(self, x, conditioner, diffusion_step, mask=None):
diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
conditioner = self.conditioner_projection(conditioner)
y = x + diffusion_step
y = self.conv_layer(y) + conditioner
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
return (x + residual) / math.sqrt(2.0), skip
|
{"hexsha": "5e6bc55e268372029e7f57bc85f8e7bb87ff1abd", "size": 25543, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/blocks.py", "max_stars_repo_name": "ishine/DiffSinger", "max_stars_repo_head_hexsha": "d5dbe05ee1c7da0878393c73129089a67d0fe935", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/blocks.py", "max_issues_repo_name": "ishine/DiffSinger", "max_issues_repo_head_hexsha": "d5dbe05ee1c7da0878393c73129089a67d0fe935", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/blocks.py", "max_forks_repo_name": "ishine/DiffSinger", "max_forks_repo_head_hexsha": "d5dbe05ee1c7da0878393c73129089a67d0fe935", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.618556701, "max_line_length": 115, "alphanum_fraction": 0.5806287437, "include": true, "reason": "import numpy", "num_tokens": 5782}
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility for storing common lib and data structures """
import math
from collections import namedtuple
from itertools import product
import numpy as np
import simplejson as json
__author__ = 'Ari Saha (arisaha@icloud.com)'
__date__ = 'Wednesday, March 14th 2018, 2:31:37 pm'
APPS_DICT = {"web": 0.25, "video": 2.0, "voice": 0.1, "others": 0.05}
NEIGHBORING_APS = namedtuple('NEIGHBORING_APS', ['within_grid', 'rest'])
class AP:
def __init__(self,
ap_id=0,
location=None,
n_ues=None,
ues_meeting_sla=None,
max_connections=50,
uplink_bandwidth=25.0,
channel_bandwidth=10.0,
):
# Id of the AP
self.ap_id = ap_id
# location of the AP
self.location = location
# number of UEs currently connected to the AP
# Dictionary with App_type as keys and list of ue_id as values
self.n_ues = self._initialize_n_ues()
# number of UEs meeting their SLAs
self.ues_meeting_sla = self._initialize_ues_slas()
# maximum connections AP can have
self.max_connections = max_connections
# uplink bandwidth of the AP
self.uplink_bandwidth = uplink_bandwidth
# channel bandwidth of the AP
self.channel_bandwidth = channel_bandwidth
def _initialize_n_ues(self):
"""
Helper to setup an empty dictionary with type of Apps as keys.
{"web": set(), "voice": set(), "video": set(), "others": set()}
"""
return {key: set() for key in APPS_DICT.keys()}
def _initialize_ues_slas(self):
"""
Helper to setup an empty dictionary with type of Apps as keys.
{"web": 0, "voice": 0, "video": 0, "others": 0}
"""
return {key: 0 for key in APPS_DICT.keys()}
@property
def to_dict(self):
"""
Formats class AP to a dict
"""
return self.__dict__
def __repr__(self):
"""
Helper to represent AP in the form of:
"AP {'ap_id: 4, 'location': (x, y), 'n_ues': 154}
"""
return "<AP {}>".format(self.to_dict)
class UE:
def __init__(self,
ue_id=0,
ap=0,
location=None,
app=None,
required_bandwidth=0,
neighboring_aps=None,
distance=0,
throughput=0,
sla=1,
signal_power=-100,
):
# Id of the UE
self.ue_id = ue_id
# The access Point (AP) UE is conncted to. AP is identified by its
# location
self.ap = ap
# Location of the UE (used for calculating sig_power)
self.location = location
# Type of application (Web/Video) the UE is running currently
self.app = app
# Required bandwidth for the UE based on the APP
self.required_bandwidth = required_bandwidth
# List of neighboring APs
self.neighboring_aps = neighboring_aps
# Distance between AP and UE
self.distance = distance
# UE's Throughput
self.throughput = throughput
# SLA. Default is meets SLA (1)
self.sla = sla
# Signal power between UE and AP
self.signal_power = signal_power
@property
def to_dict(self):
"""
Formats class UE to a dict
"""
return self.__dict__
@property
def to_json(self):
"""
Formats class UE to a json serializable format
"""
return json.dumps(
self, default=lambda o: o.to_dict, sort_keys=True, indent=4)
def __repr__(self):
"""
Helper to represent UE in the form of:
"<UE {'ud_id': 1, 'location': (x, y), 'ap': 4}>
"""
return "<UE {}>".format(self.to_dict)
def get_ue_app():
"""
    Function to randomly pick the application type for a UE ("web" with 70%
    probability, otherwise "video")
"""
prob = np.around(np.random.rand(), decimals=3)
# 70% of UEs are running "web" application
if prob < 0.7:
return "web"
# rest are running "video"
return "video"
def get_random_location(_min, _max):
"""
Function to generate random (x, y) between min and max
"""
xloc = np.random.randint(_min, _max)
yloc = np.random.randint(_min, _max)
return (xloc, yloc)
def get_center_grid(scale, aps_per_axis):
"""
    Function to generate a random (x, y) within a radius of 1.5*scale around the grid mid-point
"""
mid_point = sum(aps_per_axis) / len(aps_per_axis)
_min = mid_point - 1.5*scale
_max = mid_point + 1.5*scale
return get_random_location(_min, _max)
def get_ue_location(app_type, scale, aps_per_axis):
"""
Function to generate location for UE based on the app.
UEs running video based apps will be placed in the center for the grid.
This is designed so as to simulate 'high traffic load' in certain parts
of the network which will force a handoff to neighboring APs.
Args:
app_type: (string):
Type of application UE is running.
scale: (float):
Scale of each grid. e.g. 100.0 => Each grid is of 100.0 units
aps_per_axis: (list):
List of points in X-axis where APs are located.
Returns:
location: (tuple):
Tuple of X and Y in the grid.
"""
if app_type == "video":
# place in within the center of the grid
return get_center_grid(scale, aps_per_axis)
# place it anywhere on the grid
return get_random_location(0, (1 + (2 * len(aps_per_axis)) * scale))
def get_interval(value, num_list):
"""
Helper to find the interval within which the value lies
"""
if value < num_list[0]:
return (num_list[0], num_list[0])
if value > num_list[-1]:
return (num_list[-1], num_list[-1])
if value == num_list[0]:
return (num_list[0], num_list[1])
if value == num_list[-1]:
return (num_list[-2], num_list[-1])
for index, num in enumerate(num_list):
if value <= num:
return (num_list[index - 1], num_list[index])
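# Example (illustrative): with num_list = [100, 300, 500, 700],
# get_interval(345, num_list) returns (300, 500) and get_interval(50, num_list) returns (100, 100).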
def get_aps_in_grid(ue_location, aps_per_axis):
"""
Function to retrieve a list of neighboring APs in the grid of the UE.
"""
_min, _max = ue_location[0], ue_location[1]
_min_interval = get_interval(_min, aps_per_axis)
_max_interval = get_interval(_max, aps_per_axis)
return list(set(product(_min_interval, _max_interval)))
def valid_ap(ap, aps_per_axis):
"""
Helper to validate ap
"""
ap_x, ap_y = ap
return (ap_x in aps_per_axis and ap_y in aps_per_axis)
def get_valid_neighbors(ap, aps_per_axis):
"""
Helper to return only valid neighbors
"""
scale = aps_per_axis[1] - aps_per_axis[0]
_aps = [
(ap[0] - scale, ap[1]),
(ap[0] + scale, ap[1]),
(ap[0], ap[1] - scale),
(ap[0], ap[1] + scale)]
valid_aps = []
for ap in _aps:
if valid_ap(ap, aps_per_axis):
valid_aps.append(ap)
return valid_aps
def get_extended_neighboring_aps(closest_aps, aps_per_axis, radius):
"""
Function to search for All APs within a given radius from the closest APs.
"""
if not radius:
return closest_aps
all_aps = set(closest_aps)
for ap in closest_aps:
all_aps.update(get_valid_neighbors(ap, aps_per_axis))
return get_extended_neighboring_aps(
list(all_aps), aps_per_axis, radius - 1)
def get_neighboring_aps(ue_location, aps_per_axis, radius=1):
"""
Function to retrieve a list of neighboring APs with a given radius
around the UE.
"""
neighboring_aps_in_grid = get_aps_in_grid(ue_location, aps_per_axis)
rest = set()
if radius > 1:
rest.update(get_extended_neighboring_aps(
neighboring_aps_in_grid, aps_per_axis, radius - 1))
rest -= set(neighboring_aps_in_grid)
return NEIGHBORING_APS(
within_grid=neighboring_aps_in_grid, rest=list(rest))
def get_ue_ap_distance(ap_location, ue_location):
"""
Function to calculate distance between UE and AP
"""
ap_location = np.array(ap_location)
ue_location = np.array(ue_location)
return np.around(
np.linalg.norm(ap_location - ue_location), decimals=3)
def get_closest_ap_location(neighboring_aps, ue_location):
"""
Function that returns closest AP's location from the neighboring ap list
"""
closest_ap = neighboring_aps[0]
min_distance = get_ue_ap_distance(closest_ap, ue_location)
for ap_location in neighboring_aps[1:]:
distance = get_ue_ap_distance(ap_location, ue_location)
if distance < min_distance:
min_distance = distance
closest_ap = ap_location
return closest_ap
def get_ue_ap(ue_location, aps_per_axis, radius):
"""
    Function to retrieve the closest AP to the UE, along with all of its neighboring APs
"""
neighboring_aps = get_neighboring_aps(ue_location, aps_per_axis, radius)
closest_ap_location = get_closest_ap_location(
neighboring_aps.within_grid, ue_location)
all_neighboring_aps = neighboring_aps.within_grid + neighboring_aps.rest
return (closest_ap_location, all_neighboring_aps)
def calculate_distance_factor(ue_ap_distance, scale):
"""
Function to calculate distance factor
"""
return np.around(
(math.exp(-(ue_ap_distance)/(2 * scale))), decimals=3)
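# Example (illustrative, matching the assertion in main() below):
# calculate_distance_factor(441.367, 100) == round(exp(-441.367 / 200), 3) == 0.11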
def calculate_radio_bandwidth(distance_factor, ap_channel_bandwidth):
"""
Function to calculate radio bandwidth of the AP
"""
# calculate radio bandwidth
return np.around((distance_factor * ap_channel_bandwidth), decimals=3)
def calculate_network_bandwidth(n_ues_on_ap, ap_uplink_bandwidth):
"""
Function to calculate network bandwidth
"""
# Ap factor
ap_factor = 1
# to avoid ZeroDivisionError
if n_ues_on_ap:
ap_factor /= n_ues_on_ap
# network bandwidth
return np.around((
ap_factor * ap_uplink_bandwidth), decimals=3)
def get_ue_throughput(scale,
ue_ap_distance,
n_ues_on_ap,
ap_uplink_bandwidth,
ap_channel_bandwidth,
app_required_bandwidth):
"""
Function to calculate throughput of UE
"""
distance_factor = calculate_distance_factor(ue_ap_distance, scale)
radio_bandwidth = calculate_radio_bandwidth(
distance_factor, ap_channel_bandwidth)
network_bandwidth = calculate_network_bandwidth(
n_ues_on_ap, ap_uplink_bandwidth)
return min(radio_bandwidth, network_bandwidth, app_required_bandwidth)
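# Example (illustrative, matching the assertion in main() below):
# get_ue_throughput(100, 441.367, 58, 50.0, 10.0, 0.25) == 0.25
# (here the throughput is capped by the app's required bandwidth)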
def get_ue_sig_power(ue_ap_distance):
"""
Function to calculate signal power between the UE and AP
"""
# To avoid ZeroDivisionError
if ue_ap_distance:
distance = (10 * math.log10(1 / math.pow(ue_ap_distance, 2)))
# discretizing the distance
distance /= 10
return round(distance)
def get_ue_sla(ue_throughput, ue_required_bandwidth):
"""
Function to calculate UE's SLA
"""
return int(ue_throughput >= ue_required_bandwidth)
def main():
"""
Test locally!
"""
ap_list = list(range(100, 900, 200))
print(ap_list)
assert get_interval(345, ap_list) == (300, 500)
ue_location = (345, 567)
neighboring_aps = get_aps_in_grid(ue_location, ap_list)
print(neighboring_aps)
print(get_ue_ap_distance(neighboring_aps[0], ue_location))
closest_ap = get_closest_ap_location(
neighboring_aps, ue_location)
print(closest_ap)
(closest_ap, neighboring_aps) = get_ue_ap(ue_location, ap_list, 1)
print(neighboring_aps)
print(closest_ap)
print("valid neighbors")
print(get_valid_neighbors((500, 700), ap_list))
print("Testing extended_neighboring_aps")
print(get_extended_neighboring_aps(
[(500, 500), (300, 700), (300, 500), (500, 700)], ap_list, 2))
print("radius: 1")
print(get_neighboring_aps(ue_location, ap_list, radius=1))
print("radius: 2")
print(get_neighboring_aps(ue_location, ap_list, radius=2))
print("radius: 3")
print(get_neighboring_aps(ue_location, ap_list, radius=3))
print("radius: 4")
print(get_neighboring_aps(ue_location, ap_list, radius=4))
print("radius: 5")
print(get_neighboring_aps(ue_location, ap_list, radius=5))
print("radius: 6")
print(get_neighboring_aps(ue_location, ap_list, radius=6))
print(get_center_grid(100, ap_list))
ue_ap_distance = 441.367
assert calculate_distance_factor(ue_ap_distance, 100) == 0.11
assert calculate_radio_bandwidth(0.11, 10.0) == 1.1
assert calculate_network_bandwidth(58, 50.0) == 0.862
assert get_ue_throughput(100, 441.367, 58, 50.0, 10.0, 0.25) == 0.25
print(get_ue_sig_power(ue_ap_distance))
return True
if __name__ == '__main__':
main()
|
{"hexsha": "1e767451bedee75bd0182d975de385a08042269a", "size": 13021, "ext": "py", "lang": "Python", "max_stars_repo_path": "rainman2/lib/environment/cellular/dev/utils.py", "max_stars_repo_name": "att-innovate/rainman2", "max_stars_repo_head_hexsha": "edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-06-19T16:52:25.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-25T22:05:38.000Z", "max_issues_repo_path": "rainman2/lib/environment/cellular/dev/utils.py", "max_issues_repo_name": "att-innovate/rainman2", "max_issues_repo_head_hexsha": "edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rainman2/lib/environment/cellular/dev/utils.py", "max_forks_repo_name": "att-innovate/rainman2", "max_forks_repo_head_hexsha": "edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-15T03:06:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-15T03:06:08.000Z", "avg_line_length": 29.5260770975, "max_line_length": 79, "alphanum_fraction": 0.6354350664, "include": true, "reason": "import numpy", "num_tokens": 3368}
|
__precompile__()
module JFVM
# global mumps_solver
# using PyPlot
try
import MUMPS
global mumps_solver = MUMPS
catch
@info "MUMPS solver (optional) is not available."
end
using SparseArrays, FFTW
# using PyCall
# I prefer not to use the following command for the issues that it has on windows machines
# pygui_start(:wx)
# mayavis=0
# try
# @pyimport mayavi.mlab as m
# mayavis=m
# catch
# warn("Mayavi is not installed or could not be imported.")
# end
import Base: +, -, *, /, ^, ==, >, >=, <, <=, broadcast, sin, cos, tan, cot, abs, exp, log, log10
export MeshStructure, BoundaryCondition, CellValue, FaceValue, CellVector,
arithmeticMean, geometricMean, harmonicMean, upwindMean, linearMean,
tvdMean, createBC, boundaryConditionTerm, cellBoundary!, solvePDE,
divergenceTerm, gradientTerm, convectionUpwindTerm, createCellVector,
convectionTerm, convectionTvdTerm, diffusionTerm, createCellVariable,
createFaceVariable, copyCell, fluxLimiter, createMesh1D,
createMesh2D, createMesh3D, createMeshRadial2D, createMeshCylindrical2D,
createMeshCylindrical3D, createMeshCylindrical1D, solveLinearPDE,
linearSourceTerm, constantSourceTerm, transientTerm,
solveMUMPSLinearPDE, faceEval, cellEval, permfieldlogrndg, permfieldlogrnde,
JFVM_test, solveExplicitPDE, reshapeCell,
cellVolume, reshapeInternalCell, internalCells, domainInt, convectionTvdRHS,
linearMean!, update!, solveLinearPDE!
# visualizeCells, visualizeCellVectors, plot, imshow, xlabel, ylabel, figure, legend, pcolor, contour, colorbar,
include("fvmToolTypes.jl")
include("meshstructure.jl")
include("boundarycondition.jl")
include("domainVariables.jl")
include("diffusionterms.jl")
include("transientTerms.jl")
include("domainOperators.jl")
include("convectionTerms.jl")
include("averagingTerms.jl")
include("calculusTerms.jl")
include("sourceTerms.jl")
include("solveVisualizePDE.jl")
include("JFVMtools.jl")
include("jfvm_test.jl")
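# Example (hypothetical usage sketch of the exported API; comments only, not executed):
# m = createMesh1D(10, 1.0)        # 10 cells over a 1.0-unit domain
# bc = createBC(m)                 # default boundary conditions
# D = createFaceVariable(m, 1.0)   # uniform diffusion coefficient on the faces
# M_diff = diffusionTerm(D)        # sparse matrix for the diffusion term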
end # module
|
{"hexsha": "d255c54a2d03af51f7b92e325b1db1f1c7ce17b4", "size": 2027, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/JFVM.jl", "max_stars_repo_name": "simulkade/JFVM", "max_stars_repo_head_hexsha": "3e6bf931a430c4e4ccb7ca7824a947c6b09f8fe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2015-11-15T00:56:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T00:03:15.000Z", "max_issues_repo_path": "src/JFVM.jl", "max_issues_repo_name": "simulkade/JFVM.jl", "max_issues_repo_head_hexsha": "3e6bf931a430c4e4ccb7ca7824a947c6b09f8fe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-05-22T11:09:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-12T08:21:29.000Z", "max_forks_repo_path": "src/JFVM.jl", "max_forks_repo_name": "simulkade/JFVM", "max_forks_repo_head_hexsha": "3e6bf931a430c4e4ccb7ca7824a947c6b09f8fe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-05-11T00:37:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-06T14:17:30.000Z", "avg_line_length": 34.3559322034, "max_line_length": 119, "alphanum_fraction": 0.7523433646, "num_tokens": 566}
|
[STATEMENT]
lemma lset_P_V [simp]: "lset P \<subseteq> V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lset P \<subseteq> V
[PROOF STEP]
by (simp add: valid_path_in_V)
|
{"llama_tokens": 77, "file": "Parity_Game_ParityGame", "length": 1}
|
module Searching
export bfs_parents, bfs_tree, dfs_parents, dfs_tree
using ...CSetDataStructures, ..BasicGraphs
"""
tree(parents)
Convert a parents array into a directed graph.
"""
function tree(parents::AbstractVector{Int})
  n = length(parents)
t = Graph(n)
for (v, u) in enumerate(parents)
if u > 0 && u != v
add_edge!(t, u, v)
end
end
return t
end
"""
bfs_parents(g, s[; dir=:out])
Perform a breadth-first search of graph `g` starting from vertex `s`.
Return a vector of parent vertices indexed by vertex. If `dir` is specified,
use the corresponding edge direction (`:in` and `:out` are acceptable values).
### Performance
This implementation is designed to perform well on large graphs. There are
implementations which are marginally faster in practice for smaller graphs,
but the performance improvements using this implementation on large graphs
can be significant.
"""
bfs_parents(g::ACSet, s::Int; dir = :out) =
(dir == :out) ? _bfs_parents(g, s, outneighbors) : _bfs_parents(g, s, inneighbors)
function _bfs_parents(g::ACSet, source, neighborfn::Function)
n = nv(g)
visited = falses(n)
parents = zeros(Int, nv(g))
cur_level = Int[]
sizehint!(cur_level, n)
next_level = Int[]
sizehint!(next_level, n)
@inbounds for s in source
visited[s] = true
push!(cur_level, s)
parents[s] = s
end
while !isempty(cur_level)
@inbounds for v in cur_level
@inbounds @simd for i in neighborfn(g, v)
if !visited[i]
push!(next_level, i)
parents[i] = v
visited[i] = true
end
end
end
empty!(cur_level)
cur_level, next_level = next_level, cur_level
sort!(cur_level)
end
return parents
end
"""
bfs_tree(g, s[; dir=:out])
Provide a breadth-first traversal of the graph `g` starting with source vertex `s`,
and return a directed acyclic graph of vertices in the order they were discovered.
If `dir` is specified, use the corresponding edge direction (`:in` and `:out` are
acceptable values).
"""
bfs_tree(g::ACSet, s::Integer; dir = :out) = tree(bfs_parents(g, s; dir = dir))
"""
dfs_parents(g, s[; dir=:out])
Perform a depth-first search of graph `g` starting from vertex `s`.
Return a vector of parent vertices indexed by vertex. If `dir` is specified,
use the corresponding edge direction (`:in` and `:out` are acceptable values).
### Implementation Notes
This version of DFS is iterative.
"""
dfs_parents(g::ACSet, s::Integer; dir=:out) =
(dir == :out) ? _dfs_parents(g, s, outneighbors) : _dfs_parents(g, s, inneighbors)
function _dfs_parents(g::ACSet, s::Int, neighborfn::Function)
parents = zeros(Int, nv(g))
seen = zeros(Bool, nv(g))
S = [s]
seen[s] = true
parents[s] = s
while !isempty(S)
v = S[end]
u = 0
for n in neighborfn(g, v)
if !seen[n]
u = n
break
end
end
if u == 0
pop!(S)
else
seen[u] = true
push!(S, u)
parents[u] = v
end
end
return parents
end
"""
dfs_tree(g, s)
Return a directed acyclic graph based on
depth-first traversal of the graph `g` starting with source vertex `s`.
"""
dfs_tree(g::ACSet, s::Integer; dir=:out) = tree(dfs_parents(g, s; dir=dir))
end
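A minimal usage sketch for the traversal helpers above (not part of the original file; it assumes Catlab is installed and that `Graph` and `add_edge!` from BasicGraphs, plus the exported `bfs_parents`, `bfs_tree`, and `dfs_parents`, are brought into scope by `using Catlab.Graphs`):
using Catlab.Graphs   # assumption: re-exports BasicGraphs and this Searching module

g = Graph(4)                 # four vertices, no edges yet
add_edge!(g, 1, 2)
add_edge!(g, 2, 3)
add_edge!(g, 3, 4)

bfs_parents(g, 1)            # -> [1, 1, 2, 3]; entry v holds the BFS parent of vertex v
bfs_tree(g, 1)               # directed tree with edges 1->2, 2->3, 3->4
dfs_parents(g, 1)            # identical here, since a path graph has no branching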
|
{"hexsha": "461d1d6b29523dc479300e0d6afe246ee8d0aa5e", "size": 3486, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/graphs/Searching.jl", "max_stars_repo_name": "slwu89/Catlab.jl", "max_stars_repo_head_hexsha": "d197b0c12c65fe72198baf9c990e6a4e1f3aebe0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2017-06-10T09:57:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-27T06:22:19.000Z", "max_issues_repo_path": "src/graphs/Searching.jl", "max_issues_repo_name": "slwu89/Catlab.jl", "max_issues_repo_head_hexsha": "d197b0c12c65fe72198baf9c990e6a4e1f3aebe0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2017-06-23T01:13:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-26T22:17:18.000Z", "max_forks_repo_path": "src/graphs/Searching.jl", "max_forks_repo_name": "epatters/CompCat", "max_forks_repo_head_hexsha": "d197b0c12c65fe72198baf9c990e6a4e1f3aebe0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6666666667, "max_line_length": 86, "alphanum_fraction": 0.6156052783, "num_tokens": 921}
|
@testset "reshape" begin
test_rrule(reshape, rand(4, 5), (2, 10) ⊢ nothing)
test_rrule(reshape, rand(4, 5), 2 ⊢ nothing, 10 ⊢ nothing)
end
@testset "hcat" begin
A = randn(3, 2)
B = randn(3)
C = randn(3, 3)
test_rrule(hcat, A, B, C; check_inferred=false)
end
@testset "reduce hcat" begin
A = randn(3, 2)
B = randn(3, 1)
C = randn(3, 3)
test_rrule(reduce, hcat ⊢ nothing, [A, B, C])
end
@testset "vcat" begin
A = randn(2, 4)
B = randn(1, 4)
C = randn(3, 4)
test_rrule(vcat, A, B, C; check_inferred=false)
end
@testset "reduce vcat" begin
A = randn(2, 4)
B = randn(1, 4)
C = randn(3, 4)
test_rrule(reduce, vcat ⊢ nothing, [A, B, C])
end
@testset "fill" begin
test_rrule(fill, 44.0, 4 ⊢ nothing; check_inferred=false)
test_rrule(fill, 2.0, (3, 3, 3) ⊢ nothing)
end
|
{"hexsha": "26d000bd3016fc9a1fcedb8c69d8ffb175c99993", "size": 846, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/rulesets/Base/array.jl", "max_stars_repo_name": "DhairyaLGandhi/ChainRules.jl", "max_stars_repo_head_hexsha": "76ef95c326e773c6c7140fb56eb2fd16a2af468b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/rulesets/Base/array.jl", "max_issues_repo_name": "DhairyaLGandhi/ChainRules.jl", "max_issues_repo_head_hexsha": "76ef95c326e773c6c7140fb56eb2fd16a2af468b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/rulesets/Base/array.jl", "max_forks_repo_name": "DhairyaLGandhi/ChainRules.jl", "max_forks_repo_head_hexsha": "76ef95c326e773c6c7140fb56eb2fd16a2af468b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2631578947, "max_line_length": 62, "alphanum_fraction": 0.5898345154, "num_tokens": 361}
|
module sqlite
use iso_c_binding, only: c_int
implicit none
private c_int
include "constants.f90"
interface
function sqlite3_bind_text(stmt, index, text, bytes, destructor) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: stmt, text, destructor
integer(c_int), value :: index, bytes
integer(c_int) sqlite3_bind_text
end function sqlite3_bind_text
function sqlite3_close(db) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: db
integer(c_int) sqlite3_close
end function sqlite3_close
function sqlite3_column_double(stmt, col) bind(c)
use iso_c_binding, only: c_double, c_int, c_ptr
type(c_ptr), value :: stmt
integer(c_int), value :: col
real(c_double) sqlite3_column_double
end function sqlite3_column_double
function sqlite3_column_int(stmt, col) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: stmt
integer(c_int), value :: col
integer(c_int) sqlite3_column_int
end function sqlite3_column_int
function sqlite3_column_text(stmt, col) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: stmt
integer(c_int), value :: col
type(c_ptr) sqlite3_column_text
end function sqlite3_column_text
function sqlite3_errmsg(db) bind(c)
use iso_c_binding, only: c_ptr
type(c_ptr), value :: db
type(c_ptr) sqlite3_errmsg
end function sqlite3_errmsg
function sqlite3_finalize(stmt) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: stmt
integer(c_int) sqlite3_finalize
end function sqlite3_finalize
function sqlite3_open(filename, db) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: filename, db
integer(c_int) sqlite3_open
end function sqlite3_open
function sqlite3_prepare_v2(db, sql, bytes, stmt, tail) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: db, sql, stmt, tail
integer(c_int), value :: bytes
integer(c_int) sqlite3_prepare_v2
end function sqlite3_prepare_v2
function sqlite3_step(stmt) bind(c)
use iso_c_binding, only: c_int, c_ptr
type(c_ptr), value :: stmt
integer(c_int) sqlite3_step
end function sqlite3_step
end interface
end module sqlite
|
{"hexsha": "ea301a28f5e87e3a50b4662c3ba4f8c148fd9c0b", "size": 2442, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "sources/sqlite.f90", "max_stars_repo_name": "dram/fortran-sqlite", "max_stars_repo_head_hexsha": "9912de17549db247cbcf9768a462e52ed7907af0", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sources/sqlite.f90", "max_issues_repo_name": "dram/fortran-sqlite", "max_issues_repo_head_hexsha": "9912de17549db247cbcf9768a462e52ed7907af0", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sources/sqlite.f90", "max_forks_repo_name": "dram/fortran-sqlite", "max_forks_repo_head_hexsha": "9912de17549db247cbcf9768a462e52ed7907af0", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7142857143, "max_line_length": 77, "alphanum_fraction": 0.6781326781, "num_tokens": 635}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 04 10:35:33 2018
@author: ldh
"""
# utils.py
import datetime as dt
import numpy as np
import pandas as pd
def array_decorator(func):
    # Vectorize func elementwise over array-like inputs (one input, one output).
    return np.frompyfunc(func, 1, 1)
@array_decorator
def time_matlab2py(date_time_ordinal):
'''
    Convert the matlab ordinal format of a time or datetime to a python datetime.
Accurate to 1 second.
Parameters
-----------
date_time_ordinal
        the matlab ordinal representation of the time.
Returns
--------
python normal format of time like YYYY-mm-dd HH:MM:SS
Notes
-------
0 736819.593056 600340 36.4200 2017-05-04 14:14:00
1 736819.593750 600340 36.3999 2017-05-04 14:15:00
2 736819.594444 600340 36.3500 2017-05-04 14:16:00
3 736819.595139 600340 36.3100 2017-05-04 14:17:00
4 736819.595833 600340 36.2299 2017-05-04 14:18:00
5 736819.596528 600340 36.1899 2017-05-04 14:19:00
'''
    int_part = int(date_time_ordinal)                 # whole days in matlab's datenum convention
    float_part = date_time_ordinal - int_part         # fraction of a day
    seconds = int(float_part * 24 * 60 * 60) + 1      # +1 compensates for float truncation
    date_part = dt.datetime.fromordinal(int_part - 366)  # shift by 366 days: matlab counts from year 0000
    time_part = dt.timedelta(seconds=seconds)
    date_time = date_part + time_part
    # DateOffset with singular keywords replaces the fields, zeroing seconds/microseconds
    time_adjustor = pd.offsets.DateOffset(second=0, microsecond=0)
    date_time = date_time + time_adjustor
    return date_time
@array_decorator
def time_py2matlab(date_time):
'''
    Convert a normal datetime string to matlab ordinal time.
Accurate to 1 second.
Parameters
-----------
date_time
the normal datetime like '2017-05-04 14:15:36'
Returns
---------
matlab ordinal format of time.
'''
    try:
        date_time_obj = dt.datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')
    except ValueError:
        # fall back to the compact date form, e.g. '20170504 14:15:36'
        date_time_obj = dt.datetime.strptime(date_time, '%Y%m%d %H:%M:%S')
int_part = date_time_obj.date().toordinal() + 366
time_part = date_time_obj.time()
float_part = time_part.hour * 60.0 * 60.0 + time_part.minute * 60.0 + time_part.second
float_part = float_part / 86400.0
return int_part + float_part
if __name__ == '__main__':
a = time_py2matlab(pd.Series(['2017-05-04 14:15:36','2017-05-04 14:18:00']))
b = time_py2matlab(['2017-05-04 14:15:36','2017-05-04 14:18:00'])
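A small round-trip check could be appended to the `__main__` block above to exercise `time_matlab2py` as well; this is a sketch rather than part of the original script, and it simply reuses the `b` array computed above.
    # Sketch: convert the MATLAB ordinals back to datetimes. Note that the
    # DateOffset(second=0, microsecond=0) step in time_matlab2py replaces the
    # seconds field, so the recovered values come back at minute resolution.
    c = time_matlab2py(b)
    print(list(zip(b, c)))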
|
{"hexsha": "a83807024b1c264ffd6da45d52cca5350f23f68a", "size": 2344, "ext": "py", "lang": "Python", "max_stars_repo_path": "matlab_convert/utils.py", "max_stars_repo_name": "orxg/helper", "max_stars_repo_head_hexsha": "6cbad158213028e64407c8ef0fd4e66a9aff9917", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matlab_convert/utils.py", "max_issues_repo_name": "orxg/helper", "max_issues_repo_head_hexsha": "6cbad158213028e64407c8ef0fd4e66a9aff9917", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matlab_convert/utils.py", "max_forks_repo_name": "orxg/helper", "max_forks_repo_head_hexsha": "6cbad158213028e64407c8ef0fd4e66a9aff9917", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5764705882, "max_line_length": 90, "alphanum_fraction": 0.6296928328, "include": true, "reason": "import numpy", "num_tokens": 768}
|
\openepigraph{Science knows no country, because knowledge belongs to humanity, and is the torch which illuminates the world.}{---Louis Pasteur}
\openepigraph{We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.}{---Carl Sagan}
Many people believe that women tend to talk more than men---with some even suggesting that this difference has a biological basis. One widely cited estimate is that women speak 20,000 words per day on average and men speak only 7,000. This claim seems plausible, but is it true? A group of psychologists led by Matthias Mehl decided to find out. They checked to see if anyone had actually tried to count the daily number of words spoken by women and men. No one had. So these researchers conducted a study in which female and male college students (369 in all) wore audio recorders while they went about their lives. The result? The women spoke an average of 16,215 words per day and the men spoke an average of 15,669---an extremely small difference that could easily be explained by chance. In an article in the journal Science, these researchers summed up their findings as follows: "We therefore conclude, on the basis of available empirical evidence, that the widespread and highly publicized stereotype about female talkativeness is unfounded" \citep{mehl_are_2007}.
Science is a process for asking and answering questions about the world around us. It is a powerful tool for changing our own minds about how we think the world works. For example, perhaps you too believed the stereotype that women are more talkative than men. If so, science has given you the opportunity to change your mind. The evidence collected so far shows that there are virtually no differences in the number of words that women and men speak per day. If you choose to think like a scientist, then you ought to change your belief and strongly consider the possibility that the stereotype simply is not true. You might also read the published journal article from the research described above to critically evaluate how the research was conducted, and take a closer look at the patterns in the data. After all, if you want to update your beliefs on the basis of evidence, then you ought to make sure you can trust the evidence.
This course is an introduction to using the scientific process to ask and answer questions relevant to psychologists. We will talk about how to critically evaluate scientific findings so that we can learn from the existing scientific literature. And we will talk about how to collect data to ask questions, and how to analyze data to answer them, so that we can contribute knowledge to the literature.
\section{Understanding Science}
\marginnote{\allcaps{Learning Objectives}
\begin{enumerate}
\item Define science.
\item Describe the three fundamental features of science.
\item Explain why psychology is a science.
\item Define pseudoscience and give some examples.
\end{enumerate}
}
Psychology is usually defined as the scientific study of human behavior and mental processes, and this example illustrates the features that make it scientific. In this chapter, we look closely at these features, introduce a model of scientific research in psychology, and address several basic questions that students often have about it. Who conducts scientific research in psychology? Why? Does scientific psychology tell us anything that common sense does not? Why should I bother to learn the scientific approach---especially if I want to be a clinical psychologist and not a researcher? These are extremely good questions, and answering them now will provide a solid foundation for learning the rest of the material in your course.
\subsection{What Is Science?}
Some people are surprised to learn that psychology is a science. They generally agree that astronomy, biology, and chemistry are sciences but wonder what psychology has in common with these other fields. Before answering this question, however, it is worth reflecting on what astronomy, biology, and chemistry have in common with each other. It is clearly not their subject matter. Astronomers study celestial bodies, biologists study living organisms, and chemists study matter and its properties. It is also not the equipment and techniques that they use. Few biologists would know what to do with a radio telescope, for example, and few chemists would know how to track a moose population in the wild. For these and other reasons, philosophers and scientists who have thought deeply about this question have concluded that what the sciences have in common is a general approach to understanding the natural world. Psychology is a science because it takes this same general approach to understanding one aspect of the natural world: human behavior.
\subsection{Features of Science}
The general scientific approach has three fundamental features \citep{stanovich_how_2013}. The first is systematic empiricism. Empiricism refers to learning based on observation, and scientists learn about the natural world systematically, by carefully planning, making, recording, and analyzing observations of it. As we will see, logical reasoning and even creativity play important roles in science too, but scientists are unique in their insistence on checking their ideas about the way the world is against their systematic observations. Notice, for example, that Mehl and his colleagues did not trust other people's stereotypes or even their own informal observations. Instead, they systematically recorded, counted, and compared the number of words spoken by a large sample of women and men. Furthermore, when their systematic observations turned out to conflict with people's stereotypes, they trusted their systematic observations.
The second feature of the scientific approach---which follows in a straightforward way from the first---is that it is concerned with empirical questions. Empirical questions are questions that can be answered by observations. These are questions about the way the world actually is and, therefore, can be answered by systematically observing it. The question of whether women talk more than men is empirical in this way. Either women really do talk more than men or they do not, and this can be determined by systematically observing how much women and men actually talk. Having said this, there are many interesting and important questions that are not empirically testable and that science is not in a position to answer. Among these are questions about values---whether things are good or bad, just or unjust, or beautiful or ugly, and how the world ought to be. So, although the question of whether a stereotype is accurate or inaccurate is an empirically testable one that science can answer, the question---or, rather, the value judgment---of whether it is wrong for people to hold inaccurate stereotypes is not. Similarly, the question of whether criminal behavior has a genetic basis is an empirical question, but the question of what actions ought to be considered illegal is not. It is especially important for researchers in psychology to be mindful of this distinction.
The third feature of science is that it creates public knowledge. After asking their empirical questions, making their systematic observations, and drawing their conclusions, scientists publish their work. This usually means writing an article for publication in a professional journal, where they put their research question in the context of previous research, describe in detail the methods they used to answer their question, and clearly present their results and conclusions. Increasingly, scientists are opting to publish their work in open access journals so the articles are freely available to all -- scientists and nonscientists alike. This important choice allows publicly-funded research to create knowledge that is truly public.
Publication is an essential feature of science for two reasons. One is that science is a social process---a large-scale collaboration among many researchers distributed across both time and space. Our current scientific knowledge of most topics is based on many different studies conducted by many different researchers who have shared their work publicly over many years. The second is that publication allows science to be self-correcting. Individual scientists understand that, despite their best efforts, their methods can be flawed and their conclusions incorrect. Publication allows others in the scientific community to detect and correct these errors so that, over time, scientific knowledge increasingly reflects the way the world actually is.
A good example of the self-correcting nature of science is the "Many Labs Replication Project" -- a large and coordinated effort by prominent psychological scientists around the world to attempt to replicate findings from 13 classic and contemporary studies (Klein et al., 2013). One of the findings selected by these researchers for replication was the fascinating effect, first reported by Simone Schnall and her colleagues at the University of Plymouth, that washing one's hands leads people to view moral transgressions---ranging from keeping money inside a found wallet, to using a kitten for sexual arousal---as less wrong \citep{schnall_clean_2008}. If reliable, this effect might help explain why so many religious traditions associate physical cleanliness with moral purity. However, despite using the same materials and nearly identical procedures with a much larger sample, the "Many Labs" researchers were unable to replicate the original finding \citep{johnson_does_2014}, suggesting that the original finding may have stemmed from the relatively small sample size (which can lead to unreliable results) used in the original study. To be clear, at this stage we are still unable to definitively conclude that the handwashing effect does not exist; however, the effort that has gone into testing its reliability certainly demonstrates the collaborative and cautious nature of scientific progress.
\subsection{Science Versus Pseudoscience}
\marginnote{\allcaps{Skeptic's Dictionary}
The Skeptic's Dictionary is an excellent source for information on pseudoscience \url{http://www.skepdic.com}.
Among the pseudoscientific beliefs and practices you can learn about are the following:
\begin{itemize}
\item Cryptozoology. The study of "hidden" creatures like Bigfoot, the Loch Ness monster, and the chupacabra.
\item Pseudoscientific psychotherapies. Past-life regression, rebirthing therapy, and bioscream therapy, among others.
\item Homeopathy. The treatment of medical conditions using natural substances that have been diluted sometimes to the point of no longer being present.
\item Pyramidology. Odd theories about the origin and function of the Egyptian pyramids (e.g., that they were built by extraterrestrials) and the idea that pyramids in general have healing and other special powers.
\end{itemize}
Another excellent online resource is Neurobonkers \url{http://neurobonkers.com}, which regularly posts articles that investigate claims that pertain specifically to psychological science.
}
Pseudoscience refers to activities and beliefs that are claimed to be scientific by their proponents---and may appear to be scientific at first glance---but are not. Consider the theory of biorhythms (not to be confused with sleep cycles or other biological cycles that do have a scientific basis). The idea is that people's physical, intellectual, and emotional abilities run in cycles that begin when they are born and continue until they die. Allegedly, the physical cycle has a period of 23 days, the intellectual cycle a period of 33 days, and the emotional cycle a period of 28 days. So, for example, if you had the option of when to schedule an exam, you would want to schedule it for a time when your intellectual cycle will be at a high point. The theory of biorhythms has been around for more than 100 years, and you can find numerous popular books and websites about biorhythms, often containing impressive and scientific-sounding terms like sinusoidal wave and bioelectricity. The problem with biorhythms, however, is that there is simply no evidence for them, so there is no good reason to think they exist \citep{hines_comprehensive_1998}.
A set of beliefs or activities can be said to be pseudoscientific if (a) its adherents claim or imply that it is scientific, but (b) it lacks one or more of the three features of science. For instance, it might lack systematic empiricism. Either there is no relevant scientific research or, as in the case of biorhythms, there is relevant scientific research but it is ignored. It might also lack public knowledge. People who promote the beliefs or activities might claim to have conducted scientific research but never publish that research in a way that allows others to evaluate it.
A set of beliefs and activities might also be pseudoscientific because it does not address empirical questions. The philosopher Karl Popper was especially concerned with this idea \citep{popper_conjectures_2014}. He argued more specifically that any scientific claim must be expressed in such a way that there are observations that would---if they were made---count as evidence against the claim. In other words, scientific claims must be \emph{falsifiable}. The claim that women talk more than men is falsifiable because systematic observations could reveal either that they do talk more than men or that they do not. As an example of an unfalsifiable claim, consider that many people who believe in extrasensory perception (ESP) and other psychic powers claim that such powers can disappear when they are observed too closely. This makes it so that no possible observation would count as evidence against ESP. If a careful test of a self-proclaimed psychic showed that she predicted the future at better-than-chance levels, this would be consistent with the claim that she had psychic powers. But if she failed to predict the future at better-than-chance levels, this would also be consistent with the claim because her powers can supposedly disappear when they are observed too closely.
Why should we concern ourselves with pseudoscience? There are at least three reasons. One is that learning about pseudoscience helps bring the fundamental features of science---and their importance---into sharper focus. A second is that biorhythms, psychic powers, astrology, and many other pseudoscientific beliefs are widely held and are promoted on the Internet, on television, and in books and magazines. Far from being harmless, the promotion of these beliefs often exacts a great personal toll as, for example, believers in pseudoscience opt for "treatments" such as homeopathy for serious medical conditions instead of empirically supported treatments. Learning what makes them pseudoscientific can help us to identify and evaluate such beliefs and practices when we encounter them. A third reason is that many pseudosciences purport to explain some aspect of human behavior and mental processes, including biorhythms, astrology, graphology (handwriting analysis), and magnet therapy for pain control. It is important for students of psychology to distinguish their own field clearly from this "pseudopsychology."
\subsection{Key Takeaways}
\begin{fullwidth}
\begin{itemize}
\item Science is a general way of understanding the natural world. Its three fundamental features are systematic empiricism, empirical questions, and public knowledge.
\item Psychology is a science because it takes the scientific approach to understanding human behavior.
\item Pseudoscience refers to beliefs and activities that are claimed to be scientific but lack one or more
of the three features of science. It is important to distinguish the scientific approach to understanding human behavior from the many pseudoscientific approaches.
\end{itemize}
\end{fullwidth}
\subsection{Exercises}
\begin{fullwidth}
\begin{enumerate}
\item Practice: List three empirical questions about human behavior. List three nonempirical questions about human behavior.
\item Discussion: Consider the following psychological claim. "People's choice of spouse is strongly influenced by their perception of their own parents. Some choose a spouse who is similar in some way to one of their parents. Others choose a spouse who is different from one of their parents." Is this claim falsifiable? Why or why not?
\item Discussion: People sometimes suggest that psychology cannot be a science because either (a) human behavior cannot be predicted with perfect accuracy or (b) much of its subject matter (e.g., thoughts and feelings) cannot be observed directly. Do you agree or disagree with each of these ideas? Why?
\item Watch the following video by PHD Comics for an overview of open access publishing and why it matters. \url{https://www.youtube.com/watch?v=L5rVH1KGBCY}
\end{enumerate}
\end{fullwidth}
\newpage
\section{Scientific Research in Psychology}
\subsection{A Model of Scientific Research in Psychology}
\begin{marginfigure}[0in]
\includegraphics[width=\linewidth]{figures/C1Figure1.pdf}
\caption{A simple model of scientific research in Psychology}
\label{fig:Theresearchcycle}
\end{marginfigure}
Figure \ref{fig:Theresearchcycle} presents a more specific model of scientific research in psychology. The researcher (who more often than not is really a small group of researchers) formulates a research question, conducts a study designed to answer the question, analyzes the resulting data, draws conclusions about the answer to the question, and publishes the results so that they become part of the research literature. Because the research literature is one of the primary sources of new research questions, this process can be thought of as a cycle. New research leads to new questions, which lead to new research, and so on. Figure \ref{fig:Theresearchcycle} also indicates that research questions can originate outside of this cycle either with informal observations or with practical problems that need to be solved. But even in these cases, the researcher would start by checking the research literature to see if the question had already been answered and to refine it based on what previous research had already found.
The research by Mehl and his colleagues is described nicely by this model. Their question---whether women are more talkative than men---was suggested to them both by people's stereotypes and by published claims about the relative talkativeness of women and men. When they checked the research literature, however, they found that this question had not been adequately addressed in scientific studies. They then conducted a careful empirical study, analyzed the results (finding very little difference between women and men), and published their work so that it became part of the research literature. The publication of their article is not the end of the story, however, because their work suggests many new questions (about the reliability of the result, about potential cultural differences, etc.) that will likely be taken up by them and by other researchers inspired by their work.
As another example, consider that as cell phones became more widespread during the 1990s, people began to wonder whether, and to what extent, cell phone use had a negative effect on driving. Many psychologists decided to tackle this question scientifically \citep{collet_phoning_2010}. It was clear from previously published research that engaging in a simple verbal task impairs performance on a perceptual or motor task carried out at the same time, but no one had studied the effect specifically of cell phone use on driving. Under carefully controlled conditions, these researchers compared people's driving performance while using a cell phone with their performance while not using a cell phone, both in the lab and on the road. They found that people's ability to detect road hazards, reaction time, and control of the vehicle were all impaired by cell phone use. Each new study was published and became part of the growing research literature on this topic.
\subsection{Who Conducts Scientific Research in Psychology?}
\marginnote{\allcaps{Scientific Psychology Blogs}
A fun and easy way to follow current scientific research in psychology is to read any of the many excellent blogs devoted to summarizing and commenting on new findings. Among them are the following:
\begin{itemize}
\item Brain Blogger \url{http://brainblogger.com/}
\item Mind Hacks \url{http://mindhacks.com/}
\item Research Digest \url{http://digest.bps.org.uk/}
\item Talk Psych \url{http://www.talkpsych.com/}
\item PsyBlog \url{http://www.spring.org.uk}
\item Social Psychology Eye \url{http://socialpsychologyeye.wordpress.com}
\item We're Only Human \url{http://www.psychologicalscience.org/onlyhuman}
\item You can also browse to \url{http://www.researchblogging.org}
\end{itemize}
}
Scientific research in psychology is generally conducted by people with doctoral degrees (usually the doctor of philosophy, PhD) and master's degrees in psychology and related fields, often supported by research assistants with bachelor's degrees or other relevant training. Some of them work for government agencies (e.g., the National Institutes of Health), national associations (e.g., the American Psychological Association), nonprofit organizations (e.g., the Canadian Mental Health Association), or in the private sector (e.g., in product development). However, the majority of them are college and university faculty, who often collaborate with their graduate and undergraduate students. Although some researchers are trained and licensed as clinicians---especially those who conduct research in clinical psychology---the majority are not. Instead, they have expertise in one or more of the many other subfields of psychology: behavioral neuroscience, cognitive psychology, developmental psychology, personality psychology, social psychology, and so on. Doctoral-level researchers (post-doctoral fellows or research scientists) might be employed to conduct research full-time or, like many college and university faculty members, to conduct research in addition to teaching classes and serving their institution and community in other ways.
Of course, people also conduct research in psychology because they enjoy the intellectual and technical challenges involved and the satisfaction of contributing to scientific knowledge of human behavior. You might find that you enjoy the process too. If so, your college or university might offer opportunities to get involved in ongoing research as either a research assistant or a participant.
\marginnote{Many undergraduates at Brooklyn College volunteer in research labs in the Psychology Department, and can get course credit for their work by taking Independent Study or Research courses.}
Of course, you might find that you do not enjoy the process of conducting scientific research in psychology. But at least you will have a better understanding of where scientific knowledge in psychology comes from, an appreciation of its strengths and limitations, and an awareness of how it can be applied to solve practical problems in psychology and everyday life.
\subsection{The Broader Purposes of Scientific Research in Psychology}
People have always been curious about the natural world, including themselves and their behavior (in fact, this is probably why you are studying psychology in the first place). Science grew out of this natural curiosity and has become the best way to achieve detailed and accurate knowledge. Keep in mind that most of the phenomena and theories that fill psychology textbooks are the products of scientific research. In a typical introductory psychology textbook, for example, one can learn about specific cortical areas for language and perception, principles of classical and operant conditioning, biases in reasoning and judgment, and people's surprising tendency to obey those in positions of authority. And scientific research continues because what we know right now only scratches the surface of what we can know.
Scientific research is often classified as being either \emph{basic} or \emph{applied}. Basic research in psychology is conducted primarily for the sake of achieving a more detailed and accurate understanding of human behavior, without necessarily trying to address any particular practical problem. The research of Mehl and his colleagues falls into this category. Applied research is conducted primarily to address some practical problem. Research on the effects of cell phone use on driving, for example, was prompted by safety concerns and has led to the enactment of laws to limit this practice. Although the distinction between basic and applied research is convenient, it is not always clear-cut. For example, basic research on sex differences in talkativeness could eventually have an effect on how marriage therapy is practiced, and applied research on the effect of cell phone use on driving could produce new insights into basic processes of perception, attention, and action.
\subsection{Key Takeaways}
\begin{fullwidth}
\begin{itemize}
\item Research in psychology can be described by a simple cyclical model. A research question based on the research literature leads to an empirical study, the results of which are published and become part of the research literature.
\item Scientific research in psychology is conducted mainly by people with doctoral degrees in psychology and related fields, most of whom are college and university faculty members. They do so for professional and for personal reasons, as well as to contribute to scientific knowledge about human behavior.
\item Basic research is conducted to learn about human behavior for its own sake, and applied research is conducted to solve some practical problem. Both are valuable, and the distinction between the two is not always clear-cut.
\end{itemize}
\end{fullwidth}
\subsection{Exercises}
\begin{fullwidth}
\begin{enumerate}
\item Practice: Find a description of an empirical study in a professional journal or in one of the scientific psychology blogs. Then write a brief description of the research in terms of the cyclical model presented here. One or two sentences for each part of the cycle should suffice.
\item Practice: Based on your own experience or on things you have already learned about psychology, list three basic research questions and three applied research questions of interest to you.
\item Watch the following TED Ed video \url{https://youtu.be/GUpd2HJHUt8}, in which David H. Schwartz provides an introduction to two types of empirical studies along with some methods that scientists use to increase the reliability of their results.
\end{enumerate}
\end{fullwidth}
\newpage
\section{Science and Common Sense}
\marginnote{
\allcaps{Learning Objectives}
\begin{enumerate}
\item Explain the limitations of common sense when it comes to achieving a detailed and accurate understanding of human behavior.
\item Give several examples of common sense or folk psychology that are incorrect.
\item Define skepticism and its role in scientific psychology.
\end{enumerate}
}
\subsection{Can We Rely on Common Sense?}
Some people wonder whether the scientific approach to psychology is necessary. Can we not reach the same conclusions based on common sense or intuition? Certainly we all have intuitive beliefs about people's behavior, thoughts, and feelings---and these beliefs are collectively referred to as folk psychology. Although much of our folk psychology is probably reasonably accurate, it is clear that much of it is not. For example, most people believe that anger can be relieved by "letting it out"---perhaps by punching something or screaming loudly. Scientific research, however, has shown that this approach tends to leave people feeling more angry, not less \citep{bushman_does_2002}. Likewise, most people believe that no one would confess to a crime that he or she had not committed, unless perhaps that person was being physically tortured. But again, extensive empirical research has shown that false confessions are surprisingly common and occur for a variety of reasons \citep{kassin_psychology_2004}. There are many more examples where our own intuitions about ourselves and others are incorrect.
\subsection{How Could We Be So Wrong?}
\marginnote{
\allcaps{Common Myths}
In 50 Great Myths of Popular Psychology, psychologist Scott Lilienfeld and colleagues \citep{lilienfeld_50_2011} discuss several widely held commonsense beliefs about human behavior that \emph{scientific research has shown to be incorrect}. Here is a short list:
\begin{itemize}
\item People use only 10\% of their brain power.
\item Most people experience a midlife crisis in their 40's or 50's.
\item Students learn best when teaching styles are matched to their learning styles.
\item Low self-esteem is a major cause of psychological problems.
\item Psychiatric admissions and crimes increase during full moons.
\end{itemize}
}
How can so many of our intuitive beliefs about human behavior be so wrong? Notice that this is an empirical question, and it just so happens that psychologists have conducted scientific research on it and identified many contributing factors \citep{gilovich_how_2008}. One is that forming detailed and accurate beliefs requires powers of observation, memory, and analysis to an extent that we do not naturally possess. It would be nearly impossible to count the number of words spoken by the women and men we happen to encounter, estimate the number of words they spoke per day, average these numbers for both groups, and compare them---all in our heads. This is why we tend to rely on mental shortcuts (what psychologists refer to as heuristics) in forming and maintaining our beliefs. For example, if a belief is widely shared---especially if it is endorsed by "experts"---and it makes intuitive sense, we tend to assume it is true. This is compounded by the fact that we then tend to focus on cases that confirm our intuitive beliefs and not on cases that disconfirm them. This is called \emph{confirmation bias}. For example, once we begin to believe that women are more talkative than men, we tend to notice and remember talkative women and silent men but ignore or forget silent women and talkative men. We also hold incorrect beliefs in part because it would be nice if they were true. For example, many people believe that calorie-reducing diets are an effective long-term treatment for obesity, yet a thorough review of the scientific evidence has shown that they are not \citep{mann_medicares_2007}. People may continue to believe in the effectiveness of dieting in part because it gives them hope for losing weight if they are obese or makes them feel good about their own "self-control" if they are not.
\marginnote{\allcaps{Cognitive Biases}
Psychologists have identified numerous biases that influence how people think, reason, and make judgments about the world around them. Wikipedia maintains a long list of these biases that you can check out here: \url{https://en.wikipedia.org/wiki/List_of_cognitive_biases}
}
Scientists---especially psychologists---understand that they are just as susceptible as anyone else to intuitive but incorrect beliefs. This is why they cultivate an attitude of \emph{skepticism}. Being skeptical does not mean being cynical or distrustful, nor does it mean questioning every belief or claim one comes across (which would be impossible anyway). Instead, it means pausing to consider alternatives and to search for evidence---especially systematically collected empirical evidence---when there is enough at stake to justify doing so. For example, imagine that you read a magazine article claiming that giving children a weekly allowance is a good way to help them develop financial responsibility. This is an interesting and potentially important claim (especially if you have children of your own). Taking an attitude of skepticism, however, would mean pausing to ask whether it might be instead that receiving an allowance merely teaches children to spend money---perhaps even to be more materialistic. Taking an attitude of skepticism would also mean asking what evidence supports the original claim. Is the author a scientific researcher? Is any scientific evidence cited? If the issue was important enough, it might also mean turning to the research literature to see if anyone else had studied it. Then, you could evaluate the existing evidence yourself to determine whether the evidence supports the claim.
Because there is often not enough evidence to fully evaluate a belief or claim, scientists also cultivate a tolerance for uncertainty. They accept that there are many things that they simply do not know. For example, it turns out that there is no scientific evidence that receiving an allowance causes children to be more financially responsible, nor is there any scientific evidence that it causes them to be materialistic. Although this kind of uncertainty can be problematic from a practical perspective---for example, making it difficult to decide what to do when our children ask for an allowance---it is exciting from a scientific perspective. If we do not know the answer to an interesting and empirically testable question, science, and perhaps even you as a researcher, may be able to provide the answer.
\subsection{\allcaps{Key Takeaways}}
\begin{fullwidth}
\begin{itemize}
\item People's intuitions about human behavior, also known as folk psychology, often turn out to be wrong. This is one primary reason that psychology relies on science rather than common sense.
\item Researchers in psychology cultivate certain critical-thinking attitudes. One is skepticism. They search for evidence and consider alternatives before accepting a claim about human behavior as true. Another is tolerance for uncertainty. They withhold judgment about whether a claim is true or not when there is insufficient evidence to decide.
\end{itemize}
\end{fullwidth}
\subsection{\allcaps{Exercises}}
\begin{fullwidth}
\begin{enumerate}
\item Practice: For each of the following intuitive beliefs about human behavior, list three reasons that it might be true and three reasons that it might not be true:
\begin{itemize}
\item You cannot truly love another person unless you love yourself.
\item People who receive "crisis counseling" immediately after experiencing a traumatic event are better able to cope with that trauma in the long term.
\item Studying is most effective when it is always done in the same location.
\end{itemize}
\item Watch the following video, in which psychologist Scott Lilienfeld talks about confirmation bias, tunnel vision, and using evidence to evaluate the world around us \url{https://youtu.be/Eut8jMfSA_k}
\end{enumerate}
\end{fullwidth}
\newpage
\section{Science and Clinical Practice}
\marginnote{\allcaps{Learning Objectives}
\begin{enumerate}
\item Define the clinical practice of psychology and distinguish it from the science of psychology.
\item Explain how science is relevant to clinical practice.
\item Define the concept of an empirically supported treatment and give some examples.
\end{enumerate}
}
Psychology is the scientific study of behavior and mental processes. But it is also the application of scientific research to "help people, organizations, and communities function better" (American Psychological Association, 2011). By far the most common and widely known application is the clinical practice of psychology---the diagnosis and treatment of psychological disorders and related problems. Let us use the term clinical practice broadly to refer to the activities of clinical and counseling psychologists, school psychologists, marriage and family therapists, licensed clinical social workers, and others who work with people individually or in small groups to identify and help address their psychological problems. It is important to consider the relationship between scientific research and clinical practice because many students are especially interested in clinical practice, perhaps even as a career.
\marginnote{\allcaps{Empirically Supported Treatments}
An empirically supported treatment is one that has been studied scientifically and shown to result in greater improvement than no treatment, a placebo, or some alternative treatment. These include many forms of psychotherapy, which can be as effective as standard drug therapies. Among the forms of psychotherapy with strong empirical support are the following:
\begin{itemize}
\item Cognitive behavioral therapy. For depression, panic disorder, bulimia nervosa, and post-traumatic stress disorder.
\item Exposure therapy. For post-traumatic stress disorder.
\item Behavioral therapy. For depression.
\item Behavioral couples therapy. For alcoholism and substance abuse.
\item Exposure therapy with response prevention. For obsessive-compulsive disorder.
\item Family therapy. For schizophrenia.
\end{itemize}
For a more complete list, see the following website, which is maintained by Division 12 of the American Psychological Association, the Society for Clinical Psychology \url{http://www.div12.org/psychological-treatments}
}
The main point is that psychological disorders and other behavioral problems are part of the natural world. This means that questions about their nature, causes, and consequences are empirically testable and therefore subject to scientific study. As with other questions about human behavior, we cannot rely on our intuition or common sense for detailed and accurate answers. Consider, for example, that dozens of popular books and thousands of websites claim that adult children of alcoholics have a distinct personality profile, including low self-esteem, feelings of powerlessness, and difficulties with intimacy. Although this sounds plausible, scientific research has demonstrated that adult children of alcoholics are no more likely to have these problems than anybody else \citep{lilienfeld_50_2011}. Similarly, questions about whether a particular psychotherapy is effective are empirically testable questions that can be answered by scientific research. If a new psychotherapy is an effective treatment for depression, then systematic observation should reveal that depressed people who receive this psychotherapy improve more than a similar group of depressed people who do not receive this psychotherapy (or who receive some alternative treatment). Treatments that have been shown to work in this way are called \textbf{empirically supported treatments}.
Many in the clinical psychology community have argued that their field has not paid enough attention to scientific research---for example, by failing to use empirically supported treatments---and have suggested a variety of changes in the way clinicians are trained and treatments are evaluated and put into practice. Others believe that these claims are exaggerated and the suggested changes are unnecessary \citep{norcross_evidence-based_2006}. On both sides of the debate, however, there is agreement that a scientific approach to clinical psychology is essential if the goal is to diagnose and treat psychological problems based on detailed and accurate knowledge about those problems and the most effective treatments for them. So not only is it important for scientific research in clinical psychology to continue, but it is also important for clinicians who never conduct a scientific study themselves to be scientifically literate so that they can read and evaluate new research and make treatment decisions based on the best available evidence.
\subsection{\allcaps{Key Takeaways}}
\begin{fullwidth}
\begin{itemize}
\item The clinical practice of psychology---the diagnosis and treatment of psychological problems---is one important application of the scientific discipline of psychology.
\item Scientific research is relevant to clinical practice because it provides detailed and accurate knowledge about psychological problems and establishes whether treatments are effective.
\end{itemize}
\end{fullwidth}
\subsection{\allcaps{Exercises}}
\begin{fullwidth}
\begin{enumerate}
\item Discussion: Some clinicians argue that what they do is an "art form" based on intuition and personal experience and therefore cannot be evaluated scientifically. Write a paragraph about how satisfied you would be with such a clinician and why from each of three perspectives:
\begin{itemize}
\item a potential client of the clinician
\item a judge who must decide whether to allow the clinician to testify as an expert witness in a child abuse case
\item an insurance company representative who must decide whether to reimburse the clinician for his or her services
\end{itemize}
\item Practice: Create a short list of questions that a client could ask a clinician to determine whether he or she pays sufficient attention to scientific research.
\end{enumerate}
\end{fullwidth}
\section{Using Psychological Science to Inform Your Worldview}
Psychology is a very broad scientific discipline that asks and answers all sorts of questions about human and non-human animals. Psychological science encompasses many levels of analysis, spanning the building blocks of biological systems, such as genes and cells, neurochemistry, neurons, and networks of neurons; perceptual and cognitive abilities of individuals, such as learning, memory, attention, decision-making, language, thought, intelligence, and consciousness; and complex aspects of individuals, such as development, personality, social behavior, and many others. A typical introductory psychology course has the difficult job of presenting a bird's eye view of all of these major psychological domains of inquiry. Although psychologists ask many different kinds of questions, they all employ the scientific method as a tool to answer questions. So, this course is an introduction to the scientific research methods that are used in all areas of Psychology.
The primary focus of the course will be on the experiment, which is the most powerful empirical tool researchers have for determining the underlying causes of the psychological phenomena that they measure. Psychological research methods are not limited to experiments, however, and non-experimental or quasi-experimental approaches are often used with great success to ask and answer questions. Some of these research methods will be highlighted throughout the course.
\subsection{Why Should I Care About How Psychology Experiments Work?}
Imagine for a moment a world without experiments, one that does not use the scientific method. This world would still have people claiming to have knowledge about how things work, and it would still have tools and technologies that are claimed to solve particular problems. However, without experiments to test whether those claims are true, all we would be left with are untested claims that may be true or false. We would be left in the dark. Inevitably, and not too different from our world today, there would be large segments of the population who believe false claims about how things work, and large segments of the population using therapies, tools, or other technologies that simply do not work (even if they believe they do).
The world we live in today discovered the scientific method and uses experiments to test claims about how things work. Indeed, with the enormous number of ways that we receive information through the media today, it is difficult to avoid hearing about all sorts of new scientific claims as well as totally unfounded claims that may not be based in science. For example, we have probably all heard that eating too much of something is good or bad for you, and increases or decreases your risk for a health problem. These claims can even flip around so that last year eating too much of X was bad, but this year eating too little of X is bad. What's more, many of these claims are supposedly scientific ones based on experiments. Should you believe these claims, and should you change your own behavior because of them?
When we receive claims through the media, we are getting second-hand information, and based on this information alone it is difficult to evaluate the claim and the evidence for it. One option is to find expert reporters that you trust, and then believe everything they say. The other option is to find the primary source, and then evaluate the evidence yourself to determine whether you should believe the claim. The ability to understand how experiments work gives you the tools you need to critically evaluate claims about how things work.
\subsection{Evaluating Claims}
It is an understatement to say that people believe all sorts of crazy things. Note that this is itself a claim I just made. Should you believe it? What do you need to know to determine whether or not you should believe this claim, or any other claim? Scientific thinking requires that claims be supported by evidence. Other forms of belief and thinking may not require evidence to support claims.
For the moment I'll put on my scientific thinking hat, because there are numerous ways that I can provide evidence for my claim that people believe all sorts of crazy things. I am a person, and I know that I have believed crazy things in the past. For example, when I was four I believed that all children grow to be taller than their parents, because I visited a family who had many children of different ages, and the oldest ones were all taller than their parents. I believed this claim for many years until finding out, at the age of 12, that all of the children in that family were adopted (nevertheless, I am taller than both my parents, but my brother is not; so much for my theory). The internet is full of people claiming to believe things that I think are completely crazy. For example, the Flat Earth Society believes that the earth is flat and shaped like a frisbee. Believers in the great reptilian conspiracy maintain that many of our world leaders are lizard people. The abundance of conspiracy theories provides a deep well of evidence that people believe all sorts of crazy things. So, because I can back up my claim with evidence, I will continue believing that people believe all sorts of crazy things.
I could also take my science hat off, and then I can believe anything I want. Indeed, this remarkable capacity for imagination may be one reason why people believe so many crazy things without needing any evidence whatsoever to back up their beliefs. I can believe that I am a lizard monster who lives on a frisbee just because I want to. Indeed, the freedom to have your own opinion or belief about anything is a sacred cultural value in our western democracy. As citizens we respect each other's right to their own opinions and beliefs. This is a way of respecting the right to hold true, false, and crazy beliefs, or of respecting each other's right to be completely wrong.
\subsection{Testable and Untestable Claims}
The scientific method for determining whether claims are true or false, or somewhere in the middle, has limitations, because it can only be used to evaluate testable claims. A testable claim is one that makes a clear implication about a state of the world. For example, I claim that I have two hands. This is a testable claim because it clearly implies that if someone were to observe my arms, they would expect to find two hands at the end of them. If they did not find two hands, then they could dispense with my claim, because the evidence would be in direct contradiction with it. An untestable claim is nonsensical, or does not make a clear implication about a state of the world. For example, consider the claim “aldfoha ofghnfsklhjas asdfilubhs”. This is just nonsense: no one knows what it means, and it makes no clear implication about a state of the world, so we will never know whether it is true or false. Consider the claim “the members of the Zarkovian alien race from planet Zarko in a parallel universe all look like perfect glass spheres”. This claim is possibly sensible, because it could be tested if we could travel to that planet and find members of the Zarkovian race, but it is not practically testable because the needed evidence cannot be gathered; so, we will never know whether this made-up claim is true or false, or almost true (perhaps they are cubes or ellipsoids).
Claims and evidence are two central parts of the scientific method, and in psychological science neither of these parts comes for free. Researchers construct both of them. One job is to create claims that can be tested. The other job is to create the evidence, by constructing the situations necessary to conduct the tests. Joining the creation of claims with the creation of testable situations produces evidence that bears directly on the claim. The evidence can be consistent or inconsistent with the claim, allowing the claim to be retained or rejected.
Here is another claim: people don't always like to be wrong. I don't always like to be wrong, so there is at least one example. People have beliefs that are near and dear to their hearts, so close, perhaps, that a person might be devastated to find out that one of their precious ideas was wrong. For this reason, the evidence provided by scientific research may be viewed as a threat to a person's system of beliefs about the world. After all, when those beliefs involve testable claims, research can sometimes show those claims to be completely false, in which case a rational person might be forced to delete parts of the beliefs they would have preferred to hold on to. However, people aren't always rational, and discovering evidence does not force anyone to do anything. For example, lots of research shows that people can persevere in maintaining false beliefs, even after they are told about the evidence showing their beliefs are false. So, people really do believe crazy things.
\subsection{Learning How Not To Be Crazy}
Learning about how experiments work is an opportunity to learn how not to be crazy. Remember, experiments can only tell us about claims that can be tested, so we cannot use this method to know whether our untestable beliefs are crazy. Fortunately, there is an endless number of testable claims that we can investigate, which can add to the ever-growing library of human knowledge that science has produced so far, and be translated into applications that benefit ourselves, society, and the world around us.
|
{"hexsha": "443af29b9435f0d488b7107f3c6e7be2ed323c58", "size": 50713, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "LatexVersion/Chapter1_PsychologicalScience.tex", "max_stars_repo_name": "danBurrell/research_methods_with_R", "max_stars_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-12-29T16:39:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T12:46:59.000Z", "max_issues_repo_path": "LatexVersion/Chapter1_PsychologicalScience.tex", "max_issues_repo_name": "danBurrell/research_methods_with_R", "max_issues_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LatexVersion/Chapter1_PsychologicalScience.tex", "max_forks_repo_name": "danBurrell/research_methods_with_R", "max_forks_repo_head_hexsha": "74745c4bd69d185f1a36ef38638be8cc55966b06", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-09-01T14:17:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T23:17:38.000Z", "avg_line_length": 169.6086956522, "max_line_length": 1817, "alphanum_fraction": 0.815077002, "num_tokens": 9906}
|
from scipy import integrate, interpolate
from matplotlib import pyplot as plt
import numpy as np
import utils
# Options
nb_nodes = 30
nb_phases = 4
nb_frame_inter = 500
nb_dim = 3
output_files = "Eocar"
# read states
nb_points = (nb_phases * nb_nodes) + 1
i = 0
t = np.ndarray(nb_points) # initialization of the time
# initialization of the derived states
all_q = np.ndarray((nb_dim, nb_points))
all_qdot = np.ndarray((nb_dim, nb_points))
with open(f"../optimal_control/Results/States{output_files}.txt", "r") as data:
# Nodes first lines
for l in range(nb_nodes):
line = data.readline()
        lin = line.split('\t')  # split the line into elements
lin[:1] = [] # remove the first element ( [ )
lin[(nb_phases * nb_dim) + (nb_phases * nb_dim) + 1:] = [] # remove the last ]
t[i] = float(lin[0]) # complete the time with the first column
for p in range(nb_phases):
all_q[:, i + p * nb_nodes] = [float(j) for j in lin[
1 + p * 2*nb_dim:nb_dim + p * 2*nb_dim + 1]] # complete the states with the nQ next columns
all_qdot[:, i + p * nb_nodes] = [float(k) for k in
lin[nb_dim + 1 + p * 2*nb_dim:2*nb_dim * (p + 1) + 1]]
i += 1
# Last line
line = data.readline()
    lin = line.split('\t')  # split the line into elements
lin[:1] = [] # remove the first element ( [ )
lin[(nb_phases * nb_dim) + (nb_phases * nb_dim) + 1:] = [] # remove the last ( ] )
t[i] = float(lin[0])
all_q[:, -1] = [float(j) for j in lin[1 + (nb_phases - 1) * 2*nb_dim:nb_dim + (nb_phases - 1) * 2*nb_dim + 1]]
all_qdot[:, -1] = [float(k) for k in
lin[nb_dim + 1 + (nb_phases - 1) * 2*nb_dim:2*nb_dim * nb_phases + 1]]
t_final = t  # note: t_final aliases t, so later phases partly re-read values already stitched in earlier phases
for p in range(1, nb_phases):
for j in range(nb_nodes + 1):
t_final[(nb_nodes * p) + j] = t_final[(nb_nodes * p) - 1] + t[j + 1]
# read controls
i = 0
all_u = np.ndarray((nb_dim, nb_points))
with open(f"../optimal_control/Results/Controls{output_files}.txt", "r") as fichier_u:
for l in range(nb_nodes):
line = fichier_u.readline()
lin = line.split('\t')
lin[:1] = []
lin[(nb_phases*nb_dim) + 1:] = []
for p in range(nb_phases):
            all_u[:, i+p*nb_nodes] = [float(v) for v in lin[1+p*nb_dim:nb_dim*(p+1)+1]]
i += 1
line = fichier_u.readline()
lin = line.split('\t')
lin[:1] = []
lin[(nb_phases*nb_dim) + 1:] = []
all_u[:, -1] = [float(v) for v in lin[1+(nb_phases-1)*nb_dim:nb_dim*nb_phases+1]]
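# Illustrative fallback (not part of the original script): if the local `utils`
# helper is unavailable, a matplotlib step plot gives a similar piecewise-constant
# rendering of the controls. The helper name and its assumed behaviour are my own.
def plot_piecewise_constant_fallback(time, values):
    plt.step(time, values, where='post')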
# Show data
plt.figure("Eocar")
for i in range(nb_dim):
plt.subplot(nb_dim, 3, 1+(3*i))
plt.plot(t_final, all_q[i, :])
plt.title("Q %i" % i)
plt.subplot(nb_dim, 3, 2+(3*i))
plt.plot(t_final, all_qdot[i, :])
plt.title("Qdot %i" % i)
for i in range(nb_dim):
plt.subplot(nb_dim, 3, 3 + (3 * i))
utils.plot_piecewise_constant(t_final, all_u[i, :])
plt.title("Torques %i" % i)
# plt.ion() # Non blocking plt.show
plt.show()
|
{"hexsha": "5b2eb8d6bd0931d296c51bc1909735456fd7ee0a", "size": 3116, "ext": "py", "lang": "Python", "max_stars_repo_path": "analyses/show_eocar.py", "max_stars_repo_name": "ValKanAll/ViolinOptimalControl", "max_stars_repo_head_hexsha": "556311aecc3e13b1fd2dd6927d22510b127c38c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analyses/show_eocar.py", "max_issues_repo_name": "ValKanAll/ViolinOptimalControl", "max_issues_repo_head_hexsha": "556311aecc3e13b1fd2dd6927d22510b127c38c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-26T07:44:17.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-26T07:44:17.000Z", "max_forks_repo_path": "analyses/show_eocar.py", "max_forks_repo_name": "ValentinAllard/ViolinOptimalControl", "max_forks_repo_head_hexsha": "556311aecc3e13b1fd2dd6927d22510b127c38c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1237113402, "max_line_length": 152, "alphanum_fraction": 0.5683568678, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1020}
|
from os import PathLike
from pathlib import Path
import cv2
import numpy as np
import torch
from PIL import Image
from numpy import linalg
from torch import nn
from torchvision import transforms, models
from kts.cpd_auto import cpd_auto
class FeatureExtractor(object):
def __init__(self):
self.preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
self.model = models.googlenet(pretrained=True)
self.model = nn.Sequential(*list(self.model.children())[:-2])
self.model = self.model.cuda().eval()
def run(self, img: np.ndarray) -> np.ndarray:
img = Image.fromarray(img)
img = self.preprocess(img)
batch = img.unsqueeze(0)
with torch.no_grad():
feat = self.model(batch.cuda())
feat = feat.squeeze().cpu().numpy()
assert feat.shape == (1024,), f'Invalid feature shape {feat.shape}: expected 1024'
# normalize frame features
feat /= linalg.norm(feat) + 1e-10
return feat
class VideoPreprocessor(object):
def __init__(self, sample_rate: int) -> None:
self.model = FeatureExtractor()
self.sample_rate = sample_rate
def get_features(self, video_path: PathLike):
video_path = Path(video_path)
cap = cv2.VideoCapture(str(video_path))
assert cap is not None, f'Cannot open video: {video_path}'
features = []
n_frames = 0
while True:
ret, frame = cap.read()
if not ret:
break
if n_frames % self.sample_rate == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
feat = self.model.run(frame)
features.append(feat)
n_frames += 1
cap.release()
features = np.array(features)
return n_frames, features
def kts(self, n_frames, features):
seq_len = len(features)
picks = np.arange(0, seq_len) * self.sample_rate
        # compute change points using KTS; since the frame features were
        # L2-normalised above, each kernel entry is a cosine similarity
        kernel = np.matmul(features, features.T)
change_points, _ = cpd_auto(kernel, seq_len - 1, 1, verbose=False)
change_points *= self.sample_rate
change_points = np.hstack((0, change_points, n_frames))
begin_frames = change_points[:-1]
end_frames = change_points[1:]
change_points = np.vstack((begin_frames, end_frames - 1)).T
n_frame_per_seg = end_frames - begin_frames
return change_points, n_frame_per_seg, picks
def run(self, video_path: PathLike):
n_frames, features = self.get_features(video_path)
cps, nfps, picks = self.kts(n_frames, features)
return n_frames, features, cps, nfps, picks
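# Minimal usage sketch (illustrative only, not part of the original module):
# the video path and sample rate below are assumptions. A CUDA device is
# required because FeatureExtractor moves the backbone to the GPU.
if __name__ == '__main__':
    preprocessor = VideoPreprocessor(sample_rate=15)
    n_frames, features, cps, nfps, picks = preprocessor.run('example_video.mp4')
    print(f'{n_frames} frames, {features.shape[0]} sampled features, {len(cps)} segments')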
|
{"hexsha": "2be1e9271b87d120178eac1bf5af58ac09403450", "size": 2891, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/helpers/video_helper.py", "max_stars_repo_name": "wqliu657/DSNet", "max_stars_repo_head_hexsha": "1804176e2e8b57846beb063667448982273fca89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 113, "max_stars_repo_stars_event_min_datetime": "2020-12-04T21:27:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T11:09:51.000Z", "max_issues_repo_path": "src/helpers/video_helper.py", "max_issues_repo_name": "wqliu657/DSNet", "max_issues_repo_head_hexsha": "1804176e2e8b57846beb063667448982273fca89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2021-02-26T15:15:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T12:37:08.000Z", "max_forks_repo_path": "src/helpers/video_helper.py", "max_forks_repo_name": "wqliu657/DSNet", "max_forks_repo_head_hexsha": "1804176e2e8b57846beb063667448982273fca89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2020-12-19T08:38:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T05:49:43.000Z", "avg_line_length": 31.7692307692, "max_line_length": 90, "alphanum_fraction": 0.6184711173, "include": true, "reason": "import numpy,from numpy", "num_tokens": 674}
|
"""
displayer.py is responsible for saving the rendered animation into file.
"""
import subprocess as sp
import os
import time
import numpy as np
import pygame as pg
import configs as cfg
FFMPEG_BIN = "ffmpeg" # on Windows
def savevideo(animation):
""" Saves the simulation as a video """
command = [FFMPEG_BIN,
"-threads", "0",
"-y", # (optional) overwrite output file if it exists
"-f", "rawvideo",
"-vcodec", "rawvideo",
"-s", str(cfg.window_size[0]) + "x" + str(cfg.window_size[1]), # size of one frame
"-pix_fmt", "rgb24", # "rgb" + str(bitsize),
"-r", "24", # frames per second
"-i", "-", # The imput comes from a pipe
"-an", # Tells FFMPEG not to expect any audio
"-vcodec", "mpeg4",
# "-b:v", "2000000",
"-qscale:v", "1", # Takes as much time, but file is bigger (bitrate is bigger)
os.path.join(os.getcwd(), cfg.movie_url)]
print("Saving video in", os.path.join(os.getcwd(), cfg.movie_url))
pipe = sp.Popen(command, stdin=sp.PIPE, stdout=None, stderr=sp.STDOUT, shell=True)
# Reference to pixel values in a surface (pointers)
i = 0
for frame in animation:
pxarray = pg.surfarray.pixels3d(frame).astype(np.uint8)
pxarray_t = np.transpose(pxarray, [1, 0, 2])
try:
            pipe.stdin.write(pxarray_t.tobytes())
except OSError as e:
print(str(i) + ": ")
print(e)
i += 1
pipe.stdin.close()
if pipe.stderr is not None:
pipe.stderr.close()
pipe.wait()
del pipe
def saveimages(animation):
""" Saves simulation as a series of pictures """
i = 0
# Clear previous images
for filename in os.listdir(os.getcwd()):
if filename.startswith(cfg.images_filename) and filename.endswith(cfg.images_format):
removed = False
while not removed:
try:
os.remove(filename)
removed = True
                except OSError:
pass
for frame in animation:
print("Saving image in", os.path.join(os.getcwd(),
cfg.images_filename +
str(int(cfg.startSecond * cfg.framespersecond) + i) + cfg.images_format))
# saved = False
# while (not saved):
attempt = 0
while attempt < 10:
try:
pg.image.save(frame, os.path.join(os.getcwd(),
cfg.images_filename +
str(int(cfg.startSecond * cfg.framespersecond) + i) +
cfg.images_format))
break
            except (OSError, pg.error):
attempt += 1
time.sleep(0.1)
if attempt == 10:
print("Failed to save image.")
i += 1
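# Minimal usage sketch (illustrative only, not part of the original module):
# builds a short list of solid-colour pygame surfaces and writes them to video.
# The frame count and colours are assumptions; `cfg` must still provide
# window_size and movie_url as used above, and ffmpeg must be on the PATH.
if __name__ == '__main__':
    pg.init()
    demo_frames = []
    for k in range(48):
        # 24-bit surface so pg.surfarray.pixels3d() in savevideo() can read it
        surf = pg.Surface(cfg.window_size, depth=24)
        surf.fill((k * 5 % 256, 32, 128))  # simple colour sweep so frames differ
        demo_frames.append(surf)
    savevideo(demo_frames)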
|
{"hexsha": "cd0f9b9cbb768ac81cae72525b6e78b39f1b6f9b", "size": 2923, "ext": "py", "lang": "Python", "max_stars_repo_path": "displayer.py", "max_stars_repo_name": "naummo/swarm_maze_opencl_solver", "max_stars_repo_head_hexsha": "1047e1293e90f484ccc4ff77cfe61196fb7cbbc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "displayer.py", "max_issues_repo_name": "naummo/swarm_maze_opencl_solver", "max_issues_repo_head_hexsha": "1047e1293e90f484ccc4ff77cfe61196fb7cbbc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "displayer.py", "max_forks_repo_name": "naummo/swarm_maze_opencl_solver", "max_forks_repo_head_hexsha": "1047e1293e90f484ccc4ff77cfe61196fb7cbbc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8426966292, "max_line_length": 98, "alphanum_fraction": 0.5237769415, "include": true, "reason": "import numpy", "num_tokens": 666}
|
using ParameterisedModule
using Test
Functor = Function
# Write your own tests here.
# :(@sig struct S{A}
# x :: Int
# y :: A
# struct K end
# end) |> (x -> macroexpand(ParameterisedModule, x)) |> println
@testset "I'm here?" begin
@sig struct S{A}
x :: Int
y :: A
struct K end
end
println(S)
# :(@structure struct S{String}
# struct K
# x :: Int
# end
# x = 2
# y = "2"
# end) |> (x -> macroexpand(ParameterisedModule, x)) |> println
mod1 = @structure struct S{String}
struct K
x :: Int
end
x = 2
y = "2"
end
@test mod1.x == 2
@test mod1.y == "2"
@test_throws Any mod1.x = 3
@test_throws Any begin
@structure struct S{Nothing}
struct K_ end
K = K_
x = 0
y = "2" # should be nothing
end
end
@sig struct Numeric
(+) :: Function
(-) :: Function
end
mod3 = @structure struct Numeric
a + b = "$a + $b"
a - b = "$a - $b"
end
@test mod3.:+(1, 2) == "1 + 2"
@test mod3.:-(1, 2) == "1 - 2"
@open Numeric mod3 begin
@test 1 + 2 == "1 + 2"
end
using ParameterisedModule
# this is the module type declaration
@sig struct NatAlgebra
struct Eltype end # this is type declaration
succ :: Function
zero :: Eltype
end
# make a module `num_nat`, whose module type is NatAlgebra
num_nat = @structure struct NatAlgebra
Eltype = Int
succ(x) = x + 1
zero = 0
end
@open NatAlgebra num_nat begin
println(succ(succ(zero))) # 2
end
@test_throws UndefVarError println(succ(succ(zero)))
# ERROR: UndefVarError: succ not defined
str_nat = @structure struct NatAlgebra
Eltype = String
succ(x) = "succ($x)"
zero = "zero"
end
@open NatAlgebra str_nat begin
println(succ(succ(zero))) # succ(succ(zero))
end
@sig struct TF{Eltype}
e :: Eltype
end
TFZero(nat :: NatAlgebra) =
@structure struct TF{nat.Eltype}
e = nat.zero
end
word_algebra =
@structure struct NatAlgebra
Eltype = Functor
zero = TFZero
succ(T1) =
function (N::NatAlgebra)
@structure struct TF{N.Eltype}
e = N.succ(T1(N).e)
end
end
end
@sig struct H
h :: Functor
end
word_algebra.succ(TFZero)(num_nat) |> println
HTFC(N::NatAlgebra) =
@structure struct H
h(T) = T(N).e
end
@open H HTFC(num_nat) begin
@test h(word_algebra.zero) == num_nat.zero
case(x::Functor) =
h(word_algebra.succ(x)) == num_nat.succ(h(x))
words = Functor[TFZero]
for i = 1:100
push!(words, word_algebra.succ(words[end]))
end
@test all(words) do x; case(x) end
end
end
|
{"hexsha": "51eee4efdaa26bb0178a58567f7b1bc465f18897", "size": 2678, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "JuliaTagBot/ParameterisedModule.jl", "max_stars_repo_head_hexsha": "5e8eb12915093479382db225760958204c8e7e8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-11-11T12:43:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-11T15:45:21.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "JuliaTagBot/ParameterisedModule.jl", "max_issues_repo_head_hexsha": "5e8eb12915093479382db225760958204c8e7e8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-08T16:08:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-13T23:01:32.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "JuliaTagBot/ParameterisedModule.jl", "max_forks_repo_head_hexsha": "5e8eb12915093479382db225760958204c8e7e8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:29:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-10T05:47:09.000Z", "avg_line_length": 17.6184210526, "max_line_length": 63, "alphanum_fraction": 0.5784167289, "num_tokens": 859}
|
{-# OPTIONS --prop #-}
{-# TERMINATING #-}
makeloop : {P : Prop} → P → P
makeloop p = makeloop p
postulate
A : Set
B C : A → Prop
record AB : Set where
no-eta-equality -- the problem goes away if this is left out
constructor _,_
field
a : A
b : B a
open AB public
-- -- Same problem if replacing the no-eta record by a datatype
-- data AB : Set where
-- _,_ : (a : A) → B a → AB
-- a : AB → A
-- a (x , y) = x
-- b : (z : AB) → B (a z)
-- b (x , y) = y
record ABC : Set where
constructor _,_
field
ab : AB
c : C (a ab) -- the problem goes away if this field is left out
open ABC public
f : AB → ABC
f ab = (a ab , makeloop (b ab)) , {!!}
postulate
P : ABC → Prop
g : (ab : AB) → P (f ab)
works : (ab : AB) → P (f ab)
works ab = g ab
loops : (ab : AB) → P (f ab)
loops ab = g _
-- WAS: Agda loops while typechecking @loops@
-- SHOULD: succeed (with unsolved metas)
|
{"hexsha": "a9c1a3b5aec68878487262818032e08d62bbb3f9", "size": 914, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Fail/Issue4118.agda", "max_stars_repo_name": "shlevy/agda", "max_stars_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Fail/Issue4118.agda", "max_issues_repo_name": "shlevy/agda", "max_issues_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Fail/Issue4118.agda", "max_forks_repo_name": "Agda-zh/agda", "max_forks_repo_head_hexsha": "231d6ad8e77b67ff8c4b1cb35a6c31ccd988c3e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 17.9215686275, "max_line_length": 68, "alphanum_fraction": 0.5612691466, "num_tokens": 324}
|
# from typing import ?
import torch
import torch.nn as nn
from numpy import exp, sqrt
from numpy.random import normal
class VAE(nn.Module):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
            # Linear(input_size, d**2); input_size is the largest possible input size, i.e. the largest molecule
nn.Linear(input_size, d**2),
nn.ReLU(),
nn.Linear(d ** 2, d * 2)
)
self.decoder = nn.Sequential(
nn.Linear(d, d ** 2),
nn.ReLU(),
nn.Linear(d ** 2, input_size)
# would use sigmoid here if input was between 0 and 1
)
    def reparameterise(self, mean_z, log_var_z):
        if self.training:
            # sample eps ~ N(0, 1) with the same shape as mean_z; torch ops keep
            # the sampling on the model's device and differentiable w.r.t. the
            # encoder outputs
            # predicting the log-variance lets the encoder output any real value,
            # since variances themselves must be positive
            eps = torch.randn_like(mean_z)
            # z = mean + eps * std, where std = exp(0.5 * log_var)
            z = mean_z + eps * torch.exp(0.5 * log_var_z)
            return z
        else:
            return mean_z
def forward(self, x):
        # flatten the input, encode, then reshape to (-1, 2, d): index 0 along dim 1 is the mean, index 1 the log-variance
params_z = self.encoder(x.view(-1, input_size)).view(-1, 2, d)
mean_z = params_z[:, 0, :]
log_var_z = params_z[:, 1, :]
z = self.reparameterise(mean_z, log_var_z)
return self.decoder(z), mean_z, log_var_z
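# --- Hypothetical configuration (illustrative assumptions, not part of the
# --- original prototype): input_size, d and device are used above but never
# --- defined in this file, so placeholder values are given here.
input_size = 784   # e.g. a flattened 28x28 input; placeholder value
d = 20             # latent dimensionality; placeholder value
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')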
model = VAE().to(device)
# setting optimiser
learning_rate = 1e-3
optimiser = torch.optim.Adam(model.parameters(), lr = learning_rate)
# reconstruction + KL divergence losses summed over all elements
def loss_function(x_hat, x, mean_z, log_var_z):
    # binary cross entropy between input and reconstruction
    # (note: BCE expects values in [0, 1]; the decoder above has no final
    # sigmoid, so in practice the reconstruction may need one)
    BCE = nn.functional.binary_cross_entropy(x_hat, x.view(-1, input_size), reduction='sum')
    # KL divergence of N(mean_z, exp(log_var_z)) from N(0, 1):
    # 0.5 * sum(exp(log_var) - log_var - 1 + mean^2)
    KLD = 0.5 * torch.sum(torch.exp(log_var_z) - log_var_z - 1 + mean_z ** 2)
    return BCE + KLD
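# Quick shape check (illustrative only; relies on the hypothetical input_size/d/
# device defined above and on randomly generated data, not real inputs).
_dummy = torch.rand(4, input_size, device=device)
_x_hat, _mu, _logvar = model(_dummy)
assert _x_hat.shape == (4, input_size) and _mu.shape == (4, d) and _logvar.shape == (4, d)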
# training and testing the VAE
epochs = 5
codes = dict(mean=list(), log_var=list(), y=list())
for epoch in range(0, epochs+1):
# training
if epoch > 0:
model.train()
train_loss = 0
for x, _ in train_loader:
x = x.to(device)
# === forward ===
x_hat, mean, log_var = model(x)
loss = loss_function(x_hat, x, mean, log_var)
train_loss += loss.item()
# === backward ===
optimiser.zero_grad()
loss.backward()
optimiser.step()
# === log ===
print(f'====> Epoch: {epoch} Average loss: {train_loss / len(train_loader.dataset):.4f}')
# testing
means, log_vars, labels = list(), list(), list()
with torch.no_grad():
model.eval()
test_loss = 0
for x, y in test_loader:
x = x.to(device)
# === forward ===
x_hat, mean, log_var = model(x)
test_loss += loss_function(x_hat, x, mean, log_var).item()
# === log ===
means.append(mean.detach())
log_vars.append(log_var.detach())
labels.append(y.detach())
# === log ===
codes['mean'].append(torch.cat(means))
codes['log_var'].append(torch.cat(log_vars))
codes['y'].append(torch.cat(labels))
test_loss /= len(test_loader.dataset)
print(f'===> Test set loss: {test_loss:.4f}')
display_images(x, x_hat, 1, f'Epoch {epoch}')
# generating a few samples
N = 16
z = torch.randn((N, d)).to(device)
sample = model.decoder(z)
display_images(None, sample, N//4, count=True)
# Choose starting and ending point for the interpolation -> shows original and reconstructed
A, B = 1, 14
sample = model.decoder(torch.stack((mean[A].data, mean[B].data), 0))
display_images(None, torch.stack(((
x[A].data.view(-1),
x[B].data.view(-1),
sample.data[0],
sample.data[1]
)), 0))
|
{"hexsha": "33d97c280b59e3be525682e98fc1ab82f95f868e", "size": 3976, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/prototypes/vae.py", "max_stars_repo_name": "avishvj/3d-reactions", "max_stars_repo_head_hexsha": "8326fbecf2e8a0d0445508ae809dc61e3116d161", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-11T09:50:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T10:13:11.000Z", "max_issues_repo_path": "models/prototypes/vae.py", "max_issues_repo_name": "avishvj/3d-reactions", "max_issues_repo_head_hexsha": "8326fbecf2e8a0d0445508ae809dc61e3116d161", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/prototypes/vae.py", "max_forks_repo_name": "avishvj/3d-reactions", "max_forks_repo_head_hexsha": "8326fbecf2e8a0d0445508ae809dc61e3116d161", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6949152542, "max_line_length": 107, "alphanum_fraction": 0.583249497, "include": true, "reason": "from numpy", "num_tokens": 1020}
|
"""
Tensor Products of Crystals
Main entry points:
- :class:`~sage.combinat.crystals.tensor_product.TensorProductOfCrystals`
- :class:`~sage.combinat.crystals.tensor_product.CrystalOfTableaux`
AUTHORS:
- Anne Schilling, Nicolas Thiery (2007): Initial version
- Ben Salisbury, Travis Scrimshaw (2013): Refactored tensor products to handle
non-regular crystals and created new subclass to take advantage of
the regularity
- Travis Scrimshaw (2020): Added queer crystal
"""
#*****************************************************************************
# Copyright (C) 2007 Anne Schilling <anne at math.ucdavis.edu>
# Nicolas Thiery <nthiery at users.sf.net>
# 2020 Travis Scrimshaw <tcscrims at gmail.com>
# Ben Salisbury <salis1bt at cmich.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
import operator
from sage.misc.cachefunc import cached_method
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.global_options import GlobalOptions
from sage.categories.category import Category
from sage.categories.cartesian_product import cartesian_product
from sage.categories.classical_crystals import ClassicalCrystals
from sage.categories.regular_crystals import RegularCrystals
from sage.categories.sets_cat import Sets
from sage.combinat.root_system.cartan_type import CartanType, SuperCartanType_standard
from sage.combinat.partition import _Partitions
from .letters import CrystalOfLetters
from .spins import CrystalOfSpins, CrystalOfSpinsMinus, CrystalOfSpinsPlus
from sage.combinat.crystals.tensor_product_element import (TensorProductOfCrystalsElement,
TensorProductOfRegularCrystalsElement, CrystalOfTableauxElement,
TensorProductOfSuperCrystalsElement, TensorProductOfQueerSuperCrystalsElement)
from sage.misc.flatten import flatten
from sage.structure.element import get_coercion_model
from sage.rings.semirings.all import NN
from sage.arith.misc import integer_trunc as trunc
##############################################################################
# Support classes
##############################################################################
class CrystalOfWords(UniqueRepresentation, Parent):
"""
Auxiliary class to provide a call method to create tensor product elements.
This class is shared with several tensor product classes and is also used
in :class:`~sage.combinat.crystals.tensor_product.CrystalOfTableaux`
to allow tableaux of different tensor product structures in
column-reading (and hence different shapes) to be considered elements
in the same crystal.
"""
def _element_constructor_(self, *crystalElements):
"""
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: T(1,1)
[1, 1]
sage: _.parent()
Full tensor product of the crystals [The crystal of letters for type ['A', 2], The crystal of letters for type ['A', 2]]
sage: T = crystals.TensorProduct(C,C,C,generators=[[C(2),C(1),C(1)]])
sage: T(C(2), C(1), C(1))
[2, 1, 1]
"""
return self.element_class(self, list(crystalElements))
class Element(TensorProductOfCrystalsElement):
pass
class TensorProductOfCrystals(CrystalOfWords):
r"""
Tensor product of crystals.
Given two crystals `B` and `B'` of the same Cartan type,
one can form the tensor product `B \otimes B^{\prime}`. As a set
`B \otimes B^{\prime}` is the Cartesian product
`B \times B^{\prime}`. The crystal operators `f_i` and
`e_i` act on `b \otimes b^{\prime} \in B \otimes B^{\prime}` as
follows:
.. MATH::
f_i(b \otimes b^{\prime}) = \begin{cases}
f_i(b) \otimes b^{\prime} & \text{if } \varepsilon_i(b) \geq
\varphi_i(b^{\prime}) \\
b \otimes f_i(b^{\prime}) & \text{otherwise}
\end{cases}
and
.. MATH::
e_i(b \otimes b') = \begin{cases}
e_i(b) \otimes b' & \text{if } \varepsilon_i(b) >
\varphi_i(b') \\ b \otimes e_i(b') & \text{otherwise.}
\end{cases}
We also define:
.. MATH::
\begin{aligned}
\varphi_i(b \otimes b') & = \max\left( \varphi_i(b),
\varphi_i(b') + \langle \alpha_i^{\vee}, \mathrm{wt}(b) \rangle
\right),
\\ \varepsilon_i(b \otimes b') & = \max\left( \varepsilon_i(b'),
\varepsilon_i(b) - \langle \alpha_i^{\vee}, \mathrm{wt}(b') \rangle
\right).
\end{aligned}
.. NOTE::
This is the opposite of Kashiwara's convention for tensor
products of crystals.
Since tensor products are associative `(\mathcal{B} \otimes \mathcal{C})
\otimes \mathcal{D} \cong \mathcal{B} \otimes (\mathcal{C} \otimes
\mathcal{D})` via the natural isomorphism `(b \otimes c) \otimes d \mapsto
    b \otimes (c \otimes d)`, we can generalize this to arbitrary tensor
products. Thus consider `B_N \otimes \cdots \otimes B_1`, where each
`B_k` is an abstract crystal. The underlying set of the tensor product is
`B_N \times \cdots \times B_1`, while the crystal structure is given
as follows. Let `I` be the index set, and fix some `i \in I` and `b_N
\otimes \cdots \otimes b_1 \in B_N \otimes \cdots \otimes B_1`. Define
.. MATH::
a_i(k) := \varepsilon_i(b_k) - \sum_{j=1}^{k-1} \langle
\alpha_i^{\vee}, \mathrm{wt}(b_j) \rangle.
Then
.. MATH::
\begin{aligned}
\mathrm{wt}(b_N \otimes \cdots \otimes b_1) &=
\mathrm{wt}(b_N) + \cdots + \mathrm{wt}(b_1),
\\ \varepsilon_i(b_N \otimes \cdots \otimes b_1) &= \max_{1 \leq k
\leq n}\left( \sum_{j=1}^k \varepsilon_i(b_j) - \sum_{j=1}^{k-1}
\varphi_i(b_j) \right)
\\ & = \max_{1 \leq k \leq N}\bigl( a_i(k) \bigr),
\\ \varphi_i(b_N \otimes \cdots \otimes b_1) &= \max_{1 \leq k \leq N}
\left( \varphi_i(b_N) + \sum_{j=k}^{N-1} \big( \varphi_i(b_j) -
\varepsilon_i(b_{j+1}) \big) \right)
\\ & = \max_{1 \leq k \leq N}\bigl( \lambda_i + a_i(k) \bigr)
\end{aligned}
where `\lambda_i = \langle \alpha_i^{\vee}, \mathrm{wt}(b_N \otimes \cdots
\otimes b_1) \rangle`. Then for `k = 1, \ldots, N` the action of the
Kashiwara operators is determined as follows.
- If `a_i(k) > a_i(j)` for `1 \leq j < k` and `a_i(k) \geq a_i(j)`
for `k < j \leq N`:
.. MATH::
e_i(b_N \otimes \cdots \otimes b_1) = b_N \otimes \cdots \otimes
e_i b_k \otimes \cdots \otimes b_1.
- If `a_i(k) \geq a_i(j)` for `1 \leq j < k` and `a_i(k) > a_i(j)`
for `k < j \leq N`:
.. MATH::
f_i(b_N \otimes \cdots \otimes b_1) = b_N \otimes \cdots \otimes
f_i b_k \otimes \cdots \otimes b_1.
Note that this is just recursively applying the definition of the tensor
product on two crystals. Recall that `\langle \alpha_i^{\vee},
\mathrm{wt}(b_j) \rangle = \varphi_i(b_j) - \varepsilon_i(b_j)` by the
definition of a crystal.
.. RUBRIC:: Regular crystals
Now if all crystals `B_k` are regular crystals, all `\varepsilon_i` and
`\varphi_i` are non-negative and we can
define tensor product by the *signature rule*. We start by writing a word
in `+` and `-` as follows:
.. MATH::
\underbrace{- \cdots -}_{\varphi_i(b_N) \text{ times}} \quad
\underbrace{+ \cdots +}_{\varepsilon_i(b_N) \text{ times}}
\quad \cdots \quad
\underbrace{- \cdots -}_{\varphi_i(b_1) \text{ times}} \quad
\underbrace{+ \cdots +}_{\varepsilon_i(b_1) \text{ times}},
and then canceling ordered pairs of `+-` until the word is in the reduced
form:
.. MATH::
\underbrace{- \cdots -}_{\varphi_i \text{ times}} \quad
\underbrace{+ \cdots +}_{\varepsilon_i \text{ times}}.
Here `e_i` acts on the factor corresponding to the leftmost `+` and `f_i`
on the factor corresponding to the rightmost `-`. If there is no `+` or
`-` respectively, then the result is `0` (``None``).
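    As an illustration of the cancellation (using a generic sign word, not
    tied to a particular crystal): the word ``-++--+`` reduces first to
    ``-+-+`` and then to ``-+``, so `\varphi_i = 1` and `\varepsilon_i = 1`;
    `e_i` then acts on the factor that contributed the surviving ``+`` and
    `f_i` on the factor that contributed the surviving ``-``.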
EXAMPLES:
We construct the type `A_2`-crystal generated by `2 \otimes 1 \otimes 1`::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C,C,generators=[[C(2),C(1),C(1)]])
It has `8` elements::
sage: T.list()
[[2, 1, 1], [2, 1, 2], [2, 1, 3], [3, 1, 3],
[3, 2, 3], [3, 1, 1], [3, 1, 2], [3, 2, 2]]
One can also check the Cartan type of the crystal::
sage: T.cartan_type()
['A', 2]
Other examples include crystals of tableaux (which internally are
represented as tensor products obtained by reading the tableaux
columnwise)::
sage: C = crystals.Tableaux(['A',3], shape=[1,1,0])
sage: D = crystals.Tableaux(['A',3], shape=[1,0,0])
sage: T = crystals.TensorProduct(C,D, generators=[[C(rows=[[1], [2]]), D(rows=[[1]])], [C(rows=[[2], [3]]), D(rows=[[1]])]])
sage: T.cardinality()
24
sage: TestSuite(T).run()
sage: T.module_generators
([[[1], [2]], [[1]]], [[[2], [3]], [[1]]])
sage: [x.weight() for x in T.module_generators]
[(2, 1, 0, 0), (1, 1, 1, 0)]
If no module generators are specified, we obtain the full tensor
product::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: T.list()
[[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3]]
sage: T.cardinality()
9
For a tensor product of crystals without module generators, the
default implementation of ``module_generators`` contains all elements
in the tensor product of the crystals. If there is a subset of
elements in the tensor product that still generates the crystal,
this needs to be implemented for the specific crystal separately::
sage: T.module_generators.list()
[[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3]]
For classical highest weight crystals, it is also possible to list
all highest weight elements::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C,C,generators=[[C(2),C(1),C(1)],[C(1),C(2),C(1)]])
sage: T.highest_weight_vectors()
([2, 1, 1], [1, 2, 1])
Examples with non-regular and infinite crystals (these did not work
before :trac:`14402`)::
sage: B = crystals.infinity.Tableaux(['D',10])
sage: T = crystals.TensorProduct(B,B)
sage: T
Full tensor product of the crystals
[The infinity crystal of tableaux of type ['D', 10],
The infinity crystal of tableaux of type ['D', 10]]
sage: B = crystals.infinity.GeneralizedYoungWalls(15)
sage: T = crystals.TensorProduct(B,B,B)
sage: T
Full tensor product of the crystals
[Crystal of generalized Young walls of type ['A', 15, 1],
Crystal of generalized Young walls of type ['A', 15, 1],
Crystal of generalized Young walls of type ['A', 15, 1]]
sage: La = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: B = crystals.GeneralizedYoungWalls(2,La[0]+La[1])
sage: C = crystals.GeneralizedYoungWalls(2,2*La[2])
sage: D = crystals.GeneralizedYoungWalls(2,3*La[0]+La[2])
sage: T = crystals.TensorProduct(B,C,D)
sage: T
Full tensor product of the crystals
[Highest weight crystal of generalized Young walls of Cartan type ['A', 2, 1] and highest weight Lambda[0] + Lambda[1],
Highest weight crystal of generalized Young walls of Cartan type ['A', 2, 1] and highest weight 2*Lambda[2],
Highest weight crystal of generalized Young walls of Cartan type ['A', 2, 1] and highest weight 3*Lambda[0] + Lambda[2]]
There is also a global option for setting the convention (by default Sage
uses anti-Kashiwara)::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: elt = T(C(1), C(2)); elt
[1, 2]
sage: crystals.TensorProduct.options.convention = "Kashiwara"
sage: elt
[2, 1]
sage: crystals.TensorProduct.options._reset()
"""
@staticmethod
def __classcall_private__(cls, *crystals, **options):
"""
Create the correct parent object.
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C, C)
sage: T2 = crystals.TensorProduct(C, C, cartan_type=['A',2])
sage: T is T2
True
sage: T.category()
Category of tensor products of classical crystals
sage: T3 = crystals.TensorProduct(C, C, C)
sage: T3p = crystals.TensorProduct(T, C)
sage: T3 is T3p
True
sage: B1 = crystals.TensorProduct(T, C)
sage: B2 = crystals.TensorProduct(C, T)
sage: B3 = crystals.TensorProduct(C, C, C)
sage: B1 is B2 and B2 is B3
True
sage: B = crystals.infinity.Tableaux(['A',2])
sage: T = crystals.TensorProduct(B, B)
sage: T.category()
Category of infinite tensor products of highest weight crystals
TESTS:
Check that mismatched Cartan types raise an error::
sage: A2 = crystals.Letters(['A', 2])
sage: A3 = crystals.Letters(['A', 3])
sage: crystals.TensorProduct(A2, A3)
Traceback (most recent call last):
...
ValueError: all crystals must be of the same Cartan type
"""
crystals = tuple(crystals)
if "cartan_type" in options:
cartan_type = CartanType(options.pop("cartan_type"))
else:
if not crystals:
raise ValueError("you need to specify the Cartan type if the tensor product list is empty")
else:
cartan_type = crystals[0].cartan_type()
if any(c.cartan_type() != cartan_type for c in crystals):
raise ValueError("all crystals must be of the same Cartan type")
if "generators" in options:
generators = tuple(tuple(x) if isinstance(x, list) else x for x in options["generators"])
if all(c in RegularCrystals() for c in crystals):
return TensorProductOfRegularCrystalsWithGenerators(crystals, generators, cartan_type)
return TensorProductOfCrystalsWithGenerators(crystals, generators, cartan_type)
# Flatten out tensor products
tp = sum([B.crystals if isinstance(B, FullTensorProductOfCrystals) else (B,)
for B in crystals], ())
if all(c in RegularCrystals() for c in crystals):
return FullTensorProductOfRegularCrystals(tp, cartan_type=cartan_type)
return FullTensorProductOfCrystals(tp, cartan_type=cartan_type)
# add options to class
class options(GlobalOptions):
r"""
Sets the global options for tensor products of crystals. The default is to
use the anti-Kashiwara convention.
There are two conventions for how `e_i` and `f_i` act on tensor products,
and the difference between the two is the order of the tensor factors
are reversed. This affects both the input and output. See the example
below.
@OPTIONS@
.. NOTE::
Changing the ``convention`` also changes how the input is handled.
.. WARNING::
Internally, the crystals are always stored using the anti-Kashiwara
convention.
If no parameters are set, then the function returns a copy of the
options dictionary.
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: elt = T(C(1), C(2)); elt
[1, 2]
sage: crystals.TensorProduct.options.convention = "Kashiwara"
sage: elt
[2, 1]
sage: T(C(1), C(2)) == elt
False
sage: T(C(2), C(1)) == elt
True
sage: crystals.TensorProduct.options._reset()
"""
NAME = 'TensorProductOfCrystals'
module = 'sage.combinat.crystals'
convention = dict(default="antiKashiwara",
description='Sets the convention used for displaying/inputting tensor product of crystals',
values=dict(antiKashiwara='use the anti-Kashiwara convention',
Kashiwara='use the Kashiwara convention'),
alias=dict(anti="antiKashiwara", opposite="antiKashiwara"),
case_sensitive=False)
def _element_constructor_(self, *crystalElements):
"""
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: T(1,1)
[1, 1]
sage: _.parent()
Full tensor product of the crystals [The crystal of letters for type ['A', 2], The crystal of letters for type ['A', 2]]
sage: T = crystals.TensorProduct(C,C,C,generators=[[C(2),C(1),C(1)]])
sage: T(C(2), C(1), C(1))
[2, 1, 1]
"""
if self.options.convention == "Kashiwara":
crystalElements = reversed(crystalElements)
return self.element_class(self, list(crystalElements))
class TensorProductOfCrystalsWithGenerators(TensorProductOfCrystals):
"""
Tensor product of crystals with a generating set.
.. TODO::
Deprecate this class in favor of using
:meth:`~sage.categories.crystals.Crystals.ParentMethods.subcrystal`.
"""
def __init__(self, crystals, generators, cartan_type):
"""
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C,C,generators=[[C(2),C(1),C(1)]])
sage: TestSuite(T).run()
"""
assert isinstance(crystals, tuple)
assert isinstance(generators, tuple)
category = Category.meet([crystal.category() for crystal in crystals])
Parent.__init__(self, category = category)
self.crystals = crystals
self._cartan_type = cartan_type
self.module_generators = tuple([self(*x) for x in generators])
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: crystals.TensorProduct(C,C,generators=[[C(2),C(1)]])
The tensor product of the crystals [The crystal of letters for type ['A', 2], The crystal of letters for type ['A', 2]]
"""
if self.options.convention == "Kashiwara":
st = repr(list(reversed(self.crystals)))
else:
st = repr(list(self.crystals))
return "The tensor product of the crystals {}".format(st)
class FullTensorProductOfCrystals(TensorProductOfCrystals):
"""
Full tensor product of crystals.
.. TODO::
Merge this into :class:`TensorProductOfCrystals`.
"""
def __init__(self, crystals, **options):
"""
TESTS::
sage: from sage.combinat.crystals.tensor_product import FullTensorProductOfCrystals
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: isinstance(T, FullTensorProductOfCrystals)
True
sage: TestSuite(T).run()
"""
category = Category.meet([crystal.category() for crystal in crystals])
category = category.TensorProducts()
if any(c in Sets().Infinite() for c in crystals):
category = category.Infinite()
Parent.__init__(self, category=category)
self.crystals = crystals
if 'cartan_type' in options:
self._cartan_type = CartanType(options['cartan_type'])
else:
if not crystals:
raise ValueError("you need to specify the Cartan type if the tensor product list is empty")
else:
self._cartan_type = crystals[0].cartan_type()
self.cartesian_product = cartesian_product(self.crystals)
self.module_generators = self
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: crystals.TensorProduct(C,C)
Full tensor product of the crystals [The crystal of letters for type ['A', 2], The crystal of letters for type ['A', 2]]
"""
if self.options.convention == "Kashiwara":
st = repr(list(reversed(self.crystals)))
else:
st = repr(list(self.crystals))
return "Full tensor product of the crystals {}".format(st)
# TODO: __iter__ and cardinality should be inherited from EnumeratedSets().CartesianProducts()
def __iter__(self):
"""
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: list(T)
[[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3]]
sage: _[0].parent()
Full tensor product of the crystals [The crystal of letters for type ['A', 2], The crystal of letters for type ['A', 2]]
"""
for x in self.cartesian_product:
yield self(*x)
# list = CombinatorialClass._CombinatorialClass__list_from_iterator
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: C = crystals.Letters(['A',2])
sage: T = crystals.TensorProduct(C,C)
sage: T.cardinality()
9
"""
return self.cartesian_product.cardinality()
@cached_method
def weight_lattice_realization(self):
r"""
Return the weight lattice realization used to express weights.
The weight lattice realization is the common parent which all
weight lattice realizations of the crystals of ``self`` coerce
into.
EXAMPLES::
sage: B = crystals.elementary.B(['A',4], 2)
sage: B.weight_lattice_realization()
Root lattice of the Root system of type ['A', 4]
sage: T = crystals.infinity.Tableaux(['A',4])
sage: T.weight_lattice_realization()
Ambient space of the Root system of type ['A', 4]
sage: TP = crystals.TensorProduct(B, T)
sage: TP.weight_lattice_realization()
Ambient space of the Root system of type ['A', 4]
"""
cm = get_coercion_model()
return cm.common_parent(*[crystal.weight_lattice_realization()
for crystal in self.crystals])
class FullTensorProductOfRegularCrystals(FullTensorProductOfCrystals):
"""
Full tensor product of regular crystals.
"""
class Element(TensorProductOfRegularCrystalsElement):
pass
class TensorProductOfRegularCrystalsWithGenerators(TensorProductOfCrystalsWithGenerators):
"""
Tensor product of regular crystals with a generating set.
"""
class Element(TensorProductOfRegularCrystalsElement):
pass
class FullTensorProductOfSuperCrystals(FullTensorProductOfCrystals):
r"""
Tensor product of super crystals.
EXAMPLES::
sage: L = crystals.Letters(['A', [1,1]])
sage: T = tensor([L,L,L])
sage: T.cardinality()
64
"""
class Element(TensorProductOfSuperCrystalsElement):
pass
class QueerSuperCrystalsMixin(object):
"""
Mixin class with methods for a finite queer supercrystal.
"""
@cached_method
def index_set(self):
"""
Return the enlarged index set.
EXAMPLES::
sage: Q = crystals.Letters(['Q',3])
sage: T = tensor([Q,Q])
sage: T.index_set()
(-4, -3, -2, -1, 1, 2)
"""
n = self.cartan_type().n
return tuple(range(-2*n, 0)) + tuple(range(1, n+1))
@cached_method
def _long_element(self):
r"""
Return the long element in `S_n`.
This method is used in the construction of the crystal operators
`e_i` and `f_i`.
EXAMPLES::
sage: Q = crystals.Letters(['Q', 4])
sage: T = tensor([Q,Q,Q,Q])
sage: T._long_element()
(3, 2, 1, 3, 2, 3)
"""
from sage.combinat.permutation import Permutations
n = self.cartan_type().n
return tuple(Permutations(n+1).long_element().reduced_word())
class FullTensorProductOfQueerSuperCrystals(FullTensorProductOfCrystals, QueerSuperCrystalsMixin):
r"""
Tensor product of queer super crystals.
"""
class Element(TensorProductOfQueerSuperCrystalsElement):
pass
#########################################################
## Crystal of tableaux
class CrystalOfTableaux(CrystalOfWords):
r"""
A class for crystals of tableaux with integer valued shapes
INPUT:
- ``cartan_type`` -- a Cartan type
- ``shape`` -- a partition of length at most ``cartan_type.rank()``
- ``shapes`` -- a list of such partitions
This constructs a classical crystal with the given Cartan type and
highest weight(s) corresponding to the given shape(s).
If the type is `D_r`, the shape is permitted to have a negative
value in the `r`-th position. Thus if the shape equals `[s_1,\ldots,s_r]`,
then `s_r` may be negative but in any case `s_1 \geq \cdots \geq s_{r-1}
\geq |s_r|`. This crystal is related to that of shape
`[s_1,\ldots,|s_r|]` by the outer automorphism of `SO(2r)`.
If the type is `D_r` or `B_r`, the shape is permitted to be of
length `r` with all parts of half integer value. This corresponds
to having one spin column at the beginning of the tableau. If
several shapes are provided, they currently should all or none
have this property.
Crystals of tableaux are constructed using an embedding into
tensor products following Kashiwara and Nakashima [KN1994]_. Sage's tensor
product rule for crystals differs from that of Kashiwara and Nakashima
by reversing the order of the tensor factors. Sage produces the same
crystals of tableaux as Kashiwara and Nakashima. With Sage's convention,
the tensor product of crystals is the same as the monoid operation on
tableaux and hence the plactic monoid.
.. SEEALSO::
:mod:`sage.combinat.crystals.crystals` for general help on
crystals, and in particular plotting and `\LaTeX` output.
EXAMPLES:
We create the crystal of tableaux for type `A_2`, with
highest weight given by the partition `[2,1,1]`::
sage: T = crystals.Tableaux(['A',3], shape = [2,1,1])
Here is the list of its elements::
sage: T.list()
[[[1, 1], [2], [3]], [[1, 2], [2], [3]], [[1, 3], [2], [3]],
[[1, 4], [2], [3]], [[1, 4], [2], [4]], [[1, 4], [3], [4]],
[[2, 4], [3], [4]], [[1, 1], [2], [4]], [[1, 2], [2], [4]],
[[1, 3], [2], [4]], [[1, 3], [3], [4]], [[2, 3], [3], [4]],
[[1, 1], [3], [4]], [[1, 2], [3], [4]], [[2, 2], [3], [4]]]
Internally, a tableau of a given Cartan type is represented as a
tensor product of letters of the same type. The order in which the
tensor factors appear is by reading the columns of the tableaux
left to right, top to bottom (in French notation). As an example::
sage: T = crystals.Tableaux(['A',2], shape = [3,2])
sage: T.module_generators[0]
[[1, 1, 1], [2, 2]]
sage: list(T.module_generators[0])
[2, 1, 2, 1, 1]
To create a tableau, one can use::
sage: Tab = crystals.Tableaux(['A',3], shape = [2,2])
sage: Tab(rows=[[1,2],[3,4]])
[[1, 2], [3, 4]]
sage: Tab(columns=[[3,1],[4,2]])
[[1, 2], [3, 4]]
.. TODO::
FIXME:
- Do we want to specify the columns increasingly or
decreasingly? That is, should this be
``Tab(columns = [[1,3],[2,4]])``?
- Make this fully consistent with
:func:`~sage.combinat.tableau.Tableau`!
We illustrate the use of a shape with a negative last entry in
type `D`::
sage: T = crystals.Tableaux(['D',4],shape=[1,1,1,-1])
sage: T.cardinality()
35
sage: TestSuite(T).run()
We illustrate the construction of crystals of spin tableaux when
the partitions have half integer values in type `B` and `D`::
sage: T = crystals.Tableaux(['B',3],shape=[3/2,1/2,1/2]); T
The crystal of tableaux of type ['B', 3] and shape(s) [[3/2, 1/2, 1/2]]
sage: T.cardinality()
48
sage: T.module_generators
([+++, [[1]]],)
sage: TestSuite(T).run()
sage: T = crystals.Tableaux(['D',3],shape=[3/2,1/2,-1/2]); T
The crystal of tableaux of type ['D', 3] and shape(s) [[3/2, 1/2, -1/2]]
sage: T.cardinality()
20
sage: T.module_generators
([++-, [[1]]],)
sage: TestSuite(T).run()
We can also construct the tableaux for `\mathfrak{gl}(m|n)` as
given by [BKK2000]_::
sage: T = crystals.Tableaux(['A', [1,2]], shape=[4,2,1,1,1])
sage: T.cardinality()
1392
We can also construct the tableaux for `\mathfrak{q}(n)` as
given by [GJK+2014]_::
sage: T = crystals.Tableaux(['Q', 3], shape=[3,1])
sage: T.cardinality()
24
TESTS:
Base cases::
sage: T = crystals.Tableaux(['A',2], shape = [])
sage: T.list()
[[]]
sage: TestSuite(T).run()
sage: T = crystals.Tableaux(['C',2], shape = [1])
sage: T.list()
[[[1]], [[2]], [[-2]], [[-1]]]
sage: TestSuite(T).run()
sage: T = crystals.Tableaux(['A',2], shapes = [[],[1],[2]])
sage: T.list()
[[], [[1]], [[2]], [[3]], [[1, 1]], [[1, 2]], [[2, 2]], [[1, 3]], [[2, 3]], [[3, 3]]]
sage: T.module_generators
([], [[1]], [[1, 1]])
sage: T = crystals.Tableaux(['B',2], shape=[3])
sage: T(rows=[[1,1,0]])
[[1, 1, 0]]
Input tests::
sage: T = crystals.Tableaux(['A',3], shape = [2,2])
sage: C = T.letters
sage: list(Tab(rows = [[1,2],[3,4]])) == [C(3),C(1),C(4),C(2)]
True
sage: list(Tab(columns = [[3,1],[4,2]])) == [C(3),C(1),C(4),C(2)]
True
For compatibility with
:func:`~sage.combinat.crystals.tensor_product.TensorProductOfCrystals` we
need to accept as input the internal list or sequence of elements::
sage: list(Tab(list = [3,1,4,2])) == [C(3),C(1),C(4),C(2)]
True
sage: list(Tab(3,1,4,2)) == [C(3),C(1),C(4),C(2)]
True
The next example checks whether a given tableau is in fact a valid
type `C` tableau or not::
sage: T = crystals.Tableaux(['C',3], shape = [2,2,2])
sage: Tab = T(rows=[[1,3],[2,-3],[3,-1]])
sage: Tab in T.list()
True
sage: Tab = T(rows=[[2,3],[3,-3],[-3,-2]])
sage: Tab in T.list()
False
Check that entries are weakly decreasing also in the spin case::
sage: crystals.Tableaux(['D',4], shape=[-1/2,1/2,1/2,-1/2])
Traceback (most recent call last):
...
ValueError: entries of each shape must be weakly decreasing
"""
@staticmethod
def __classcall_private__(cls, cartan_type, shapes = None, shape = None):
"""
Normalizes the input arguments to ensure unique representation,
and to delegate the construction of spin tableaux.
EXAMPLES::
sage: T1 = crystals.Tableaux(CartanType(['A',3]), shape = [2,2])
sage: T2 = crystals.Tableaux(['A',3], shape = (2,2))
sage: T3 = crystals.Tableaux(['A',3], shapes = ([2,2],))
sage: T2 is T1, T3 is T1
(True, True)
sage: T1 = crystals.Tableaux(['A', [1,1]], shape=[3,1,1,1])
sage: T1
Crystal of BKK tableaux of shape [3, 1, 1, 1] of gl(2|2)
sage: T2 = crystals.Tableaux(['A', [1,1]], [3,1,1,1])
sage: T1 is T2
True
"""
cartan_type = CartanType(cartan_type)
if cartan_type.letter == 'A' and isinstance(cartan_type, SuperCartanType_standard):
if shape is None:
shape = shapes
shape = _Partitions(shape)
from sage.combinat.crystals.bkk_crystals import CrystalOfBKKTableaux
return CrystalOfBKKTableaux(cartan_type, shape=shape)
if cartan_type.letter == 'Q':
if any(shape[i] == shape[i+1] for i in range(len(shape)-1)):
raise ValueError("not a strict partition")
shape = _Partitions(shape)
return CrystalOfQueerTableaux(cartan_type, shape=shape)
n = cartan_type.rank()
# standardize shape/shapes input into a tuple of tuples
# of length n, or n+1 in type A
assert operator.xor(shape is not None, shapes is not None)
if shape is not None:
shapes = (shape,)
if cartan_type.type() == "A":
n1 = n + 1
else:
n1 = n
if not all(all(i == 0 for i in shape[n1:]) for shape in shapes):
raise ValueError("shapes should all have length at most equal to the rank or the rank + 1 in type A")
spin_shapes = tuple((tuple(shape) + (0,)*(n1-len(shape)))[:n1] for shape in shapes)
try:
shapes = tuple(tuple(trunc(i) for i in shape) for shape in spin_shapes)
except Exception:
raise ValueError("shapes should all be partitions or half-integer partitions")
if spin_shapes == shapes:
shapes = tuple(_Partitions(shape) if shape[n1-1] in NN else shape for shape in shapes)
return super(CrystalOfTableaux, cls).__classcall__(cls, cartan_type, shapes)
        # Handle the construction of crystals of spin tableaux.
        # Caveat: this currently only supports shapes that are half-integer
        # partitions whose length equals the rank, for types B and D. In
        # particular, for type D, the spins have to be either all plus or
        # all minus.
if any(len(sh) != n for sh in shapes):
raise ValueError("the length of all half-integer partition shapes should be the rank")
if any(2*i % 2 != 1 for shape in spin_shapes for i in shape):
raise ValueError("shapes should be either all partitions or all half-integer partitions")
if any(any(i < j for i, j in zip(shape, shape[1:-1] + (abs(shape[-1]),))) for shape in spin_shapes):
raise ValueError("entries of each shape must be weakly decreasing")
if cartan_type.type() == 'D':
if all(i >= 0 for shape in spin_shapes for i in shape):
S = CrystalOfSpinsPlus(cartan_type)
elif all(shape[-1] < 0 for shape in spin_shapes):
S = CrystalOfSpinsMinus(cartan_type)
else:
raise ValueError("in type D spins should all be positive or negative")
else:
if any(i < 0 for shape in spin_shapes for i in shape):
raise ValueError("shapes should all be partitions")
S = CrystalOfSpins(cartan_type)
B = CrystalOfTableaux(cartan_type, shapes=shapes)
T = TensorProductOfCrystals(S, B, generators=[[S.module_generators[0],x] for x in B.module_generators])
T.rename("The crystal of tableaux of type %s and shape(s) %s"%(cartan_type, list(list(shape) for shape in spin_shapes)))
T.shapes = spin_shapes
return T
def __init__(self, cartan_type, shapes):
"""
Construct the crystal of all tableaux of the given shapes.
INPUT:
- ``cartan_type`` -- (data coercible into) a Cartan type
- ``shapes`` -- a list (or iterable) of shapes
- ``shape`` -- a shape
Shapes themselves are lists (or iterable) of integers.
EXAMPLES::
sage: T = crystals.Tableaux(['A',3], shape = [2,2])
sage: TestSuite(T).run()
"""
# super(CrystalOfTableaux, self).__init__(category = FiniteEnumeratedSets())
Parent.__init__(self, category = ClassicalCrystals())
self.letters = CrystalOfLetters(cartan_type)
self.shapes = shapes
self.module_generators = tuple(self.module_generator(la) for la in shapes)
self.rename("The crystal of tableaux of type %s and shape(s) %s"
% (cartan_type, list(list(shape) for shape in shapes)))
def cartan_type(self):
"""
        Return the Cartan type of the associated crystal.
EXAMPLES::
sage: T = crystals.Tableaux(['A',3], shape = [2,2])
sage: T.cartan_type()
['A', 3]
"""
return self.letters.cartan_type()
def module_generator(self, shape):
"""
This yields the module generator (or highest weight element) of a classical
crystal of given shape. The module generator is the unique tableau with equal
shape and content.
EXAMPLES::
sage: T = crystals.Tableaux(['D',3], shape = [1,1])
sage: T.module_generator([1,1])
[[1], [2]]
sage: T = crystals.Tableaux(['D',4],shape=[2,2,2,-2])
sage: T.module_generator(tuple([2,2,2,-2]))
[[1, 1], [2, 2], [3, 3], [-4, -4]]
sage: T.cardinality()
294
sage: T = crystals.Tableaux(['D',4],shape=[2,2,2,2])
sage: T.module_generator(tuple([2,2,2,2]))
[[1, 1], [2, 2], [3, 3], [4, 4]]
sage: T.cardinality()
294
"""
type = self.cartan_type()
if type[0] == 'D' and len(shape) == type[1] and shape[type[1]-1] < 0:
invert = True
shape = shape[:-1] + (-shape[type[1]-1],)
else:
invert = False
p = _Partitions(shape).conjugate()
# The column canonical tableau, read by columns
module_generator = flatten([[val-i for i in range(val)] for val in p])
if invert:
module_generator = [(-x if x == type[1] else x) for x in module_generator]
return self(list=[self.letters(x) for x in module_generator])
def _element_constructor_(self, *args, **options):
"""
Return a
:class:`~sage.combinat.crystals.tensor_product.CrystalOfTableauxElement`.
EXAMPLES::
sage: T = crystals.Tableaux(['A',3], shape = [2,2])
sage: T(rows=[[1,2],[3,4]])
[[1, 2], [3, 4]]
sage: T(columns=[[3,1],[4,2]])
[[1, 2], [3, 4]]
"""
return self.element_class(self, *args, **options)
class Element(CrystalOfTableauxElement):
pass
class CrystalOfQueerTableaux(CrystalOfWords, QueerSuperCrystalsMixin):
"""
A queer crystal of the semistandard decomposition tableaux of a given shape.
INPUT:
- ``cartan_type`` -- a Cartan type
- ``shape`` -- a shape
"""
def __init__(self, cartan_type, shape):
"""
Initialize ``self``.
EXAMPLES::
sage: T = crystals.Tableaux(['Q',3], shape=[4,2])
sage: TestSuite(T).run()
sage: T = crystals.Tableaux(['Q',4], shape=[4,1])
sage: TestSuite(T).run() # long time
"""
from sage.categories.regular_supercrystals import RegularSuperCrystals
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
Parent.__init__(self, category=(RegularSuperCrystals(), FiniteEnumeratedSets()))
self.shape = shape
self._cartan_type = cartan_type
self.letters = CrystalOfLetters(cartan_type)
n = cartan_type.rank() + 1
data = sum(([self.letters(n-i)] * row_len for i,row_len in enumerate(shape)), [])
mg = self.element_class(self, list=data)
self.module_generators = (mg,)
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: crystals.Tableaux(['Q',3], shape=[4,2])
The crystal of tableaux of type ['Q', 3] and shape [4, 2]
"""
return "The crystal of tableaux of type {} and shape {}".format(self._cartan_type, self.shape)
class Element(TensorProductOfQueerSuperCrystalsElement):
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['Q',3], shape=[3,2,1])
sage: B.an_element()
[[3, 3, 3], [2, 2], [1]]
"""
return repr([list(reversed(row)) for row in self.rows()])
def _ascii_art_(self):
r"""
Return an ASCII art representation of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['Q',3], shape=[3,2,1])
sage: t = B.an_element()
sage: t._ascii_art_()
3 3 3
2 2
1
"""
from sage.typeset.ascii_art import AsciiArt
ret = [" "*(3*i) + "".join("%3s" % str(x) for x in reversed(row))
for i, row in enumerate(self.rows())]
return AsciiArt(ret)
def _latex_(self):
r"""
Return latex code for ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['Q',3], shape=[3,2,1])
sage: t = B.an_element()
sage: latex(t)
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\cline{1-3}
\lr{3}&\lr{3}&\lr{3}\\\cline{1-3}
&\lr{2}&\lr{2}\\\cline{2-3}
&&\lr{1}\\\cline{3-3}
\end{array}$}
}
"""
from sage.combinat.output import tex_from_array
return tex_from_array([[None]*i + list(reversed(row))
for i, row in enumerate(self.rows())])
def rows(self):
"""
Return the list of rows of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['Q',3], shape=[3,2,1])
sage: t = B.an_element()
sage: t.rows()
[[3, 3, 3], [2, 2], [1]]
"""
ret = []
pos = 0
for l in self.parent().shape:
ret.append(self[pos:pos+l])
pos += l
return ret
|
{"hexsha": "7cacfeb335bf5999c026802106511ca8e50523f8", "size": 43529, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/combinat/crystals/tensor_product.py", "max_stars_repo_name": "haiyashah/sage", "max_stars_repo_head_hexsha": "55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sage/combinat/crystals/tensor_product.py", "max_issues_repo_name": "haiyashah/sage", "max_issues_repo_head_hexsha": "55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/combinat/crystals/tensor_product.py", "max_forks_repo_name": "haiyashah/sage", "max_forks_repo_head_hexsha": "55a711e3d6251f2ff4f3bcccc4c6a8b7a2a8d1b2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8184187663, "max_line_length": 132, "alphanum_fraction": 0.5756622022, "include": true, "reason": "from sage", "num_tokens": 11707}
|
(* Useful properties of our Simple.v specification *)
Require Import Simple.
(* Dominates is transitive *)
Theorem dom_trans {D : Domain} :
forall {s1 s2 s3},
Dominates s1 s2 -> Dominates s2 s3 -> Dominates s1 s3.
(* Break apart our Dominates arguments *)
intros. destruct H. destruct H0. refine (conj _ _).
(* Compose their parts *)
intuition. intuition.
Qed.
(* NoWorse is reflexive *)
Theorem no_worse_refl {D : Domain} :
forall {s}, NoWorse s s.
intro. exact (or_intror eq_refl).
Qed.
(* NoWorse is transitive *)
Theorem no_worse_trans {D : Domain} :
forall {s1 s2 s3},
NoWorse s1 s2 -> NoWorse s2 s3 -> NoWorse s1 s3.
(* Destruct our NoWorse arguments and solve each case *)
intros. destruct H. destruct H0.
(* Dominates s1 s2 /\ Dominates s2 s3 *)
exact (or_introl (dom_trans H H0)).
(* Dominates s1 s2 /\ s2 = s3 *)
rewrite <- H0. exact (or_introl H).
(* s1 = s2 /\ NoWorse s2 s3 *)
rewrite H. exact H0.
Qed.
(* The next Solver in a NoWorseStream is NoWorse than the previous *)
Fixpoint nws_no_worse {D : Domain} {s}
n (nws : NoWorseStream s)
: NoWorse (get_solver (S n) nws)
(get_solver n nws)
:= match n, nws with
| 0, nwsCons _ _ p _ => p
| S n', nwsCons _ _ _ nws' => nws_no_worse n' nws'
end.
(* All Solvers in a NoWorseStream s are NoWorse than s *)
Theorem get_solver_no_worse {D : Domain} :
forall s n (nws : NoWorseStream s),
NoWorse (get_solver n nws)
s.
intros. induction n. destruct nws. simpl.
exact (no_worse_refl).
assert (no_worse_next := nws_no_worse n nws).
apply (no_worse_trans no_worse_next IHn).
Qed.
|
{"author": "Warbo", "repo": "powerplay", "sha": "8792220032f8a277b775d52e46225ab58ddb6928", "save_path": "github-repos/coq/Warbo-powerplay", "path": "github-repos/coq/Warbo-powerplay/powerplay-8792220032f8a277b775d52e46225ab58ddb6928/SimpleTests.v"}
|
using AxisArrays
using AxisKeys
using CSV
using Combinatorics
using DataFrames
using Dates
using Distances
using Documenter
using HypothesisTests
using LinearAlgebra
using Random
using Statistics
using StatsBase
using TableOperations
using Tables
using Test
using Impute
using Impute:
Impute,
Imputor,
Chain,
DropObs,
DropVars,
Interpolate,
Fill,
KNN,
LOCF,
NOCB,
Replace,
SRS,
DeclareMissings,
Substitute,
WeightedSubstitute,
SVD,
Filter,
Threshold,
WeightedThreshold,
ThresholdError,
apply,
apply!,
impute,
impute!,
interp,
run,
threshold,
wthreshold,
validate
@testset "Impute" begin
include("testutils.jl")
include("validators.jl")
include("declaremissings.jl")
include("chain.jl")
include("data.jl")
include("deprecated.jl")
include("filter.jl")
include("imputors/interp.jl")
include("imputors/knn.jl")
include("imputors/locf.jl")
include("imputors/nocb.jl")
include("imputors/replace.jl")
include("imputors/srs.jl")
include("imputors/substitute.jl")
include("imputors/svd.jl")
include("utils.jl")
# Start running doctests before we wrap up technical changes and work
# on more documentation
# The doctests fail on x86, so only run them on 64-bit hardware & Julia 1.6
Sys.WORD_SIZE == 64 && v"1.6" <= VERSION < v"1.7" && doctest(Impute)
end
|
{"hexsha": "536ae24551569c03b2c1f3b90d31859a2a875cd2", "size": 1447, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "pitmonticone/Impute.jl", "max_stars_repo_head_hexsha": "bd2e1f2c62a7b9d29cf25cb0bd2d5290cc569d07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2017-05-18T22:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T17:09:14.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "pitmonticone/Impute.jl", "max_issues_repo_head_hexsha": "bd2e1f2c62a7b9d29cf25cb0bd2d5290cc569d07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 103, "max_issues_repo_issues_event_min_datetime": "2017-04-11T23:08:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T00:04:56.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "pitmonticone/Impute.jl", "max_forks_repo_head_hexsha": "bd2e1f2c62a7b9d29cf25cb0bd2d5290cc569d07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-12-21T17:12:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-12T07:20:56.000Z", "avg_line_length": 19.2933333333, "max_line_length": 79, "alphanum_fraction": 0.6710435384, "num_tokens": 419}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 23 08:16:00 2015
@author: marc
"""
import numpy as np
import matplotlib.pyplot as plt
from dgp import DGP
from dgp import BCM
from dgp import GP
from dgp import rBCM
from dgp import gPoE
from dgp.utils import tictoc
N = 1000 # no of training inputs
d = 1 # no of input dimensions
np.random.seed(1)
# training data
X = np.random.uniform(-4,4,(N,d))
y = np.sin(X.sum(axis=1)) + np.random.normal(0,0.1,N)
# test data
Xp = np.linspace(-8,8,200).reshape(-1,1)
branching_factor = 12
depth = 1
numExperts = branching_factor**depth
# repX stands for X-fold repetition of data points. This implements shared data
# points across experts. "X" is the number of times a data point is "replicated"
profile=[(branching_factor,'simple','rep1')]*depth
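# For example, profile=[(4,'simple','rep2')]*2 (illustrative values only, not used
# below) would describe a depth-2 tree of 4-way splits in which every data point is
# shared by ('replicated' across) 2 experts; this script uses a single level with rep1.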
dgp0 = rBCM.rBCM(X,y,profile=profile, pool='default')
print('training DGP...')
tictoc.tic()
dgp0.train()
tictoc.toc()
print('NLML after training', dgp0.NLML())
# prediction means and variances
print('Predicting...')
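# The four prediction schemes below differ only in the BCM prior correction flag
# (dgp0.correction) and in the expert weights (dgp0.beta): rBCM deletes any fixed
# beta so the model sets its own weights, gPoE fixes beta = 1/numExperts, and both
# PoE and BCM fix beta = 1.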
# rBCM
dgp0.correction = True
if hasattr(dgp0, 'beta'):
del dgp0.beta
meanPred_rbcm, varPred_rbcm= dgp0.predict(Xp,latent_variance=True)
varPred_rbcm += np.exp(dgp0.params[-1])
# gPoE
dgp0.correction = False # no BCM-type correction (prior GP)
dgp0.beta = 1./numExperts # fix beta values
meanPred_gpoe, varPred_gpoe= dgp0.predict(Xp,latent_variance=True)
varPred_gpoe += np.exp(dgp0.params[-1])
# PoE
dgp0.correction = False # no BCM-type correction (prior GP)
dgp0.beta = 1.0 # fix beta values
meanPred_poe, varPred_poe= dgp0.predict(Xp,latent_variance=True)
varPred_poe += np.exp(dgp0.params[-1])
# BCM
dgp0.correction = True
dgp0.beta = 1.0 # fix beta values
meanPred_bcm, varPred_bcm= dgp0.predict(Xp,latent_variance=True)
varPred_bcm += np.exp(dgp0.params[-1])
if d == 1:
# rBCM
plt.scatter(X,y)
plt.plot(Xp,meanPred_rbcm,color='r')
plt.plot(Xp,meanPred_rbcm+2.0*np.sqrt(varPred_rbcm),color='r')
plt.plot(Xp,meanPred_rbcm-2.0*np.sqrt(varPred_rbcm),color='r')
plt.show()
# PoE
plt.scatter(X,y)
plt.plot(Xp,meanPred_poe,color='b')
plt.plot(Xp,meanPred_poe+2.0*np.sqrt(varPred_poe),color='b')
plt.plot(Xp,meanPred_poe-2.0*np.sqrt(varPred_poe),color='b')
plt.show()
# gPoE
plt.scatter(X,y)
plt.plot(Xp,meanPred_gpoe,color='g')
plt.plot(Xp,meanPred_gpoe+2.0*np.sqrt(varPred_gpoe),color='g')
plt.plot(Xp,meanPred_gpoe-2.0*np.sqrt(varPred_gpoe),color='g')
plt.show()
# BCM
plt.scatter(X,y)
plt.plot(Xp,meanPred_bcm,color='m')
plt.plot(Xp,meanPred_bcm+2.0*np.sqrt(varPred_bcm),color='m')
plt.plot(Xp,meanPred_bcm-2.0*np.sqrt(varPred_bcm),color='m')
plt.show()
|
{"hexsha": "4f0211ca853f99215663d99cae741c433df6a1d7", "size": 2715, "ext": "py", "lang": "Python", "max_stars_repo_path": "dgp/dgp/tests/test_rBCM.py", "max_stars_repo_name": "nick-terry/Splitting-GP", "max_stars_repo_head_hexsha": "efd886f6442f096833460cf8cd28ff3e18da732a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-24T09:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T09:10:03.000Z", "max_issues_repo_path": "dgp/dgp/tests/test_rBCM.py", "max_issues_repo_name": "nick-terry/Splitting-GP", "max_issues_repo_head_hexsha": "efd886f6442f096833460cf8cd28ff3e18da732a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dgp/dgp/tests/test_rBCM.py", "max_forks_repo_name": "nick-terry/Splitting-GP", "max_forks_repo_head_hexsha": "efd886f6442f096833460cf8cd28ff3e18da732a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3738317757, "max_line_length": 81, "alphanum_fraction": 0.6990791897, "include": true, "reason": "import numpy", "num_tokens": 912}
|
# -*- coding: utf-8 -*-
"""
All K-Nearest Neighbors
"""
# Author: Dayvid Victor <victor.dvro@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.validation import check_X_y
from ..base import InstanceReductionMixin
from protopy.selection.enn import ENN
class AllKNN(InstanceReductionMixin):
"""All K-Nearest Neighbors.
    The All KNN removes the instances on the boundaries while keeping
    redundant samples, which creates a much smoother decision region.
    It is similar to the Repeated Edited Nearest Neighbors, but it takes
    a different approach.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Upper limit on the number of neighbors to use for :meth:`k_neighbors`
        queries; the editing passes run k from 1 up to this value.
Attributes
----------
`X_` : array-like, shape = [indeterminated, n_features]
Selected prototypes.
`y_` : array-like, shape = [indeterminated]
Labels of the selected prototypes.
`reduction_` : float, percentual of reduction.
Examples
--------
>>> from protopy.selection.allknn import AllKNN
>>> import numpy as np
>>> X = np.array([[-1, 0], [-0.8, 1], [-0.8, -1], [-0.5, 0] , [0.5, 0], [1, 0], [0.8, 1], [0.8, -1]])
>>> y = np.array([1, 1, 1, 2, 1, 2, 2, 2])
>>> all_kneigh = AllKNN()
>>> all_kneigh.fit(X, y)
AllKNN(n_neighbors=3)
>>> print(all_kneigh.predict([[-0.6, 0.6]]))
[1]
    >>> print(all_kneigh.reduction_)
0.625
See also
--------
protopy.selection.enn.ENN: edited nearest neighbor
protopy.selection.renn.RENN: repeated edited nearest neighbor
References
----------
I. Tomek. An experiment with the edited nearest-neighbor rule.
IEEE Transactions on Systems, Man, and Cybernetics, 6(6):448–452, 1976.
"""
def __init__(self, n_neighbors= 5):
self.n_neighbors = n_neighbors
self.classifier = None
def reduce_data(self, X, y):
X, y = check_X_y(X, y, accept_sparse="csr")
classes = np.unique(y)
self.classes_ = classes
edited_nn = ENN(n_neighbors = 1)
p_, l_, r_ = X, y, 1.0
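        # Repeatedly edit the current prototype set with ENN, increasing the
        # neighbourhood size k from 1 up to n_neighbors; each pass is only
        # applied while more than k + 1 samples remain.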
for k in range(1, self.n_neighbors + 1):
if l_.shape[0] > k + 1:
edited_nn.n_neighbors = k
edited_nn.fit(p_, l_)
p_ = edited_nn.X_
l_ = edited_nn.y_
r_ = edited_nn.reduction_
self.X_ = p_
self.y_ = l_
self.reduction_ = 1.0 - float(l_.shape[0]) / y.shape[0]
return self.X_, self.y_
|
{"hexsha": "83aaf58677957706e3e3dc8d0986053d0edf79bd", "size": 2560, "ext": "py", "lang": "Python", "max_stars_repo_path": "protopy/selection/allknn.py", "max_stars_repo_name": "mjasher/scikit-protopy", "max_stars_repo_head_hexsha": "f4deddc42c5883b527d7bb1bfc6d0ece7d01979d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2015-01-27T12:30:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T22:11:31.000Z", "max_issues_repo_path": "protopy/selection/allknn.py", "max_issues_repo_name": "mjasher/scikit-protopy", "max_issues_repo_head_hexsha": "f4deddc42c5883b527d7bb1bfc6d0ece7d01979d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "protopy/selection/allknn.py", "max_forks_repo_name": "mjasher/scikit-protopy", "max_forks_repo_head_hexsha": "f4deddc42c5883b527d7bb1bfc6d0ece7d01979d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-07-07T18:13:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-22T16:42:31.000Z", "avg_line_length": 26.9473684211, "max_line_length": 105, "alphanum_fraction": 0.59140625, "include": true, "reason": "import numpy", "num_tokens": 742}
|
Require Import Lia.
Require Export smpl.Smpl.
Require Import Undecidability.Shared.Libs.PSL.FiniteTypes.BasicDefinitions.
From Complexity.Libs Require Export PSLCompat.
From Complexity.Libs.CookPrelim Require Import MorePrelim.
(** * Representation of finite types by natural numbers *)
(** This is needed as working with the direct extraction of finite types to L is not pleasant *)
(** A finite type is represented by the number of its elements *)
Definition finRepr (X : finType) (n : nat) := n = |elem X|.
(** We define what it means for a number to be of a flat type *)
Definition ofFlatType (k : nat) (e : nat) := e < k.
(** We just enumerate the elements starting at 0 *)
Definition finReprEl (X : finType) (n : nat) k (x : X) := finRepr X n /\ index x = k.
(** A weaker version that does not explicitly enforce x to have a flat type *)
Definition finReprEl' (X : finType) (k : nat) (x : X) := index x = k.
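(* Sanity check: an element of a flat type with 3 elements is any index below 3. *)
Example ofFlatType_example : ofFlatType 3 2.
Proof. unfold ofFlatType. lia. Qed.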
Lemma finReprEl_finReprEl' (X : finType) (n k : nat) (x : X) : finReprEl n k x -> finReprEl' k x.
Proof. unfold finReprEl, finReprEl'. easy. Qed.
Lemma finReprEl_ofFlatType (X : finType) (n k : nat) (x : X) : finReprEl n k x -> ofFlatType n k.
Proof.
intros [H1 H2].
unfold finRepr, ofFlatType in *.
rewrite H1, <- H2. apply index_le.
Qed.
(** For some of the proofs below, the stronger version of finReprEl is much more pleasant than the weaker version finReprEl' (e.g. for sum types)*)
(** flat type constructors *)
Definition flatOption (n : nat) := S n.
Definition flatProd (a b : nat) := a * b.
Definition flatSum (a b : nat) := a + b.
(** flat value constructors *)
Definition flatNone := 0.
Definition flatSome k := S k.
Definition flatInl (k : nat) := k.
Definition flatInr (a: nat) k := a + k.
Definition flatPair (a b : nat) x y := x * b + y.
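(* Sanity check: in a product of a 2-element type and a 3-element type, the pair
   of component indices (1, 2) is flattened row-major to 1 * 3 + 2 = 5. *)
Example flatPair_example : flatPair 2 3 1 2 = 5.
Proof. reflexivity. Qed.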
Smpl Create finRepr.
Ltac finRepr_simpl := smpl finRepr; repeat smpl finRepr.
Smpl Add (match goal with |- finReprEl' _ _ => eapply finReprEl_finReprEl' end) : finRepr.
Lemma finReprOption (X : finType) (n : nat) : finRepr X n -> finRepr (finType_CS (option X)) (flatOption n).
Proof.
intros. unfold finRepr in *. unfold flatOption; cbn -[enum]. rewrite H; cbn.
rewrite map_length. reflexivity.
Qed.
Smpl Add (apply finReprOption) : finRepr.
Lemma finReprElSome (X : finType) n k x : finReprEl n k x -> @finReprEl (finType_CS (option X)) (flatOption n) (flatSome k) (Some x).
Proof.
intros (H1 & H2). split;cbn in *.
- now apply finReprOption.
- rewrite getPosition_map. 2: unfold injective; congruence. now rewrite <- H2.
Qed.
Lemma finReprElNone (X : finType) n : finRepr X n -> @finReprEl (finType_CS (option X)) (flatOption n) flatNone None.
Proof.
intros. split; cbn.
- now apply finReprOption.
- now unfold flatNone.
Qed.
Ltac finReprOption :=
lazymatch goal with
| |- finReprEl _ _ (Some _) => apply finReprElSome
| |- finReprEl _ _ None => apply finReprElNone
| |- finRepr (finType_CS (option _)) _ => apply finReprOption
end.
Smpl Add 99 finReprOption : finRepr.
Lemma finReprSum (A B: finType) (a b : nat) : finRepr A a -> finRepr B b -> finRepr (finType_CS (sum A B)) (flatSum a b).
Proof.
intros. unfold finRepr in *. unfold flatSum; cbn in *.
rewrite app_length. rewrite H, H0.
unfold toSumList1, toSumList2. now rewrite !map_length.
Qed.
Smpl Add (apply finReprSum) : finRepr.
Lemma finReprElInl (A B : finType) (a b : nat) k x : finRepr B b -> finReprEl a k x -> @finReprEl (finType_CS (sum A B)) (flatSum a b) (flatInl k) (inl x).
Proof.
intros H0 (H1 & H2). split.
- now apply finReprSum.
- unfold finRepr in H1.
clear H0 H1. cbn. unfold toSumList1, toSumList2, flatInl.
rewrite getPosition_app1 with (k := k).
+ reflexivity.
+ rewrite map_length, <- H2. apply index_le.
+ unfold index in H2. rewrite <- getPosition_map with (f := (@inl A B)) in H2. 2: now unfold injective.
easy.
Qed.
Lemma finReprElInr (A B : finType) (a b : nat) k x : finRepr A a -> finReprEl b k x -> @finReprEl (finType_CS (sum A B)) (flatSum a b) (flatInr a k) (inr x).
Proof.
intros H0 (H1 & H2). split.
- now apply finReprSum.
- clear H1. cbn. unfold toSumList1, toSumList2, flatInr.
rewrite getPosition_app2 with (k := k).
+ rewrite map_length. unfold finRepr in H0. now cbn.
+ rewrite map_length, <- H2. apply index_le.
+ intros H1. apply in_map_iff in H1. destruct H1 as (? & ? &?); congruence.
+ unfold index in H2. rewrite <- getPosition_map with (f := (@inr A B)) in H2. 2: now unfold injective.
easy.
Qed.
Ltac finReprSum :=
lazymatch goal with
| |- finReprEl _ _ (inl _) => apply finReprElInl
| |- finReprEl _ _ (inr _) => apply finReprElInr
| |- finRepr (finType_CS (sum _ _)) _ => apply finReprSum
end.
Smpl Add 99 finReprSum : finRepr.
Lemma finReprProd (A B : finType) (a b : nat) : finRepr A a -> finRepr B b -> finRepr (finType_CS (prod A B)) (flatProd a b).
Proof.
intros. unfold finRepr in *. unfold flatProd.
cbn. now rewrite prod_length.
Qed.
Smpl Add (apply finReprProd) : finRepr.
Lemma finReprElPair (A B : finType) (a b : nat) k1 k2 x1 x2 : finReprEl a k1 x1 -> finReprEl b k2 x2 -> @finReprEl (finType_CS (prod A B)) (flatProd a b) (flatPair a b k1 k2) (pair x1 x2).
Proof.
intros (H1 & H2) (F1 & F2). split.
- now apply finReprProd.
- cbn. unfold flatPair. unfold finRepr in *.
rewrite getPosition_prodLists with (k1 := k1) (k2 := k2); eauto.
+ rewrite <- H2; apply index_le.
+ rewrite <- F2; apply index_le.
Qed.
Ltac finReprProd :=
lazymatch goal with
| |- finReprEl _ _ (pair _ _) => apply finReprElPair
| |- finRepr (finType_CS (prod _ _)) _ => apply finReprProd
end.
Smpl Add 99 finReprProd : finRepr.
(** flattened lists *)
Definition isFlatListOf (X : finType) (l : list nat) (l' : list X) := l = map index l'.
Lemma isFlatListOf_cons (X : finType) (A : X) a l L: isFlatListOf (a :: l) (A :: L) <-> finReprEl' a A /\ isFlatListOf l L.
Proof.
unfold isFlatListOf in *. cbn. split; intros.
- inv H. easy.
- destruct H as (-> & ->). easy.
Qed.
Lemma isFlatListOf_app (X : finType) (L1 L2 : list X) l1 l2 : isFlatListOf l1 L1 -> isFlatListOf l2 L2 -> isFlatListOf (l1 ++ l2) (L1 ++ L2).
Proof.
revert L1. induction l1; intros.
- unfold isFlatListOf in H; destruct L1; [easy | cbn in *; congruence ].
- destruct L1; [ unfold isFlatListOf in H; cbn in H; congruence | ].
apply isFlatListOf_cons in H as (H1 & H2). cbn.
apply isFlatListOf_cons; split; [ apply H1 | apply IHl1; easy].
Qed.
Lemma isFlatListOf_functional (X: finType) (L1 L2 : list X) (l : list nat) :
isFlatListOf l L1 -> isFlatListOf l L2 -> L1 = L2.
Proof.
unfold isFlatListOf. intros. rewrite H0 in H. apply map_injective in H; [easy | ].
intros a b H2. now apply injective_index, H2.
Qed.
Lemma isFlatListOf_injective (X : finType) (L : list X) (l1 l2 : list nat) :
isFlatListOf l1 L -> isFlatListOf l2 L -> l1 = l2.
Proof.
unfold isFlatListOf. intros. easy.
Qed.
Lemma isFlatListOf_Some1 (T : finType) (T' : nat) (a : list nat) (b : list T) (n : nat) (x : nat):
finRepr T T' -> isFlatListOf a b -> nth_error a n = Some x -> exists x', nth_error b n = Some x' /\ finReprEl T' x x'.
Proof.
intros. rewrite H0 in H1. rewrite nth_error_map in H1.
destruct (nth_error b n); cbn in H1; [ | congruence ].
inv H1. exists e.
split; [reflexivity | repeat split]. apply H.
Qed.
Lemma isFlatListOf_incl1 (X : finType) (fin : list X) flat l:
isFlatListOf flat fin -> l <<= flat -> exists l', isFlatListOf (X := X) l l' /\ l' <<= fin.
Proof.
intros. revert fin H. induction l; cbn in *; intros.
- exists []; split; eauto. unfold isFlatListOf. now cbn.
- apply incl_cons_inv in H0 as (H0 & H1).
apply IHl with (fin := fin) in H1 as (l' & H2 & H3).
2: apply H.
rewrite H in H0. apply in_map_iff in H0 as (a' & H4 & H5).
exists (a' :: l'). split.
+ unfold isFlatListOf. cbn. now rewrite <- H4, H2.
+ cbn. intros ? [-> | H6]; eauto.
Qed.
Lemma isFlatListOf_incl2 (X : finType) (fin : list X) flat l':
isFlatListOf flat fin -> l' <<= fin -> exists l, isFlatListOf (X := X) l l' /\ l <<= flat.
Proof.
intros.
exists (map index l'). split.
- reflexivity.
- induction l'; cbn.
+ eauto.
+ apply incl_cons_inv in H0 as (H0 & H1).
apply IHl' in H1. intros ? [<- | H2].
* rewrite H. apply in_map_iff; eauto.
* now apply H1.
Qed.
Lemma seq_isFlatListOf (X : finType) : isFlatListOf (seq 0 (|elem X|)) (elem X).
Proof.
unfold isFlatListOf. unfold index. rewrite dupfree_map_getPosition.
2: apply dupfree_elements.
now change (fun x => getPosition (elem X) x) with (getPosition (elem X)).
Qed.
Lemma repEl_isFlatListOf (X : finType) a (A : X) n : finReprEl' a A -> isFlatListOf (repeat a n) (repeat A n).
Proof.
induction n; cbn; intros; [ easy | now apply isFlatListOf_cons].
Qed.
(** lists that only contain elements which belong to the flat representation of a finite type *)
Definition list_ofFlatType (k : nat) (l : list nat) := forall a, a el l -> ofFlatType k a.
Lemma isFlatListOf_list_ofFlatType (X : finType) (L : list X) l : isFlatListOf l L -> list_ofFlatType (|elem X|) l.
Proof.
intros. unfold list_ofFlatType. rewrite H. intros a (a' & <- & H1)%in_map_iff.
unfold ofFlatType. apply index_le.
Qed.
Lemma list_ofFlatType_app (l1 l2 : list nat) (k : nat) : list_ofFlatType k (l1 ++ l2) <-> list_ofFlatType k l1 /\ list_ofFlatType k l2.
Proof.
split; intros; unfold list_ofFlatType in *.
- setoid_rewrite in_app_iff in H. split; intros; apply H; eauto.
- destruct H as (H1 & H2); setoid_rewrite in_app_iff; intros a [ | ]; eauto.
Qed.
Lemma list_ofFlatType_cons x y k : list_ofFlatType k (x :: y) <-> ofFlatType k x /\ list_ofFlatType k y.
Proof.
split; unfold list_ofFlatType; intros.
- split; [ apply H; eauto | intros; apply H; eauto].
- destruct H0 as [-> | H0].
+ apply (proj1 H).
+ apply (proj2 H), H0.
Qed.
Definition list_finReprEl' (f : finType) (l : list nat) (L : list f ) :=
(forall v, v el l -> exists v', v' el L /\ v = index v') /\ (forall v, v el L -> index v el l).
Lemma isFlatListOf_list_finReprEl' (f : finType) (l : list nat) (L : list f): isFlatListOf l L -> list_finReprEl' l L.
Proof.
unfold isFlatListOf, list_finReprEl'.
intros Hmap. split.
- intros v Hel. rewrite Hmap in Hel. apply in_map_iff in Hel as (v' & <- & Hel). eauto.
- intros v Hel. rewrite Hmap. apply in_map_iff. eauto.
Qed.
(** Given a representation of a finite type by natural numbers, we can restore the original elements *)
Lemma finRepr_exists (X : finType) (x : nat) (a : nat) :
finRepr X x -> ofFlatType x a -> sigT (fun (a' : X) => finReprEl x a a').
Proof.
intros. unfold ofFlatType in H0.
assert (sigT (fun a' =>nth_error (elem X) a = Some a')) as (a' & H2).
{
rewrite H in H0. apply nth_error_Some in H0. now destruct nth_error.
}
exists a'. split; [ easy | ].
unfold index. specialize (nth_error_nth H2) as <-.
apply getPosition_nth.
+ apply dupfree_elements.
+ eapply nth_error_Some_length, H2.
Qed.
Lemma finReprElP_exists (X : finType) n : ofFlatType (| elem X |) n -> { e:X | finReprEl' n e}.
Proof.
intros. unfold ofFlatType in H. apply nth_error_Some in H. destruct (nth_error (elem X) n) eqn:H1; [ | congruence ].
exists e. unfold finReprEl'. clear H.
specialize (nth_error_nth H1) as <-. apply getPosition_nth.
+ apply dupfree_elements.
+ eapply nth_error_Some_length, H1.
Defined. (* because sigma? *)
Lemma finRepr_exists_list (X : finType) (x : nat) (l : list nat) :
finRepr X x -> list_ofFlatType x l -> sigT (fun (L : list X) => isFlatListOf l L).
Proof.
revert x. induction l; intros.
- exists []. unfold isFlatListOf. now cbn.
- apply list_ofFlatType_cons in H0 as (H0 & (L & H1)%IHl). 2: apply H.
specialize (finRepr_exists H H0) as (a' & (_ & H2)).
exists (a' :: L). unfold isFlatListOf.
now rewrite H1, <- H2.
Defined. (* because sigma? *)
(** deciders for isValidFlattening*)
Definition ofFlatType_dec (b a : nat) := leb (S a) b.
Definition list_ofFlatType_dec (t : nat) (s : list nat) := forallb (ofFlatType_dec t) s.
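(* Sanity check: every entry of 0 :: 1 :: 2 :: nil lies below 3. *)
Example list_ofFlatType_dec_example : list_ofFlatType_dec 3 (0 :: 1 :: 2 :: nil) = true.
Proof. reflexivity. Qed.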
Lemma leb_iff a b : leb a b = true <-> a <= b.
Proof.
split; intros.
- now apply leb_complete.
- now apply leb_correct.
Qed.
Lemma list_ofFlatType_dec_correct s t : list_ofFlatType_dec t s = true <-> list_ofFlatType t s.
Proof.
unfold list_ofFlatType_dec, list_ofFlatType. rewrite forallb_forall.
unfold ofFlatType_dec. setoid_rewrite leb_iff.
split; intros H; intros; now apply H.
Qed.
(** unflattening to Fin.t *)
Lemma unflattenString (f : list nat) k : list_ofFlatType k f -> {f' : list (finType_CS (Fin.t k)) & isFlatListOf f f'}.
Proof.
intros H.
eapply finRepr_exists_list with (X := finType_CS (Fin.t k)) in H as (a' & H).
2: { unfold finRepr. symmetry. apply Fin_cardinality. }
eauto.
Qed.
(** extraction *)
From Undecidability.L.Tactics Require Import LTactics GenEncode.
From Undecidability.L.Datatypes Require Import LProd LOptions LBool LSum.
From Complexity.Libs.CookPrelim Require Import PolyBounds.
From Undecidability.L.Functions Require Import EqBool.
Require Import Nat.
#[export]
Instance term_id (X : Type) `{encodable X}: computableTime' (@id X) (fun a _ => (1, tt)).
Proof.
extract. solverec.
Qed.
Definition c__flatPair := c__add1 + 2 + c__mult1.
Definition flatPair_time x b := mult_time x b + add_time (x * b) + c__flatPair.
#[export]
Instance term_flatPair : computableTime' flatPair (fun a _ => (1, fun b _ => (1, fun x _ => (1, fun y _ => (flatPair_time x b, tt))))).
Proof.
extract. solverec. unfold flatPair_time, c__flatPair; solverec.
Qed.
(*ofFlatTypeDec *)
Definition c__ofFlatTypeDec := c__leb2 + 2.
Definition ofFlatType_dec_time (sig e : nat) := leb_time (1 + e) sig + c__ofFlatTypeDec.
#[export]
Instance term_ofFlatType_dec : computableTime' ofFlatType_dec (fun sig _ => (1, fun e _ => (ofFlatType_dec_time sig e, tt))).
Proof.
extract. solverec. unfold ofFlatType_dec_time, c__ofFlatTypeDec. solverec.
Qed.
Definition c__ofFlatTypeDecBound := c__ofFlatTypeDec + c__leb.
Definition poly__ofFlatTypeDec n := (n +1) * c__ofFlatTypeDecBound.
Lemma ofFlatType_dec_time_bound sig e: ofFlatType_dec_time sig e <= poly__ofFlatTypeDec (size (enc sig)).
Proof.
unfold ofFlatType_dec_time. rewrite leb_time_bound_r. unfold poly__ofFlatTypeDec, c__ofFlatTypeDecBound; nia.
Qed.
Lemma ofFlatType_dec_poly : monotonic poly__ofFlatTypeDec /\ inOPoly poly__ofFlatTypeDec.
Proof.
split; unfold poly__ofFlatTypeDec; smpl_inO.
Qed.
(*list_ofFlatType_dec *)
Definition c__listOfFlatTypeDec := 3.
Definition list_ofFlatType_dec_time (sig : nat) (l : list nat) := forallb_time (fun x1 => ofFlatType_dec_time sig x1) l + c__listOfFlatTypeDec.
#[export]
Instance term_list_ofFlatType_dec : computableTime' list_ofFlatType_dec (fun sig _ => (1, fun l _ => (list_ofFlatType_dec_time sig l, tt))).
Proof.
extract. solverec. unfold list_ofFlatType_dec_time, c__listOfFlatTypeDec. solverec.
Qed.
Definition c__listOfFlatTypeDecBound := c__forallb + c__listOfFlatTypeDec.
Definition poly__listOfFlatTypeDec n := ((n+1) * (poly__ofFlatTypeDec n + c__listOfFlatTypeDecBound)).
Lemma list_ofFlatType_dec_time_bound t l : list_ofFlatType_dec_time t l <= poly__listOfFlatTypeDec (size (enc t) + size (enc l)).
Proof.
unfold list_ofFlatType_dec_time.
erewrite forallb_time_bound_env.
2: {
split; [ intros | ].
- rewrite (ofFlatType_dec_time_bound y a). poly_mono ofFlatType_dec_poly.
2: apply Nat.le_add_l with (n := size(enc y)). reflexivity.
- apply ofFlatType_dec_poly.
}
rewrite list_size_length.
replace_le (size(enc l)) with (size (enc t) + size (enc l)) by lia at 1.
setoid_rewrite Nat.add_comm at 5.
unfold poly__listOfFlatTypeDec, c__listOfFlatTypeDecBound. nia.
Qed.
Lemma list_ofFlatType_dec_poly : monotonic poly__listOfFlatTypeDec /\ inOPoly poly__listOfFlatTypeDec.
Proof.
split; unfold poly__listOfFlatTypeDec; smpl_inO; apply ofFlatType_dec_poly.
Qed.
|
{"author": "uds-psl", "repo": "coq-library-complexity", "sha": "5a996877f16fd6fe16dc5f0c3b933486957869df", "save_path": "github-repos/coq/uds-psl-coq-library-complexity", "path": "github-repos/coq/uds-psl-coq-library-complexity/coq-library-complexity-5a996877f16fd6fe16dc5f0c3b933486957869df/theories/Libs/CookPrelim/FlatFinTypes.v"}
|
import random
import torch
import numpy as np
import scipy.io as sio
from lwrl.memories import Memory
class SequentialMemory(Memory):
def __init__(self, max_length, history_length=1):
super().__init__()
self.max_length = max_length
self.obs_buffer = None
self.history_length = history_length
self.current = 0
self.count = 0
self.prestates = None
def add(self, obs, action, reward, done):
if len(obs.shape) > 1:
obs = np.transpose(obs, (2, 0, 1))
if self.obs_buffer is None:
# infer action type
try:
action_type = action.dtype
if action_type is torch.float32:
action_type = np.float32
except AttributeError:
action_type = type(action)
self.action_buffer = np.empty(self.max_length, dtype=action_type)
self.reward_buffer = np.empty(self.max_length, dtype=np.float32)
self.obs_buffer = np.empty(
(self.max_length, *obs.shape), dtype=obs.dtype)
self.terminal_buffer = np.empty(self.max_length, dtype=bool)
self.dim = obs.shape
self.action_buffer[self.current] = action
self.reward_buffer[self.current] = reward
self.obs_buffer[self.current, ...] = obs
self.terminal_buffer[self.current] = done
self.count = max(self.count, self.current + 1)
self.current = (self.current + 1) % self.max_length
def _get_obs(self, index):
index = index % self.count
if index >= self.history_length - 1:
return self.obs_buffer[(index - self.history_length + 1):(
index + 1), ...]
else:
indices = [(index - i) % self.count
for i in reversed(range(self.history_length))]
return self.obs_buffer[indices]
def sample(self, size):
if self.prestates is None:
state_shape = (size, self.history_length, *self.dim)
self.prestates = np.empty(state_shape, dtype=self.obs_buffer.dtype)
self.poststates = np.empty(
state_shape, dtype=self.obs_buffer.dtype)
indices = []
i = 0
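        # Rejection-sample valid indices: skip windows that straddle the current
        # write position of the ring buffer or that contain a terminal transition
        # inside the history window.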
while i < size:
while True:
index = random.randint(self.history_length, self.count - 1)
if index >= self.current and index - self.history_length < self.current:
continue
if self.terminal_buffer[(
index - self.history_length):index].any():
continue
break
self.prestates[i, ...] = self._get_obs(index - 1)
self.poststates[i, ...] = self._get_obs(index)
indices.append(index)
i += 1
actions = self.action_buffer[indices]
rewards = self.reward_buffer[indices]
dones = self.terminal_buffer[indices]
if len(self.dim) == 1:
shape = (size, self.dim[0])
else:
shape = (size, -1, self.dim[1], self.dim[2])
return (self.prestates.reshape(shape), actions, rewards,
self.poststates.reshape(shape), dones)
def size(self):
return self.count
def save(self, filename):
sio.savemat(
filename,
mdict=dict(
observations=self.obs_buffer,
actions=self.action_buffer,
rewards=self.reward_buffer,
terminals=self.terminal_buffer))
|
{"hexsha": "f09ac62ffa89e53c27117b1f84717a31465af47e", "size": 3562, "ext": "py", "lang": "Python", "max_stars_repo_path": "lwrl/memories/sequential.py", "max_stars_repo_name": "sealday/lwrl", "max_stars_repo_head_hexsha": "52bcd67751e605c38db4afa609c58938c7034e8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-11T11:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-29T18:09:51.000Z", "max_issues_repo_path": "lwrl/memories/sequential.py", "max_issues_repo_name": "sealday/lwrl", "max_issues_repo_head_hexsha": "52bcd67751e605c38db4afa609c58938c7034e8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-06-01T22:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:24:36.000Z", "max_forks_repo_path": "lwrl/memories/sequential.py", "max_forks_repo_name": "sealday/lwrl", "max_forks_repo_head_hexsha": "52bcd67751e605c38db4afa609c58938c7034e8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-12T03:09:47.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-12T03:09:47.000Z", "avg_line_length": 33.6037735849, "max_line_length": 88, "alphanum_fraction": 0.5612015722, "include": true, "reason": "import numpy,import scipy", "num_tokens": 765}
|
import simplejson as json, os
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from kettle.utils import get_beers
import numpy as np
class BeerMLData(list):
def __init__(self):
self.proj = None
self.arr = None
self.beer_mapping = None
try:
self.load()
except: pass
important_keys = [
('hop_varieties',list),
('dry_hop_varieties',list),
('malt_varieties',list),
('yeast_varieties',list),
('descriptors',list),
('categories',list),
('abv',float),
('style',str),
('price_per_growler',float)
]
def from_model(self):
self.extend(get_beers(False))
def from_file(self,fpath):
with open(fpath,'r') as fp:
self.extend(json.load(fp))
def fields(self):
return [key for key in self[0]['beer'].keys()]
def get_mapping_asarray(self):
num_samples = len(self.beer_mapping)
self.arr = np.zeros((num_samples,self.fs_dim),dtype=float)
for i,(k,v) in enumerate(self.beer_mapping.items()):
self.arr[i] = v
self.compute_pca()
return self.arr
def compute_pca(self):
self.proj = PCA(n_components=2)
self.proj.fit(self.arr)
def project(self):
return self.proj.transform(self.arr)
def create_beer_mapping(self):
data = {}
self.feature_space_keys = {}
for key,dtype in self.important_keys:
self.feature_space_keys[key] = set()
self.fscales = {}
# Figure out feature space dimensionality
self.descriptions = []
for beer in self:
for key,dtype in self.important_keys:
fsk = self.feature_space_keys[key]
dat = dtype(beer[key])
if dat == 100:
continue
if dtype != list:
dat = set([dat])
self.feature_space_keys[key] = fsk.union(dat)
self.descriptions = [beer['description'] for beer in self]
self.count_vect = CountVectorizer(stop_words='english')
X_train_counts = self.count_vect.fit_transform(self.descriptions)
self.tfidf_transformer = TfidfTransformer()
self.X_train_tfidf = self.tfidf_transformer.fit_transform(X_train_counts)
#print(self.X_train_tfidf[0])
#print(dir(self.X_train_tfidf[0]))
self.fs_dim = 0
for k,v in self.feature_space_keys.items():
if k in ('abv','price_per_growler'):
self.fs_dim += 1
continue
v = list(v)
v.sort()
self.feature_space_keys[k] = v
self.fs_dim += len(v)
self.fs_dim += self.X_train_tfidf.shape[1] # For the text description.
#compute floating point scales for continuous data
for k,dtype in self.important_keys:
if dtype != float: continue
mx = max(self.feature_space_keys[k])
self.fscales[k] = mx
# Map each beer into the binary feature space.
num_beers = len(self)
self.beer_mapping = {}
for beer in self:
#beer = x['beer']
beer_id = beer['id']
self.beer_mapping[beer_id] = self.map_beer(beer)
def get_beer_by_id(self,beer_id):
beers = [beer for beer in self if beer['id'] == beer_id]
return beers[0]
def map_beer(self,x):
if isinstance(x,str):
beer = self.get_beer_by_id(x)
else:
beer = x
record = np.zeros(self.fs_dim)
idx = 0
for key,dtype in self.important_keys:
beer_vals = beer[key]
fsk = self.feature_space_keys[key]
if dtype == list:
for k in fsk:
qual = k in beer_vals
if qual:
record[idx] = 1
idx += 1
elif dtype == str:
for k in fsk:
qual = k == beer_vals
if qual:
record[idx] = 1
idx += 1
# divide by their scales...
else:
record[idx] = min(dtype(beer_vals) / self.fscales[key],1.0)
idx += 1
cv = self.count_vect.transform([beer['description']])
cv = self.tfidf_transformer.transform(cv).todense()
#print( cv)
record[idx:] = cv
return record
if __name__ == "__main__":
path = os.path.expanduser('~/Downloads/beer_data.json')
data = BeerMLData()
data.from_model()
#data.from_file(path)
data.create_beer_mapping()
X = data.get_mapping_asarray()
Y = data.project()
print (data.feature_space_keys['descriptors'])
print (data.feature_space_keys['categories'])
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(X)
plt.figure()
plt.gca().set_axis_bgcolor('k')
plt.plot(Y[:,0],Y[:,1],'ro')
mapping = data.beer_mapping
for i,(k,v) in enumerate(mapping.items()):
plt.text(Y[i,0],Y[i,1],k,color='w')
plt.show()
#print(data.fields())
#print(data[0])
|
{"hexsha": "93833a4661a59da31899b3702fe70c0ffb4ac536", "size": 5376, "ext": "py", "lang": "Python", "max_stars_repo_path": "kettle/scripts/formatData.py", "max_stars_repo_name": "hacktobacillus/fermenter", "max_stars_repo_head_hexsha": "198e739aa71b13290c542773658928a33709b8f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kettle/scripts/formatData.py", "max_issues_repo_name": "hacktobacillus/fermenter", "max_issues_repo_head_hexsha": "198e739aa71b13290c542773658928a33709b8f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kettle/scripts/formatData.py", "max_forks_repo_name": "hacktobacillus/fermenter", "max_forks_repo_head_hexsha": "198e739aa71b13290c542773658928a33709b8f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1465968586, "max_line_length": 81, "alphanum_fraction": 0.5485491071, "include": true, "reason": "import numpy", "num_tokens": 1237}
|
[STATEMENT]
lemma invar_butlast: "invar (bq @ [t]) \<Longrightarrow> invar bq"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. invar (bq @ [t]) \<Longrightarrow> invar bq
[PROOF STEP]
unfolding invar_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. queue_invar (bq @ [t]) \<and> rank_invar (bq @ [t]) \<Longrightarrow> queue_invar bq \<and> rank_invar bq
[PROOF STEP]
apply (induct bq)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. queue_invar ([] @ [t]) \<and> rank_invar ([] @ [t]) \<Longrightarrow> queue_invar [] \<and> rank_invar []
2. \<And>a bq. \<lbrakk>queue_invar (bq @ [t]) \<and> rank_invar (bq @ [t]) \<Longrightarrow> queue_invar bq \<and> rank_invar bq; queue_invar ((a # bq) @ [t]) \<and> rank_invar ((a # bq) @ [t])\<rbrakk> \<Longrightarrow> queue_invar (a # bq) \<and> rank_invar (a # bq)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a bq. \<lbrakk>queue_invar (bq @ [t]) \<and> rank_invar (bq @ [t]) \<Longrightarrow> queue_invar bq \<and> rank_invar bq; queue_invar ((a # bq) @ [t]) \<and> rank_invar ((a # bq) @ [t])\<rbrakk> \<Longrightarrow> queue_invar (a # bq) \<and> rank_invar (a # bq)
[PROOF STEP]
apply (case_tac bq)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a bq. \<lbrakk>queue_invar (bq @ [t]) \<and> rank_invar (bq @ [t]) \<Longrightarrow> queue_invar bq \<and> rank_invar bq; queue_invar ((a # bq) @ [t]) \<and> rank_invar ((a # bq) @ [t]); bq = []\<rbrakk> \<Longrightarrow> queue_invar (a # bq) \<and> rank_invar (a # bq)
2. \<And>a bq aa list. \<lbrakk>queue_invar (bq @ [t]) \<and> rank_invar (bq @ [t]) \<Longrightarrow> queue_invar bq \<and> rank_invar bq; queue_invar ((a # bq) @ [t]) \<and> rank_invar ((a # bq) @ [t]); bq = aa # list\<rbrakk> \<Longrightarrow> queue_invar (a # bq) \<and> rank_invar (a # bq)
[PROOF STEP]
by (simp_all)
|
{"llama_tokens": 819, "file": "Binomial-Heaps_BinomialHeap", "length": 5}
|
from math import log, isnan
import numpy as np
from bokeh.models import *
from bokeh.plotting import figure
from itertools import cycle
from hail.expr import aggregators
from hail.expr.expressions import *
from hail.expr.expressions import Expression
from hail.typecheck import *
from hail import Table
import hail
palette = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
@typecheck(data=oneof(hail.utils.struct.Struct, expr_float64), range=nullable(sized_tupleof(numeric, numeric)),
bins=int, legend=nullable(str), title=nullable(str))
def histogram(data, range=None, bins=50, legend=None, title=None):
"""Create a histogram.
Parameters
----------
data : :class:`.Struct` or :class:`.Float64Expression`
Sequence of data to plot.
range : Tuple[float]
Range of x values in the histogram.
bins : int
Number of bins in the histogram.
legend : str
Label of data on the x-axis.
title : str
Title of the histogram.
Returns
-------
:class:`bokeh.plotting.figure.Figure`
"""
if isinstance(data, Expression):
if data._indices.source is not None:
agg_f = data._aggregation_method()
if range is not None:
start = range[0]
end = range[1]
else:
start, end = agg_f((aggregators.min(data), aggregators.max(data)))
data = agg_f(aggregators.hist(data, start, end, bins))
else:
            raise ValueError('Invalid input')
p = figure(title=title, x_axis_label=legend, y_axis_label='Frequency', background_fill_color='#EEEEEE')
p.quad(
bottom=0, top=data.bin_freq,
left=data.bin_edges[:-1], right=data.bin_edges[1:],
legend=legend, line_color='black')
if data.n_larger > 0:
p.quad(
bottom=0, top=data.n_larger,
left=data.bin_edges[-1], right=(data.bin_edges[-1] + (data.bin_edges[1] - data.bin_edges[0])),
line_color='black', fill_color='green', legend='Outliers Above')
if data.n_smaller > 0:
p.quad(
bottom=0, top=data.n_smaller,
left=data.bin_edges[0] - (data.bin_edges[1] - data.bin_edges[0]), right=data.bin_edges[0],
line_color='black', fill_color='red', legend='Outliers Below')
return p
@typecheck(data=oneof(hail.utils.struct.Struct, expr_float64), range=nullable(sized_tupleof(numeric, numeric)),
bins=int, legend=nullable(str), title=nullable(str), normalize=bool, log=bool)
def cumulative_histogram(data, range=None, bins=50, legend=None, title=None, normalize=True, log=False):
"""Create a cumulative histogram.
Parameters
----------
data : :class:`.Struct` or :class:`.Float64Expression`
Sequence of data to plot.
range : Tuple[float]
Range of x values in the histogram.
bins : int
Number of bins in the histogram.
legend : str
Label of data on the x-axis.
title : str
Title of the histogram.
normalize: bool
Whether or not the cumulative data should be normalized.
log: bool
Whether or not the y-axis should be of type log.
Returns
-------
:class:`bokeh.plotting.figure.Figure`
"""
if isinstance(data, Expression):
if data._indices.source is not None:
agg_f = data._aggregation_method()
if range is not None:
start = range[0]
end = range[1]
else:
start, end = agg_f((aggregators.min(data), aggregators.max(data)))
data = agg_f(aggregators.hist(data, start, end, bins))
else:
            raise ValueError('Invalid input')
cumulative_data = np.cumsum(data.bin_freq) + data.n_smaller
    cumulative_data = np.append(cumulative_data, [cumulative_data[-1] + data.n_larger])
num_data_points = max(cumulative_data)
if normalize:
cumulative_data = cumulative_data / num_data_points
if title is not None:
title = f'{title} ({num_data_points:,} data points)'
if log:
p = figure(title=title, x_axis_label=legend, y_axis_label='Frequency',
background_fill_color='#EEEEEE', y_axis_type='log')
else:
p = figure(title=title, x_axis_label=legend, y_axis_label='Frequency', background_fill_color='#EEEEEE')
p.line(data.bin_edges[:-1], cumulative_data, line_color='#036564', line_width=3)
return p
@typecheck(x=oneof(sequenceof(numeric), expr_float64), y=oneof(sequenceof(numeric), expr_float64),
label=oneof(nullable(str), expr_str, sequenceof(str)), title=nullable(str),
xlabel=nullable(str), ylabel=nullable(str), size=int, legend=bool,
source_fields=nullable(dictof(str, sequenceof(anytype))), collect_all=nullable(bool), n_divisions=int)
def scatter(x, y, label=None, title=None, xlabel=None, ylabel=None, size=4, legend=True,
collect_all=False, n_divisions=500, source_fields=None):
"""Create a scatterplot.
Parameters
----------
x : List[float] or :class:`.Float64Expression`
List of x-values to be plotted.
y : List[float] or :class:`.Float64Expression`
List of y-values to be plotted.
label : List[str] or :class:`.StringExpression`
List of labels for x and y values, used to assign each point a label (e.g. population)
title : str
Title of the scatterplot.
xlabel : str
X-axis label.
ylabel : str
Y-axis label.
size : int
Size of markers in screen space units.
legend : bool
Whether or not to show the legend in the resulting figure.
collect_all : bool
Whether to collect all values or downsample before plotting.
This parameter will be ignored if x and y are Python objects.
n_divisions : int
Factor by which to downsample (default value = 500). A lower input results in fewer output datapoints.
source_fields : Dict[str, List[Any]]
Extra fields for the ColumnDataSource of the plot.
Returns
-------
:class:`bokeh.plotting.figure.Figure`
"""
if isinstance(x, Expression) and isinstance(y, Expression):
agg_f = x._aggregation_method()
if isinstance(label, Expression):
if collect_all:
res = hail.tuple([x, y, label]).collect()
label = [point[2] for point in res]
else:
res = agg_f(aggregators.downsample(x, y, label=label, n_divisions=n_divisions))
label = [point[2][0] for point in res]
x = [point[0] for point in res]
y = [point[1] for point in res]
else:
if collect_all:
res = hail.tuple([x, y]).collect()
else:
res = agg_f(aggregators.downsample(x, y, n_divisions=n_divisions))
x = [point[0] for point in res]
y = [point[1] for point in res]
elif isinstance(x, Expression) or isinstance(y, Expression):
raise TypeError('Invalid input: x and y must both be either Expressions or Python Lists.')
else:
if isinstance(label, Expression):
label = label.collect()
p = figure(title=title, x_axis_label=xlabel, y_axis_label=ylabel, background_fill_color='#EEEEEE')
if label is not None:
fields = dict(x=x, y=y, label=label)
if source_fields is not None:
for key, values in source_fields.items():
fields[key] = values
source = ColumnDataSource(fields)
if legend:
leg = 'label'
else:
leg = None
factors = list(set(label))
if len(factors) > len(palette):
color_gen = cycle(palette)
colors = []
for i in range(0, len(factors)):
colors.append(next(color_gen))
else:
colors = palette[0:len(factors)]
color_mapper = CategoricalColorMapper(factors=factors, palette=colors)
p.circle('x', 'y', alpha=0.5, source=source, size=size,
color={'field': 'label', 'transform': color_mapper}, legend=leg)
else:
p.circle(x, y, alpha=0.5, size=size)
return p
@typecheck(pvals=oneof(sequenceof(numeric), expr_float64), collect_all=bool, n_divisions=int)
def qq(pvals, collect_all=False, n_divisions=500):
"""Create a Quantile-Quantile plot. (https://en.wikipedia.org/wiki/Q-Q_plot)
Parameters
----------
pvals : List[float] or :class:`.Float64Expression`
P-values to be plotted.
collect_all : bool
Whether to collect all values or downsample before plotting.
This parameter will be ignored if pvals is a Python object.
n_divisions : int
Factor by which to downsample (default value = 500). A lower input results in fewer output datapoints.
Returns
-------
:class:`bokeh.plotting.figure.Figure`
"""
if isinstance(pvals, Expression):
source = pvals._indices.source
if source is not None:
if collect_all:
pvals = pvals.collect()
spvals = sorted(filter(lambda x: x and not(isnan(x)), pvals))
exp = [-log(float(i) / len(spvals), 10) for i in np.arange(1, len(spvals) + 1, 1)]
obs = [-log(p, 10) for p in spvals]
else:
if isinstance(source, Table):
ht = source.select(pval=pvals)
else:
ht = source.select_rows(pval=pvals).rows()
n = ht.count()
ht = ht.order_by('pval').add_index()
ht = ht.annotate(expected_p=(ht.idx + 1) / n)
pvals = ht.aggregate(
aggregators.downsample(-hail.log10(ht.expected_p), -hail.log10(ht.pval), n_divisions=n_divisions))
exp = [point[0] for point in pvals if not isnan(point[1])]
obs = [point[1] for point in pvals if not isnan(point[1])]
else:
            raise ValueError('Invalid input: expression has no source')
else:
spvals = sorted(filter(lambda x: x and not(isnan(x)), pvals))
exp = [-log(float(i) / len(spvals), 10) for i in np.arange(1, len(spvals) + 1, 1)]
obs = [-log(p, 10) for p in spvals]
p = figure(
title='Q-Q Plot',
x_axis_label='Expected p-value (-log10 scale)',
y_axis_label='Observed p-value (-log10 scale)')
p.scatter(x=exp, y=obs, color='black')
bound = max(max(exp), max(obs)) * 1.1
p.line([0, bound], [0, bound], color='red')
return p
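# Hedged usage sketch (not part of the original API): `ht` stands for a
# hypothetical Hail Table with a float64 field `p_value`.
def _example_qq_usage(ht):
    """Illustrative only: build a Q-Q plot figure from a table's p-value field."""
    return qq(ht.p_value, n_divisions=500)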
@typecheck(pvals=expr_float64, locus=nullable(expr_locus()), title=nullable(str),
size=int, hover_fields=nullable(dictof(str, expr_any)), collect_all=bool, n_divisions=int)
def manhattan(pvals, locus=None, title=None, size=4, hover_fields=None, collect_all=False, n_divisions=500):
"""Create a Manhattan plot. (https://en.wikipedia.org/wiki/Manhattan_plot)
Parameters
----------
pvals : :class:`.Float64Expression`
P-values to be plotted.
locus : :class:`.LocusExpression`
Locus values to be plotted.
title : str
Title of the plot.
size : int
Size of markers in screen space units.
hover_fields : Dict[str, :class:`.Expression`]
Dictionary of field names and values to be shown in the HoverTool of the plot.
collect_all : bool
Whether to collect all values or downsample before plotting.
n_divisions : int
Factor by which to downsample (default value = 500). A lower input results in fewer output datapoints.
Returns
-------
:class:`bokeh.plotting.figure.Figure`
"""
def get_contig_index(x, starts):
left = 0
right = len(starts) - 1
while left <= right:
mid = (left + right) // 2
if x < starts[mid]:
if x >= starts[mid - 1]:
return mid - 1
right = mid
elif x >= starts[mid+1]:
left = mid + 1
else:
return mid
pvals = -hail.log10(pvals)
if locus is None:
locus = pvals._indices.source.locus
if hover_fields is None:
hover_fields = {}
hover_fields['locus'] = hail.str(locus)
if collect_all:
res = hail.tuple([locus.global_position(), pvals, hail.struct(**hover_fields)]).collect()
hf_struct = [point[2] for point in res]
for key in hover_fields:
hover_fields[key] = [item[key] for item in hf_struct]
else:
agg_f = pvals._aggregation_method()
res = agg_f(aggregators.downsample(locus.global_position(), pvals,
label=hail.array([hail.str(x) for x in hover_fields.values()]),
n_divisions=n_divisions))
fields = [point[2] for point in res]
for idx, key in enumerate(list(hover_fields.keys())):
hover_fields[key] = [field[idx] for field in fields]
x = [point[0] for point in res]
y = [point[1] for point in res]
ref = locus.dtype.reference_genome
total_pos = 0
start_points = []
for i in range(0, len(ref.contigs)):
start_points.append(total_pos)
total_pos += ref.lengths.get(ref.contigs[i])
start_points.append(total_pos) # end point of all contigs
observed_contigs = set()
label = []
for element in x:
contig_index = get_contig_index(element, start_points)
label.append(str(contig_index % 2))
observed_contigs.add(ref.contigs[contig_index])
labels = ref.contigs.copy()
num_deleted = 0
mid_points = []
for i in range(0, len(ref.contigs)):
if ref.contigs[i] in observed_contigs:
length = ref.lengths.get(ref.contigs[i])
mid = start_points[i] + length / 2
if mid % 1 == 0:
mid += 0.5
mid_points.append(mid)
else:
del labels[i - num_deleted]
num_deleted += 1
p = scatter(x, y, label=label, title=title, xlabel='Chromosome', ylabel='P-value (-log10 scale)',
size=size, legend=False, source_fields=hover_fields)
p.xaxis.ticker = mid_points
p.xaxis.major_label_overrides = dict(zip(mid_points, labels))
p.width = 1000
tooltips = [(key, "@{}".format(key)) for key in hover_fields]
tooltips.append(('p-value', "$y"))
p.add_tools(HoverTool(
tooltips=tooltips
))
return p
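# Hedged usage sketch (not part of the original API): `gwas` stands for a
# hypothetical Hail Table with `locus`, `p_value` and `rsid` row fields.
def _example_manhattan_usage(gwas):
    """Illustrative only: Manhattan plot with one extra hover field."""
    return manhattan(gwas.p_value, locus=gwas.locus,
                     hover_fields={'rsid': gwas.rsid}, n_divisions=500)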
|
{"hexsha": "c7fc88075892798bc6bbb5c4e02a976ba6eace76", "size": 14577, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/hail/plot/plots.py", "max_stars_repo_name": "maccum/hail", "max_stars_repo_head_hexsha": "e9e8a40bb4f0c2337e5088c26186a4da4948bed2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/hail/plot/plots.py", "max_issues_repo_name": "maccum/hail", "max_issues_repo_head_hexsha": "e9e8a40bb4f0c2337e5088c26186a4da4948bed2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/hail/plot/plots.py", "max_forks_repo_name": "maccum/hail", "max_forks_repo_head_hexsha": "e9e8a40bb4f0c2337e5088c26186a4da4948bed2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7642487047, "max_line_length": 120, "alphanum_fraction": 0.604102353, "include": true, "reason": "import numpy", "num_tokens": 3567}
|
function spm_progress_bar(action,varargin)
% Display a 'Progress Bar' in the 'Interactive' window
% FORMAT spm_progress_bar('Init',height,xlabel,ylabel,flgs)
% Initialise the bar in the 'Interactive' window.
% If flgs contains a 't', then use tex interpreter for labels.
%
% FORMAT spm_progress_bar('Set',value)
% Set the height of the bar itself.
%
% FORMAT spm_progress_bar('Set','xlabel',xlabel)
% FORMAT spm_progress_bar('Set','ylabel',ylabel)
% Set the progress bar labels.
%
% FORMAT spm_progress_bar('Set','height',height)
% Set the height of the progress bar.
%
% FORMAT spm_progress_bar('Clear')
% Clear the 'Interactive' window.
%__________________________________________________________________________
% Copyright (C) 1996-2015 Wellcome Trust Centre for Neuroimaging
% John Ashburner
% $Id: spm_progress_bar.m 6383 2015-03-19 17:20:41Z guillaume $
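% Example usage (illustrative sketch, not part of the original help text):
%   spm_progress_bar('Init',100,'Smoothing images','volumes completed');
%   for i=1:100
%       % ... process volume i ...
%       spm_progress_bar('Set',i);
%   end
%   spm_progress_bar('Clear');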
persistent pbar;
if ~nargin, action = 'Init'; end
switch lower(action)
% Initialise
%======================================================================
case 'init'
Finter = spm_figure('FindWin','Interactive');
if isempty(Finter), pbar = []; return; end
if nargin > 1, arg1 = varargin{1}; else arg1 = 1; end
if nargin > 2, arg2 = varargin{2}; else arg2 = 'Computing'; end
if nargin > 3, arg3 = varargin{3}; else arg3 = ''; end
if nargin > 4, arg4 = varargin{4}; else arg4 = ' '; end
if any(arg4 == 't'), interp = 'tex'; else interp = 'none'; end
pb = struct('pointer',get(Finter,'Pointer'),...
'name', get(Finter,'Name'),...
'buffer', get(Finter,'DoubleBuffer'));
spm_progress_bar('Clear');
set(Finter,'Pointer','watch');
set(Finter,'Name',pb.name);
set(Finter,'DoubleBuffer','on'); % no effect since R2013a
if ischar(arg2), arg2 = repmat({arg2},1,numel(arg1)); end
if ischar(arg3), arg3 = repmat({arg3},1,numel(arg1)); end
for i=1:numel(arg1)
%-
pb.ax(i) = axes(...
'Position', [((i/(numel(arg1)+1))-0.05) 0.2 0.05 0.6],...
'XTick', [],...
'Xlim', [0 1],...
'Ylim', [0 max([arg1(i) eps])],...
'Box', 'on',...
'Parent', Finter);
try, set(pb.ax(i),'ClippingStyle','rectangle'); end
%-XLabel
lab = get(pb.ax(i),'Xlabel');
if numel(arg2) < i, arg2{i} = ''; end
set(lab,'string',arg2{i},'FontSize',10,'Interpreter',interp);
%-YLabel
lab = get(pb.ax(i),'Ylabel');
if numel(arg3) < i, arg3{i} = ''; end
set(lab,'string',arg3{i},'FontSize',10,'Interpreter',interp);
%-Title
pb.t(i) = get(pb.ax(i),'Title');
set(pb.t(i),'string','0% Complete','Interpreter',interp);
%-Began...
t = clock;
if numel(arg1) == 1, opts = {};
else opts = {'Rotation',90,'HorizontalAlignment','center'}; end
str = sprintf('Began %2.0f:%02.0f:%02.0f',t(4),t(5),t(6));
pb.b(i) = text(2,arg1(i)/2,0,str,...
'FontSize',10,...
'Parent',pb.ax(i),...
opts{:});
%-Progress bar
pb.l(i) = line(...
'Xdata', [0.5 0.5],...
'Ydata', [0 0],...
'LineWidth', 8,...
'Color', [1 0 0],...
'Parent', pb.ax(i));
end
pbar = pb;
drawnow;
% Set
%======================================================================
case 'set'
if isempty(pbar) || ~all(ishandle(pbar.l)), pbar = []; return; end
if nargin == 1, value = 0; else value = varargin{1}; end
if ischar(value)
if nargin == 2, str = ''; else str = varargin{2}; end
if nargin == 3, p = 1; else p = varargin{3}; end
switch lower(value)
case {'xlabel','ylabel'}
set(get(pbar.ax(p),value),'String',str);
case 'height'
t = clock;
bstr = sprintf('Began %2.0f:%02.0f:%02.0f',t(4),t(5),t(6));
set(pbar.b(p),'String',bstr);
set(pbar.ax(p),'YLim',[0 max([str eps])]);
otherwise
error('Unknown action.');
end
else
if nargin == 2, p = 1; else p = varargin{2}; end
set(pbar.l(p),'Ydata',[0 value]);
lim = get(pbar.ax(p),'Ylim');lim=lim(2);
set(pbar.t(p),'string',sprintf('%.0f%% Complete',100*value/lim));
end
try, drawnow limitrate; catch, drawnow; end
% Clear
%======================================================================
case 'clear'
Finter = spm_figure('FindWin','Interactive');
if isempty(Finter), pbar = []; return; end
spm_figure('Clear',Finter);
if isstruct(pbar)
set(Finter,'Pointer', pbar.pointer);
set(Finter,'Name', pbar.name);
set(Finter,'DoubleBuffer',pbar.buffer);
end
pbar = [];
drawnow;
% Error
%======================================================================
otherwise
error('Unknown action string');
end
|
{"author": "fieldtrip", "repo": "fieldtrip", "sha": "c2039be598a02d86b39aae76bfa7aaa720f9801c", "save_path": "github-repos/MATLAB/fieldtrip-fieldtrip", "path": "github-repos/MATLAB/fieldtrip-fieldtrip/fieldtrip-c2039be598a02d86b39aae76bfa7aaa720f9801c/external/spm12/spm_progress_bar.m"}
|
from typing import Union, Any
import numpy as np
import quaternion
from framegraph.utils import transform_vecs, transform_vec
from framegraph.pose_abc import AbstractPose
class Pose(AbstractPose):
def __init__(self,
rotation: Union[np.ndarray, np.quaternion] = None,
translation: np.ndarray = None):
if rotation is None and translation is None:
rotation = quaternion.one
translation = np.zeros(3)
elif rotation is None and translation is not None:
if translation.ndim == 1:
rotation = quaternion.one
else:
rotation = np.full(
translation.shape[0], quaternion.one, dtype=np.quaternion)
elif rotation is not None and translation is not None:
pass
else:
    # rotation is not None and translation is None
rotation = self._get_standard_rotation(rotation)
translation = np.zeros((len(rotation), 3))
self.rotation = rotation
self.translation = translation
self._check_consistency()
@classmethod
def from_quat_pos(cls, quat: Union[np.quaternion, np.ndarray] = None,
translation: np.ndarray = None):
return cls(rotation=quat, translation=translation)
@property
def translation(self):
return self._translation
@translation.setter
def translation(self, translation: np.ndarray):
if translation.ndim not in [1, 2]:
raise ValueError("translation must be 1 or 2 dimensional")
if translation.shape[-1] != 3:
raise ValueError("translation must have last axis length of 3")
self._translation = translation
@property
def rotation(self):
return self._rotation
@rotation.setter
def rotation(self, rotation: Union[np.ndarray, np.quaternion]):
self._rotation = self._get_standard_rotation(rotation)
def interp(self, t_init: np.ndarray, t_interp: np.ndarray):
interp_rots = quaternion.squad(self.rotation, t_init, t_interp)
interp_trans = np.apply_along_axis(
lambda x: np.interp(t_interp, t_init, x), 0, self.translation)
return self.__class__(interp_rots, interp_trans)
def transform_vecs(self, vec: np.ndarray) -> np.ndarray:
rot = quaternion.as_float_array(self.rotation)
if rot.ndim == 1:
return transform_vec(rot, self.translation, vec)
return transform_vecs(rot, self.translation, vec)
def as_trans_mat(self):
r_mat = quaternion.as_rotation_matrix(self.rotation)
if r_mat.ndim == 3:
trans_mat = np.tile(np.eye(4), (r_mat.shape[0], 1, 1))
else:
trans_mat = np.eye(4)
trans_mat[..., :3, :3] = r_mat
trans_mat[..., :3, 3] = self.translation
return trans_mat
@classmethod
def from_trans_mat(cls, trans_mat: np.ndarray):
translation = trans_mat[..., :3, 3]
rotation = quaternion.from_rotation_matrix(trans_mat[..., :3, :3])
return cls(rotation=rotation, translation=translation)
def as_rmat_pos(self):
return quaternion.as_rotation_matrix(self.rotation), self.translation
@classmethod
def from_rmat_pos(cls, rmat: np.ndarray = None, pos: np.ndarray = None):
quat = quaternion.from_rotation_matrix(rmat)
return cls(rotation=quat, translation=pos)
def as_rvec_pos(self):
return quaternion.as_rotation_vector(self.rotation), self.translation
@classmethod
def from_rvec_pos(cls, rvec: np.ndarray = None, pos: np.ndarray = None):
return cls(rotation=quaternion.from_rotation_vector(rvec), translation=pos)
def _check_consistency(self):
if isinstance(self.rotation, np.quaternion):
assert self.translation.shape == (3,)
elif isinstance(self.rotation, np.ndarray):
assert self.translation.ndim == 2
assert self.translation.shape[0] == self.rotation.shape[0]
else:
raise AssertionError("Rotation is of inconsistent type.")
@staticmethod
def _get_standard_rotation(rotation: Any):
std_form = None
if isinstance(rotation, np.quaternion):
std_form = rotation
elif isinstance(rotation, np.ndarray):
if rotation.dtype == np.quaternion:
std_form = rotation
elif rotation.ndim == 1 or rotation.ndim == 2:
std_form = quaternion.from_float_array(rotation)
else:
raise ValueError("rotation has invalid number of dimensions:"
f" {rotation.ndim}")
else:
raise TypeError(f"rotation has invalid type: {type(rotation)}")
return std_form
def __getitem__(self, key):
return self.__class__(rotation=self.rotation[key],
translation=self.translation[key])
def __mul__(self, other):
rot = self.rotation * other.rotation
trans = self.transform_vecs(other.translation)
return self.__class__(rotation=rot, translation=trans)
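# Hedged usage sketch (not part of the original class), assuming the
# framegraph.utils helpers behave as used above: compose two poses and
# round-trip the result through a 4x4 homogeneous matrix.
def _example_pose_composition():
    p1 = Pose(rotation=quaternion.from_rotation_vector(np.array([0.0, 0.0, np.pi / 2])),
              translation=np.array([1.0, 0.0, 0.0]))
    p2 = Pose(rotation=quaternion.one, translation=np.array([0.0, 1.0, 0.0]))
    composed = p1 * p2  # rotation composed, translation transformed by p1
    return Pose.from_trans_mat(composed.as_trans_mat())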
|
{"hexsha": "70dbfd00949138388d1fd8ddc90c1d8b97bffa26", "size": 5159, "ext": "py", "lang": "Python", "max_stars_repo_path": "framegraph/pose.py", "max_stars_repo_name": "vi-robotics/framegraph", "max_stars_repo_head_hexsha": "554d3058059ef3c31f940fb38c93d67381d9df2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "framegraph/pose.py", "max_issues_repo_name": "vi-robotics/framegraph", "max_issues_repo_head_hexsha": "554d3058059ef3c31f940fb38c93d67381d9df2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "framegraph/pose.py", "max_forks_repo_name": "vi-robotics/framegraph", "max_forks_repo_head_hexsha": "554d3058059ef3c31f940fb38c93d67381d9df2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6569343066, "max_line_length": 83, "alphanum_fraction": 0.6342314402, "include": true, "reason": "import numpy", "num_tokens": 1076}
|
[STATEMENT]
lemma cf_comma_proj_left_ObjMap_vrange:
assumes "\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" and "\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows "\<R>\<^sub>\<circ> (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<R>\<^sub>\<circ> (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
proof(rule vsv.vsv_vrange_vsubset, unfold cat_comma_cs_simps)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. vsv (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>)
2. \<And>x. x \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr> \<Longrightarrow> \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>x\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
fix A
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. vsv (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>)
2. \<And>x. x \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr> \<Longrightarrow> \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>x\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
assume prems: "A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>"
[PROOF STATE]
proof (state)
this:
A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
goal (2 subgoals):
1. vsv (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>)
2. \<And>x. x \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr> \<Longrightarrow> \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>x\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
[PROOF STEP]
obtain a b f where A_def: "A = [a, b, f]\<^sub>\<circ>" and a: "a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>"
[PROOF STATE]
proof (prove)
using this:
\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. (\<And>a b f. \<lbrakk>A = [a, b, f]\<^sub>\<circ>; a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A = [a, b, f]\<^sub>\<circ>
a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
goal (2 subgoals):
1. vsv (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>)
2. \<And>x. x \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr> \<Longrightarrow> \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>x\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
from assms prems a
[PROOF STATE]
proof (chain)
picking this:
\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
show "\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>A\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>"
[PROOF STATE]
proof (prove)
using this:
\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
A \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>A\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
unfolding A_def
[PROOF STATE]
proof (prove)
using this:
\<GG> : \<AA> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
\<HH> : \<BB> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>
[a, b, f]\<^sub>\<circ> \<in>\<^sub>\<circ> \<GG> \<^sub>C\<^sub>F\<down>\<^sub>C\<^sub>F \<HH>\<lparr>Obj\<rparr>
a \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. \<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr> \<lparr>a, b, f\<rparr>\<^sub>\<bullet> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
[PROOF STEP]
by (cs_concl cs_shallow cs_simp: cat_comma_cs_simps)
[PROOF STATE]
proof (state)
this:
\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>\<lparr>A\<rparr> \<in>\<^sub>\<circ> \<AA>\<lparr>Obj\<rparr>
goal (1 subgoal):
1. vsv (\<GG> \<^sub>C\<^sub>F\<Sqinter> \<HH>\<lparr>ObjMap\<rparr>)
[PROOF STEP]
qed (auto intro: cat_comma_cs_intros)
|
{"llama_tokens": 2362, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_Comma", "length": 11}
|
import numpy as np
from configs.DataPath import TRAIN_PATH, ROOT_PATH, DET_PATH, TRAIN_JSON_PATH
from utils.rand import random_sys
import cv2
import json
import random
class DataLoader(object):
def __init__(self, data_settings, read_all_boxes=False):
self.dataset_trained = []
self.data_num = 0
self.num_train = 0
self.num_val = 0
self.sub_datasets = {}
self.val_index = []
self.read_all_boxes = read_all_boxes
for sub_dataset in data_settings['dataset_used']:
self.sub_datasets[sub_dataset] = data_settings[sub_dataset]
with open(TRAIN_JSON_PATH + self.sub_datasets[sub_dataset]['label_path']) as f:
data = json.load(f)
f.close()
self.sub_datasets[sub_dataset]['data'] = data
num_data = self.sub_datasets[sub_dataset]['num_data']
assert num_data == len(data)
multiply = self.sub_datasets[sub_dataset]['multiply']
num_train = self.sub_datasets[sub_dataset]['num_train']
num_val = self.sub_datasets[sub_dataset]['num_val']
num_train_objects = self.sub_datasets[sub_dataset]['num_train_objects']
num_val_objects = self.sub_datasets[sub_dataset]['num_val_objects']
assert num_train_objects <= num_train * multiply
assert num_val_objects <= num_val * multiply
dataset = [sub_dataset] * num_data
keys = list(data.keys())
index = list(zip(dataset, keys))
random.shuffle(index)
if num_val > 0:
train_index = index[:-num_val]
val_index = index[-num_val:]
val_index = val_index * multiply
random.shuffle(val_index)
val_index = val_index[:num_val_objects]
else:
train_index = index
val_index = []
self.sub_datasets[sub_dataset].update(dict(train_index=train_index, val_index=val_index))
self.val_index += val_index
self.num_train += num_train_objects
self.num_val += num_val_objects
print('load ' + sub_dataset + ' done, train: %d, val: %d' % (num_train_objects, num_val_objects))
print('Dataloader done. Total train number: %d, Total val number: %d' % (self.num_train, self.num_val))
random.shuffle(self.val_index)
self.build_train_index()
def build_train_index(self):
self.train_index = []
for sub_dataset in self.sub_datasets:
sub_index = self.sub_datasets[sub_dataset]['train_index'].copy()
if sub_index:
random.shuffle(sub_index)
sub_index = sub_index[:self.sub_datasets[sub_dataset]['num_train']]
sub_index *= self.sub_datasets[sub_dataset]['multiply']
random.shuffle(sub_index)
sub_index = sub_index[:self.sub_datasets[sub_dataset]['num_train_objects']]
self.dataset_trained.append(sub_dataset)
self.train_index += sub_index
random.shuffle(self.train_index)
def get_random_data(self, read_all_boxes=False):
random_dataset = random.choice(self.dataset_trained)
random_index = random.choice(self.sub_datasets[random_dataset]['train_index'])
return self.get_data(random_index, read_pair=False, read_all_boxes=read_all_boxes)
def read(self, idx, validate, positive):
if validate:
index = self.val_index[idx]
else:
index = self.train_index[idx]
if positive:
all_boxes, search_img, search_box, template_img, template_box = self.get_data(index, read_pair=True, read_all_boxes=self.read_all_boxes)
else:
all_boxes, search_img, search_box = self.get_data(index, read_pair=False, read_all_boxes=self.read_all_boxes)
_, template_img, template_box = self.get_random_data(read_all_boxes=False)
return all_boxes, search_img, search_box, template_img, template_box
def get_data(self, index, read_pair=True, read_all_boxes=False):
dataset = index[0]
index = index[1]
data = self.sub_datasets[dataset]['data'][index]
match_range = self.sub_datasets[dataset]['match_range']
path = TRAIN_PATH[dataset] + '/' + index
all_boxes = []
if dataset in ['DET', 'DET_val', 'COCO', 'COCO_val']:
if dataset == 'DET' or dataset == 'DET_val':
if index[0] == 'a':
search_path = ROOT_PATH + DET_PATH + index[:index.index('_')] + '/' + index[2:] + '.JPEG'
else:
search_path = path + '.JPEG'
else:
search_path = path + '.jpg'
samples = list(data.keys())
num_sample = len(data)
if num_sample > 1:
search_index = random.randint(0, num_sample - 1)
else:
search_index = 0
search_box = data[samples[search_index]]['000000']
if read_pair:
template_path = search_path
if read_all_boxes:
for i in range(num_sample):
if i != search_index:
all_boxes.append(np.array(data[samples[i]]['000000'], dtype=np.float32))
elif dataset in ['VID', 'VID_val']:
num_sample = len(data)
samples = list(data.keys())
if num_sample == 1:
sample_index = 0
else:
sample_index = random.randint(0, num_sample - 1)
sample_data = data[samples[sample_index]]
frames = list(sample_data.keys())
num_frame = len(frames)
search_index = random.randint(0, num_frame - 1)
search_frame = frames[search_index]
search_path = path + '/' + search_frame + '.JPEG'
search_box = sample_data[search_frame]
if read_pair:
if match_range == 'all':
template_index = random.randint(0, num_frame - 1)
elif match_range == 'init':
template_index = 0
elif match_range == 'mix':
if random_sys() > 0.5:
template_index = 0
else:
template_index = random.randint(0, num_frame - 1)
else:
template_index = random.randint(max(search_index - match_range, 0),
min(search_index + match_range, num_frame) - 1)
template_path = path + '/' + frames[template_index] + '.JPEG'
template_box = sample_data[frames[template_index]]
if read_all_boxes:
if num_sample > 1:
for i in range(num_sample):
if i != sample_index:
sample_frames = list(data[samples[i]].keys())
if search_frame in sample_frames:
all_boxes.append(np.array(data[samples[i]][search_frame], dtype=np.float32))
elif dataset in ['GOT', 'GOT_val', 'LaSOT']:
if dataset == 'LaSOT':
path = path + '/img/'
frames = list(data.keys())
num_frame = len(frames)
search_index = random.randint(0, num_frame - 1)
search_frame = frames[search_index]
search_path = path + '/' + search_frame + '.jpg'
search_box = data[search_frame]
if read_pair:
if match_range == 'all':
template_index = random.randint(0, num_frame - 1)
elif match_range == 'init':
template_index = 0
elif match_range == 'mix':
if random_sys() > 0.5:
template_index = 0
else:
template_index = random.randint(0, num_frame - 1)
else:
template_index = random.randint(max(search_index - match_range, 0),
min(search_index + match_range, num_frame) - 1)
template_path = path + '/' + frames[template_index] + '.jpg'
template_box = data[frames[template_index]]
search_img = cv2.imread(search_path)
search_box = np.array(search_box, dtype=np.float32)
if read_pair:
if template_path == search_path:
template_img = search_img
template_box = search_box
else:
template_img = cv2.imread(template_path)
template_box = np.array(template_box, dtype=np.float32)
return all_boxes, search_img, search_box, template_img, template_box
else:
return all_boxes, search_img, search_box
|
{"hexsha": "1cc39b563237297c0916e6886975b5e5bc43f314", "size": 9192, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/DataLoader.py", "max_stars_repo_name": "bit-bcilab/SiamDCA", "max_stars_repo_head_hexsha": "78a520f2bf6b89f8dee8b05ca7a9399813f77e92", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "training/DataLoader.py", "max_issues_repo_name": "bit-bcilab/SiamDCA", "max_issues_repo_head_hexsha": "78a520f2bf6b89f8dee8b05ca7a9399813f77e92", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/DataLoader.py", "max_forks_repo_name": "bit-bcilab/SiamDCA", "max_forks_repo_head_hexsha": "78a520f2bf6b89f8dee8b05ca7a9399813f77e92", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2197309417, "max_line_length": 149, "alphanum_fraction": 0.5467798085, "include": true, "reason": "import numpy", "num_tokens": 1826}
|
# -*- coding: utf-8 -*-
import sys
from PySide2 import QtWidgets
from PySide2.QtTest import QTest
from numpy import pi
from Tests.GUI import gui_option # Set unit as [m]
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotM18 import SlotM18
from pyleecan.GUI.Dialog.DMachineSetup.SMSlot.PMSlot18.PMSlot18 import PMSlot18
import pytest
class TestPMSlot18(object):
"""Test that the widget PMSlot18 behave like it should"""
def setup_method(self):
self.test_obj = LamSlotMag(Rint=0.1, Rext=0.2)
self.test_obj.slot = SlotM18(Hmag=0.15)
self.widget = PMSlot18(self.test_obj)
@classmethod
def setup_class(cls):
"""Start the app for the test"""
print("\nStart Test TestPMSlot18")
if not QtWidgets.QApplication.instance():
cls.app = QtWidgets.QApplication(sys.argv)
else:
cls.app = QtWidgets.QApplication.instance()
@classmethod
def teardown_class(cls):
"""Exit the app after the test"""
cls.app.quit()
def test_init(self):
"""Check that the Widget spinbox initialise to the lamination value"""
assert self.widget.lf_Hmag.value() == 0.15
def test_set_Hmag(self):
"""Check that the Widget allow to update Hmag"""
# Check Unit
assert self.widget.unit_Hmag.text() == "[m]"
# Change value in GUI
self.widget.lf_Hmag.clear()
QTest.keyClicks(self.widget.lf_Hmag, "0.36")
self.widget.lf_Hmag.editingFinished.emit() # To trigger the slot
assert self.widget.slot.Hmag == pytest.approx(0.36)
assert self.test_obj.slot.Hmag == pytest.approx(0.36)
def test_output_txt(self):
"""Check that the Output text is computed and correct"""
self.test_obj.slot = SlotM18(Hmag=0.005)
self.widget = PMSlot18(self.test_obj)
assert self.widget.w_out.out_slot_height.text() == "Slot height: 0 [m]"
def test_check(self):
"""Check that the check is working correctly"""
self.test_obj = LamSlotMag(Rint=0.1, Rext=0.2)
# Hmag
self.test_obj.slot = SlotM18(Hmag=None)
assert self.widget.check(self.test_obj) == "You must set Hmag !"
if __name__ == "__main__":
a = TestPMSlot18()
a.setup_class()
a.setup_method()
a.test_init()
a.test_output_txt()
a.teardown_class()
print("Done")
|
{"hexsha": "8687b976809ba757ffbdd486ac876e9b5d6d9250", "size": 2406, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tests/GUI/DMachineSetup/PMSlot/test_PMSlot18.py", "max_stars_repo_name": "tobsen2code/pyleecan", "max_stars_repo_head_hexsha": "5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-05T15:22:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T15:26:08.000Z", "max_issues_repo_path": "Tests/GUI/DMachineSetup/PMSlot/test_PMSlot18.py", "max_issues_repo_name": "Eomys/Pyleecan", "max_issues_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-07-09T07:43:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T12:52:06.000Z", "max_forks_repo_path": "Tests/GUI/DMachineSetup/PMSlot/test_PMSlot18.py", "max_forks_repo_name": "Eomys/Pyleecan", "max_forks_repo_head_hexsha": "4d7f0cbabf0311006963e7a2f435db2ecd901118", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-12-23T12:38:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T10:47:48.000Z", "avg_line_length": 30.8461538462, "max_line_length": 79, "alphanum_fraction": 0.6525353283, "include": true, "reason": "from numpy", "num_tokens": 622}
|
import numpy as np
class Pilha:
def __init__(self, capacidade):
self.__capacidade = capacidade
self.__topo = -1
self.__valores = np.chararray(self.__capacidade, unicode=True)
def __pilha_cheia(self):
if self.__topo == self.__capacidade - 1:
return True
else:
return False
def pilha_vazia(self):
if self.__topo == -1:
return True
else:
return False
def empilhar(self, valor):
if self.__pilha_cheia():
print('The stack is full.')
else:
self.__topo += 1
self.__valores[self.__topo] = valor
def desempilhar(self):
if self.pilha_vazia():
print('The stack is empty.')
else:
self.__topo -= 1
def ver_topo(self):
if self.__topo != -1:
return self.__valores[self.__topo]
else:
return -1
exp = str(input('Enter an expression: '))
pilha = Pilha(len(exp))
for i in exp:
if i in '{[(':
pilha.empilhar(i)
elif i in '}])':
if pilha.ver_topo() == '{' and i == '}':
pilha.desempilhar()
elif pilha.ver_topo() == '[' and i == ']':
pilha.desempilhar()
elif pilha.ver_topo() == '(' and i == ')':
pilha.desempilhar()
else:
print(f'Invalid expression.\nError at {i}')
break
else:
    # Runs only when the loop finished without a break: any leftover
    # opening bracket means the expression is incomplete.
    if not pilha.pilha_vazia():
        print('Invalid expression.\nIt is incomplete.')
|
{"hexsha": "2fd0be5d98bb51020c1d8d1205fe2ab06a3142ff", "size": 1331, "ext": "py", "lang": "Python", "max_stars_repo_path": "05_pilha_validador_expressoes.py", "max_stars_repo_name": "AlissonRaphael/algorithm_and_data_structures", "max_stars_repo_head_hexsha": "d970299c40ce779e6826d36ca28ebfb1ec6f8a88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "05_pilha_validador_expressoes.py", "max_issues_repo_name": "AlissonRaphael/algorithm_and_data_structures", "max_issues_repo_head_hexsha": "d970299c40ce779e6826d36ca28ebfb1ec6f8a88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "05_pilha_validador_expressoes.py", "max_forks_repo_name": "AlissonRaphael/algorithm_and_data_structures", "max_forks_repo_head_hexsha": "d970299c40ce779e6826d36ca28ebfb1ec6f8a88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1833333333, "max_line_length": 66, "alphanum_fraction": 0.5995492111, "include": true, "reason": "import numpy", "num_tokens": 419}
|
from collections import OrderedDict
from typing import List, Dict
import cma
import numpy as np
import torch
from torch import nn
def rnn_adjust_parameters(state_dict: Dict[str, torch.Tensor]) -> OrderedDict:
state_dict = {
k.replace("model.", ""): v for k, v in state_dict.items() if "vae" not in k
}
return OrderedDict({k.strip("_l0"): v for k, v in state_dict.items()})
def flatten_parameters(params: torch.Tensor) -> np.ndarray:
"""Flattening parameters.
:args params: generator of parameters (as returned by module.parameters())
:returns: flattened parameters (i.e. one tensor of dimension 1 with all
parameters concatenated)
"""
return torch.cat([p.detach().view(-1) for p in params], dim=0).cpu().numpy()
def unflatten_parameters(
params: np.ndarray, example: torch.Tensor, device: str
) -> List[torch.Tensor]:
"""Unflatten parameters.
:args params: parameters as a single 1D np array
:args example: generator of parameters (as returned by module.parameters()),
used to reshape params
:args device: where to store unflattened parameters
:returns: unflattened parameters
"""
params = torch.Tensor(params).to(device)
idx = 0
unflattened = []
for e_p in example:
unflattened += [params[idx : idx + e_p.numel()].view(e_p.size())]
idx += e_p.numel()
return unflattened
def load_parameters(params: List[torch.Tensor], controller: nn.Module) -> None:
"""Load flattened parameters into controller.
:args params: parameters as a single 1D np array
:args controller: module in which params is loaded
"""
proto = next(controller.parameters())
params = unflatten_parameters(params, controller.parameters(), proto.device)
for p, p_0 in zip(controller.parameters(), params):
p.data.copy_(p_0)
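# Hedged usage sketch (not part of the original module): flatten a small
# controller for CMA-ES and load the resulting vector straight back.
def _example_flatten_load_round_trip():
    controller = nn.Linear(32, 3)
    theta = flatten_parameters(controller.parameters())  # 1-D numpy array
    load_parameters(theta, controller)  # weights are left unchanged
    return theta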
def to_log_cma(es: cma.CMAEvolutionStrategy) -> Dict[str, float]:
to_log = {}
# rewards (best, worst, mean, std)
to_log["best_performance"] = -min(es.fit.fit)
to_log["worst_performance"] = -max(es.fit.fit)
to_log["mean_performance"] = -np.mean(es.fit.fit)
to_log["std_performance"] = -np.std(es.fit.fit)
to_log["performance_mean_plus_std"] = (
to_log["mean_performance"] + to_log["std_performance"]
)
to_log["performance_mean_minus_std"] = (
to_log["mean_performance"] - to_log["std_performance"]
)
# iterations and number of evaluations
to_log["iteration"] = es.countiter
to_log["function_evals"] = es.countevals
# misc
to_log["axis_ratio"] = es.D.max() / es.D.min()
to_log["sigma"] = es.sigma
to_log["min_max"] = es.sigma * min(es.sigma_vec * es.dC ** 0.5)
to_log["std"] = es.sigma * max(es.sigma_vec * es.dC ** 0.5)
return to_log
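# Hedged usage sketch (not part of the original module): a minimal CMA-ES loop
# around the helpers above. `evaluate` is a user-supplied callable returning a
# reward for a flat parameter vector (hypothetical, not defined here).
def _example_cma_loop(controller, evaluate, sigma0=0.1, max_iters=10):
    es = cma.CMAEvolutionStrategy(flatten_parameters(controller.parameters()), sigma0)
    logs = []
    for _ in range(max_iters):
        candidates = es.ask()
        fitnesses = [-evaluate(c) for c in candidates]  # CMA-ES minimises
        es.tell(candidates, fitnesses)
        logs.append(to_log_cma(es))
    return logs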
|
{"hexsha": "789fb27819504aeedf9713d08fb8cc2749ae602a", "size": 2783, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pl_modules/controller_utils.py", "max_stars_repo_name": "mikcnt/dlai-project", "max_stars_repo_head_hexsha": "56fa0d1e682d07cd89cb011400b0a4ef92ec9265", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-09T20:23:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T22:29:49.000Z", "max_issues_repo_path": "src/pl_modules/controller_utils.py", "max_issues_repo_name": "mikcnt/dlai-project", "max_issues_repo_head_hexsha": "56fa0d1e682d07cd89cb011400b0a4ef92ec9265", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pl_modules/controller_utils.py", "max_forks_repo_name": "mikcnt/dlai-project", "max_forks_repo_head_hexsha": "56fa0d1e682d07cd89cb011400b0a4ef92ec9265", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5301204819, "max_line_length": 83, "alphanum_fraction": 0.6690621631, "include": true, "reason": "import numpy", "num_tokens": 714}
|
# inspired by https://github.com/drawbridge/keras-mmoe
import tensorflow as tf
import os
import numpy as np
import pandas as pd
from utils_mod import tf_itr, MAP_at_10
from keras import backend as K
from keras.optimizers import Adam
from keras.layers import Input, Dense, Concatenate
from keras.initializers import VarianceScaling
from keras.models import Model
# from FCNmodel import build_model
from mmoe import MMoE
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
def train(train_relative_path, val_relative_path, FOLDER):
weight_folder = 'weights'
if not os.path.exists(weight_folder): os.mkdir(weight_folder)
batch = 128
n_itr = 100
n_eph = 15
label_num = 3862
_, x1_val, x2_val, y_val = next(tf_itr(val_relative_path, 10000, label_num=label_num, FOLDER=FOLDER))
val_in = np.concatenate((x1_val, x2_val), axis=1)
cate = pd.read_csv('Category.csv', sep=',')['Label_num'].values
# Set up the input layer
input_layer = Input(shape=(1152,))
# Set up MMoE layer
mmoe_layers = MMoE(
units=128,
num_experts=8,
num_tasks=25
)(input_layer)
output_layers = []
#Build tower layer from MMoE layer
for index, task_layer in enumerate(mmoe_layers):
tower_layer = Dense(
units=16,
activation='relu',
kernel_initializer=VarianceScaling())(task_layer)
output_layer = Dense(
units=cate[index],
activation='sigmoid',
kernel_initializer=VarianceScaling())(tower_layer)
output_layers.append(output_layer)
out = Concatenate(axis=1)(output_layers)
# Compile model
model = Model(inputs=input_layer, outputs=out)
adam_optimizer = Adam(lr=0.001)
model.compile(
loss='categorical_crossentropy',
optimizer=adam_optimizer
)
# Print out model architecture summary
model.summary()
cnt = 0
for e in range(n_eph):
for d in tf_itr(train_relative_path, batch, label_num=label_num, FOLDER=FOLDER):
_, x1_trn, x2_trn, y_trn = d
trn_in = np.concatenate((x1_trn, x2_trn), axis=1)
loss = model.train_on_batch(trn_in, y_trn)
cnt += 1
if cnt % n_itr == 0:
y_prd = model.predict(val_in, verbose=False, batch_size=1000)
g = MAP_at_10(y_prd, y_val)
print('loss %.5f val GAP %0.5f; epoch: %d; iters: %d' % (loss, g, e, cnt))
model.save_weights(weight_folder + '/%0.5f_%d_%d.h5' % (g, e, cnt))
if __name__ == '__main__':
train_relative_path = 'train'
val_relative_path = 'validation'
FOLDER = ''
train(train_relative_path, val_relative_path, FOLDER)
|
{"hexsha": "04f0d803b9c583e2e6916cce81c68e72607eee6e", "size": 2810, "ext": "py", "lang": "Python", "max_stars_repo_path": "MMOE/train_mmoe.py", "max_stars_repo_name": "innovator-zero/CS410_AI_Project2", "max_stars_repo_head_hexsha": "2d33eb43274dcf6875f48b656ab7c7504ad2f7fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MMOE/train_mmoe.py", "max_issues_repo_name": "innovator-zero/CS410_AI_Project2", "max_issues_repo_head_hexsha": "2d33eb43274dcf6875f48b656ab7c7504ad2f7fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MMOE/train_mmoe.py", "max_forks_repo_name": "innovator-zero/CS410_AI_Project2", "max_forks_repo_head_hexsha": "2d33eb43274dcf6875f48b656ab7c7504ad2f7fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5434782609, "max_line_length": 105, "alphanum_fraction": 0.6576512456, "include": true, "reason": "import numpy", "num_tokens": 738}
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple, List, Union
import numpy as np
import tensorflow as tf
from nncf.tensorflow.layers.operation import InputType
from nncf.tensorflow.quantization.initializers.utils import get_per_channel_history
from nncf.tensorflow.quantization.initializers.utils import discard_zeros
from nncf.tensorflow.quantization.initializers.utils import get_axes
class MinMaxStatisticCollector:
"""
Collector uses min of minimum values and max of maximum values.
"""
def __init__(self, per_channel: bool, channel_axes: Union[int, Tuple[int], List[int]], input_type: str):
self._per_channel = per_channel
self._channel_axes = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
self._input_type = input_type
self._all_min_values = []
self._all_max_values = []
@property
def min(self) -> tf.Tensor:
return tf.math.reduce_min(tf.stack(self._all_min_values), axis=0)
@property
def max(self) -> tf.Tensor:
return tf.math.reduce_max(tf.stack(self._all_max_values), axis=0)
def prepare_statistics(self):
if self._per_channel:
new_shape = np.prod(self._all_min_values[0].shape).item()
for i, _ in enumerate(self._all_min_values):
self._all_min_values[i] = tf.reshape(self._all_min_values[i], shape=new_shape)
self._all_max_values[i] = tf.reshape(self._all_max_values[i], shape=new_shape)
def call(self, inputs: tf.Tensor):
# No need to store extra statistics in memory since weights won't change during range init
if not self._all_min_values or self._input_type == InputType.INPUTS:
ndims = len(inputs.shape)
axis = get_axes(ndims, self._per_channel, self._channel_axes)
if self._input_type == InputType.INPUTS:
axis.remove(0)
self._all_min_values.extend(tf.unstack(tf.reduce_min(inputs, axis=axis)))
self._all_max_values.extend(tf.unstack(tf.reduce_max(inputs, axis=axis)))
elif self._input_type == InputType.WEIGHTS:
self._all_min_values.append(tf.reduce_min(inputs, axis=axis))
self._all_max_values.append(tf.reduce_max(inputs, axis=axis))
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
class MeanMinMaxStatisticsCollector:
"""
Collector uses mean of minimum values and mean of maximum values.
"""
def __init__(self, per_channel: bool, channel_axes: Union[int, Tuple[int], List[int]], input_type: str):
self._per_channel = per_channel
self._channel_axes = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
self._input_type = input_type
self._all_min_values = []
self._all_max_values = []
@property
def min(self) -> tf.Tensor:
return tf.math.reduce_mean(tf.stack(self._all_min_values), axis=0)
@property
def max(self) -> tf.Tensor:
return tf.math.reduce_mean(tf.stack(self._all_max_values), axis=0)
def prepare_statistics(self):
if self._per_channel:
new_shape = np.prod(self._all_min_values[0].shape).item()
for i, _ in enumerate(self._all_min_values):
self._all_min_values[i] = tf.reshape(self._all_min_values[i], shape=new_shape)
self._all_max_values[i] = tf.reshape(self._all_max_values[i], shape=new_shape)
def call(self, inputs: tf.Tensor):
# No need to store extra statistics in memory since weights won't change during range init
if not self._all_min_values or self._input_type == InputType.INPUTS:
ndims = len(inputs.shape)
axis = get_axes(ndims, self._per_channel, self._channel_axes)
if self._input_type == InputType.INPUTS:
axis.remove(0)
self._all_min_values.extend(tf.unstack(tf.reduce_min(inputs, axis=axis)))
self._all_max_values.extend(tf.unstack(tf.reduce_max(inputs, axis=axis)))
elif self._input_type == InputType.WEIGHTS:
self._all_min_values.append(tf.reduce_min(inputs, axis=axis))
self._all_max_values.append(tf.reduce_max(inputs, axis=axis))
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
class MedianMADStatisticCollector:
"""
Collector uses three-sigma approach with the assumption of normal distribution by default.
"""
def __init__(self, per_channel: bool, channel_axes: Union[int, Tuple[int], List[int]], input_type: str):
self._per_channel = per_channel
self._channel_axes = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
self._input_type = input_type
self._samples = []
self._median = None
self._mad = None
# Constant factor depends on the distribution form. Assuming normal distribution - the factor is 1.4826.
self.distribution_factor = 1.4826230
@property
def min(self) -> np.ndarray:
return (self._median - 3 * self.distribution_factor * self._mad).astype(np.float32)
@property
def max(self) -> np.ndarray:
return (self._median + 3 * self.distribution_factor * self._mad).astype(np.float32)
def prepare_statistics(self):
ndims = len(self._samples[0].shape)
axis = get_axes(ndims, self._per_channel, self._channel_axes, add_dim=True)
inputs_tensor = np.array(self._samples)
if self._per_channel:
per_channel_histories = get_per_channel_history(inputs_tensor, axis)
per_channel_median = []
per_channel_mad = []
for channel_history in per_channel_histories:
channel_history = discard_zeros(channel_history)
median = np.median(channel_history)
per_channel_median.append(median)
inputs_median_diff = abs(channel_history - median)
per_channel_mad.append(np.median(inputs_median_diff))
self._median = np.array(per_channel_median)
self._mad = np.array(per_channel_mad)
else:
inputs_tensor = inputs_tensor.flatten()
inputs_tensor_flat = discard_zeros(inputs_tensor)
self._median = np.median(inputs_tensor_flat)
self._mad = np.median(abs(inputs_tensor_flat - self._median))
def call(self, inputs: tf.Tensor):
# No need to store extra statistics in memory since weights won't change during range init
if not self._samples or self._input_type == InputType.INPUTS:
self._samples.append(inputs.numpy())
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
class PercentileStatisticCollector:
"""
Collector uses percentiles to estimate min and max of all data history.
"""
def __init__(self, per_channel: bool, channel_axes: Union[int, Tuple[int], List[int]], input_type: str,
min_percentile: float, max_percentile: float):
self._per_channel = per_channel
self._channel_axes = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
self._input_type = input_type
self._min_percentile = min_percentile
self._max_percentile = max_percentile
self._samples = []
self._min_values = None
self._max_values = None
@property
def min(self) -> np.ndarray:
return self._min_values.astype(np.float32)
@property
def max(self) -> np.ndarray:
return self._max_values.astype(np.float32)
def prepare_statistics(self):
ndims = len(self._samples[0].shape)
axis = get_axes(ndims, self._per_channel, self._channel_axes, add_dim=True)
inputs_tensor = np.array(self._samples)
if self._per_channel:
per_channel_histories = get_per_channel_history(inputs_tensor, axis)
per_channel_max_vals = []
per_channel_min_vals = []
for channel_history in per_channel_histories:
min_val = np.percentile(channel_history, self._min_percentile)
max_val = np.percentile(channel_history, self._max_percentile)
per_channel_min_vals.append(min_val)
per_channel_max_vals.append(max_val)
self._min_values = np.array(per_channel_min_vals)
self._max_values = np.array(per_channel_max_vals)
else:
inputs_tensor_flat = inputs_tensor.flatten()
self._min_values = np.percentile(inputs_tensor_flat, self._min_percentile)
self._max_values = np.percentile(inputs_tensor_flat, self._max_percentile)
def call(self, inputs: tf.Tensor):
# No need to store extra statistics in memory since weights won't change during range init
if not self._samples or self._input_type == InputType.INPUTS:
self._samples.append(inputs.numpy())
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
class MeanPercentileStatisticCollector:
"""
Collector uses percentiles to estimate min and max of data per step
and then averages the statistics.
"""
def __init__(self, per_channel: bool, channel_axes: Union[int, Tuple[int], List[int]], input_type: str,
min_percentile: float, max_percentile: float):
self._per_channel = per_channel
self._channel_axes = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
self._input_type = input_type
self._min_percentile = min_percentile
self._max_percentile = max_percentile
self._all_min_values = []
self._all_max_values = []
@property
def min(self) -> tf.Tensor:
return tf.math.reduce_mean(tf.stack(self._all_min_values), axis=0)
@property
def max(self) -> tf.Tensor:
return tf.math.reduce_mean(tf.stack(self._all_max_values), axis=0)
def prepare_statistics(self):
if self._per_channel:
new_shape = np.prod(self._all_min_values[0].shape).item()
for i, _ in enumerate(self._all_min_values):
self._all_min_values[i] = tf.reshape(self._all_min_values[i], shape=new_shape)
self._all_max_values[i] = tf.reshape(self._all_max_values[i], shape=new_shape)
def _percentile(self, inputs: tf.Tensor, pc: float, axis: list):
return np.percentile(inputs.numpy(), pc, axis)
def call(self, inputs: tf.Tensor):
# No need to store extra statistics in memory since weights won't change during range init
if not self._all_min_values or self._input_type == InputType.INPUTS:
ndims = len(inputs.shape)
axis = get_axes(ndims, self._per_channel, self._channel_axes)
if self._input_type == InputType.INPUTS:
axis.remove(0)
min_vals = tf.py_function(self._percentile, [inputs, self._min_percentile, axis], Tout=tf.float32)
max_vals = tf.py_function(self._percentile, [inputs, self._max_percentile, axis], Tout=tf.float32)
self._all_min_values.extend(tf.unstack(min_vals))
self._all_max_values.extend(tf.unstack(max_vals))
elif self._input_type == InputType.WEIGHTS:
min_vals = tf.py_function(self._percentile, [inputs, self._min_percentile, axis], Tout=tf.float32)
max_vals = tf.py_function(self._percentile, [inputs, self._max_percentile, axis], Tout=tf.float32)
self._all_min_values.append(min_vals)
self._all_max_values.append(max_vals)
def __call__(self, *args, **kwargs):
self.call(*args, **kwargs)
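# Hedged usage sketch (not part of NNCF): estimate a per-tensor activation
# range over a few calibration batches. `calibration_batches` is a
# hypothetical iterable of arrays or tensors.
def _example_collector_usage(calibration_batches):
    collector = MeanMinMaxStatisticsCollector(per_channel=False, channel_axes=-1,
                                              input_type=InputType.INPUTS)
    for batch in calibration_batches:
        collector(tf.convert_to_tensor(batch))
    collector.prepare_statistics()
    return collector.min, collector.max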
|
{"hexsha": "77fb0f3514fd36d10e75f5c49a87fd7c5f8b0deb", "size": 12310, "ext": "py", "lang": "Python", "max_stars_repo_path": "nncf/tensorflow/quantization/initializers/collectors.py", "max_stars_repo_name": "sarthakpati/nncf", "max_stars_repo_head_hexsha": "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 310, "max_stars_repo_stars_event_min_datetime": "2020-10-29T09:22:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T04:53:34.000Z", "max_issues_repo_path": "nncf/tensorflow/quantization/initializers/collectors.py", "max_issues_repo_name": "sarthakpati/nncf", "max_issues_repo_head_hexsha": "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 615, "max_issues_repo_issues_event_min_datetime": "2020-10-28T10:22:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T18:09:23.000Z", "max_forks_repo_path": "nncf/tensorflow/quantization/initializers/collectors.py", "max_forks_repo_name": "sarthakpati/nncf", "max_forks_repo_head_hexsha": "29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 86, "max_forks_repo_forks_event_min_datetime": "2020-10-28T11:34:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:00:35.000Z", "avg_line_length": 44.1218637993, "max_line_length": 114, "alphanum_fraction": 0.6707554833, "include": true, "reason": "import numpy", "num_tokens": 2751}
|
using SuccessiveConvexProgrammings
using LinearAlgebra
function my_func_array(x, u, t, k)
A = [1 2;
3 4;
5 6]
B = Matrix(I, 3, 3)
return A*x + B*u
end
x = [1, 2]
u = [3, 4, 5]
t = [6, 7]
k = [8, 9]
jacob = get_jacobian(my_func_array, x, u, t, k)
|
{"hexsha": "62fd8d9370558bcea4583722ceeb68a99f7051a7", "size": 279, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/linearisers.jl", "max_stars_repo_name": "JinraeKim/SuccessiveConvexProgrammings.jl", "max_stars_repo_head_hexsha": "a367a5dac91a459d889a745611227cb14b0152e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/linearisers.jl", "max_issues_repo_name": "JinraeKim/SuccessiveConvexProgrammings.jl", "max_issues_repo_head_hexsha": "a367a5dac91a459d889a745611227cb14b0152e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-23T03:23:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-02T04:46:49.000Z", "max_forks_repo_path": "test/linearisers.jl", "max_forks_repo_name": "JinraeKim/SuccessiveConvexProgrammings.jl", "max_forks_repo_head_hexsha": "a367a5dac91a459d889a745611227cb14b0152e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-11T22:46:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T22:46:02.000Z", "avg_line_length": 15.5, "max_line_length": 47, "alphanum_fraction": 0.5483870968, "num_tokens": 121}
|
from random import shuffle, choice, random, sample, randint
from typing import List, Type, Set, Dict, Tuple
from datetime import datetime
import networkx as nx
import numpy as np
import math
from LAMARCK_ML.architectures import DataFlow
from LAMARCK_ML.architectures.IOMapping_pb2 import IOMappingProto
from LAMARCK_ML.architectures.NeuralNetwork_pb2 import NeuralNetworkProto
from LAMARCK_ML.architectures.functions import Function, InvalidFunctionType
from LAMARCK_ML.architectures.interface import ArchitectureInterface
from LAMARCK_ML.architectures.variables import Variable
from LAMARCK_ML.data_util import TypeShape, IOLabel
from LAMARCK_ML.data_util.attribute import attr2pb, pb2attr, value2pb, pb2val
from LAMARCK_ML.reproduction.methods import Mutation, Recombination
class NeuralNetwork(ArchitectureInterface, DataFlow, Mutation.Interface, Recombination.Interface):
arg_INPUTS = 'inputs'
arg_OUTPUT_TARGETS = 'output_targets'
arg_FUNCTIONS = 'functions'
arg_NAMELESS = 'nameless'
arg_RECOMBINATION_PROBABILITY = 'cross_prob'
arg_MIN_DEPTH = 'min_depth'
arg_MAX_DEPTH = 'max_depth'
arg_MAX_BRANCH = 'max_branch'
# _nameIdx = 0
def __init__(self, **kwargs):
super(NeuralNetwork, self).__init__(**kwargs)
self.attr = dict()
self.input_mapping = kwargs.get(self.arg_INPUTS, {})
self.output_targets = kwargs.get(self.arg_OUTPUT_TARGETS, {})
# TODO: check parameter types
self.attr[self.arg_RECOMBINATION_PROBABILITY] = kwargs.get(self.arg_RECOMBINATION_PROBABILITY, .5)
self.function_cls = kwargs.get(self.arg_FUNCTIONS)
self.attr[self.arg_MIN_DEPTH] = kwargs.get(self.arg_MIN_DEPTH, 2)
self.attr[self.arg_MAX_DEPTH] = kwargs.get(self.arg_MAX_DEPTH, 8)
self.attr[self.arg_MAX_BRANCH] = kwargs.get(self.arg_MAX_BRANCH, 3)
try:
if not (isinstance(self.function_cls, list) and all(
[isinstance(f, type) and issubclass(f, Function) for f in self.function_cls])):
raise InvalidFunctionType("Invalid function type in 'functions'!")
except Exception:
raise InvalidFunctionType("Invalid function type in 'functions'!")
self.variable_pool = dict()
blueprint, self.output_mapping, input_df_obj = NeuralNetwork.random_networks(
function_pool=set(self.function_cls),
input_data_flow=self.input_mapping,
output_targets=self.output_targets,
min_depth=self.attr[self.arg_MIN_DEPTH],
max_depth=self.attr[self.arg_MAX_DEPTH],
max_recursion_depth=self.attr[self.arg_MAX_BRANCH]
)
self.functions, network_outputs = NeuralNetwork.build_network(
data_flow_inputs=input_df_obj,
blueprint=blueprint,
variable_pool=self.variable_pool,
output_mapping=self.output_mapping
)
self._inputs = dict()
for in_label, (nts_id_name, nts, obj) in self.input_mapping.items():
self._inputs[in_label] = (nts_id_name, obj)
self._id_name = self.getNewName() if not kwargs.get(self.arg_NAMELESS, False) else None
def __eq__(self, other):
if (isinstance(other, self.__class__)
and self.id_name == other.id_name
and len(self.function_cls) == len(other.function_cls) == len(
[f for f in self.function_cls if f in other.function_cls])
            and len(self._inputs) == len(other._inputs) == len(
      [key for key in self._inputs if self._inputs.get(key) == other._inputs.get(key)])
and len(self.output_targets) == len(other.output_targets)
and all([any([ot == st for ot in other.output_targets]) for st in self.output_targets])
and len(self.variable_pool) == len(other.variable_pool) == len(
[v for v in self.variable_pool if set(self.variable_pool.get(v)) == set(other.variable_pool.get(v))])
and len(self.output_mapping) == len(other.output_mapping) == len(
[True for om in self.output_mapping if om in other.output_mapping])
and len(self.functions) == len(other.functions) == len(
[True for f in self.functions if f in other.functions])
and len(self.attr) == len(other.attr) == len(
{k: self.attr.get(k) for k in self.attr if self.attr.get(k) == other.attr.get(k)})
):
return True
return False
def __getstate__(self):
    return self.get_pb().SerializeToString()
def __setstate__(self, state):
if isinstance(state, str) or isinstance(state, bytes):
_nn = NeuralNetworkProto()
_nn.ParseFromString(state)
elif isinstance(state, NeuralNetworkProto):
_nn = state
else:
return
self._id_name = _nn.id_name
self.function_cls = [Function.getClassByName(f_cls) for f_cls in _nn.function_cls]
self.output_targets = dict([pb2val(v) for v in _nn.output_ntss.v])
self._inputs = dict([(ioMP.in_label, (ioMP.out_label, ioMP.df_id_name)) for ioMP in _nn.input_mapping])
self.output_mapping = dict([(ioMP.in_label, (ioMP.out_label, ioMP.df_id_name)) for ioMP in _nn.output_mapping])
self.functions = [Function.get_instance(_f) for _f in _nn.functions]
self.variable_pool = dict()
for _v in _nn.variables:
v_ = Variable.__new__(Variable)
v_.__setstate__(_v)
self.variable_pool[v_.name] = self.variable_pool.get(v_.name, []) + [v_]
self.attr = dict([pb2attr(attr) for attr in _nn.attr])
def distinct_copy(self):
result = NeuralNetwork.__new__(NeuralNetwork)
result._id_name = self.id_name
result.function_cls = list(self.function_cls)
result.output_targets = dict([(label, value.__copy__()) for label, value in self.output_targets.items()])
result._inputs = dict(self._inputs)
result.output_mapping = dict(self.output_mapping)
result.functions = list()
function_mapping = dict()
for _f in self.functions:
new_f = Function.__new__(Function)
new_f.__setstate__(_f.get_pb())
new_f._id_name = new_f.getNewName(new_f)
result.functions.append(new_f)
function_mapping[_f.id_name] = new_f.id_name
for _f in result.functions:
for key, (v1, old_f_id) in _f.input_mapping.items():
_f.input_mapping[key] = (v1, function_mapping.get(old_f_id, old_f_id))
for in_label, (out_label, f_id) in result.output_mapping.items():
result.output_mapping[in_label] = (out_label, function_mapping.get(f_id, f_id))
result.variable_pool = dict([(key, list(value_l)) for key, value_l in self.variable_pool.items()])
result.attr = dict([pb2attr(attr) for attr in [attr2pb(key, value) for key, value in self.attr.items()]])
return result
@classmethod
def getNewName(cls):
    name = cls.__name__ + '_' + str(datetime.now().timestamp()) + '_%09i' % randint(0, 10**9 - 1)
return name
@staticmethod
def build_network(data_flow_inputs: List[str], blueprint: nx.DiGraph, variable_pool, output_mapping):
stack = list(blueprint.nodes)
build_nodes = dict()
build_functions = list()
output_functions = dict()
while stack:
node = stack.pop(0)
node_inputs = dict()
node_param_dict = blueprint.nodes[node]
f_class = node_param_dict['DataFlowObj']
if f_class in data_flow_inputs:
build_nodes[node] = (node_param_dict['ntss'], f_class)
else:
node2key = blueprint.nodes[node]['node2key']
all_found = True
for pred in blueprint.predecessors(node):
if pred in build_nodes:
for (key, value) in node2key[pred]:
_ntss, _id_name = build_nodes[pred]
node_inputs[key] = (value, _ntss, _id_name)
else:
all_found = False
break
if all_found:
parameters, possibilities = f_class.generateParameters(input_dict=node_inputs,
expected_outputs=node_param_dict['ntss'],
variable_pool=variable_pool)
chosen_parameters = np.random.choice(parameters, size=1, replace=False, p=possibilities)[0]
build_f = f_class(**chosen_parameters)
build_nodes[node] = (build_f.outputs, build_f.id_name)
build_functions.append(build_f)
if node in output_mapping:
out_nts_id, target_nts_id = output_mapping.pop(node)
output_mapping[target_nts_id] = (out_nts_id, build_f.id_name)
output_functions[target_nts_id] = build_f
else:
stack.append(node)
return build_functions, output_functions
@staticmethod
def reachable(
input_nts: TypeShape,
target_nts: TypeShape,
max_depth: int,
function_pool: set,
):
class FoundException(Exception):
pass
if input_nts.__cmp__(target_nts) == -1 and target_nts.__cmp__(input_nts) == -1:
return True
low_list, high_list = [input_nts], [input_nts]
try:
for _ in range(max_depth):
n_low_list = list(low_list)
for low_ in low_list:
for func in function_pool:
_min = func.min_transform(low_)
if _min is None:
continue
if _min.__cmp__(target_nts) == -1:
raise FoundException()
new_nts = True
for curr in n_low_list:
cmp = _min.__cmp__(curr)
if cmp == -1:
n_low_list.remove(curr)
n_low_list.append(_min)
new_nts = False
break
if cmp == 0:
continue
break
if new_nts:
n_low_list.append(_min)
low_list = n_low_list
except Exception as e:
if not isinstance(e, FoundException):
return False
try:
for i in range(max_depth):
n_high_list = list(high_list)
for high_ in high_list:
for func in function_pool:
_max = func.max_transform(high_)
if _max is None:
continue
if target_nts.__cmp__(_max) == -1:
raise FoundException()
new_nts = True
for curr in n_high_list:
cmp = curr.__cmp__(_max)
if cmp == -1:
n_high_list.remove(curr)
n_high_list.append(_max)
new_nts = False
break
if cmp == 0:
continue
break
if new_nts:
n_high_list.append(_max)
high_list = n_high_list
except Exception as e:
if not isinstance(e, FoundException):
return False
return True
# min_reached = False
# max_reached = False
# stack = [(input_nts, input_nts)]
# for _ in range(max_depth):
# new_stack = list()
# for _min, _max in stack:
# for func in function_pool:
# min_, max_ = func.min_transform(_min), func.max_transform(_max)
# if min_ is None or max_ is None:
# continue
# if not min_reached and min_.__cmp__(target_nts) == -1:
# min_reached = True
# if not max_reached and target_nts.__cmp__(max_) == -1:
# max_reached = True
# if min_reached and max_reached:
# return True
# new_stack.append((min_, max_))
# stack = new_stack
return False
@staticmethod
def children_iter(input_ntss: Dict[str, TypeShape], target_output: TypeShape,
is_reachable, function_pool,
recursion_depth, max_recursion_depth=1):
random_f_pool = list(function_pool)
pool_dict = dict()
while random_f_pool:
_f = choice(random_f_pool)
possibility_iter = pool_dict.get(_f)
if possibility_iter is None:
possibility_iter = _f.possible_output_shapes(input_ntss, target_output,
is_reachable,
max_possibilities=5)
pool_dict[_f] = possibility_iter
dts = next(possibility_iter, None)
if dts is None:
random_f_pool.remove(_f)
continue
remaining_inputs, out_nts, in_out_mapping = dts
if recursion_depth > max_recursion_depth and len(remaining_inputs) > 0:
continue
yield (out_nts, _f, remaining_inputs, in_out_mapping)
@staticmethod
def simple_path(
input_node: str,
input_ntss: Dict[str, TypeShape],
output_shape: TypeShape,
output_label: str,
      blueprint: nx.DiGraph,
min_depth: int,
max_depth: int,
function_pool: set,
max_recursion_depth: int = 3,
recursion_depth: int = 0,
):
def clean_up(network, node_name):
dep_stack = network.nodes[node_name].get('dep', list())
while dep_stack:
node = dep_stack.pop()
dep_stack.extend(network.nodes[node].get('dep', []))
network.remove_node(node)
network.remove_node(node_name)
if max_depth < 0:
return
input_ntss = dict(
[(label, nts) for label, nts in input_ntss.items() if
NeuralNetwork.reachable(nts, output_shape, max_depth, function_pool)
])
if not len(input_ntss) > 0:
return
elif max_depth == 0:
for label, nts in input_ntss.items():
if NeuralNetwork.reachable(nts, output_shape, 0, function_pool):
yield input_node, {output_label: label}, []
stack = [(0, input_node, NeuralNetwork.children_iter(input_ntss=input_ntss, target_output=output_shape,
is_reachable=lambda x, y:
NeuralNetwork.reachable(x, y, max_depth - 1, function_pool),
function_pool=function_pool,
recursion_depth=recursion_depth,
max_recursion_depth=max_recursion_depth))]
created_nodes = []
while stack:
depth, last_node, children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
if depth > 0:
clean_up(blueprint, last_node)
created_nodes.remove(last_node)
elif depth <= max_depth:
out_ntss, _f, remaining_inputs, in_out_mapping = child
dep = list()
node2key = dict([(last_node, [(key, value)]) for key, value in in_out_mapping.items()])
out_nodes = list()
for rem_in_label, rem_in_shape in remaining_inputs.items():
temp_nodes = list(blueprint.nodes)
out_node = None
while temp_nodes:
node = choice(temp_nodes)
temp_nodes.remove(node)
out_node, nts_id, nodes = next(NeuralNetwork.simple_path(input_node=node,
input_ntss=blueprint.nodes[node]['ntss'],
output_shape=rem_in_shape,
output_label=rem_in_label,
blueprint=blueprint,
min_depth=0,
max_depth=depth,
recursion_depth=recursion_depth + 1,
function_pool=function_pool,
max_recursion_depth=max_recursion_depth
), (None, None, None))
if out_node is not None:
for key, value in nts_id.items():
node2key[out_node] = node2key.get(out_node, []) + [(key, value)]
dep.extend(nodes)
out_nodes.append(out_node)
break
if not temp_nodes and not out_node:
break
if sum([len(v) for v in node2key.values()]) <= len(remaining_inputs):
while dep:
node = dep.pop()
            dep.extend(blueprint.nodes[node].get('dep', []))
blueprint.remove_node(node)
continue
n_name = '{' + ', '.join(
[label + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) for label, nts in out_ntss.items()]) + '}'
index = 0
while True:
if n_name + str(index) not in blueprint.nodes:
n_name += str(index)
break
index += 1
created_nodes.append(n_name)
blueprint.add_node(n_name, ntss=out_ntss, DataFlowObj=_f,
dep=dep, node2key=node2key)
for out_node in out_nodes:
blueprint.add_edge(out_node, n_name)
blueprint.add_edge(last_node, n_name)
for label, nts in out_ntss.items():
if nts.dtype == output_shape.dtype and \
nts.shape == output_shape.shape and \
depth >= min_depth:
yield n_name, {output_label: label}, created_nodes
pot = max_depth - depth - 1
if pot >= 0:
stack.append((depth + 1, n_name, NeuralNetwork.children_iter(input_ntss=out_ntss,
target_output=output_shape,
function_pool=function_pool,
recursion_depth=recursion_depth,
max_recursion_depth=max_recursion_depth,
is_reachable=lambda x, y:
NeuralNetwork.reachable(x, y, pot,
function_pool),
)))
else:
clean_up(blueprint, n_name)
created_nodes.remove(n_name)
else:
stack.pop()
if depth > 0:
clean_up(blueprint, last_node)
created_nodes.remove(last_node)
@staticmethod
def random_networks(function_pool: Set[Type[Function]] = None,
input_data_flow: Dict[str, Tuple[str, TypeShape, str]] = None,
output_targets: Dict[str, TypeShape] = None,
min_depth: int = 2,
max_depth: int = 5,
max_recursion_depth: int = 3
):
blueprint = nx.DiGraph()
nn_inputs = []
for inkey, (out_label, out_nts, df_obj_id) in input_data_flow.items():
if df_obj_id not in nn_inputs:
nn_inputs.append(df_obj_id)
n_name = '{' + inkey + ': ' + out_nts.dtype.__str__() + ', ' + str(out_nts.shape) + '}'
index = 0
while True:
if n_name + str(index) not in blueprint.nodes:
n_name += str(index)
break
index += 1
blueprint.add_node(n_name, ntss={out_label: out_nts}, DataFlowObj=df_obj_id)
inputs = list(blueprint.nodes)
shuffled_outputs = list(output_targets.items())
shuffle(shuffled_outputs)
output_mapping = dict()
for _out_label, _output in shuffled_outputs:
temp_inputs = list(inputs)
out_node = None
nts_id = None
      while temp_inputs:
_input = choice(temp_inputs)
temp_inputs.remove(_input)
out_node, nts_id, _ = next(NeuralNetwork.simple_path(input_node=_input,
input_ntss=blueprint.nodes[_input]['ntss'],
output_shape=_output,
output_label=_out_label,
blueprint=blueprint,
min_depth=min_depth,
max_depth=max_depth,
function_pool=function_pool,
max_recursion_depth=max_recursion_depth
), (None, None, None))
if out_node is not None:
break
if out_node is None:
raise Exception('Failed to generate Network!')
output_mapping[out_node] = (nts_id[_out_label], _out_label)
return blueprint, output_mapping, nn_inputs
def outputs(self) -> Set[TypeShape]:
return set(self.output_targets)
@property
def inputs(self) -> Dict[IOLabel, Tuple[IOLabel, str]]:
return self._inputs
@property
def id_name(self) -> str:
return self._id_name
def get_pb(self, result=None):
if not isinstance(result, NeuralNetworkProto):
result = NeuralNetworkProto()
result.id_name = self._id_name
for f_cls in self.function_cls:
result.function_cls.append(f_cls.get_cls_name())
# result.output_ntss.vs.extend([attr2pb(name=label, value=ts) for label, ts in self.output_targets.items()])
result.output_ntss.v.extend([value2pb(v) for v in self.output_targets.items()])
for in_label, (out_label, id_name) in self._inputs.items():
ioM = IOMappingProto()
ioM.in_label = in_label
ioM.out_label = out_label
ioM.df_id_name = id_name
result.input_mapping.append(ioM)
for in_label, (out_label, id_name) in self.output_mapping.items():
ioM = IOMappingProto()
ioM.in_label = in_label
ioM.out_label = out_label
ioM.df_id_name = id_name
result.output_mapping.append(ioM)
for _f in self.functions:
result.functions.append(_f.get_pb())
for v in self.variable_pool.values():
result.variables.extend([_v.get_pb() for _v in v])
result.attr.extend([attr2pb(key, value) for key, value in self.attr.items()])
return result
def norm(self, other):
if isinstance(other, self.__class__):
return sum([_f not in self.functions for _f in other.functions]) + \
sum([_f not in other.functions for _f in self.functions])
return -1
def mutate(self, prob):
def function_wise_mutation(NN, prob):
new_functions = list()
changed = False
function_mapping = dict()
for _f in NN.functions:
if isinstance(_f, Mutation.Interface) and random() < prob:
new_function = _f.mutate(prob)
else:
new_function = _f.__copy__()
new_functions.append(new_function)
if new_function != _f:
changed = True
function_mapping[_f.id_name] = new_function.id_name
for _f in new_functions:
for key, (v1, old_f_id) in _f.input_mapping.items():
_f.input_mapping[key] = (v1, function_mapping.get(old_f_id, old_f_id))
for in_label, (out_label, f_id) in NN.output_mapping.items():
NN.output_mapping[in_label] = (out_label, function_mapping.get(f_id, f_id))
NN.functions = new_functions
return changed
def structural_mutation(NN, prob):
changed = False
for _ in range(1):
if random() >= prob:
continue
id2function = dict()
predecessor = dict()
ancestor = dict()
for _f in NN.functions:
id2function[_f.id_name] = _f
for _f_input, (other_output, other_id) in _f.inputs.items():
predecessor[_f.id_name] = predecessor.get(_f.id_name, set()) | {other_id}
ancestor[other_id] = ancestor.get(other_id, set()) | {_f.id_name}
random_function = choice(NN.functions)
ancestors = dict()
stack = {random_function.id_name: 1}
while stack:
curr, depth = stack.popitem()
for anc in ancestor.get(curr, set()):
ancestors[anc] = max(ancestors.get(anc, 0), depth + 1)
stack[anc] = max(stack.get(anc, 0), depth + 1)
if len(ancestors) <= 0:
continue
changed = True
target_function, depth = choice(list(ancestors.items()))
target_function = id2function[target_function]
# remove target function
NN.functions.remove(target_function)
# create a new path
target_ancestor = set()
stack = {target_function.id_name}
while stack:
ancs = set(ancestor.get(stack.pop(), set()))
stack.update(ancs)
target_ancestor.update(ancs)
blueprint = nx.DiGraph()
random_node = None
build_nodes = dict()
for _f in NN.functions:
if _f.id_name in target_ancestor:
continue
n_name = '{' + ', '.join(
[label + ': ' + nts.dtype.__str__() + ', ' + str(nts.shape) for label, nts in _f.outputs.items()]) + '}'
index = 0
while True:
if n_name + str(index) not in blueprint.nodes:
n_name += str(index)
break
index += 1
if _f.id_name == random_function.id_name:
random_node = n_name
build_nodes[n_name] = (_f.outputs, _f.id_name)
blueprint.add_node(n_name, ntss=_f.outputs, DataFlowObj=_f)
output_labels = dict()
if target_function.id_name in ancestor:
for anc in ancestor[target_function.id_name]:
for in_label, (out_label, f_id) in id2function[anc].inputs.items():
if not f_id == target_function.id_name:
continue
output_labels[out_label] = output_labels.get(out_label, []) + [(in_label, anc)]
for in_label, (out_label, f_id) in NN.output_mapping.items():
if not f_id == target_function.id_name:
continue
output_labels[out_label] = output_labels.get(out_label, []) + [(in_label, None)]
new_functions = set()
for out_label, out_nts in target_function.outputs.items():
out_node, nts_id, new_nodes = \
next(NeuralNetwork.simple_path(input_node=random_node,
input_ntss=random_function.outputs,
output_shape=out_nts,
output_label=out_label,
blueprint=blueprint,
min_depth=0,
max_depth=depth + 1,
function_pool=NN.function_cls,
max_recursion_depth=self.attr[self.arg_MAX_BRANCH],
), (None, None, None))
while new_nodes:
node = new_nodes.pop(0)
node_inputs = dict()
node_param_dict = blueprint.nodes[node]
f_class = node_param_dict['DataFlowObj']
node2key = blueprint.nodes[node]['node2key']
all_found = True
for pred in blueprint.predecessors(node):
if pred in build_nodes:
for (key, value) in node2key[pred]:
_ntss, _id_name = build_nodes[pred]
node_inputs[key] = (value, _ntss, _id_name)
else:
if pred not in new_nodes:
new_nodes.append(pred)
all_found = False
break
if not all_found:
new_nodes.append(node)
continue
parameters, possibilities = f_class.generateParameters(input_dict=node_inputs,
expected_outputs=node_param_dict['ntss'],
variable_pool=NN.variable_pool)
chosen_parameters = np.random.choice(parameters, size=1, replace=False, p=possibilities)[0]
build_f = f_class(**chosen_parameters)
build_nodes[node] = (build_f.outputs, build_f.id_name)
NN.functions.append(build_f)
new_functions.add(build_f.id_name)
id2function[build_f.id_name] = build_f
for _f_input, (other_output, other_id) in build_f.inputs.items():
predecessor[build_f.id_name] = predecessor.get(build_f.id_name, set()) | {other_id}
ancestor[other_id] = ancestor.get(other_id, set()) | {build_f.id_name}
for in_label, f_id in output_labels[out_label]:
_, out_node_id = build_nodes[out_node]
ancestor[out_node_id] = ancestor.get(out_node_id, set()) | {f_id}
if f_id is None:
NN.output_mapping[in_label] = (out_label, build_nodes[out_node][1])
else:
id2function[f_id].input_mapping[in_label] = (out_label, build_nodes[out_node][1])
out_functions = set([f_id for _, f_id in NN.output_mapping.values()])
# remove old path or at least all not necessary functions
NN.functions.insert(0, target_function)
stack = {target_function.id_name}
while stack:
curr = stack.pop()
pred = predecessor.get(curr, set())
for _p in pred:
if ancestor[_p]:
ancestor[_p].remove(curr)
if not ancestor[_p] and _p not in out_functions:
stack.add(_p)
NN.functions.remove(id2function[curr])
predecessor.pop(curr)
return changed
result = self.distinct_copy()
changed = structural_mutation(result, prob)
changed = function_wise_mutation(result, prob) or changed
if changed:
result._id_name = NeuralNetwork.getNewName()
return [result]
def recombine(self, other):
self_id2depth = dict([(id_name, 0) for _, id_name in self.inputs.values()])
self_depth2id = {0: [id_name for _, id_name in self.inputs.values()]}
self_id2function = dict()
self_inputs = set([id_name for _, id_name in self.inputs.values()])
self_outputs = dict()
for out_id, (label, key) in self.output_mapping.items():
self_outputs[key] = self_outputs.get(key, []) + [(out_id, label)]
# map function to network depth
stack = list(self.functions)
while stack:
f = stack.pop(0)
max_depth = 0
all_found = True
for _, f_id in f.inputs.values():
if f_id not in self_id2function and f_id not in self_inputs:
stack.append(f)
all_found = False
break
max_depth = max(max_depth, self_id2depth[f_id])
if not all_found:
continue
self_id2function[f.id_name] = f
self_id2depth[f.id_name] = max_depth + 1
self_depth2id[max_depth + 1] = self_depth2id.get(max_depth + 1, []) + [f.id_name]
other_id2depth = dict([(id_name, 0) for _, id_name in other.inputs.values()])
other_depth2id = {0: [id_name for _, id_name in other.inputs.values()]}
other_id2function = dict()
other_inputs = set([id_name for _, id_name in other.inputs.values()])
other_outputs = dict()
for out_id, (label, key) in other.output_mapping.items():
other_outputs[key] = other_outputs.get(key, []) + [(out_id, label)]
stack = list(other.functions)
while stack:
f = stack.pop(0)
max_depth = 0
all_found = True
for _, f_id in f.inputs.values():
if f_id not in other_id2function and f_id not in other_inputs:
stack.append(f)
all_found = False
break
max_depth = max(max_depth, other_id2depth[f_id])
if not all_found:
continue
other_id2function[f.id_name] = f
other_id2depth[f.id_name] = max_depth + 1
other_depth2id[max_depth + 1] = other_depth2id.get(max_depth + 1, []) + [f.id_name]
# create new network
result = NeuralNetwork.__new__(NeuralNetwork)
result.output_targets = dict([(label, value.__copy__()) for label, value in self.output_targets.items()])
result.function_cls = list(set(self.function_cls + other.function_cls))
result.variable_pool = dict()
for key in set(self.variable_pool.keys()).union(set(other.variable_pool.keys())):
self_pool = self.variable_pool.get(key, [])
self_pool = sample(self_pool, k=int(math.ceil(len(self_pool) / 2))) if len(self_pool) > 0 else []
other_pool = other.variable_pool.get(key, [])
other_pool = sample(other_pool, k=int(math.ceil(len(other_pool) / 2))) if len(other_pool) > 0 else []
result.variable_pool[key] = self_pool + other_pool
result.functions = list()
result.output_mapping = dict()
new_functions = dict()
_current = [max(self_depth2id.keys()),
self_depth2id,
self_id2depth,
self_id2function,
self_inputs,
self_outputs,
self.attr.get(self.arg_RECOMBINATION_PROBABILITY, .5)]
_other = [max(other_depth2id.keys()),
other_depth2id,
other_id2depth,
other_id2function,
other_inputs,
other_outputs,
other.attr.get(self.arg_RECOMBINATION_PROBABILITY, .5)]
# switch functions
depth, depth2id, id2depth, id2function, _inputs, _outputs, cross_prob = _current
required_inputs = dict()
for f_ in depth2id[depth]:
for out_label, label in _outputs.get(f_, []):
nts_dict = required_inputs.get(f_, dict())
out_nts = id2function[f_].outputs.get(label, None)
nts_dict[out_nts] = nts_dict.get(out_nts, []) + [(None, out_label, label)]
required_inputs[f_] = nts_dict
_outputs.pop(f_)
runs = 0
while depth >= 0:
runs += 1
all_reached = False
connect = list()
o_depth, o_depth2id, o_id2depth, o_id2function, _, _, _ = _other
other_new_depth = o_depth
if random() < cross_prob:
required_input_keys = [(_f, nts) for _f in required_inputs.keys() for nts in required_inputs[_f].keys()]
shuffle(required_input_keys)
connect = list()
try:
for d in range(min(o_depth, depth-1), 0, -1):
other_new_depth = d
shuffle(o_depth2id[d])
for out_nts_label, out_nts, _id in [(nts_label, nts, f_id) for f_id in o_depth2id[d] for nts_label, nts in
o_id2function[f_id].outputs.items()]:
for _f, target_nts in required_input_keys:
connect_param = next(
NeuralNetwork.children_iter(input_ntss={out_nts_label: out_nts},
target_output=target_nts,
function_pool=result.function_cls,
recursion_depth=1,
max_recursion_depth=0,
is_reachable=lambda x, y:
NeuralNetwork.reachable(x, y, 0, function_pool=set())), None)
# TODO: think about using functions with more than one input
if connect_param is not None:
connect.append((_f, target_nts, out_nts, _id, connect_param))
required_input_keys.remove((_f, target_nts))
if len(required_input_keys) <= 0:
all_reached = True
raise Exception
except:
pass
if all_reached:
other_used_functions = set()
# Create intermediate connection functions
for _f, target_nts, out_nts, out_id, (out_ntss, f_class, _, in_out_mapping) in connect:
out_f = o_id2function[out_id]
node_inputs = dict([(in_label, (out_label, out_f.outputs, out_id))
for in_label, out_label in in_out_mapping.items()])
parameters, probs = f_class.generateParameters(input_dict=node_inputs,
expected_outputs=out_ntss,
variable_pool=result.variable_pool)
build_f = f_class(**np.random.choice(parameters, size=1, replace=False, p=probs)[0])
result.functions.append(build_f)
new_functions[build_f.id_name] = build_f
# connect intermediate functions to existing functions
for f_id, in_label, out_label in required_inputs[_f][target_nts]:
if f_id is None:
result.output_mapping[in_label] = (choice([label for label, ts in build_f.outputs.items()
if ts.__eq__(target_nts)]),
build_f.id_name)
else:
new_f = new_functions[f_id]
new_f.input_mapping[in_label] = (choice([out_label
for out_label, nts in out_ntss.items()
if nts.__eq__(target_nts)]), build_f.id_name)
required_inputs[_f].pop(target_nts)
if not required_inputs[_f]:
required_inputs.pop(_f)
new_out_f = out_f.__copy__()
new_out_f._id_name = new_out_f.getNewName(new_out_f)
result.functions.append(new_out_f)
new_functions[new_out_f.id_name] = new_out_f
other_used_functions.add(new_out_f.id_name)
for in_label, out_label in in_out_mapping.items():
build_f.input_mapping[in_label] = (out_label, new_out_f.id_name)
# Update missing network outputs
missing_outputs = set([out_key for t_list in _outputs.values() for out_key, _ in t_list])
o_outputs = _other[5]
for other_f in list(o_outputs.keys()):
n_list = [(out_key, label) for out_key, label in o_outputs[other_f] if out_key in missing_outputs]
if len(n_list) > 0:
o_outputs[other_f] = n_list
else:
o_outputs.pop(other_f)
# check network outputs
for f_ in list(o_outputs.keys()):
f_depth = o_id2depth[f_]
if f_depth >= other_new_depth:
new_out_f = o_id2function[f_].__copy__()
new_out_f._id_name = new_out_f.getNewName(new_out_f)
result.functions.append(new_out_f)
new_functions[new_out_f.id_name] = new_out_f
other_used_functions.add(new_out_f.id_name)
for out_label, label in o_outputs.get(f_, []):
result.output_mapping[out_label] = (label, new_out_f.id_name)
o_outputs.pop(f_)
elif f_depth == other_new_depth - 1:
for out_label, label in o_outputs.get(f_, []):
nts_dict = required_inputs.get(f_, dict())
out_nts = o_id2function[f_].outputs.get(label, None)
nts_dict[out_nts] = nts_dict.get(out_nts, []) + [(None, out_label, label)]
required_inputs[f_] = nts_dict
o_outputs.pop(f_)
# update required inputs
while other_used_functions:
other_f_id = other_used_functions.pop()
for in_label, (out_label, in_id) in new_functions[other_f_id].inputs.items():
if in_id in o_id2function:
if o_id2depth[in_id] >= other_new_depth:
in_f = o_id2function[in_id]
new_in_f = in_f.__copy__()
new_in_f._id_name = new_in_f.getNewName()
result.functions.append(new_in_f)
new_functions[new_in_f.id_name] = new_in_f
other_used_functions.add(new_in_f.id_name)
new_functions[other_f_id].inputs[in_label] = (out_label, new_in_f.id_name)
else:
nts_dict = required_inputs.get(in_id, dict())
out_nts = o_id2function[in_id].outputs.get(out_label, None)
nts_dict[out_nts] = nts_dict.get(out_nts, []) + [(other_f_id, in_label, out_label)]
required_inputs[in_id] = nts_dict
_current[0] = depth - 1
_other[0] = other_new_depth - 1
_other, _current = _current, _other
else:
for _f in depth2id[depth]:
# only add functions if they are required
if _f not in required_inputs:
continue
f_ = id2function.get(_f, None)
if f_ is None:
continue
new_out_f = f_.__copy__()
new_out_f._id_name = new_out_f.getNewName(new_out_f)
result.functions.append(new_out_f)
new_functions[new_out_f.id_name] = new_out_f
for target_nts in required_inputs.get(_f, []):
for f_id, in_label, out_label in required_inputs[_f][target_nts]:
if f_id is None:
result.output_mapping[in_label] = (out_label, new_out_f.id_name)
else:
required_by = new_functions[f_id]
required_by.input_mapping[in_label] = (out_label, new_out_f.id_name)
required_inputs.pop(_f, None)
for in_label, (out_label, in_id) in f_.inputs.items():
if in_id in id2function:
nts_dict = required_inputs.get(in_id, dict())
out_nts = id2function[in_id].outputs.get(out_label, None)
nts_dict[out_nts] = nts_dict.get(out_nts, []) + [(new_out_f.id_name, in_label, out_label)]
required_inputs[in_id] = nts_dict
for _f in list(_outputs.keys()):
if id2depth[_f] == depth - 1:
nts_dict = required_inputs.get(_f, dict())
for out_label, label in _outputs[_f]:
out_nts = id2function[_f].outputs.get(label, None)
nts_dict[out_nts] = nts_dict.get(out_nts, []) + [(None, out_label, label)]
required_inputs[_f] = nts_dict
_outputs.pop(_f)
_current[0] = depth - 1
depth, depth2id, id2depth, id2function, _inputs, _outputs, cross_prob = _current
result._inputs = dict()
result._inputs = dict(self._inputs)
result._id_name = result.getNewName()
result.attr = {**self.attr, **other.attr}
return [result]
def update_state(self, *args, **kwargs):
for f in self.functions:
value_dict = kwargs.get(f.id_name)
for variable in f.variables:
variable.value = value_dict[variable.name]
variable.trainable = False
self.variable_pool[variable.name] = self.variable_pool.get(variable.name, []) + [variable]
@property
def inputLabels(self) -> List[str]:
return list(self._inputs.keys())
|
{"hexsha": "32576376237bd3f90b3248a4fcb2c48f5d736380", "size": 42624, "ext": "py", "lang": "Python", "max_stars_repo_path": "LAMARCK_ML/architectures/neuralNetwork.py", "max_stars_repo_name": "JonasDHomburg/LAMARCK", "max_stars_repo_head_hexsha": "0e372c908ff59effc6fd68e6477d04c4d89e6c26", "max_stars_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-20T08:03:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T11:02:09.000Z", "max_issues_repo_path": "LAMARCK_ML/architectures/neuralNetwork.py", "max_issues_repo_name": "JonasDHomburg/LAMARCK_ML", "max_issues_repo_head_hexsha": "0e372c908ff59effc6fd68e6477d04c4d89e6c26", "max_issues_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LAMARCK_ML/architectures/neuralNetwork.py", "max_forks_repo_name": "JonasDHomburg/LAMARCK_ML", "max_forks_repo_head_hexsha": "0e372c908ff59effc6fd68e6477d04c4d89e6c26", "max_forks_repo_licenses": ["Apache-2.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3076923077, "max_line_length": 118, "alphanum_fraction": 0.5816206832, "include": true, "reason": "import numpy,import networkx", "num_tokens": 9561}
|
import unittest
import numpy as np
from matplotlib.pylab import plt
from mpl_toolkits.mplot3d import Axes3D as _3d
def mean_squared_error(y, t):
"""均方误差(mean squared error)"""
return .5 * np.sum((y - t) ** 2)
def cross_entropy_error(y, t):
"""交叉熵误差(cross entropy error)"""
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
batch_size = y.shape[0]
return -np.sum(t * np.log(y + 1e-7)) / batch_size
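# Worked check (added note, not part of the original file): with a one-hot
# target whose true class has predicted probability 0.6, cross_entropy_error
# returns -log(0.6 + 1e-7) ~= 0.51, and mean_squared_error on the same vectors
# returns 0.5 * 0.195 = 0.0975; both match the first case exercised in
# LearningTest below.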
class LearningTest(unittest.TestCase):
def test_mean_squared_error(self):
t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
y = [.1, .05, .6, .0, .05, .1, .0, .1, .0, .0]
print(mean_squared_error(np.array(y), np.array(t)))
y = [.1, .05, .1, .0, .05, .1, .0, .6, 0, .0]
print(mean_squared_error(np.array(y), np.array(t)))
def test_cross_entropy_error(self):
t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
y = [.1, .05, .6, .0, .05, .1, .0, .1, .0, .0]
print(cross_entropy_error(np.array(y), np.array(t)))
y = [.1, .05, .1, .0, .05, .1, .0, .6, 0, .0]
print(cross_entropy_error(np.array(y), np.array(t)))
class NumericalDifferentiationTest(unittest.TestCase):
def numerical_diff(self, f, x):
h = 1e-4
return (f(x + h) - f(x - h)) / (2 * h)
def function_1(self, x):
return .01 * x ** 2 + .1 * x
def test_numerical_diff(self):
x = np.arange(.0, 20.0, .1)
y = self.function_1(x)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.plot(x, y)
plt.show()
print(self.numerical_diff(self.function_1, 5))
print(self.numerical_diff(self.function_1, 10))
def function_2(self, x, y):
return x ** 2 + y ** 2
def test_function_2(self):
fig = plt.figure()
ax = _3d(fig)
x = np.arange(-3.0, 3.0, .1)
y = np.arange(-3.0, 3.0, .1)
x, y = np.meshgrid(x, y)
z = self.function_2(x, y)
ax.plot_surface(x, y, z,
rstride=1,
cstride=1,
cmap='rainbow'
)
plt.show()
def function_tmp1(self, x0):
return x0 * x0 + 4.0 ** 2.0
def function_tmp2(self, x1):
return 3.0 ** 2.0 + x1 * x1
def test_tmp(self):
x, y = 3.0, 4.0
print(self.numerical_diff(self.function_tmp1, x))
print(self.numerical_diff(self.function_tmp2, y))
class GradientTest(unittest.TestCase):
def function_2(self, x):
if x.ndim == 1:
return np.sum(x ** 2)
else:
return np.sum(x ** 2, axis=1)
def _numerical_gradient_no_batch(self, f, x):
h = 1e-4
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = tmp_val + h
fxh1 = f(x)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2 * h)
x[idx] = tmp_val
return grad
def numerical_gradient(self, f, x):
if x.ndim == 1:
return self._numerical_gradient_no_batch(f, x)
else:
grad = np.zeros_like(x)
for idx, x in enumerate(x):
grad[idx] = self._numerical_gradient_no_batch(f, x)
return grad
def tangent_line(self, f, x):
d = self.numerical_gradient(f, x)
print(d)
y = f(x) - d * x
return lambda t: d * t + y
def test_numerical_gradient(self):
print(self.numerical_gradient(self.function_2, np.array([3.0, 4.0])))
print(self.numerical_gradient(self.function_2, np.array([.0, 2.0])))
print(self.numerical_gradient(self.function_2, np.array([3.0, .0])))
def test_ng_2d(self):
x0 = np.arange(-2, 2.5, .25)
x1 = np.arange(-2, 2.5, .25)
x, y = np.meshgrid(x0, x1)
x = x.flatten()
y = y.flatten()
grad = self.numerical_gradient(self.function_2, np.array([x, y]))
plt.figure()
plt.quiver(x, y, -grad[0], -grad[1], angles='xy', color='#666666')
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.xlabel('x0')
plt.ylabel('x1')
plt.grid()
plt.legend()
plt.draw()
plt.show()
def gradient_descent(self, f, init_x, lr=.01, step_num=100):
"""梯度下降法"""
x = init_x
x_history = []
for i in range(step_num):
x_history.append(x.copy())
grad = self.numerical_gradient(f, x)
x -= lr * grad
return x, np.array(x_history)
def test_gradient_descent(self):
init_x = np.array([-3.0, 4.0])
lr = 0.1
step_num = 20
x, x_history = self.gradient_descent(self.function_2, init_x, lr=lr, step_num=step_num)
plt.plot([-5, 5], [0, 0], '--b')
plt.plot([0, 0], [-5, 5], '--b')
plt.plot(x_history[:, 0], x_history[:, 1], 'o')
plt.xlim(-3.5, 3.5)
plt.ylim(-4.5, 4.5)
plt.xlabel('X0')
plt.ylabel('X1')
plt.show()
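# Added note (sanity check for the test above): for f(x) = x0**2 + x1**2 each
# update is x <- x - lr * 2 * x = (1 - 2 * lr) * x, so with lr = 0.1 and 20
# steps the start point (-3.0, 4.0) shrinks by a factor of 0.8**20 ~= 0.0115 to
# roughly (-0.035, 0.046), which is the cluster test_gradient_descent plots.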
|
{"hexsha": "1a92e3cdc07a64d45c742c95aba845bae6f89c63", "size": 5066, "ext": "py", "lang": "Python", "max_stars_repo_path": "third_party/deep_leaning_from_scratch/c4/test_neural_network_learning.py", "max_stars_repo_name": "KentWangYQ/py3-poc", "max_stars_repo_head_hexsha": "52b993716192acaf13094dc77f3f6347ee580996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "third_party/deep_leaning_from_scratch/c4/test_neural_network_learning.py", "max_issues_repo_name": "KentWangYQ/py3-poc", "max_issues_repo_head_hexsha": "52b993716192acaf13094dc77f3f6347ee580996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/deep_leaning_from_scratch/c4/test_neural_network_learning.py", "max_forks_repo_name": "KentWangYQ/py3-poc", "max_forks_repo_head_hexsha": "52b993716192acaf13094dc77f3f6347ee580996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5326086957, "max_line_length": 95, "alphanum_fraction": 0.5094749309, "include": true, "reason": "import numpy", "num_tokens": 1608}
|
#!/usr/bin/env python
from pbpl.common.units import *
import numpy as np
E0 = 3.5*MeV
gamma0 = (me*c_light**2 + E0)/(me*c_light**2)
p0 = gamma0 * me * c_light
quad_f = 250*mm
quad_length = 10*mm
quad_gradient = p0 / (quad_f * quad_length * eplus)
print('gradient = ', quad_gradient / (tesla/meter))
Ld = quad_f * (np.sqrt(5)-1) / 5
print('Ld = ', Ld/mm)
#quad_K = eplus * quad_gradient / p0 # K = kappa0^2
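# Note added for clarity (not in the original script): in the thin-lens
# approximation a quadrupole of length L and gradient G has focusing strength
# k = eplus * G / p0 and focal length 1/f = k * L, so
#     G = p0 / (eplus * f * L),
# which is exactly the expression used for quad_gradient above.  The commented
# quad_K line is the same k (kappa0^2) written the other way around.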
|
{"hexsha": "0e8a37ea8d76fbdf0cf56f05a08e0e5ea8bb17b1", "size": 411, "ext": "py", "lang": "Python", "max_stars_repo_path": "share/double-quad/calc.py", "max_stars_repo_name": "ucla-pbpl/pbpl-gpt", "max_stars_repo_head_hexsha": "783d8ce3e72debed6ab20b1828d99102bfbb9360", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "share/double-quad/calc.py", "max_issues_repo_name": "ucla-pbpl/pbpl-gpt", "max_issues_repo_head_hexsha": "783d8ce3e72debed6ab20b1828d99102bfbb9360", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "share/double-quad/calc.py", "max_forks_repo_name": "ucla-pbpl/pbpl-gpt", "max_forks_repo_head_hexsha": "783d8ce3e72debed6ab20b1828d99102bfbb9360", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8333333333, "max_line_length": 52, "alphanum_fraction": 0.6520681265, "include": true, "reason": "import numpy", "num_tokens": 154}
|
"""Utilities for performing affine transformations on image data.
"""
import numpy as np
from .utils import array_to_img, img_to_array
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
try:
from PIL import Image as pil_image
from PIL import ImageEnhance
except ImportError:
pil_image = None
ImageEnhance = None
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range described as a single integer, in degrees.
Will be applied as `(-rg,rg)`.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x,
theta=theta,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x,
tx=tx,
ty=ty,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x,
shear=shear,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x,
zx=zx,
zy=zy,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def apply_channel_shift(x, intensity, channel_axis=0):
"""Performs a channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity,
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_channel_shift(x, intensity_range, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity_range: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
def apply_brightness_shift(x, brightness, scale=True):
"""Performs a brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness: Float. The new brightness value.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively.
Default: True.
# Returns
Numpy image tensor.
# Raises
ImportError: if PIL is not available.
"""
if ImageEnhance is None:
raise ImportError('Using brightness shifts requires PIL. '
'Install PIL or Pillow.')
x_min, x_max = np.min(x), np.max(x)
local_scale = (x_min < 0) or (x_max > 255)
x = array_to_img(x, scale=local_scale or scale)
    enhancer = ImageEnhance.Brightness(x)
    x = enhancer.enhance(brightness)
x = img_to_array(x)
if not scale and local_scale:
x = x / 255 * (x_max - x_min) + x_min
return x
def random_brightness(x, brightness_range, scale=True):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively.
Default: True.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
u = np.random.uniform(brightness_range[0], brightness_range[1])
return apply_brightness_shift(x, u, scale)
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 - 0.5
o_y = float(y) / 2 - 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
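# Worked example (added note, not part of the original module): for a 4x4 image
# o_x = o_y = 1.5, so a 180-degree rotation matrix
# [[-1, 0, 0], [0, -1, 0], [0, 0, 1]] becomes
# [[-1, 0, 3], [0, -1, 3], [0, 0, 1]] after centering, which maps pixel (0, 0)
# to (3, 3) and leaves the image centre (1.5, 1.5) fixed.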
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
# Arguments
x: 3D numpy array - a 2D image with one or more channels.
theta: Rotation angle in degrees.
tx: Width shift.
        ty: Height shift.
        shear: Shear angle in degrees.
        zx: Zoom in x direction.
        zy: Zoom in y direction.
row_axis: Index of axis for rows (aka Y axis) in the input image.
Direction: left to right.
col_axis: Index of axis for columns (aka X axis) in the input image.
Direction: top to bottom.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order: int, order of interpolation
# Returns
The transformed version of the input.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
# Input sanity checks:
    # 1. x must be a 2D image with one or more channels (i.e., a 3D tensor)
# 2. channels must be either first or last dimension
if np.unique([row_axis, col_axis, channel_axis]).size != 3:
raise ValueError("'row_axis', 'col_axis', and 'channel_axis'"
" must be distinct")
# TODO: shall we support negative indices?
valid_indices = set([0, 1, 2])
actual_indices = set([row_axis, col_axis, channel_axis])
if actual_indices != valid_indices:
raise ValueError(
f"Invalid axis' indices: {actual_indices - valid_indices}")
if x.ndim != 3:
raise ValueError("Input arrays must be multi-channel 2D images.")
if channel_axis not in [0, 2]:
raise ValueError("Channels are allowed and the first and last dimensions.")
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
# Matrix construction assumes that coordinates are x, y (in that order).
# However, regular numpy arrays use y,x (aka i,j) indexing.
# Possible solution is:
# 1. Swap the x and y axes.
# 2. Apply transform.
# 3. Swap the x and y axes again to restore image-like data ordering.
# Mathematically, it is equivalent to the following transformation:
# M' = PMP, where P is the permutation matrix, M is the original
# transformation matrix.
if col_axis > row_axis:
transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]
transform_matrix[[0, 1]] = transform_matrix[[1, 0]]
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
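# Usage sketch (added for illustration; not part of the original module).
# Assumes SciPy is installed and uses the default channels-first layout,
# i.e. an input of shape (channels, rows, cols):
#
#   >>> import numpy as np
#   >>> x = np.random.rand(3, 32, 32)
#   >>> apply_affine_transform(x, theta=15, tx=2, zx=1.2, zy=1.2).shape
#   (3, 32, 32)
#   >>> random_rotation(x, 30).shape
#   (3, 32, 32)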
|
{"hexsha": "01b8f52a21cfa4d0fe5a998342ef2ec27e7abed4", "size": 15048, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_preprocessing/image/affine_transformations.py", "max_stars_repo_name": "smedegaard/keras-preprocessing", "max_stars_repo_head_hexsha": "1b36f97450b14ed8a88018891473944be1587b47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keras_preprocessing/image/affine_transformations.py", "max_issues_repo_name": "smedegaard/keras-preprocessing", "max_issues_repo_head_hexsha": "1b36f97450b14ed8a88018891473944be1587b47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keras_preprocessing/image/affine_transformations.py", "max_forks_repo_name": "smedegaard/keras-preprocessing", "max_forks_repo_head_hexsha": "1b36f97450b14ed8a88018891473944be1587b47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8090452261, "max_line_length": 83, "alphanum_fraction": 0.5833997873, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3478}
|
import os
import random
import numpy as np
from PIL import Image
import torch.utils.data as data_utils
class ImagePairDataset(data_utils.dataset.Dataset):
"""
"""
def __init__(self, data_dir=u'image_data/',
view_size=48, train=True, transform=None):
self.data_dir = data_dir.replace("'", "")
self.transform = transform
self.train = train
self.view_size = view_size
self.batch_iter = 0
self.data_iter = 0
self.corr_list = self.get_corr_list()
#print("data generation begin")
"""
corr = self.corr_list[self.data_iter]
self.data_generator.delete_sample()
self.data_generator.delete_image()
self.data_generator.generate_pair_view(corr[0], corr[1], corr[2])
self.data_iter += 1
"""
#print("data generation end")
self.data_iter += 1
self.pos_data = []
self.neg_data = []
self.get_data()
def __getitem__(self, index):
"""
"""
#print("getitem:: " + str(index) + ", " + str(self.__len__()))
#print("pos length: " + str(len(self.pos_data)))
if self.data_iter >= len(self.corr_list):
self.data_iter = 0
random.shuffle(self.corr_list)
if self.batch_iter >= self.view_size:
self.batch_iter = 0
#corr = self.corr_list[self.data_iter]
#self.data_generator.delete_sample()
#self.data_generator.delete_image()
#self.data_generator.generate_pair_view(corr[0], corr[1], corr[2])
self.data_iter += 1
self.get_data()
# make pairs (1 positive pair + 1 negative pair)
        # copy the stored path pairs so the cached lists keep file paths,
        # not the PIL images created below
        p_images = list(self.pos_data[index])
        n_images = list(self.neg_data[index])
p_images[0] = Image.open(p_images[0]).convert('RGB')
p_images[1] = Image.open(p_images[1]).convert('RGB')
n_images[0] = Image.open(n_images[0]).convert('RGB')
n_images[1] = Image.open(n_images[1]).convert('RGB')
if self.transform is not None:
p_images[0] = self.transform(p_images[0])
p_images[1] = self.transform(p_images[1])
n_images[0] = self.transform(n_images[0])
n_images[1] = self.transform(n_images[1])
self.batch_iter += 1
return p_images, n_images
def __len__(self):
"""
"""
return len(self.pos_data)
def get_corr_list(self, corr_path='Corr/'):
"""
"""
clist_path = 'train_corr_list.npy'
if os.path.isfile(clist_path):
return np.load(clist_path).tolist()
corr_list = []
for dir_path, _, f_name in os.walk(corr_path):
cat_list = []
for f in f_name:
corr = []
idx = dir_path.find('Corr')
category = dir_path[idx+5:]
corr.append(category)
model_idx = f.find('___')
model_a = f[:model_idx]
model_b = f[model_idx+10:len(f)-4]
corr.append(model_a)
corr.append(model_b)
#corr_list.append(corr)
cat_list.append(corr)
if not cat_list:
continue
"""
for c in cat_list:
print(c)
"""
            # make a corr_list without redundant objects; sample a fixed number of corrs per category
visited = []
cnt = 0
while cnt < 15:
'''
if cnt < 11:
cnt += 1
continue
'''
sample = cat_list[random.randrange(len(cat_list))]
if sample[1] in visited or sample[2] in visited:
continue
else:
corr_list.append(sample)
cnt += 1
visited.append(sample[1])
visited.append(sample[2])
cat_list.remove(sample)
"""
sample = random.sample(cat_list, 25)
for s in sample:
corr_list.append(s)
"""
random.shuffle(corr_list)
# use this line to generate pair views using samples corr list
# self.data_generator.generate_sample_view(corr_list)
clist_path = os.path.join('./', 'corr_list')
np.save(clist_path, np.asarray(corr_list))
return corr_list
def get_data(self):
"""
store image names in pos, neg lists
only store data for one batch (this is for a full training purpose)
"""
a_path = os.path.join(self.data_dir, self.corr_list[self.data_iter-1][1])
b_path = os.path.join(self.data_dir, self.corr_list[self.data_iter-1][2])
self.pos_data.clear()
self.neg_data.clear()
for d, _, f_name in os.walk(a_path):
for f in f_name:
a_file = os.path.join(d, f).replace("\\", "/")
b_file = os.path.join(b_path, f).replace("\\", "/")
idx = f.find('_')
if f[0:idx] == 'positive':
self.pos_data.append([a_file, b_file])
else:
self.neg_data.append([a_file, b_file])
def get_fulldata(self):
"""
store image names in pos, neg lists
store full data in the list (this is for small dataset only,
for a transfer learning purpose)
prone to memory error
"""
for corr in self.corr_list:
a_path = os.path.join(self.data_dir, corr[1])
b_path = os.path.join(self.data_dir, corr[2])
#self.pos_data.clear()
#self.neg_data.clear()
for d, _, f_name in os.walk(a_path):
for f in f_name:
a_file = os.path.join(d, f).replace("\\", "/")
b_file = os.path.join(b_path, f).replace("\\", "/")
idx = f.find('_')
if f[0:idx] == 'positive':
self.pos_data.append([a_file, b_file])
else:
self.neg_data.append([a_file, b_file])
if __name__ == '__main__':
ipd = ImagePairDataset()
ipd.get_corr_list()
"""
for i, data in enumerate(loader, 0):
pos, neg, label = data
num = len(pos)
ax = plt.subplot(1, image_show, i+1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
for n in range(num):
p_img = pos[n]
n_img = neg[n]
l = label[n]
plt.imshow(p_img)
if i == image_show-1:
break
"""
|
{"hexsha": "3fb24722f9f057b2aeeed81131f3f653818ed079", "size": 6819, "ext": "py", "lang": "Python", "max_stars_repo_path": "pair_datasets.py", "max_stars_repo_name": "minz95/lmvcnn_pytorch", "max_stars_repo_head_hexsha": "ff60996c8fd45ffe370b1ba31533276d6eb6c440", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-08T04:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T19:06:51.000Z", "max_issues_repo_path": "pair_datasets.py", "max_issues_repo_name": "minz95/lmvcnn_pytorch", "max_issues_repo_head_hexsha": "ff60996c8fd45ffe370b1ba31533276d6eb6c440", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-12-05T08:22:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T10:05:26.000Z", "max_forks_repo_path": "pair_datasets.py", "max_forks_repo_name": "minz95/lmvcnn_pytorch", "max_forks_repo_head_hexsha": "ff60996c8fd45ffe370b1ba31533276d6eb6c440", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2634146341, "max_line_length": 86, "alphanum_fraction": 0.5078457252, "include": true, "reason": "import numpy", "num_tokens": 1520}
|
# -*- coding: utf-8 -*-
"""This functions are based on my own technical analysis library:
https://github.com/bukosabino/ta
You should check it if you need documentation of this functions.
"""
import pandas as pd
import numpy as np
"""
Volatility Indicators
"""
def bollinger_hband(close, n=20, ndev=2):
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
hband = mavg + ndev*mstd
return pd.Series(hband, name='hband')
def bollinger_lband(close, n=20, ndev=2):
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
lband = mavg - ndev*mstd
return pd.Series(lband, name='lband')
def bollinger_mavg(close, n=20):
mavg = close.rolling(n).mean()
return pd.Series(mavg, name='mavg')
def bollinger_hband_indicator(close, n=20, ndev=2):
df = pd.DataFrame([close]).transpose()
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
hband = mavg + ndev*mstd
df['hband'] = 0.0
df.loc[close > hband, 'hband'] = 1.0
return pd.Series(df['hband'], name='bbihband')
def bollinger_lband_indicator(close, n=20, ndev=2):
df = pd.DataFrame([close]).transpose()
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
lband = mavg - ndev*mstd
df['lband'] = 0.0
df.loc[close < lband, 'lband'] = 1.0
return pd.Series(df['lband'], name='bbilband')
def donchian_channel_hband(close, n=20):
hband = close.rolling(n).max()
return pd.Series(hband, name='dchband')
def donchian_channel_lband(close, n=20):
lband = close.rolling(n).min()
return pd.Series(lband, name='dclband')
def donchian_channel_hband_indicator(close, n=20):
df = pd.DataFrame([close]).transpose()
df['hband'] = 0.0
hband = close.rolling(n).max()
df.loc[close > hband, 'hband'] = 1.0
return pd.Series(df['hband'], name='dcihband')
def donchian_channel_lband_indicator(close, n=20):
df = pd.DataFrame([close]).transpose()
df['lband'] = 0.0
lband = close.rolling(n).min()
df.loc[close < lband, 'lband'] = 1.0
return pd.Series(df['lband'], name='dcilband')
"""
Volume Indicators
"""
def on_balance_volume(close, volume):
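    # note: this returns the per-bar signed volume; the classical OBV indicator
    # is the cumulative sum of these values (e.g. df['OBV'].cumsum())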
df = pd.DataFrame([close, volume]).transpose()
df['OBV'] = 0
c1 = close < close.shift(1)
c2 = close > close.shift(1)
if c1.any():
df.loc[c1, 'OBV'] = - volume
if c2.any():
df.loc[c2, 'OBV'] = volume
return df['OBV']
def on_balance_volume_mean(close, volume, n=10):
df = pd.DataFrame([close, volume]).transpose()
df['OBV'] = 0
c1 = close < close.shift(1)
c2 = close > close.shift(1)
if c1.any():
df.loc[c1, 'OBV'] = - volume
if c2.any():
df.loc[c2, 'OBV'] = volume
return pd.Series(df['OBV'].rolling(n).mean(), name='obv_mean')
def force_index(close, volume, n=2):
return pd.Series(close.diff(n) * volume.diff(n), name='fi_'+str(n))
def volume_price_trend(close, volume):
vpt = volume * ((close - close.shift(1)) / close.shift(1).astype(float))
vpt = vpt.shift(1) + vpt
return pd.Series(vpt, name='vpt')
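# Minimal usage sketch (assumes `close` and `volume` are pandas Series indexed by
# time; the first n-1 values of each rolling indicator are NaN until the window fills):
#
#   close = pd.Series([10.0, 10.4, 10.2, 10.9, 11.1, 10.8] * 10)
#   volume = pd.Series([100.0, 140.0, 90.0, 160.0, 120.0, 110.0] * 10)
#   upper = bollinger_hband(close, n=20, ndev=2)
#   lower = bollinger_lband(close, n=20, ndev=2)
#   obv_ma = on_balance_volume_mean(close, volume, n=10)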
|
{"hexsha": "15046f842f58b09826585fce6e2d9718bc450df2", "size": 3076, "ext": "py", "lang": "Python", "max_stars_repo_path": "ta.py", "max_stars_repo_name": "Abxhor/Coldairarrow", "max_stars_repo_head_hexsha": "3735beec8a6fa7ad9356375081229c68f0e83f3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2018-05-09T03:34:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T12:17:17.000Z", "max_issues_repo_path": "ta.py", "max_issues_repo_name": "Abxhor/Coldairarrow", "max_issues_repo_head_hexsha": "3735beec8a6fa7ad9356375081229c68f0e83f3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ta.py", "max_forks_repo_name": "Abxhor/Coldairarrow", "max_forks_repo_head_hexsha": "3735beec8a6fa7ad9356375081229c68f0e83f3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-04-28T09:30:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T23:23:18.000Z", "avg_line_length": 25.8487394958, "max_line_length": 76, "alphanum_fraction": 0.6271131339, "include": true, "reason": "import numpy", "num_tokens": 985}
|
/*
Copyright Rene Rivera 2011-2012
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
#include <string>
#include <iostream>
#include <set>
#define BOOST_PREDEF_INTERNAL_GENERATE_TESTS
namespace
{
struct predef_info
{
std::string name;
std::string description;
unsigned value;
predef_info(
std::string const & n,
std::string const & d,
unsigned v);
predef_info(
predef_info const & other)
: name(other.name)
, description(other.description)
, value(other.value)
{
}
bool operator < (predef_info const & other) const
{
return name < other.name;
}
};
std::set<predef_info> * predefs = 0;
predef_info::predef_info(
std::string const & n,
std::string const & d,
unsigned v)
: name(n)
, description(d)
, value(v)
{
if (!predefs)
{
predefs = new std::set<predef_info>();
}
predefs->insert(*this);
}
}
#define BOOST_PREDEF_DECLARE_TEST(x,s) \
namespace { \
predef_info x##_predef_init(#x,s,x); \
}
#include <boost/predef.h>
int main()
{
std::set<predef_info>::iterator i;
std::set<predef_info>::iterator e = predefs->end();
std::cout << "** Detected **" << std::endl;
for (i = predefs->begin(); i != e; ++i)
{
if (i->value > 0)
std::cout
<< i->name << " = "
<< i->value
<< " (" << (i->value/10000000)%100 << "," << (i->value/100000)%100 << "," << (i->value)%100000 << ") | "
<< i->description
<< std::endl;
}
std::cout << "** Not Detected **" << std::endl;
for (i = predefs->begin(); i != e; ++i)
{
if (i->value == 0)
std::cout
<< i->name << " = "
<< i->value << " | "
<< i->description
<< std::endl;
}
return 0;
}
|
{"hexsha": "d7835e16989e854bff1079ab251c73a9f5f7b2e0", "size": 2245, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/predef/test/info_as_cpp.cpp", "max_stars_repo_name": "Abce/boost", "max_stars_repo_head_hexsha": "2d7491a27211aa5defab113f8e2d657c3d85ca93", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2015-02-08T20:36:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T20:38:31.000Z", "max_issues_repo_path": "libs/boost/libs/predef/test/info_as_cpp.cpp", "max_issues_repo_name": "flingone/frameworks_base_cmds_remoted", "max_issues_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2015-01-28T16:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-12T23:03:28.000Z", "max_forks_repo_path": "libs/boost/libs/predef/test/info_as_cpp.cpp", "max_forks_repo_name": "flingone/frameworks_base_cmds_remoted", "max_forks_repo_head_hexsha": "4509d9f0468137ed7fd8d100179160d167e7d943", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2015-01-28T16:33:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T05:04:39.000Z", "avg_line_length": 24.6703296703, "max_line_length": 121, "alphanum_fraction": 0.4623608018, "num_tokens": 555}
|
# measure version 1.8.7
from measure import npfarray,sqrt,ln,exp,arctan,lst,tbl,sig,val,mv,dsto_mv,dsys_mv,dtot_mv,plt,pltext,expreg,pi,curve_fit
# Task 1
R_A1 = npfarray([1,10,1])*1e3
R_A1_dsys = 0.05 * R_A1
C_A1 = npfarray([470,4.7,47])*1e-9
C_A1_dsys = 0.10 * C_A1
g_thalb = npfarray([312,32.6,32.6])*1e-6
g_thalb_dsys = npfarray([4,0.6,0.6])*1e-6
tau = R_A1 * C_A1
tau_dsys = sqrt((R_A1 * C_A1_dsys)**2 + (R_A1_dsys * C_A1)**2)
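# Gaussian error propagation for tau = R*C: dtau = sqrt((R*dC)^2 + (C*dR)^2)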
b_thalb = ln(2) * tau
b_thalb_dsys = ln(2) * tau_dsys
print()
print('Task 1:\n')
print(tbl([lst(R_A1,R_A1_dsys,'R'),lst(C_A1,C_A1_dsys,'C'),lst(tau,tau_dsys,'Tau')]))
print(tbl([lst(b_thalb,b_thalb_dsys,'T_1/2 (b)'),lst(g_thalb,g_thalb_dsys,'T_1/2 (g)'),['Abw']+[sig('',b_thalb[i],b_thalb_dsys[i],g_thalb[i],g_thalb_dsys[i]) for i in range(len(b_thalb))]]))
# Task 3
R_A3 = 1e3
R_A3_dsys = 0.05 * R_A3
C_A3 = 47e-9
C_A3_dsys = 0.05 * C_A3
f_A3 = npfarray([1,2,3,3.58,4,5,6,7,8,9,10])*1e3
dt = npfarray([200,83,46,30,27.2,18.8,14.0,10.4,8.0,6.8,4.1])*1e-6
dt_dsys = npfarray([20,5,5,5,3.0,2.5,2.5,2.0,1.5,1.5,1.0])*1e-6
Phi = 2*pi * f_A3 * dt
Phi_dsys = 2*pi * f_A3 * dt_dsys
from numpy import linspace
from scipy.optimize import fsolve
def phase_b(f):
return arctan(1/(2*pi*f*R_A3*C_A3))
def phase_b_dys(f):
return arctan(1/(2*pi*f*(R_A3+R_A3_dsys)*(C_A3+C_A3_dsys)))
def phase_b_45deg(f):
return phase_b(f) - pi/4
def phase_b_45deg_dsys(f):
return phase_b_dys(f) - pi/4
fgr_phase = fsolve(phase_b_45deg,x0=3.4e3)[0]
fgr_phase_dsys = abs(fsolve(phase_b_45deg_dsys,x0=0.3e3)[0] - fgr_phase)
fgr_fgang = npfarray([3.16,3.58])*1e3
fgr_fgang_dsys = npfarray([0.15,0.15])*1e3
fgr_fgang_mv = mv(fgr_fgang)
fgr_fgang_mv_dtot = dtot_mv(fgr_fgang,fgr_fgang_dsys)
fgr_calc = 1/(2*pi * R_A3 * C_A3)
fgr_calc_dsys = 1/(2*pi * R_A3 * C_A3) * sqrt((R_A3_dsys/R_A3)**2 + (C_A3_dsys/C_A3)**2)
f_array = linspace(1e3,10e3,1000)
pltext.initplot(num=1,title='Figure : Phase as a function of frequency',xlabel='Frequency in Hz',ylabel='Phase in rad',scale='loglin')
pltext.plotdata(f_A3,Phi,Phi_dsys,label='measured phase')
plt.plot([1e3,10e3],[pi/4,pi/4],label='45°')
plt.plot(f_array,phase_b(f_array),label='calculated')
plt.plot(f_array,phase_b_dys(f_array),label='calculated, with error')
plt.legend()
print()
print('Task 3:\n')
print(tbl([['Quantity','at 45° phase','frequency response','calculated'],lst([fgr_phase,fgr_fgang_mv,fgr_calc],[fgr_phase_dsys,fgr_fgang_mv_dtot,fgr_calc_dsys],'f_gr in Hz')]))
print(sig('Phase/Fgang',fgr_phase,fgr_phase_dsys,fgr_fgang_mv,fgr_fgang_mv_dtot))
print(sig('Phase/calc ',fgr_phase,fgr_phase_dsys,fgr_calc,fgr_calc_dsys))
print(sig('Fgang/calc ',fgr_fgang_mv,fgr_fgang_mv_dtot,fgr_calc,fgr_calc_dsys))
# Task 4
f_R = npfarray([3.93,3.73,3.70])*1e3
f_R_dsys = npfarray([0.10,0.05,0.03])*1e3
df = npfarray([4.90,1.26,0.57])*1e3
df_dsys = npfarray([0.05,0.05,0.05])*1e3
Ue = npfarray([0.99,0.96,0.95])
Ua = npfarray([0.95,0.77,0.35])
Ua_dsys = npfarray([0.01,0.01,0.01])
C_A4 = 47e-9
C_A4_dsys = 0.10 * C_A4
R_A4 = npfarray([1e3,220,47])
R_A4_dsys = 0.05 * R_A4
L_A4 = 1/(C_A4 * (2*pi * f_R)**2)
L_A4_dsys = 1/(C_A4 * (2*pi * f_R)**2) * sqrt((C_A4_dsys/C_A4)**2 + (2*f_R_dsys/f_R)**2)
L_A4_mv = mv(L_A4)
L_A4_mv_dtot = dtot_mv(L_A4,L_A4_dsys)
R_ges_A4 = 2*pi * df * L_A4
R_ges_A4_dsys = 2*pi * sqrt((df * L_A4_dsys)**2 + (df_dsys * L_A4)**2)
R_ges_A4_mv = mv(R_ges_A4)
R_ges_A4_mv_dtot = dtot_mv(R_ges_A4,R_ges_A4_dsys)
R_V_df_A4 = R_ges_A4 - R_A4
R_V_df_A4_dsys = sqrt(R_ges_A4_dsys**2 + R_A4_dsys**2)
R_V_df_A4_mv = mv(R_V_df_A4)
R_V_df_A4_mv_dtot = dtot_mv(R_V_df_A4,R_ges_A4_dsys)
R_V_U_A4 = R_A4 * (Ue / Ua - 1)
R_V_U_A4_dsys = sqrt(((Ue / Ua - 1) * R_A4_dsys)**2 + (R_A4 * Ue / Ua**2 * Ua_dsys)**2)
R_V_U_A4_mv = mv(R_V_U_A4)
R_V_U_A4_mv_dtot = dtot_mv(R_V_U_A4,R_V_U_A4_dsys)
print()
print('Task 4:\n')
print(val(L_A4_mv,L_A4_mv_dtot,'Inductance L'))
print()
print(tbl([lst(R_A4,R_A4_dsys,'R'),lst(R_ges_A4,R_ges_A4_dsys,'R_ges (df)'),lst(R_V_df_A4,R_V_df_A4_dsys,'R_V (df)'),lst(R_V_U_A4,R_V_U_A4_dsys,'R_V (U)')]))
print(tbl([lst([R_V_df_A4_mv],[R_V_df_A4_mv_dtot],'mv(R_V) (df)'),lst([R_V_U_A4_mv],[R_V_U_A4_mv_dtot],'mv(R_V) (U)'),['Abw']+[sig('',R_V_df_A4_mv,R_V_df_A4_mv_dtot,R_V_U_A4_mv,R_V_U_A4_mv_dtot)]]))
# Task 5
A = npfarray([1.58,1.13,0.80,0.58,0.41])
A_dsys = npfarray([0.05,0.05,0.05,0.05,0.05])
T = npfarray([260,260,257,257,258])*1e-6
T_dsys = npfarray([10,10,5,5,5])*1e-6
T_mv = mv(T)
T_mv_dtot = dtot_mv(T,T_dsys)
t = npfarray([n*T_mv for n in range(0,5)])
dt = npfarray([T_mv_dtot for n in range(0,5)])
pltext.initplot(num=2,title='Figure : Determination of the damping constant',xlabel='Time in s',ylabel='Amplitude in V',scale='linlog')
dc,dc_dsys,yitc,dyitc = expreg(t,A,A_dsys,dt,plot=True)
dc *= -1
R_A5 = 47.
R_A5_dsys = 0.05 * R_A5
R_ges_A5 = dc * 2 * L_A4_mv
R_ges_A5_dsys = 2* sqrt((dc * L_A4_mv_dtot)**2 + (dc_dsys * L_A4_mv)**2)
R_V_A5 = R_ges_A5 - R_A5
R_V_A5_dsys = sqrt(R_ges_A5_dsys**2 + R_A5_dsys**2)
print()
print('Task 5:\n')
print(val(dc,dc_dsys,'Damping constant d'))
print(val(R_ges_A5,R_ges_A5_dsys,'Total resistance R'))
print()
print(tbl([lst([R_V_A5],[R_V_A5_dsys],'R_V (A5)'),['Abw df',sig('',R_V_A5,R_V_A5_dsys,R_V_df_A4_mv,R_V_df_A4_mv_dtot)],['Abw U',sig('',R_V_A5,R_V_A5_dsys,R_V_U_A4_mv,R_V_U_A4_mv_dtot)]]))
# Task 6
R_A6 = 220.
R_A6_dsys = 0.05 * R_A6
C_A6 = 47e-9
C_A6_dsys = 0.05 * C_A6
f_R_g = npfarray([3.75,3.94,3.85])*1e3
f_R_g_dsys = npfarray([0.03,0.03,0.05])*1e3
f_R_b = npfarray([0,0,0])
f_R_b_dtot = npfarray([0,0,0])
wr = 1/sqrt(L_A4_mv * C_A6)
wr_dtot = 1/sqrt(L_A4_mv * C_A6) * sqrt((C_A6_dsys / (2 * C_A6))**2 + (L_A4_mv_dtot / (2 * L_A4_mv))**2)
delta = R_A6 / (2 * L_A4_mv)
delta_dtot = 1/(2 * L_A4_mv) * sqrt(R_A6_dsys**2 + (R_A6 * L_A4_mv_dtot / L_A4_mv)**2)
f_R_b[0] = sqrt(wr**2 - 2 * delta**2)
f_R_b_dtot[0] = 1/f_R_b[0] * sqrt((wr * wr_dtot)**2 + (2 * delta * delta_dtot)**2)
f_R_b[1] = sqrt(wr**2 + 2 * delta**2)
f_R_b_dtot[1] = 1/f_R_b[1] * sqrt((wr * wr_dtot)**2 + (2 * delta * delta_dtot)**2)
f_R_b[2] = wr
f_R_b_dtot[2] = wr_dtot
f_R_b /= 2*pi
f_R_b_dtot /= 2*pi
print()
print('Task 6:\n')
print(tbl([ ['Quantity','Capacitor','Coil','Resistor'], lst(f_R_g,f_R_g_dsys,'f_R (g)'), lst(f_R_b,f_R_b_dtot,'f_R (b)'), ['Abw']+[sig('',f_R_g[i],f_R_g_dsys[i],f_R_b[i],f_R_b_dtot[i]) for i in range(0,3)] ]))
# Task 7
f_R_g = 3.89e3
f_R_g_dsys = 0.05e3
print()
print('Task 7:\n')
print(tbl([ lst([f_R_g],[f_R_g_dsys],'f_R (g)'), lst([f_R_b[2]],[f_R_b_dtot[2]],'f_R (b)'), ['Abw',sig('',f_R_g,f_R_g_dsys,f_R_b[2],f_R_b_dtot[2])] ]))
# Task 8
def V(dbV):
return 10**(dbV/20)
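# V() converts a dBV reading to a linear amplitude ratio,
# e.g. V(-20.0) == 0.1 and V(0.0) == 1.0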
dbV_vals = npfarray([[-2.8,-10.9,-20.0],[-32.3,-13.6,-20.8],[-2.8,-14.7,-27.8],[-2.7,8.3,-30.5],[-32.2,-11.3,-23.8]])
V_vals = V(dbV_vals)
perc_100 = [V_vals[i][0] / V_vals[0][0] for i in range(1,5)]
perc_4k = [V_vals[i][1] / V_vals[0][1] for i in range(1,5)]
perc_8k = [V_vals[i][2] / V_vals[0][2] for i in range(1,5)]
print()
print('Task 8:\n')
print('Voltages in volts')
print(tbl([ ['Signal','no filter','RC high-pass','RC low-pass','LC low-pass','band-pass 1k'], lst(V_vals[:,0],name='100Hz'), lst(V_vals[:,1],name='4kHz'), lst(V_vals[:,2],name='8kHz') ]))
print()
print('Ratio to the unfiltered signal')
print(tbl([ ['Signal','RC high-pass','RC low-pass','LC low-pass','band-pass 1k'], lst(perc_100,name='100Hz'), lst(perc_4k,name='4kHz'), lst(perc_8k,name='8kHz') ]))
# Plot
print()
plt.show()
|
{"hexsha": "53ef4f6f300f56f970b130bf04a1470f97f04c00", "size": 7402, "ext": "py", "lang": "Python", "max_stars_repo_path": "PAP22-241.py", "max_stars_repo_name": "stephanlachnit/PAP", "max_stars_repo_head_hexsha": "13dad27dadac706edaa6ec61a522d5ba0b9b100d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PAP22-241.py", "max_issues_repo_name": "stephanlachnit/PAP", "max_issues_repo_head_hexsha": "13dad27dadac706edaa6ec61a522d5ba0b9b100d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PAP22-241.py", "max_forks_repo_name": "stephanlachnit/PAP", "max_forks_repo_head_hexsha": "13dad27dadac706edaa6ec61a522d5ba0b9b100d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.01, "max_line_length": 211, "alphanum_fraction": 0.6811672521, "include": true, "reason": "from numpy,from scipy", "num_tokens": 3483}
|
using DrWatson
@quickactivate "PECUZAL_Julia"
using DynamicalSystems
using DelayEmbeddings
using DelimitedFiles
using BenchmarkTools
include("../../src/pecuzal_method.jl")
include("../../src/data_analysis_functions.jl")
## We analyze the computational complexity of the proposed PECUZAL method in
# comparison to TDE, the method from G&A and MDOP. We feed in the y-component
# of the Rössler system for different time series lengths and compute the average
# computation time.
roe = Systems.roessler()
# set time interval for integration
N = 20000 # number of samples
dt_tra = 0.05
t = 0:dt_tra:(dt_tra*N)
tr = trajectory(roe, (N*dt_tra); dt = dt_tra, Ttr = (2000*dt_tra))
tr = regularize(tr)
# define functions which wrap the whole embedding process
function tde_time(data, w, taus)
# use cao's method
Y_tde, τ_tde, _ = optimal_traditional_de(data; w = w, τs = taus)
end
function GA_time(data, w, taus)
Y_GA, τ_vals_GA, ts_vals_GA, FNNs_GA , ns_GA = garcia_almeida_embedding(data;
τs = taus , w = w, T = w)
end
function mdop_time(data, w)
tw1 = mdop_maximum_delay(data)
lm1 = DelayEmbeddings.findlocalminima(tw1[2])
if lm1[1] ≤ 2
lmm1 = try lm1[2]
catch
lm1[1]
end
else
lmm1 = lm1[1]
end
taus_mdop = 0:lmm1
Y_mdop, τ_vals_mdop, ts_vals_mdop, FNNs_mdop , βs_mdop = mdop_embedding(data;
τs = taus_mdop , w = w)
end
function pec_time(data, w, taus)
Y_pec, τ_vals_pec, ts_vals_pec, Ls_pec , εs_pec = pecuzal_embedding_update(data;
τs = taus , w = w)
end
step = 1000
Nss = 1000:step:N
times_tde = zeros(length(Nss))
times_GA = zeros(length(Nss))
times_mdop = zeros(length(Nss))
times_pec = zeros(length(Nss))
for (i,Ns) in enumerate(Nss)
display("run: $i")
data = tr[1:Ns,2]
# compute Theiler window
w = estimate_delay(data, "mi_min", 0:150)
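    # ("mi_min" takes the first local minimum of the self mutual information
    # within the delay range 0:150)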
# and range of encountered time delays
taus1 = 1:(4*w)
taus2 = 0:(4*w)
b_tde = @benchmark tde_time($data, $w, $taus1)
b_GA = @benchmark GA_time($data, $w, $taus2)
b_mdop = @benchmark mdop_time($data, $w)
b_pec = @benchmark pec_time($data, $w, $taus2)
times_tde[i] = median(b_tde).time
times_GA[i] = median(b_GA).time
times_mdop[i] = median(b_mdop).time
times_pec[i] = median(b_pec).time
end
writedlm("./scripts/performance/results/times_tde.csv", times_tde)
writedlm("./scripts/performance/results/times_GA.csv", times_GA)
writedlm("./scripts/performance/results/times_mdop.csv", times_mdop)
writedlm("./scripts/performance/results/times_pec.csv", times_pec)
writedlm("./scripts/performance/results/times.csv", Nss)
|
{"hexsha": "c6d8c2ce34d1ab70ad922854e006f519da3590cf", "size": 2795, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/performance/comm_performance.jl", "max_stars_repo_name": "hkraemer/PECUZAL_Julia", "max_stars_repo_head_hexsha": "f68ae7bc3b3dc6116cbdf9682798345e80c22c0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/performance/comm_performance.jl", "max_issues_repo_name": "hkraemer/PECUZAL_Julia", "max_issues_repo_head_hexsha": "f68ae7bc3b3dc6116cbdf9682798345e80c22c0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-12-16T12:12:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-18T16:56:23.000Z", "max_forks_repo_path": "scripts/performance/comm_performance.jl", "max_forks_repo_name": "hkraemer/PECUZAL_Julia", "max_forks_repo_head_hexsha": "f68ae7bc3b3dc6116cbdf9682798345e80c22c0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-08T11:51:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-05T10:06:44.000Z", "avg_line_length": 29.4210526316, "max_line_length": 84, "alphanum_fraction": 0.6518783542, "num_tokens": 870}
|
"""
Copyright 2020 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than_or_equal_to
from brightics.common.exception import BrighticsFunctionException
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
from .short_text_topic_modeling_gsdmm import gsdmm_rwalk
import functools
import pyLDAvis
def gsdmm(table, **params):
check_required_parameters(_gsdmm, params, ['table'])
params = get_default_from_parameters_if_required(params, _gsdmm)
param_validation_check = [greater_than_or_equal_to(params, 2, 'K'),
greater_than_or_equal_to(params, 0.0, 'alpha'),
greater_than_or_equal_to(params, 0.0, 'beta'),
greater_than_or_equal_to(params, 1, 'max_iter'),
greater_than_or_equal_to(params, 1, 'num_topic_words')]
validate(*param_validation_check)
return _gsdmm(table, **params)
def _count_to_ratio_raw(word_count):
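    # convert raw word counts into l1-normalized ratios per topic,
    # e.g. {'a': 3, 'b': 1} -> {'a': 0.75, 'b': 0.25}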
if not word_count:
return {}
else:
word_count_list = word_count.items()
words = [pair[0] for pair in word_count_list]
counts = [[pair[1]] for pair in word_count_list]
counts_normalized = normalize(counts, norm='l1', axis=0)
word_ratio_raw = {word: ratio[0] for word, ratio in zip(words, counts_normalized)}
return word_ratio_raw
def _gen_table(word_ratio_raw, num_topic_words):
if not word_ratio_raw:
return [""]
else:
word_ratio_sorted = sorted(word_ratio_raw.items(), key=lambda item: item[1], reverse=True)
word_ratio = [["{}: {}".format(word, ratio), word, ratio] for word, ratio in word_ratio_sorted]
return np.transpose(word_ratio[:num_topic_words]).tolist()
def _gsdmm(table, input_col, topic_name='topic', K=10, alpha=0.1, beta=0.1, max_iter=50, num_topic_words=3):
docs = np.array(table[input_col])
docs_set = [set(doc) for doc in docs]
docs_preprocessed = [list(doc_set) for doc_set in docs_set]
vocab_set = list(set.union(*docs_set))
vocab_size = len(vocab_set)
# initialize and train a GSDMM model
mgp = gsdmm_rwalk.MovieGroupProcess(K=K, alpha=alpha, beta=beta, n_iters=max_iter)
topics = mgp.fit(docs_preprocessed, vocab_size)
# generate topic table
topic_word_count = mgp.cluster_word_distribution
topic_words_raw = [[ind, _count_to_ratio_raw(word_count)]
for ind, word_count in enumerate(topic_word_count) if word_count]
topic_words = [[item[0]] + _gen_table(item[1], num_topic_words) for item in topic_words_raw]
# reset topic ids
nonempty_topic_indices = [item[0] for item in topic_words]
reset_topic_ind = {old_ind: (new_ind + 1) for new_ind, old_ind in enumerate(nonempty_topic_indices)}
topics = [reset_topic_ind[old_ind] for old_ind in topics]
topic_words = [[reset_topic_ind[old_item[0]]] + old_item[1:] for old_item in topic_words]
# generate output dataframes
out_table = pd.DataFrame.copy(table, deep=True)
if topic_name in table.columns:
raise BrighticsFunctionException.from_errors(
[{'0100': "Existing table contains the topic column name. Please choose another name."}])
out_table[topic_name] = topics
columns = ['index', 'vocabularies_weights', 'vocabularies', 'weights']
topic_table = pd.DataFrame(topic_words, columns=columns)
topic_table['weights'] = topic_table['weights'].apply(pd.to_numeric)
# pyLDAvis
if len(topic_words) == 1:
html_result = None
else:
topic_words_dicts = [item[1] for item in topic_words_raw]
topic_term_dists = [[topic_words_dict.get(word, 0) for word in vocab_set] for topic_words_dict in
topic_words_dicts]
num_docs = len(topics)
num_topics = len(topic_words_raw)
doc_topic_dists = np.zeros((num_docs, num_topics))
for doc_id, topic_id in enumerate(topics):
doc_topic_dists[doc_id][topic_id - 1] = 1.0
doc_lengths = [len(doc) for doc in docs_preprocessed]
vocab_count = functools.reduce(
lambda dict_1, dict_2: {word: dict_1.get(word, 0) + dict_2.get(word, 0) for word in
set(dict_1).union(dict_2)},
topic_word_count)
term_frequency = [vocab_count.get(word) for word in vocab_set]
prepared_data = pyLDAvis.prepare(topic_term_dists, doc_topic_dists, doc_lengths, vocab_set, term_frequency)
html_result = pyLDAvis.prepared_data_to_html(prepared_data)
# generate report
params = {'Input column': input_col,
'Topic column name': topic_name,
'K': K,
'Alpha': alpha,
'Beta': beta,
'Maximum number of iterations': max_iter,
'Number of words for each topic': num_topic_words}
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## GSDMM Result
| ### Summary
|
"""))
if html_result is not None:
rb.addHTML(html_result)
rb.addMD(strip_margin("""
|
"""))
rb.addMD(strip_margin("""
| ### Final Number of Topics
| {num_topics}
|
| ### Parameters
| {params}
""".format(num_topics=len(topic_words_raw), params=dict2MD(params))))
# create model
model = _model_dict('lda_model')
model['params'] = params
model['gsdmm_model'] = mgp
model['_repr_brtc_'] = rb.get()
return {'out_table': out_table, 'topic_table': topic_table, 'model': model}
|
{"hexsha": "ecd4098ed92dc831e6e6c517adc422fdc82d0751", "size": 6408, "ext": "py", "lang": "Python", "max_stars_repo_path": "function/python/brightics/function/textanalytics/gsdmm.py", "max_stars_repo_name": "jhpark428/studio", "max_stars_repo_head_hexsha": "539457b3026dda827c1b17b4cb851946e34e3b85", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 202, "max_stars_repo_stars_event_min_datetime": "2018-10-23T04:37:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T05:51:10.000Z", "max_issues_repo_path": "function/python/brightics/function/textanalytics/gsdmm.py", "max_issues_repo_name": "sagarmk/studio", "max_issues_repo_head_hexsha": "3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 444, "max_issues_repo_issues_event_min_datetime": "2018-11-07T08:41:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T06:48:57.000Z", "max_forks_repo_path": "function/python/brightics/function/textanalytics/gsdmm.py", "max_forks_repo_name": "sagarmk/studio", "max_forks_repo_head_hexsha": "3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 99, "max_forks_repo_forks_event_min_datetime": "2018-11-08T04:12:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T05:36:27.000Z", "avg_line_length": 41.3419354839, "max_line_length": 115, "alphanum_fraction": 0.6766541823, "include": true, "reason": "import numpy", "num_tokens": 1549}
|
import csv
import json
import requests
import pandas as pd
import time
import numpy as np
import rawgpy
rawg = rawgpy.RAWG("student project for university")
date_from = "2019-01-01"
date_to = "2019-01-02"
results = rawg.get_request("https://api.rawg.io/api/games?dates=" + date_from + "," + date_to + "&platforms=4&stores=1")
url_list = []
def if_field_exists(field, game_data):
if field in game_data:
field = game_data[field]
else:
field = " "
return field
def get_RAWG_data(results, outfile="games.csv", steam_file="steamFile"):
is_next = True
with open(outfile, 'a+', newline='', encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(["id", "name", "released", "rating", "exceptional",
"recommended", "meh", "skip", "metacritic", "tag1",
"tag2", "tag3", "tag4", "rating_count", "developer", "publisher"])
while is_next:
save_game_info(results, writer)
if results["next"] is not None:
try:
results = rawg.get_request(results["next"])
print(results["next"])
except:
print("nie udało sie pobrac strony " + results["next"])
else:
is_next = False
with open(steam_file, "w") as outfile:
outfile.write("\n".join(str(item) for item in url_list))
def save_game_info(results, writer):
for i in range(len(results["results"])):
game = results["results"][i]["slug"]
populated_game = rawg.get_game(game)
populated_game.populate()
json_game = populated_game.json
        steam_url = get_steam_url(json_game)
id = parse_id(steam_url)
rating1, rating2, rating3, rating4 = parse_rating(json_game)
tag1, tag2, tag3, tag4 = parse_tags(json_game)
        name = if_field_exists("name", json_game)
        release = if_field_exists("released", json_game)
        rating = if_field_exists("rating", json_game)
        metacritic = if_field_exists("metacritic", json_game)
        ratings_count = if_field_exists("ratings_count", json_game)
developer = parse_developer(json_game)
publisher = parse_publisher(json_game)
writer.writerow([id, name, release, rating,
rating1, rating2,
rating3, rating4,
metacritic, tag1,
tag2, tag3,
tag4,
ratings_count,
developer, publisher])
def get_steam_url(json_game):
if len(json_game["stores"]) > 0:
for j in range(len(json_game["stores"])):
if json_game["stores"][j]['store']['id'] == 1:
url = json_game["stores"][j]["url"]
url_list.append(url)
return url
return None
def parse_id(steam_url):
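    # e.g. "https://store.steampowered.com/app/12345/Some_Game/" -> "12345"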
id = " "
if steam_url is not None:
id = steam_url.split("/")[4]
return id
def parse_rating(json_game):
rating1 = rating2 = rating3 = rating4 = ""
if "ratings" in json_game:
size = len(json_game["ratings"])
rating1 = json_game["ratings"][0]["count"] if size > 0 else 0
rating2 = json_game["ratings"][1]["count"] if size > 1 else 0
rating3 = json_game["ratings"][2]["count"] if size > 2 else 0
rating4 = json_game["ratings"][3]["count"] if size > 3 else 0
return rating1, rating2, rating3, rating4
def parse_tags(json_game):
tag1 = tag2 = tag3 = tag4 = " "
if "tags" in json_game:
size = len(json_game["tags"])
tag1 = json_game["tags"][0]["name"] if size > 0 else " "
tag2 = json_game["tags"][1]["name"] if size > 1 else " "
tag3 = json_game["tags"][2]["name"] if size > 2 else " "
tag4 = json_game["tags"][3]["name"] if size > 3 else " "
return tag1, tag2, tag3, tag4
def parse_publisher(json_game):
if "publishers" in json_game:
size = len(json_game["publishers"])
publisher = json_game["publishers"][0]["name"] if size > 0 else " "
else:
publisher = ""
return publisher
def parse_developer(json_game):
if "developers" in json_game:
size = len(json_game["developers"])
developer = json_game["developers"][0]["name"] if size > 0 else " "
else:
developer = ""
return developer
def collect_steam_store_data(steam_store_url, url_list):
app_list = [url.strip().split('/')[4] for url in url_list]
game_list = []
for game_id in app_list:
r = requests.get(steam_store_url, params = {'appids':game_id})
if r.status_code == 200:
game_json = r.json()[game_id]
if game_json['success'] == True and game_json['data']['type'] == 'game':
game_data = game_json['data']
game_dict = {
'id': game_id,
'title': game_data['name'],
'developer': '',
'publisher': '',
'release_date': '',
'genres': ''
}
if 'developers' in game_data:
game_dict['developer'] = game_data['developers'][0]
if 'publishers' in game_data:
game_dict['publisher'] = game_data['publishers'][0]
if 'release_date' in game_data:
game_dict['release_date'] = game_data['release_date']['date']
if 'genres' in game_data:
game_dict['genres'] = game_data['genres'][0]['description']
game_list.append(game_dict)
else:
print('game id: {}, error code: {}'.format(game_id, r.status_code))
time.sleep(2)
steam_games_df = pd.DataFrame.from_dict(game_list)
    steam_games_df = steam_games_df.set_index('id', drop=False)
steam_games_df = steam_games_df.loc[~steam_games_df.index.duplicated(keep='first')]
steam_games_df.to_csv('steam_data.csv')
return steam_games_df
def collect_steam_reviews_data(steam_games_df):
reviews_list = []
for _, row in steam_games_df.iterrows():
game_id = row['id']
try:
r = requests.get(reviews_url + str(game_id), params = {'json': 1, 'language': 'all'})
if r.status_code == 200:
game_json = r.json()
if game_json['success'] == 1:
game_json = game_json['query_summary']
game_dict = {
'id': game_id,
'review_score': None,
'total_positive': None,
'total_reviews': None
}
if 'review_score' in game_json:
game_dict['review_score'] = game_json['review_score']
if 'total_positive' in game_json:
game_dict['total_positive'] = game_json['total_positive']
if 'total_reviews' in game_json:
game_dict['total_reviews'] = game_json['total_reviews']
reviews_list.append(game_dict)
else:
                    print('failed to get reviews for game id: {}'.format(game_id))
except Exception as e:
            print('exception for game id {}: {}'.format(game_id, e))
steam_reviews_df = pd.DataFrame.from_dict(reviews_list)
steam_reviews_df = steam_reviews_df.set_index('id')
steam_reviews_df.to_csv('steam_reviews.csv')
def merge_steam_games_and_reviews(steam_games_df, steam_reviews_df):
steam_games_with_reviews_df = steam_games_df.loc[steam_reviews_df.index]
steam_games_and_reviews_df = pd.merge(steam_reviews_df, steam_games_with_reviews_df, how='inner', on=steam_games_with_reviews_df.index)
steam_games_and_reviews_df.set_index('key_0', inplace=True)
steam_games_and_reviews_df.to_csv('steam_games_and_reviews.csv')
return steam_games_and_reviews_df
def merge_steam_and_rawg_games(steam_games_and_reviews_df, rawg_games_df):
all_games_df = steam_games_and_reviews_df.join(rawg_games_df, how='inner', lsuffix='_steam', rsuffix='_rawg')
all_games_df.index.name = 'id'
all_games_df.to_csv('all_games.csv')
return all_games_df
steam_store_url = 'https://store.steampowered.com/api/appdetails'
reviews_url = 'http://store.steampowered.com/appreviews/'
|
{"hexsha": "c9b76bfde44f0997ebe039fdac65ef0d1f254c36", "size": 8256, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_collection.py", "max_stars_repo_name": "GillinedUp/video-games-data-integration", "max_stars_repo_head_hexsha": "8ed6fd5af3d67dd9ba9154de2ec9e196d121b7ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_collection.py", "max_issues_repo_name": "GillinedUp/video-games-data-integration", "max_issues_repo_head_hexsha": "8ed6fd5af3d67dd9ba9154de2ec9e196d121b7ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_collection.py", "max_forks_repo_name": "GillinedUp/video-games-data-integration", "max_forks_repo_head_hexsha": "8ed6fd5af3d67dd9ba9154de2ec9e196d121b7ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5309734513, "max_line_length": 139, "alphanum_fraction": 0.5806686047, "include": true, "reason": "import numpy", "num_tokens": 1923}
|
from __future__ import print_function
import runai.mp
runai.mp.init(splits=2, method=runai.mp.Method.Cout)
#runai.mp.init(splits=2, method=runai.mp.Method.Cin)
import keras
from keras.applications.resnet50 import ResNet50,preprocess_input
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
import click
import time
num_classes = 10
# Model parameter
# ----------------------------------------------------------------------------
# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# ----------------------------------------------------------------------------
# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# ---------------------------------------------------------------------------
def concat_ones(src):
l = list(src.shape)
l[3] = 1
shape = tuple(l)
ones = np.zeros(shape)
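    # note: despite the variable name, the appended channel is zero-valued;
    # this pads the 3-channel CIFAR images to 4 channels (presumably so the
    # channel dimension splits evenly across the 2 model-parallel groups)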
return np.concatenate((src, ones), 3)
def get_data(subtract_pixel_mean):
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
x_train = concat_ones(x_train)
x_test = concat_ones(x_test)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train,y_train,x_test,y_test
'''
def lr_schedule(epoch, lr):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
if epoch == 181:
lr *= 0.5
elif epoch == 161:
lr *= 1e-1
elif epoch == 121:
lr *= 1e-1
elif epoch == 81:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
'''
def lr_schedule_with_init(epoch,init_lr, lr):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr1 = init_lr
if epoch > 180:
lr1 *= 0.5e-3
elif epoch > 160:
lr1 *= 1e-3
elif epoch > 120:
lr1 *= 1e-2
elif epoch > 80:
lr1 *= 1e-1
if True:# lr > lr1:
lr = lr1
print('Learning rate: ', lr)
return lr
class StepTimeReporter(keras.callbacks.Callback):
def __init__(self):
self.start = 0
def on_batch_begin(self, batch, logs={}):
self.start = time.time()
def on_batch_end(self, batch, logs={}):
print(' >> Step %d took %g sec' % (batch, time.time() - self.start))
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature map sizes.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides)
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and the
same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet50_builtin():
model = ResNet50(include_top=True, weights=None, classes=10,input_shape=[32,32,3])
return model
def get_model(version, input_shape, depth, lr):
if version == 2:
model = resnet_v2(input_shape=input_shape, depth=depth)
if version == 1:
model = resnet_v1(input_shape=input_shape, depth=depth)
if version == 3:
model = resnet50_builtin()
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=lr),
metrics=['accuracy'])
model.summary()
return model
def run(epochs=200, batch_size=32, init_lr=1e-3, version=2, n=6, subtract_pixel_mean=True, data_augmentation=True):
# Get Data
# Subtracting pixel mean improves accuracy
x_train,y_train,x_test,y_test = get_data(subtract_pixel_mean)
# Input image dimensions.
input_shape = x_train.shape[1:]
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
model_type = 'ResNet%dv%d' % (depth, version)
elif version == 2:
depth = n * 9 + 2
model_type = 'ResNet%dv%d' % (depth, version)
elif version == 3:
model_type='ResNet50builtin'
depth = 0
print(model_type)
# Get Model
model = get_model(version, input_shape, depth, init_lr)
# Tensorboard location
if os.getenv('RUNAI_TENSORBOARD_DIR'):
outputs_dir = '/runai/outputs/'
else:
outputs_dir = '/tmp/'
# Prepare model model saving directory.
save_dir = os.path.join(outputs_dir, 'saved_models')
model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_schedule = lambda epoch,lr: lr_schedule_with_init(epoch,init_lr,lr)
lr_scheduler = LearningRateScheduler(lr_schedule,verbose=1)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6,
verbose=1)
    cur_output_dir = os.getenv('RUNAI_TENSORBOARD_DIR')  # RUNAI_TENSORBOARD_DIR, e.g. './output'
if not cur_output_dir:
cur_output_dir='/tmp/'
tensor_board = TensorBoard(log_dir=os.path.join(cur_output_dir, 'tensor_board'),
histogram_freq=0,
write_graph=False,
write_images=False)
time_reporter = StepTimeReporter()
callbacks = [checkpoint, lr_reducer, lr_scheduler, time_reporter, tensor_board]
# Run training, with or without data augmentation.
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
preprocess_function = None
if version == 3:
preprocess_function = preprocess_input
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set range for random shear
shear_range=0.,
# set range for random zoom
zoom_range=0.,
# set range for random channel shifts
channel_shift_range=0.,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# value used for fill_mode = "constant"
cval=0.,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=preprocess_function,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
steps_per_epoch = np.ceil(len(y_train) / float(batch_size))
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
validation_data=(x_test, y_test),
epochs=epochs, verbose=1, workers=4,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# exercise different output information and formats
save_dir = os.path.join(outputs_dir, 'saved_weights')
model_name = 'cifar10_%s_model.weights.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
model.save_weights(filepath)
with open(os.path.join(outputs_dir,"model.json"), "w") as f:
f.write(model.to_json())
with open(os.path.join(outputs_dir,"model.yaml"), "w") as f:
f.write(model.to_yaml())
@click.command()
@click.option('--epochs',
default=200,
show_default=True,
help='epochs',
type=int
)
@click.option(
'--batch_size',
default=32,
show_default=True,
help='batch_size',
type=int
)
@click.option('--lr',
default=1e-3,
show_default=True,
help='lr',
type=float)
@click.option(
'--version',
default='2',
type=click.Choice(['1','2','3']),
)
@click.option('--n',
type=click.Choice(['2','3','5','6','7','9','12','18','27','111']),
default='2',
show_default=True
)
@click.option(
'--subtract_pixel_mean/--no-subtract_pixel_mean',
default = True,
is_flag = True,
show_default = True,
help = 'subtract_pixel_mean',
)
@click.option(
'--data_augmentation/--no-data-augmentation',
default = True,
is_flag = True,
show_default = True,
help = '',
)
def main(epochs, batch_size, lr, version, n, subtract_pixel_mean, data_augmentation):
run(epochs=epochs,
batch_size=batch_size,
init_lr=lr,
version=int(version),
n=int(n),
subtract_pixel_mean=subtract_pixel_mean,
data_augmentation=data_augmentation)
if __name__ == "__main__":
main()
|
{"hexsha": "80a779c0fc6b3a77378c5745f715f692e807d36d", "size": 20217, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mp/keras/cifar10.py", "max_stars_repo_name": "bamps53/runai", "max_stars_repo_head_hexsha": "0c868160f64e1e063c6eb6f660d42917322d40c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 86, "max_stars_repo_stars_event_min_datetime": "2020-01-23T18:56:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T22:32:08.000Z", "max_issues_repo_path": "examples/mp/keras/cifar10.py", "max_issues_repo_name": "bamps53/runai", "max_issues_repo_head_hexsha": "0c868160f64e1e063c6eb6f660d42917322d40c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-01-24T17:55:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T01:01:32.000Z", "max_forks_repo_path": "examples/mp/keras/cifar10.py", "max_forks_repo_name": "bamps53/runai", "max_forks_repo_head_hexsha": "0c868160f64e1e063c6eb6f660d42917322d40c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-02-03T14:30:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T16:06:59.000Z", "avg_line_length": 33.2516447368, "max_line_length": 115, "alphanum_fraction": 0.5797596083, "include": true, "reason": "import numpy", "num_tokens": 4770}
|
# Internal Imports
from os import error
from alpha import Alpha
from learnt_model import LearntModel
from model import Model
from operations import OPS
from util import load_alpha
# External Imports
from copy import deepcopy
from datetime import datetime
from lucent.modelzoo import inceptionv1, util
from lucent.misc.channel_reducer import ChannelReducer
from lucent.optvis import objectives, param, render, transform
from PIL import Image
from pprint import pprint
import random
import sys
import torch
import torch.nn as nn
import torch.nn.functional as f
import torchvision
import numpy as np
from itertools import product
''' Model Setup '''
# Seed for reproducibility
torch.manual_seed(0)
random.seed(0)
# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Constants
NUM_EPOCHS = 50
# Load model from argument
model = torch.load(sys.argv[1])
if torch.cuda.is_available():
model.cuda()
model.eval()
print(util.get_model_layers(model))
''' Helper Functions'''
def parse_layer(layer):
if "main_net" in layer: # FIXME: Need to change for MNAS
_, _, cell_num, _, edge, _ = tuple(layer.split("_"))
return int(cell_num), edge
else:
raise error("Invalid Layer Name")
@torch.no_grad()
def get_layer(model, layer, X):
#cell_num, edge = parse_layer(layer)
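    # NOTE: the module path below is hard-coded for one specific edge, so the
    # `layer` argument is currently ignored (parse_layer is commented out above)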
hook = render.ModuleHook(model.main_net[0].ops[str((1,2))].ops[str((0,1))].op)
model(X)
hook.close()
return f.relu(hook.features)
@objectives.wrap_objective()
def dot_compare(layer, acts, batch=1):
def inner(T):
pred = T(layer)[batch]
return -(pred * acts).sum(dim=0, keepdims=True).mean()
return inner
''' Activation Grid Functions '''
def render_activation_grid_less_naive(
img,
model,
layer="main_net_0_ops_(1, 2)_ops_(0, 1)_op",
cell_image_size=60,
n_groups=6,
n_steps=1024,
batch_size=64,
):
    # First we need to normalize and resize the image
img = torch.tensor(np.transpose(img, [2, 0, 1])).to(device)
normalize = (
transform.preprocess_inceptionv1()
if model._get_name() == "InceptionV1"
else transform.normalize()
)
transforms = transform.standard_transforms.copy() + [
normalize,
torch.nn.Upsample(size=224, mode="bilinear", align_corners=True),
]
transforms_f = transform.compose(transforms)
# shape: (1, 3, original height of img, original width of img)
img = img.unsqueeze(0)
# shape: (1, 3, 224, 224)
img = transforms_f(img)
# Here we compute the activations of the layer `layer` using `img` as input
# shape: (layer_channels, layer_height, layer_width), the shape depends on the layer
acts = get_layer(model, layer, img)[0]
# shape: (layer_height, layer_width, layer_channels)
acts = acts.permute(1, 2, 0)
# shape: (layer_height*layer_width, layer_channels)
acts = acts.view(-1, acts.shape[-1])
acts_np = acts.cpu().numpy()
nb_cells = acts.shape[0]
    # non-negative matrix factorization `NMF` is used to reduce the number
    # of channels to n_groups. This will be used as follows.
    # Each cell image in the grid is decomposed into a sum of
    # (n_groups+1) images. First, each cell has its own set of parameters;
    # this is what is called `cells_params` (see below). At the same time, we have
    # a group of images of size 'n_groups', which also have their own image parametrized
    # by `groups_params`. The resulting image for a given cell in the grid
    # is the sum of its own image (parametrized by `cells_params`)
    # plus a weighted sum of the images of the group. Each image from the group
    # is weighted by `groups[cell_index, group_idx]`. Basically, this makes cells
    # with similar activations tend to have similar images, because cells with
    # similar activations will have a similar weighting for the elements
    # of the group.
if n_groups > 0:
reducer = ChannelReducer(n_groups, "NMF")
groups = reducer.fit_transform(acts_np)
groups /= groups.max(0)
else:
groups = np.zeros([])
# shape: (layer_height*layer_width, n_groups)
groups = torch.from_numpy(groups)
# Parametrization of the images of the groups (we have 'n_groups' groups)
groups_params, groups_image_f = param.fft_image(
[n_groups, 3, cell_image_size, cell_image_size]
)
# Parametrization of the images of each cell in the grid (we have 'layer_height*layer_width' cells)
cells_params, cells_image_f = param.fft_image(
[nb_cells, 3, cell_image_size, cell_image_size]
)
# First, we need to construct the images of the grid
# from the parameterizations
def image_f():
groups_images = groups_image_f()
cells_images = cells_image_f()
X = []
for i in range(nb_cells):
x = 0.7 * cells_images[i] + 0.5 * sum(
groups[i, j] * groups_images[j] for j in range(n_groups)
)
X.append(x)
X = torch.stack(X)
return X
# make sure the images are between 0 and 1
image_f = param.to_valid_rgb(image_f, decorrelate=True)
    # After constructing the cell images, we randomly sample a mini-batch of cells
# from the grid. This is to prevent memory overflow, especially if the grid
# is large.
def sample(image_f, batch_size):
def f():
X = image_f()
inds = torch.randint(0, len(X), size=(batch_size,))
inputs = X[inds]
# HACK to store indices of the mini-batch, because we need them
            # in the objective func. There might be better ways to do this.
sample.inds = inds
return inputs
return f
image_f_sampled = sample(image_f, batch_size=batch_size)
# Now, we define the objective function
def objective_func(model):
# shape: (batch_size, layer_channels, cell_layer_height, cell_layer_width)
pred = f.relu(model(layer))
# use the sampled indices from `sample` to get the corresponding targets
target = acts[sample.inds].to(pred.device)
# shape: (batch_size, layer_channels, 1, 1)
target = target.view(target.shape[0], target.shape[1], 1, 1)
dot = (pred * target).sum(dim=1).mean()
return -dot
obj = objectives.Objective(objective_func)
def param_f():
# We optimize the parametrizations of both the groups and the cells
params = list(groups_params) + list(cells_params)
return params, image_f_sampled
results = render.render_vis(
model,
obj,
param_f,
thresholds=(n_steps,),
show_image=False,
progress=True,
fixed_image_size=cell_image_size,
)
# shape: (layer_height*layer_width, 3, grid_image_size, grid_image_size)
imgs = image_f()
imgs = imgs.cpu().data
imgs = imgs[:, :, 2:-2, 2:-2]
    # turn imgs into a grid
grid = torchvision.utils.make_grid(imgs, nrow=int(np.sqrt(nb_cells)), padding=0)
grid = grid.permute(1, 2, 0)
grid = grid.numpy()
render.show(grid)
return imgs
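# Hedged illustration (not part of the original script): a minimal sketch of the
# NMF-based channel reduction described in the comments above, written directly
# against scikit-learn instead of the ChannelReducer helper. The function name and
# parameters below are made up; it only shows how per-cell group weights of shape
# (n_cells, n_groups) can be obtained and normalized.
def _nmf_group_weights_sketch(acts_np, n_groups=6):
    from sklearn.decomposition import NMF
    reducer = NMF(n_components=n_groups, init="nndsvda", max_iter=500)
    # NMF requires non-negative input; clip just in case the activations are not.
    weights = reducer.fit_transform(np.maximum(acts_np, 0))
    # Normalize each group's weights to [0, 1], mirroring `groups /= groups.max(0)` above.
    weights = weights / (weights.max(axis=0) + 1e-8)
    return weights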
def render_activation_grid_very_naive(
img, model, layer="main_net_0_ops_(1, 2)_ops_(0, 1)_op", cell_image_size=48, n_steps=1024
):
    # First we need to normalize and resize the image
img = torch.tensor(np.transpose(img, [2, 0, 1])).to(device)
normalize = (
transform.preprocess_inceptionv1()
if model._get_name() == "InceptionV1"
else transform.normalize()
)
transforms = [
normalize,
torch.nn.Upsample(size=224, mode="bilinear", align_corners=True),
]
transforms_f = transform.compose(transforms)
# shape: (1, 3, original height of img, original width of img)
img = img.unsqueeze(0)
# shape: (1, 3, 224, 224)
img = transforms_f(img)
# Here we compute the activations of the layer `layer` using `img` as input
# shape: (layer_channels, layer_height, layer_width), the shape depends on the layer
acts = get_layer(model, layer, img)[0]
layer_channels, layer_height, layer_width = acts.shape
# for each position `(y, x)` in the feature map `acts`, we optimize an image
# to match with the features `acts[:, y, x]`
# This means that the total number of cells (which is the batch size here)
# in the grid is layer_height*layer_width.
nb_cells = layer_height * layer_width
    # Parametrization of the image of each cell in the grid
param_f = lambda: param.image(
cell_image_size, batch=nb_cells
)
params, image_f = param_f()
obj = objectives.Objective.sum(
[
            # For each position in `acts`, maximize the dot product between the activations
            # `acts` at the position (y, x) and the features of the corresponding
            # cell image on our 'grid'. The activations at (y, x) are a vector of size
            # `layer_channels` (this depends on the `layer`). The features
            # of the corresponding cell on our grid are a tensor of shape
            # (layer_channels, cell_layer_height, cell_layer_width).
            # Note that cell_layer_width != layer_width and cell_layer_height != layer_height
            # because the cell image size is smaller than the image size.
            # With `dot_compare`, we maximize the dot product between
            # cell_activations[y_cell, x_cell] and acts[y, x] (both of size `layer_channels`)
            # for each possible y_cell and x_cell, then take the average to get a single
            # number. Check `dot_compare` for more details.
dot_compare(layer, acts[:, y:y+1, x:x+1], batch=x + y * layer_width)
for i, (x, y) in enumerate(product(range(layer_width), range(layer_height)))
]
)
results = render.render_vis(
model,
obj,
param_f,
thresholds=(n_steps,),
progress=True,
fixed_image_size=cell_image_size,
show_image=False,
)
# shape: (layer_height*layer_width, cell_image_size, cell_image_size, 3)
imgs = results[-1] # last step results
# shape: (layer_height*layer_width, 3, cell_image_size, cell_image_size)
imgs = imgs.transpose((0, 3, 1, 2))
imgs = torch.from_numpy(imgs)
imgs = imgs[:, :, 2:-2, 2:-2]
    # turn imgs into a grid
grid = torchvision.utils.make_grid(imgs, nrow=int(np.sqrt(nb_cells)), padding=0)
grid = grid.permute(1, 2, 0)
grid = grid.numpy()
render.show(grid)
return imgs
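# Hedged sketch (not from the original script): one plausible shape for the
# `dot_compare`-style objective referenced in the comments above. The real
# `dot_compare` is defined elsewhere in this script; this standalone version only
# illustrates dotting a target activation vector against every spatial position of
# a layer's activations and averaging the result.
def _dot_compare_sketch(layer_acts, target_vec):
    # layer_acts: (channels, H, W) tensor; target_vec: (channels,) tensor
    dots = (layer_acts * target_vec.view(-1, 1, 1)).sum(dim=0)  # (H, W) dot products
    return dots.mean()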
''' Visualize '''
print(sys.argv)
img = np.array(Image.open("dog.jpg"), np.float32)
_ = render_activation_grid_very_naive(
img, model, cell_image_size=int(sys.argv[2]), n_steps=int(sys.argv[3]))
|
{"hexsha": "c608ff60db6f9cd3431f78613fd57f3959601208", "size": 10751, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_visualization.py", "max_stars_repo_name": "sjoshi804/neural-architecture-search-project", "max_stars_repo_head_hexsha": "b28c23383dc5d8f9da8023a70786313dc5696cf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-22T09:26:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-02T20:06:07.000Z", "max_issues_repo_path": "feature_visualization.py", "max_issues_repo_name": "sjoshi804/neural-architecture-search-project", "max_issues_repo_head_hexsha": "b28c23383dc5d8f9da8023a70786313dc5696cf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-11-16T04:57:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-22T02:59:25.000Z", "max_forks_repo_path": "feature_visualization.py", "max_forks_repo_name": "sjoshi804/neural-architecture-search-project", "max_forks_repo_head_hexsha": "b28c23383dc5d8f9da8023a70786313dc5696cf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-21T19:15:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T13:27:15.000Z", "avg_line_length": 36.5680272109, "max_line_length": 103, "alphanum_fraction": 0.6612408148, "include": true, "reason": "import numpy", "num_tokens": 2725}
|
import os,random
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["THEANO_FLAGS"] = "device=gpu%d"%(1)
os.environ["MKL_THREADING_LAYER"] = "GNU"
import numpy as np
from keras.utils import np_utils
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.noise import GaussianNoise
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.regularizers import *
from keras.optimizers import adam
import random, sys, keras
from keras import backend as K
def deepsensing_network(in_shp = [2, 128], classes = ['busy' ,'idle']):
K.set_image_dim_ordering('th')
dr = 0.5
model = models.Sequential()
model.add(Reshape([1]+in_shp, input_shape=in_shp))
model.add(ZeroPadding2D((0, 2)))
model.add(Convolution2D(256, 1, 3, border_mode='valid', activation="relu", name="conv1", init='glorot_uniform'))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2)))
model.add(Convolution2D(80, 2, 3, border_mode="valid", activation="relu", name="conv2", init='glorot_uniform'))
model.add(Dropout(dr))
model.add(Flatten())
model.add(Dense(256, activation='relu', init='he_normal', name="dense1"))
model.add(Dropout(dr))
model.add(Dense( len(classes), init='he_normal', name="dense2" ))
model.add(Activation('softmax'))
model.add(Reshape([len(classes)]))
model.compile(loss='categorical_crossentropy', optimizer='adam')
#model.summary()
return model
def deepsensing_train(datafile, EbN0, in_shp=[2,128], classes=['busy', 'idle'], nb_epoch=100, batch_size=1000):
from util import dataset_load
[X_train, Y_train, X_test, Y_test] = dataset_load(datafile)
model = deepsensing_network(in_shp, classes)
model_saved_path = 'QPSK.wts_' + str(EbN0) + '.h5'
history = model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
verbose=2,
validation_data=(X_test, Y_test),
callbacks = [
keras.callbacks.ModelCheckpoint(model_saved_path, monitor='val_loss', verbose=0, save_best_only=True, mode='auto'),
keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
])
model.load_weights(model_saved_path)
X = [X_train, Y_train, X_test, Y_test]
return model, model_saved_path, X
def deepsensing_load_model(modelfile, in_shp=[2,128], classes=['busy', 'idle']):
model = deepsensing_network(in_shp, classes)
model.load_weights(modelfile)
return model
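# Hedged usage sketch (not part of the original module): how the helpers above might
# be chained. The data file name and EbN0 value are placeholders, and `util.dataset_load`
# is assumed to return [X_train, Y_train, X_test, Y_test] as used in deepsensing_train.
def _deepsensing_example():
    model, weights_path, data = deepsensing_train('qpsk_dataset.dat', EbN0=10,
                                                  nb_epoch=20, batch_size=512)
    # Later, reload the best checkpoint without retraining:
    restored = deepsensing_load_model(weights_path)
    return restored, data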
|
{"hexsha": "d903fa37d9c767d4ad5d8eb67af9bf31d44f452c", "size": 2623, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn/network.py", "max_stars_repo_name": "mlcomm/deepsensing", "max_stars_repo_head_hexsha": "ede1b0431b0eb5554b63c35c094eecdf544c4795", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nn/network.py", "max_issues_repo_name": "mlcomm/deepsensing", "max_issues_repo_head_hexsha": "ede1b0431b0eb5554b63c35c094eecdf544c4795", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn/network.py", "max_forks_repo_name": "mlcomm/deepsensing", "max_forks_repo_head_hexsha": "ede1b0431b0eb5554b63c35c094eecdf544c4795", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6349206349, "max_line_length": 127, "alphanum_fraction": 0.6778497903, "include": true, "reason": "import numpy", "num_tokens": 700}
|
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import argparse
import json
import logging
import numpy
import subprocess
def getMinMax(json_string):
j = json.loads(json_string)
j = j["stats"]["statistic"]
minX = j[0]["minimum"]
maxX = j[0]["maximum"]
minY = j[1]["minimum"]
maxY = j[1]["maximum"]
return minX, maxX, minY, maxY
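# Illustrative only (not part of the original script): the JSON layout getMinMax()
# expects matches the output of `pdal info --stats --dimensions X,Y`; the numbers
# below are made up.
_EXAMPLE_PDAL_INFO = '''{"stats": {"statistic": [
    {"name": "X", "minimum": 100.0, "maximum": 250.0},
    {"name": "Y", "minimum": 300.0, "maximum": 475.0}]}}'''
# getMinMax(_EXAMPLE_PDAL_INFO) would return (100.0, 250.0, 300.0, 475.0)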
def main(args):
parser = argparse.ArgumentParser(
description='Generate a Digital Surface Model (DSM) from a point cloud')
parser.add_argument("-s", "--source_points", nargs="+", help="Source points file[s]")
parser.add_argument("destination_image", help="Destination image file name")
parser.add_argument("--bounds", nargs=4, type=float, action="store",
help="Destination image bounds (using the coordinate system "
"of the source_points file): minX, maxX, minY, maxY. "
"If not specified, it is computed from source_points files")
parser.add_argument("--gsd", help="Ground sample distance")
args = parser.parse_args(args)
if not args.gsd:
args.gsd = 0.25
print("Using gsd = 0.25 m")
else:
# Make sure the GSD is a float
args.gsd = float(args.gsd)
if not args.source_points:
raise RuntimeError("Error: At least one source_points file required")
if args.bounds:
minX, maxX, minY, maxY = args.bounds
else:
print("Computing the bounding box for {} point cloud files ...".format(
len(args.source_points)))
minX = numpy.inf
maxX = - numpy.inf
minY = numpy.inf
maxY = - numpy.inf
pdal_info_template = ["pdal", "info", "--stats", "--dimensions", "X,Y"]
for i, s in enumerate(args.source_points):
pdal_info_args = pdal_info_template + [s]
out = subprocess.check_output(pdal_info_args)
tempMinX, tempMaxX, tempMinY, tempMaxY = getMinMax(out)
if tempMinX < minX:
minX = tempMinX
if tempMaxX > maxX:
maxX = tempMaxX
if tempMinY < minY:
minY = tempMinY
if tempMaxY > maxY:
maxY = tempMaxY
if i % 10 == 0:
print("Iteration {}".format(i))
print("Bounds ({}, {}, {}, {})".format(minX, maxX, minY, maxY))
# compensate for PDAL expanding the extents by 1 pixel
maxX -= float(args.gsd)
maxY -= float(args.gsd)
# read the pdal file and project the points
jsonTemplate = """
{
"pipeline": [
%s,
{
"type": "filters.crop",
"bounds": "([%s, %s], [%s, %s])"
},
{
"resolution": %s,
"data_type": "float",
"filename":"%s",
"output_type": "max",
"window_size": "20",
"bounds": "([%s, %s], [%s, %s])",
"gdalopts": "COMPRESS=DEFLATE"
}
]
}"""
print("Generating DSM ...")
all_sources = ",\n".join("\"" + str(e) + "\"" for e in args.source_points)
pipeline = jsonTemplate % (all_sources,
minX, maxX, minY, maxY,
args.gsd, args.destination_image,
minX, maxX, minY, maxY)
pdal_pipeline_args = ["pdal", "pipeline", "--stream", "--stdin"]
response = subprocess.run(pdal_pipeline_args, input=pipeline.encode(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if response.returncode != 0:
print("STDERR")
print(response.stderr)
print("STDOUT")
print(response.stdout)
        raise RuntimeError("PDAL failed with error code {}".format(response.returncode))
if __name__ == '__main__':
import sys
try:
main(sys.argv[1:])
except Exception as e:
logging.exception(e)
sys.exit(1)
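# Example invocation (hypothetical file names):
#   python generate_dsm.py dsm.tif --gsd 0.5 -s tile_001.las tile_002.las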
|
{"hexsha": "6a55fe366c3680b389bbb0134bda8bd73ec5aeae", "size": 4240, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/generate_dsm.py", "max_stars_repo_name": "willdunklin/Danesfield", "max_stars_repo_head_hexsha": "686cfd331250c00a93b3778c6faaa646fec65de5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 96, "max_stars_repo_stars_event_min_datetime": "2018-11-30T21:35:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T04:08:59.000Z", "max_issues_repo_path": "tools/generate_dsm.py", "max_issues_repo_name": "Pandinosaurus/Danesfield", "max_issues_repo_head_hexsha": "691e48c9491aed9ebd1ca1fb85c4bbf896cad077", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2018-12-01T02:37:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T08:50:58.000Z", "max_forks_repo_path": "tools/generate_dsm.py", "max_forks_repo_name": "Pandinosaurus/Danesfield", "max_forks_repo_head_hexsha": "691e48c9491aed9ebd1ca1fb85c4bbf896cad077", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 43, "max_forks_repo_forks_event_min_datetime": "2018-12-03T18:03:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T00:11:12.000Z", "avg_line_length": 34.4715447154, "max_line_length": 89, "alphanum_fraction": 0.5384433962, "include": true, "reason": "import numpy", "num_tokens": 966}
|
/*! \file
\brief A timetable_value vocabulary.
Copyright (C) 2019-2022 kaoru https://www.tetengo.org/
*/
#include <algorithm>
#include <any>
#include <cassert>
#include <istream>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include <boost/algorithm/string.hpp>
#include <boost/core/noncopyable.hpp>
#include <boost/lexical_cast.hpp>
#include <tetengo/lattice/entry.hpp>
#include <tetengo/lattice/input.hpp>
#include <tetengo/lattice/string_input.hpp>
#include <tetengo/lattice/unordered_map_vocabulary.hpp>
#include <tetengo/lattice/vocabulary.hpp>
#include "timetable.hpp"
namespace
{
struct timetable_value
{
std::vector<station> stations;
std::vector<train> trains;
timetable_value(std::vector<station>&& stations, std::vector<train>&& trains) :
stations{ std::move(stations) },
trains{ std::move(trains) }
{}
};
std::size_t entry_hash(const tetengo::lattice::entry_view& entry)
{
const std::size_t key_hash = entry.p_key() ? entry.p_key()->hash_value() : 0;
std::size_t entry_train_number_hash = std::hash<std::string_view>{}(std::string_view{});
std::size_t entry_train_name_hash = std::hash<std::string_view>{}(std::string_view{});
std::size_t entry_from_hash = std::hash<std::size_t>{}(0);
std::size_t entry_to_hash = std::hash<std::size_t>{}(0);
if (entry.value()->has_value())
{
if (const auto* const p_section = std::any_cast<section>(entry.value()); p_section)
{
entry_train_number_hash = std::hash<std::string>{}(p_section->p_train()->number());
entry_train_name_hash = std::hash<std::string>{}(p_section->p_train()->name());
entry_from_hash = std::hash<std::size_t>{}(p_section->from());
entry_to_hash = std::hash<std::size_t>{}(p_section->to());
}
}
return key_hash ^ entry_train_number_hash ^ entry_train_name_hash ^ entry_from_hash ^ entry_to_hash;
}
bool entry_equal_to(const tetengo::lattice::entry_view& one, const tetengo::lattice::entry_view& another)
{
if (one.value()->has_value() && another.value()->has_value())
{
const auto* const p_one_section = std::any_cast<section>(one.value());
const auto* const p_another_section = std::any_cast<section>(another.value());
if (p_one_section && p_another_section)
{
return ((!one.p_key() && !another.p_key()) ||
(one.p_key() && another.p_key() && *one.p_key() == *another.p_key())) &&
p_one_section->p_train()->number() == p_another_section->p_train()->number() &&
p_one_section->p_train()->name() == p_another_section->p_train()->name() &&
p_one_section->from() == p_another_section->from() &&
p_one_section->to() == p_another_section->to();
}
else
{
assert(false);
throw std::logic_error{ "Unexpected entry value." };
}
}
else if (one.value()->has_value() || another.value()->has_value())
{
return false;
}
else
{
return (!one.p_key() && !another.p_key()) ||
(one.p_key() && another.p_key() && *one.p_key() == *another.p_key());
}
}
}
station::station(std::string name, std::string telegram_code) :
m_name{ std::move(name) },
m_telegram_code{ std::move(telegram_code) }
{}
const std::string& station::name() const
{
return m_name;
}
const std::string& station::telegram_code() const
{
return m_telegram_code;
}
stop::stop(std::optional<std::size_t> arrival_time, std::optional<std::size_t> departure_time) :
m_arrival_time{ std::move(arrival_time) },
m_departure_time{ std::move(departure_time) }
{}
std::optional<std::size_t> stop::arrival_time() const
{
return m_arrival_time;
}
void stop::set_arrival_time(const std::size_t time)
{
m_arrival_time = time;
}
std::optional<std::size_t> stop::departure_time() const
{
return m_departure_time;
}
void stop::set_departure_time(std::size_t time)
{
m_departure_time = time;
}
train::train(std::string number, std::string name, std::vector<stop> stops) :
m_number{ std::move(number) },
m_name{ std::move(name) },
m_stops{ std::move(stops) }
{}
const std::string& train::number() const
{
return m_number;
}
const std::string& train::name() const
{
return m_name;
}
const std::vector<stop>& train::stops() const
{
return m_stops;
}
std::vector<stop>& train::stops()
{
return m_stops;
}
class timetable::impl : private boost::noncopyable
{
public:
// constructors and destructor
explicit impl(std::unique_ptr<std::istream>&& p_input_stream) :
m_timetable{ build_timetable(std::move(p_input_stream)) }
{}
// functions
const std::vector<station>& stations() const
{
return m_timetable.stations;
}
std::size_t station_index(const std::string& name_or_telegram_code) const
{
for (auto i = static_cast<std::size_t>(0); i < std::size(m_timetable.stations); ++i)
{
if (const auto& station = m_timetable.stations[i];
boost::algorithm::to_lower_copy(station.name()) ==
boost::algorithm::to_lower_copy(name_or_telegram_code) ||
boost::algorithm::to_upper_copy(station.telegram_code()) ==
boost::algorithm::to_upper_copy(name_or_telegram_code))
{
return i;
}
}
return std::size(m_timetable.stations);
}
std::unique_ptr<tetengo::lattice::vocabulary> create_vocabulary(const std::size_t departure_time) const
{
auto entries = build_entries(m_timetable);
auto connections = build_connections(entries, departure_time);
return std::make_unique<tetengo::lattice::unordered_map_vocabulary>(
std::move(entries), std::move(connections), entry_hash, entry_equal_to);
}
private:
// static functions
static timetable_value build_timetable(std::unique_ptr<std::istream>&& p_input_stream)
{
assert(p_input_stream);
auto timetable_value = parse_input(*p_input_stream);
guess_arrival_times(timetable_value);
return timetable_value;
}
static timetable_value parse_input(std::istream& input_stream)
{
if (!input_stream)
{
throw std::runtime_error{ "Input file format error: Empty data." };
}
std::vector<station> stations{};
{
auto line1 = read_line(input_stream);
auto line2 = read_line(input_stream);
stations = parse_stations(std::move(line1), std::move(line2));
}
std::vector<train> trains{};
while (input_stream)
{
auto line = read_line(input_stream);
if (std::empty(line) || (std::size(line) == 1 && std::empty(line[0])))
{
continue;
}
trains.push_back(parse_train(std::move(line), std::size(stations)));
}
return timetable_value{ std::move(stations), std::move(trains) };
}
static std::vector<std::string> read_line(std::istream& input_stream)
{
std::string line;
std::getline(input_stream, line);
std::vector<std::string> elements;
boost::algorithm::split(elements, std::move(line), boost::is_any_of(","));
std::for_each(
std::begin(elements), std::end(elements), [](auto& element) { return boost::algorithm::trim(element); });
return elements;
}
static std::vector<station> parse_stations(std::vector<std::string>&& line1, std::vector<std::string>&& line2)
{
line1.erase(std::begin(line1), std::next(std::begin(line1), 2));
line2.erase(std::begin(line2), std::next(std::begin(line2), 2));
if (std::size(line1) != std::size(line2))
{
throw std::runtime_error{ "Input file format error: Station names and telegram codes unmatch." };
}
std::vector<station> stations{};
stations.reserve(std::size(line1));
for (auto i = static_cast<std::size_t>(0); i < std::size(line1); ++i)
{
stations.emplace_back(std::move(line1[i]), std::move(line2[i]));
}
return stations;
}
static train parse_train(std::vector<std::string>&& line, const std::size_t station_count)
{
if (std::size(line) > station_count + 2)
{
throw std::runtime_error{ "Input file format error: Invalid train line found." };
}
line.resize(station_count + 2);
std::vector<stop> stops{};
stops.reserve(station_count);
std::transform(std::next(std::begin(line), 2), std::end(line), std::back_inserter(stops), [](auto&& e) {
return to_stop(std::move(e));
});
return train{ std::move(line[0]), std::move(line[1]), std::move(stops) };
}
static stop to_stop(std::string&& element)
{
std::vector<std::string> string_times{};
boost::algorithm::split(string_times, std::move(element), boost::is_any_of("/"));
std::for_each(std::begin(string_times), std::end(string_times), [](auto&& e) { return boost::trim(e); });
if (std::size(string_times) == 0 || std::size(string_times) > 2)
{
throw std::runtime_error{ "Input file format error: Invalid arrival/depature time found." };
}
else if (std::size(string_times) == 1)
{
return stop{ std::nullopt, to_minutes(std::move(string_times[0])) };
}
else
{
assert(std::size(string_times) == 2);
return stop{ to_minutes(std::move(string_times[0])), to_minutes(std::move(string_times[1])) };
}
}
static std::optional<std::size_t> to_minutes(std::string&& string_time)
{
if (std::empty(string_time) || string_time == "-")
{
return std::nullopt;
}
auto int_time = static_cast<std::size_t>(0);
try
{
int_time = boost::lexical_cast<std::size_t>(string_time);
}
catch (const boost::bad_lexical_cast&)
{
throw std::runtime_error{ "Input file format error: Invalid time found." };
}
const auto hour = int_time / 100;
const auto minute = int_time - hour * 100;
if (hour >= 24 || minute >= 60)
{
throw std::runtime_error{ "Input file format error: Invalid time found." };
}
return hour * 60 + minute;
}
static void guess_arrival_times(timetable_value& timetable_)
{
for (auto from = static_cast<std::size_t>(0); from < std::size(timetable_.stations) - 1; ++from)
{
for (auto to = from + 1; to < std::size(timetable_.stations); ++to)
{
const auto minimum_duration_ = minimum_duration(timetable_.trains, from, to);
for (auto& train: timetable_.trains)
{
if (!all_passing(train.stops(), from, to))
{
continue;
}
if (!train.stops()[to].arrival_time())
{
train.stops()[to].set_arrival_time(
add_time(*train.stops()[from].departure_time(), minimum_duration_));
}
else if (!train.stops()[from].departure_time())
{
train.stops()[from].set_departure_time(
add_time(*train.stops()[to].arrival_time(), -minimum_duration_));
}
}
}
}
}
static std::ptrdiff_t
minimum_duration(const std::vector<train>& trains, const std::size_t from, const std::size_t to)
{
auto minimum = std::numeric_limits<std::ptrdiff_t>::max();
for (const auto& train: trains)
{
if (!all_passing(train.stops(), from, to))
{
continue;
}
const auto from_time = train.stops()[from].departure_time() ? *train.stops()[from].departure_time() :
*train.stops()[from].arrival_time();
const auto to_time = train.stops()[to].arrival_time() ? *train.stops()[to].arrival_time() :
*train.stops()[to].departure_time();
if (diff_time(to_time, from_time) < minimum)
{
minimum = diff_time(to_time, from_time);
}
}
return minimum;
}
static std::vector<std::pair<std::string, std::vector<tetengo::lattice::entry>>>
build_entries(const timetable_value& timetable_)
{
std::unordered_map<std::string, std::vector<tetengo::lattice::entry>> map{};
for (const auto& train_: timetable_.trains)
{
for (auto from = static_cast<std::size_t>(0); from + 1 < std::size(timetable_.stations); ++from)
{
for (auto to = from + 1; to < std::size(timetable_.stations); ++to)
{
if (!all_passing(train_.stops(), from, to))
{
continue;
}
auto section_name = make_section_name(timetable_.stations, from, to);
auto found = map.find(section_name);
if (found == std::end(map))
{
auto inserted =
map.insert(std::make_pair(section_name, std::vector<tetengo::lattice::entry>{}));
found = inserted.first;
}
section section_{ &train_, from, to };
const auto section_duration = make_section_duration(train_.stops(), from, to);
found->second.emplace_back(
std::make_unique<tetengo::lattice::string_input>(std::move(section_name)),
std::move(section_),
static_cast<int>(section_duration));
}
}
}
std::vector<std::pair<std::string, std::vector<tetengo::lattice::entry>>> entries{};
entries.reserve(std::size(map));
for (auto& map_entry: map)
{
entries.emplace_back(map_entry.first, std::move(map_entry.second));
}
return entries;
}
static std::string
make_section_name(const std::vector<station>& stations, const std::size_t from, const std::size_t to)
{
std::string name;
for (auto i = from; i + 1 <= to; ++i)
{
name += stations[i].telegram_code() + "-" + stations[i + 1].telegram_code() + "/";
}
return name;
}
static std::size_t
make_section_duration(const std::vector<stop>& stops, const std::size_t from, const std::size_t to)
{
assert(stops[from].departure_time());
assert(stops[to].arrival_time());
return diff_time(*stops[to].arrival_time(), *stops[from].departure_time());
}
static bool all_passing(const std::vector<stop>& stops, const std::size_t from, const std::size_t to)
{
if (!stops[from].arrival_time() && !stops[from].departure_time())
{
return false;
}
if (!stops[to].arrival_time() && !stops[to].departure_time())
{
return false;
}
for (auto i = from + 1; i + 1 < to + 1; ++i)
{
if (stops[i].arrival_time() || stops[i].departure_time())
{
return false;
}
}
return true;
}
static std::vector<std::pair<std::pair<tetengo::lattice::entry, tetengo::lattice::entry>, int>> build_connections(
const std::vector<std::pair<std::string, std::vector<tetengo::lattice::entry>>>& entries,
const std::size_t departure_time)
{
std::vector<std::pair<std::pair<tetengo::lattice::entry, tetengo::lattice::entry>, int>> connections{};
for (const auto& from: entries)
{
for (const auto& to: entries)
{
for (const auto& from_entry: from.second)
{
for (const auto& to_entry: to.second)
{
const auto* const p_from_value = std::any_cast<section>(&from_entry.value());
const auto* const p_to_value = std::any_cast<section>(&to_entry.value());
if (p_from_value->to() != p_to_value->from())
{
continue;
}
const auto from_arrival_time =
p_from_value->p_train()->stops()[p_from_value->to()].arrival_time();
const auto to_departure_time =
p_to_value->p_train()->stops()[p_to_value->from()].departure_time();
assert(from_arrival_time);
assert(to_departure_time);
auto cost = static_cast<int>(diff_time(*to_departure_time, *from_arrival_time));
if (cost > 60)
{
continue;
}
if (p_from_value->p_train()->number() != p_to_value->p_train()->number())
{
cost += 1;
}
connections.emplace_back(std::make_pair(from_entry, to_entry), cost);
}
}
}
}
for (const auto& key_and_entries: entries)
{
for (const auto& entry: key_and_entries.second)
{
const auto* const p_section = std::any_cast<section>(&entry.value());
const auto section_departure_time =
p_section ? *p_section->p_train()->stops()[p_section->from()].departure_time() : 0;
if (const auto bos_cost = static_cast<int>(diff_time(section_departure_time, departure_time));
bos_cost <= 240)
{
connections.emplace_back(std::make_pair(tetengo::lattice::entry::bos_eos(), entry), bos_cost);
}
connections.emplace_back(std::make_pair(entry, tetengo::lattice::entry::bos_eos()), 0);
}
}
return connections;
}
static std::size_t add_time(const std::size_t time, const std::ptrdiff_t duration)
{
assert(time < 1440);
assert(-1440 < duration && duration < 1440);
return (time + 1440 + duration) % 1440;
}
static std::ptrdiff_t diff_time(const std::size_t time1, const std::size_t time2)
{
assert(time1 < 1440);
assert(time2 < 1440);
return (time1 + 1440 - time2) % 1440;
}
// variables
const timetable_value m_timetable;
};
timetable::timetable(std::unique_ptr<std::istream>&& p_input_stream) :
m_p_impl{ std::make_unique<impl>(std::move(p_input_stream)) }
{}
timetable::~timetable() = default;
const std::vector<station>& timetable::stations() const
{
return m_p_impl->stations();
}
std::size_t timetable::station_index(const std::string& name_or_telegram_code) const
{
return m_p_impl->station_index(name_or_telegram_code);
}
std::unique_ptr<tetengo::lattice::vocabulary> timetable::create_vocabulary(const std::size_t departure_time) const
{
return m_p_impl->create_vocabulary(departure_time);
}
|
{"hexsha": "c4f146906c3a1f45ea0459a7da752a4a3c79f8ba", "size": 20788, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sample/transfer_trains/src/timetable.cpp", "max_stars_repo_name": "tetengo/tetengo", "max_stars_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sample/transfer_trains/src/timetable.cpp", "max_issues_repo_name": "tetengo/tetengo", "max_issues_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41.0, "max_issues_repo_issues_event_min_datetime": "2021-06-25T14:20:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-16T02:50:50.000Z", "max_forks_repo_path": "sample/transfer_trains/src/timetable.cpp", "max_forks_repo_name": "tetengo/tetengo", "max_forks_repo_head_hexsha": "66e0d03635583c25be4320171f3cc1e7f40a56e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4744027304, "max_line_length": 119, "alphanum_fraction": 0.5392534154, "num_tokens": 4616}
|
import os
from os import listdir
from os import makedirs
from os.path import join, isdir
import json
import random
import shutil
import argparse
import numpy as np
import pandas as pd
import librosa
import sox
def filter_single_labeled(ann, inter_nodes):
# get single-labeled filenames and classes
class_to_file = dict()
file_to_class = dict()
for idx in range(len(ann)):
fname = str(ann['fname'][idx])
labels = ann['labels'][idx].split(',')
leaf_labels = list(set(labels) - set(inter_nodes))
if len(leaf_labels) == 1:
leaf_label = leaf_labels[0]
if leaf_label not in class_to_file:
class_to_file[leaf_label] = []
class_to_file[leaf_label].append(fname)
file_to_class[fname] = leaf_label
return class_to_file, file_to_class
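# Hedged, self-contained illustration (not part of the original script) of what
# filter_single_labeled() returns. The file names, labels and intermediate-node list
# below are made up for the example.
def _filter_single_labeled_demo():
    ann = pd.DataFrame({'fname': [12345, 67890],
                        'labels': ['Bark', 'Bark,Domestic_animals_and_pets']})
    inter_nodes = ['Domestic_animals_and_pets']
    class_to_file, file_to_class = filter_single_labeled(ann, inter_nodes)
    # class_to_file == {'Bark': ['12345', '67890']}
    # file_to_class == {'12345': 'Bark', '67890': 'Bark'}
    return class_to_file, file_to_class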
def filter_pp_rating(ratings, vocab, inter_nodes, files):
# get all files that have a single leaf label with pp rating from all annotators
singlePP_files = []
for file in files:
mids = list(ratings[file].keys())
# convert mids to labels
labels = [vocab[1][np.where(vocab[2] == mid)[0][0]] for mid in mids if mid in vocab[2].values]
leaf_labels = list(set(labels) - set(inter_nodes))
        # convert leaf_labels to mids
leaf_mids = [vocab[2][np.where(vocab[1] == label)[0][0]] for label in leaf_labels if label in vocab[1].values]
if len(leaf_labels) == 1 and all(x == 1.0 for x in ratings[file][leaf_mids[0]]):
singlePP_files.append(file)
return singlePP_files
def filter_duration(max_duration, audiopath, files):
# only keep files with duration shorter than max_duration
return [
f for f in files if librosa.get_duration(filename=join(audiopath, f+'.wav')) < max_duration
]
def filter_class_occurrences(min_occur, class_to_file):
return {cl:class_to_file[cl] for cl in class_to_file if len(class_to_file[cl]) >= min_occur}
def trim_edge_silence(audiofile, outfile, silence_threshold, min_silence_duration):
tfm = sox.Transformer()
tfm.silence(location=1, silence_threshold=silence_threshold, min_silence_duration=min_silence_duration)
tfm.silence(location=-1, silence_threshold=silence_threshold, min_silence_duration=min_silence_duration)
tfm.build(audiofile, outfile)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--fsdpath', type=str, required=True, help='path to FSD50K data folder.')
    parser.add_argument('--outpath', type=str, default='./', help='path to save reorganized FSD50K audio files for the '
                                                                   'following Scaper generation.')
    parser.add_argument('--max_clip_duration', type=int, default=4, help='max duration for each clip in sec')
parser.add_argument('--min_class_occurrence', type=int, default=10, help='min number of examples for a class to be included')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
# Load annotations
ann_dev = pd.read_csv(join(args.fsdpath, 'FSD50K.ground_truth', 'dev.csv'))
ann_eval = pd.read_csv(join(args.fsdpath, 'FSD50K.ground_truth', 'eval.csv'))
# Load annotation ratings
with open(join(args.fsdpath, 'FSD50K.metadata/pp_pnp_ratings_FSD50K.json')) as f:
ratings = json.load(f)
# Load vocab
    vocab = pd.read_csv(join(args.fsdpath, 'FSD50K.ground_truth', 'vocabulary.csv'), header=None)
# Load intermediate nodes
with open("inter_nodes.json") as f:
inter_nodes = json.load(f)
# Get dictionaries with single-labeled files
class_to_file_dev, file_to_class_dev = filter_single_labeled(ann_dev, inter_nodes)
class_to_file_eval, file_to_class_eval = filter_single_labeled(ann_eval, inter_nodes)
# For dev set, only keep files with PP rating
# Didn't do this for the eval set since it has been more carefully and thoroughly curated
# according to the original FSD50k paper
PP_files_dev = filter_pp_rating(ratings, vocab, inter_nodes, files=list(file_to_class_dev.keys()))
# Get dev and eval files with duration shorter than max_duration
audiopath_dev = join(args.fsdpath, 'FSD50K.dev_audio')
audiopath_eval = join(args.fsdpath, 'FSD50K.eval_audio')
short_files_dev = filter_duration(max_duration=args.max_clip_duration, audiopath=audiopath_dev, files=PP_files_dev)
short_files_eval = filter_duration(max_duration=args.max_clip_duration, audiopath=audiopath_eval, files=list(file_to_class_eval.keys()))
# Get dictionaries with filtered files: single-labeled, PP-rating, shorter than max duration
class_to_shortPP_file_dev = {cl: list(set(class_to_file_dev[cl]) & set(short_files_dev)) for cl in class_to_file_dev}
class_to_short_file_eval = {cl: list(set(class_to_file_eval[cl]) & set(short_files_eval)) for cl in class_to_file_eval}
# Filter out rare classes
    common_class_to_shortPP_file_dev = filter_class_occurrences(min_occur=args.min_class_occurrence,
class_to_file=class_to_shortPP_file_dev)
    common_class_to_short_file_eval = filter_class_occurrences(min_occur=args.min_class_occurrence,
class_to_file=class_to_short_file_eval)
# Load class lists for each split
with open("all_tag.json") as f:
all_tag = json.load(f)
with open("train_tag.json") as f:
train_tag = json.load(f)
with open("val_tag.json") as f:
val_tag = json.load(f)
with open("test_tag.json") as f:
test_tag = json.load(f)
# Trim files and save to new folders where each folder is named by a class label
for cl in common_class_to_shortPP_file_dev:
if cl in train_tag:
outpath = join(args.outpath, 'foreground', 'base', 'train', str(all_tag.index(cl)))
elif cl in val_tag:
outpath = join(args.outpath, 'foreground', 'val', str(all_tag.index(cl)))
else:
outpath = join(args.outpath, 'foreground', 'test', str(all_tag.index(cl)))
if not isdir(outpath):
makedirs(outpath)
for file in common_class_to_shortPP_file_dev[cl]:
trim_edge_silence(
audiofile=join(audiopath_dev, file+ '.wav'), outfile=join(outpath, file+ '.wav'), silence_threshold=0.1, min_silence_duration=0.05
)
for cl in common_class_to_short_file_eval:
if cl in train_tag:
outpath = join(args.outpath, 'foreground', 'base', 'test', str(all_tag.index(cl)))
elif cl in val_tag:
outpath = join(args.outpath, 'foreground', 'val', str(all_tag.index(cl)))
else:
outpath = join(args.outpath, 'foreground', 'test', str(all_tag.index(cl)))
if not isdir(outpath):
makedirs(outpath)
        for file in common_class_to_short_file_eval[cl]:
trim_edge_silence(
audiofile=join(audiopath_eval, file+'.wav'), outfile=join(outpath, file+'.wav'), silence_threshold=0.1, min_silence_duration=0.05
)
# Split train/val examples in base classes
for cl in train_tag:
train_path = join(args.outpath, 'foreground', 'base', 'train', str(cl))
val_path = join(args.outpath, 'foreground', 'base', 'val', str(cl))
if not isdir(val_path):
makedirs(val_path)
# shuffle all files in the folder
fnames = [f for f in listdir(train_path) if '.wav' in f]
n_val = int(np.ceil(len(fnames) * 0.15))
random.shuffle(fnames)
# move a portion of files to the val folder
f_val = fnames[:n_val]
for f in f_val:
shutil.move(join(train_path, f), join(val_path, f))
|
{"hexsha": "c938cf9cdfde788518d07e07c92fd11ed679f563", "size": 7820, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/preprocess_foreground_sounds.py", "max_stars_repo_name": "wangyu/rethink-audio-fsl", "max_stars_repo_head_hexsha": "6e9626efc0fddadfe2f032e18d1794066a08c8b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-10-20T21:30:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T11:45:13.000Z", "max_issues_repo_path": "data/preprocess_foreground_sounds.py", "max_issues_repo_name": "wangyu/rethink-audio-fsl", "max_issues_repo_head_hexsha": "6e9626efc0fddadfe2f032e18d1794066a08c8b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/preprocess_foreground_sounds.py", "max_forks_repo_name": "wangyu/rethink-audio-fsl", "max_forks_repo_head_hexsha": "6e9626efc0fddadfe2f032e18d1794066a08c8b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-10-19T04:44:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:39:42.000Z", "avg_line_length": 42.5, "max_line_length": 146, "alphanum_fraction": 0.6717391304, "include": true, "reason": "import numpy", "num_tokens": 1925}
|
import torch
import numpy as np
from torch.nn import functional as F
import matplotlib.pylab as plt
def logsumexp(inputs, dim=None, keepdim=True):
    # From: https://github.com/YosefLab/scVI/issues/13
    # For every index i, inputs_i - log_softmax(inputs)_i == logsumexp(inputs), so
    # averaging over `dim` (rather than summing) recovers a numerically stable logsumexp.
    return (inputs - F.log_softmax(inputs, dim=dim)).mean(dim, keepdim=keepdim)
class VAE(torch.nn.Module):
def __init__(self, n_input=40, n_hidden=64, n_latent=2, importance_sampling=False):
super(VAE, self).__init__()
self.importance = importance_sampling
# Encoder layers
self.enc_hidden = torch.nn.Linear(n_input, n_hidden)
self.enc_mu = torch.nn.Linear(n_hidden, n_latent)
self.enc_logvar = torch.nn.Linear(n_hidden, n_latent)
# decoder layers
self.dec_hidden = torch.nn.Linear(n_latent, n_hidden)
# Experiments with convolutional decoder
self.dec_mu = torch.nn.Linear(n_hidden, n_input)
self.dec_logvar = torch.nn.Linear(n_hidden, 1)
def encode(self, x):
h = F.relu(self.enc_hidden(x))
return self.enc_mu(h), self.enc_logvar(h)
def sample(self, mu, logvar, k=1):
batch_size, n_latent = logvar.shape
std = (0.5*logvar).exp()
eps = torch.randn(batch_size, k, n_latent, device=std.device, requires_grad=False)
return eps.mul(std.unsqueeze(1)).add(mu.unsqueeze(1))
def decode(self, z):
h = F.relu(self.dec_hidden(z))
hatx = self.dec_mu(h)
return hatx, (self.dec_logvar(h))
def forward(self, x, k=1):
enc_mu, enc_logvar = self.encode(x)
z = self.sample(enc_mu, enc_logvar, k)
dec_mu, dec_logvar = self.decode(z)
return dec_mu, dec_logvar, enc_mu, enc_logvar, z
def ELBO(self, x, dec_mu, dec_logvar, enc_mu, enc_logvar, z):
logpxz = -0.5*(dec_logvar + (x - dec_mu).pow(2)/dec_logvar.exp()).sum(dim=-1)
mc_samples = z.shape[1]
if self.importance: # Importance-Weighted autoencoder (IWAE)
logqzxpz = 0.5 * (z.pow(2) - z.sub(enc_mu.unsqueeze(1)).pow(2)/enc_logvar.unsqueeze(1).exp() - enc_logvar.unsqueeze(1)).sum(dim=-1)
else: # Variational autoencoder
logqzxpz = -0.5 * (1.0 + enc_logvar - enc_mu.pow(2) - enc_logvar.exp()).sum(dim=-1).unsqueeze_(1)
ELBO = torch.sum(logsumexp(logqzxpz - logpxz, dim=1) + np.log(mc_samples))
return ELBO, logpxz.sum()/mc_samples, logqzxpz.sum()/logqzxpz.shape[1]
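# Hedged usage sketch (not part of the original module). The input size, batch size
# and number of importance samples are arbitrary, and x is unsqueezed so it broadcasts
# against the k samples in the decoder outputs.
def _vae_demo():
    x = torch.randn(16, 40)                       # fake mini-batch of 40-dim inputs
    model = VAE(n_input=40, n_latent=2, importance_sampling=True)
    dec_mu, dec_logvar, enc_mu, enc_logvar, z = model(x, k=5)
    elbo, logpxz, logqzxpz = model.ELBO(x.unsqueeze(1), dec_mu, dec_logvar,
                                        enc_mu, enc_logvar, z)
    elbo.backward()                               # the bound is differentiable
    return elbo.item()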
|
{"hexsha": "b1c275c5ee5b3f6e8164826b38d4e3dd98bdbed7", "size": 2441, "ext": "py", "lang": "Python", "max_stars_repo_path": "vae.py", "max_stars_repo_name": "phuijse/tutorial", "max_stars_repo_head_hexsha": "0b6c6ada8509e4ceff52a2c05d962b6a82f461d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-12-13T20:32:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T21:16:58.000Z", "max_issues_repo_path": "vae.py", "max_issues_repo_name": "phuijse/tutorial_periodic_stars", "max_issues_repo_head_hexsha": "0b6c6ada8509e4ceff52a2c05d962b6a82f461d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vae.py", "max_forks_repo_name": "phuijse/tutorial_periodic_stars", "max_forks_repo_head_hexsha": "0b6c6ada8509e4ceff52a2c05d962b6a82f461d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-04-03T03:35:53.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-03T03:35:53.000Z", "avg_line_length": 42.8245614035, "max_line_length": 143, "alphanum_fraction": 0.635804998, "include": true, "reason": "import numpy", "num_tokens": 704}
|
import numpy as np
import torch
from model import Actor, Critic
class Memory():
def __init__(self,batch_size):
# init state, action, reward, state_, done
self.state = []
self.action = []
self.reward = []
self.val = []
self.prob = []
self.done = []
self.batch_size = batch_size
def get_memory(self):
self.n_states = len(self.state)
batch_st = np.arange(0, self.n_states, self.batch_size)
idx = np.arange(self.n_states, dtype=np.int16)
np.random.shuffle(idx)
batches = [idx[i:i+self.batch_size] for i in batch_st]
return np.array(self.state),\
np.array(self.action),\
np.array(self.reward),\
np.array(self.val),\
np.array(self.prob),\
np.array(self.done),\
batches
def store_memory(self, state, action, reward, val, prob, done):
self.state.append(state)
self.action.append(action)
self.reward.append(reward)
self.val.append(val)
self.prob.append(prob)
self.done.append(done)
def clear_memory(self):
self.state.clear()
self.action.clear()
self.reward.clear()
self.val.clear()
self.prob.clear()
self.done.clear()
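# Hedged illustration (not part of the original file): how Memory batches a rollout.
# The transition values below are placeholders.
def _memory_demo():
    mem = Memory(batch_size=4)
    for t in range(10):                                   # a fake 10-step rollout
        mem.store_memory(state=np.zeros(3), action=0, reward=1.0,
                         val=0.5, prob=-0.7, done=False)
    states, actions, rewards, vals, probs, dones, batches = mem.get_memory()
    # `batches` is a list of index arrays of (up to) batch_size shuffled transitions,
    # e.g. [array([7, 2, 9, 1]), array([5, 0, 3, 8]), array([6, 4])]
    mem.clear_memory()
    return [len(b) for b in batches]                      # -> [4, 4, 2]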
class Agent():
def __init__(self, num_state, num_action, ep=0.2, beta=3, c1=0.1, layer_1_nodes=512, layer_2_nodes=256, batch_size=64):
self.ep = ep
self.beta = beta
self.c1 = c1
self.gamma = .99
self.g_lambda = 0.95
self.actor = Actor(num_state, num_action, layer_1_nodes, layer_2_nodes)
self.critic = Critic(num_state, layer_1_nodes, layer_2_nodes)
self.memory = Memory(batch_size)
def take_action(self,state):
state = torch.tensor([state], dtype=torch.float).to(self.actor.device)
prob_dist = self.actor(state)
value = self.critic(state)
action = prob_dist.sample()
prob = torch.squeeze(prob_dist.log_prob(action)).item()
action = torch.squeeze(action).item()
value = torch.squeeze(value).item()
return prob, action, value
def store_memory(self, state,action, prob, val, reward, done):
self.memory.store_memory(state, action, reward, val, prob, done)
def train(self):
epochs = 5
for epoch in range(epochs):
state_mem, action_mem, reward_mem, val_mem, prob_mem, done_mem, batches = self.memory.get_memory()
            # Calculate the advantage
advan = np.zeros(len(reward_mem))
for T in range(len(reward_mem)-1):
a_t = 0
discount = 1
for k in range(T, len(reward_mem)-1):
a_t += discount * (reward_mem[k] + self.gamma * val_mem[k+1]*(1-done_mem[k]) \
- val_mem[k])
discount *= self.gamma * self.g_lambda
advan[T] = a_t
advan = torch.tensor(advan).to(self.actor.device)
values = torch.tensor(val_mem).to(self.actor.device)
for batch in batches:
states = torch.tensor(state_mem[batch], dtype=torch.float).to(self.actor.device)
old_prob = torch.tensor(prob_mem[batch], dtype=torch.float).to(self.actor.device)
actions = torch.tensor(action_mem[batch], dtype=torch.float).to(self.actor.device)
# calculate r_t(theta)
dist_new = self.actor(states)
prob_new = dist_new.log_prob(actions)
r_t = prob_new.exp() / old_prob.exp()
# L_clip
prob_clip = torch.clamp(r_t, 1-self.ep, 1+self.ep) * advan[batch]
weight_prob = advan[batch] * r_t
actor_loss = -torch.min(weight_prob, prob_clip).mean()
# critic loss
V_t = torch.squeeze(self.critic(states))
V_t1 = advan[batch] + values[batch]
critic_loss = (V_t1 - V_t)**2
critic_loss = critic_loss.mean()
tot_loss = actor_loss + self.c1*critic_loss
self.actor.optim.zero_grad()
self.critic.optim.zero_grad()
tot_loss.backward()
self.actor.optim.step()
self.critic.optim.step()
self.memory.clear_memory()
|
{"hexsha": "0fb1a241e57cfb4b0dc6ecf6badc0d7aa3b82676", "size": 4476, "ext": "py", "lang": "Python", "max_stars_repo_path": "ppo.py", "max_stars_repo_name": "dkolosa/carla_pilot", "max_stars_repo_head_hexsha": "05ab3cb50788ad9a9af7c0db06d99307c7454aa6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-25T22:09:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T19:23:11.000Z", "max_issues_repo_path": "ppo.py", "max_issues_repo_name": "dkolosa/carla_pilot", "max_issues_repo_head_hexsha": "05ab3cb50788ad9a9af7c0db06d99307c7454aa6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-28T21:09:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T19:34:13.000Z", "max_forks_repo_path": "ppo.py", "max_forks_repo_name": "dkolosa/carla_pilot", "max_forks_repo_head_hexsha": "05ab3cb50788ad9a9af7c0db06d99307c7454aa6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1679389313, "max_line_length": 123, "alphanum_fraction": 0.5549597855, "include": true, "reason": "import numpy", "num_tokens": 1004}
|
from qtpy.QtGui import QGuiApplication
from qtpy.QtWidgets import QMenu
from qtpy import QtGui
import numpy as np
import logging
from __code._utilities.list_widget import ListWidget
from __code._utilities.status_message import StatusMessageStatus, show_status_message
from __code.extract_evenly_spaced_files.manual_mode_interface_handler import Interface as ManualModeInterface
from __code.extract_evenly_spaced_files.load import load_file
from __code.extract_evenly_spaced_files.statistics import Statistics
class EventHandler:
def __init__(self, parent=None):
self.parent = parent
def load_files(self):
logging.info("loading files ...")
show_status_message(parent=self.parent,
message="Loading ...",
status=StatusMessageStatus.working)
list_files = self.parent.list_of_files_that_will_be_extracted
nbr_files = len(list_files)
self.parent.eventProgress.setMaximum(nbr_files-1)
self.parent.eventProgress.setValue(0)
self.parent.eventProgress.setVisible(True)
list_data = list()
for _index, _file in enumerate(list_files):
# logging.info(f"-> loading file: {_file}")
_data = load_file(file=_file)
list_data.append(_data)
self.parent.eventProgress.setValue(_index+1)
QGuiApplication.processEvents()
self.parent.list_data = list_data
self.parent.eventProgress.setVisible(False)
logging.info(f"file loaded! np.shape(list_data): {np.shape(self.parent.list_data)}")
show_status_message(parent=self.parent,
message="Done loading!",
status=StatusMessageStatus.ready,
duration_s=5)
QGuiApplication.processEvents()
def select_first_file(self):
o_list = ListWidget(ui=self.parent.ui.list_of_files_listWidget)
o_list.select_element(row=0)
def image_selected_changed(self):
o_list = ListWidget(ui=self.parent.ui.list_of_files_listWidget)
index_file_selected = o_list.get_current_row()
data = self.parent.list_data[index_file_selected]
_view = self.parent.ui.image_view.getView()
_view_box = _view.getViewBox()
_state = _view_box.getState()
first_update = False
if self.parent.histogram_level is None:
first_update = True
_histo_widget = self.parent.ui.image_view.getHistogramWidget()
self.parent.histogram_level = _histo_widget.getLevels()
self.parent.ui.image_view.setImage(np.transpose(data))
_view_box.setState(_state)
if not first_update:
_histo_widget.setLevels(self.parent.histogram_level[0],
self.parent.histogram_level[1])
def list_files_right_click(self):
menu = QMenu(self.parent)
        remove_action = menu.addAction("Remove")
        # Initialized here so the `elif` below never compares against an undefined name
        # when the menu only offers "Remove".
        replace_with_action = None
        if len(self.parent.basename_list_of_files_that_will_be_extracted) != len(self.parent.full_raw_list_of_files):
            replace_with_action = menu.addAction("Replace with ...")
action = menu.exec_(QtGui.QCursor.pos())
if action == remove_action:
self.remove_this_file_clicked()
elif action == replace_with_action:
o_interface = ManualModeInterface(parent=self.parent)
QtGui.QGuiApplication.processEvents()
def remove_this_file_clicked(self):
o_list = ListWidget(ui=self.parent.ui.list_of_files_listWidget)
index_file_selected = o_list.get_current_row()
del self.parent.basename_list_of_files_that_will_be_extracted[index_file_selected]
del self.parent.list_data[index_file_selected]
self.parent.ui.list_of_files_listWidget.clear()
self.parent.ui.list_of_files_listWidget.addItems(self.parent.basename_list_of_files_that_will_be_extracted)
o_list.select_element(row=index_file_selected-1)
self.update_manual_interface()
o_statistics = Statistics(parent=self.parent)
o_statistics.full_update()
def update_manual_ui(self):
self.update_manual_interface(update_replace_by_list=False)
def update_manual_interface(self, update_replace_by_list=True):
if self.parent.manual_interface_id is None:
return
self.parent.manual_interface_id.update_current_image_name()
if update_replace_by_list:
self.parent.manual_interface_id.update_replace_by_list()
self.parent.manual_interface_id.display_before_image()
self.parent.manual_interface_id.display_after_image()
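# Hedged illustration (not from the original module): the view- and histogram-preserving
# pattern used in image_selected_changed() above, written against a bare pyqtgraph
# ImageView. The widget wiring and argument names are assumptions for the example.
def _set_image_keeping_state(image_view, data, histogram_level=None):
    view_box = image_view.getView().getViewBox()
    state = view_box.getState()                    # remember current zoom/pan
    first_update = histogram_level is None         # is this the very first image?
    histo_widget = image_view.getHistogramWidget()
    histogram_level = histo_widget.getLevels()     # remember current grey levels
    image_view.setImage(np.transpose(data))        # this resets both view and levels
    view_box.setState(state)                       # restore zoom/pan
    if not first_update:
        histo_widget.setLevels(histogram_level[0], histogram_level[1])
    return histogram_level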
|
{"hexsha": "a8786f2aa206d399bb3b19cebd2d90c99fc438c0", "size": 4660, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/__code/extract_evenly_spaced_files/event_handler.py", "max_stars_repo_name": "mabrahamdevops/python_notebooks", "max_stars_repo_head_hexsha": "6d5e7383b60cc7fd476f6e85ab93e239c9c32330", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/__code/extract_evenly_spaced_files/event_handler.py", "max_issues_repo_name": "mabrahamdevops/python_notebooks", "max_issues_repo_head_hexsha": "6d5e7383b60cc7fd476f6e85ab93e239c9c32330", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/__code/extract_evenly_spaced_files/event_handler.py", "max_forks_repo_name": "mabrahamdevops/python_notebooks", "max_forks_repo_head_hexsha": "6d5e7383b60cc7fd476f6e85ab93e239c9c32330", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1596638655, "max_line_length": 117, "alphanum_fraction": 0.6984978541, "include": true, "reason": "import numpy", "num_tokens": 902}
|
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Optional
import numpy as np
import torch
import texar.torch as tx
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.batchers import (
ProcessingBatcher, FixedSizeMultiPackProcessingBatcher)
from forte.data.multi_pack import MultiPack, MultiPackLink
from forte.data.types import DataRequest
from forte.processors.base.batch_processor import MultiPackBatchProcessor
from ft.onto.base_ontology import Sentence
logger = logging.getLogger(__name__)
__all__ = [
"TextGenerationProcessor"
]
class TextGenerationProcessor(MultiPackBatchProcessor):
def __init__(self):
super().__init__()
self.input_pack_name = None
self.output_pack_name = None
self.word_processor = None
self.model = None
self._get_helper = None
self.max_decoding_length = None
self.temperature = None
self.top_k = None
self.top_p = None
self.device = None
@staticmethod
def _define_input_info() -> DataRequest:
return {}
@staticmethod
def _define_context():
return Sentence
@staticmethod
def define_batcher() -> ProcessingBatcher:
return FixedSizeMultiPackProcessingBatcher()
def initialize(self, resources: Resources, configs: Optional[Config]):
"""
Args:
resources:
configs: A config with the following keys:
* input_pack_name: specify the input pack name of the MultiPack
to be processed
* output_pack_name: specify the output pack name of the
MultiPack to be processed
* max_decoding_length: the maximum decoding length.
* top_k
* top_p
* temperature
Returns:
"""
super().initialize(resources, configs)
if configs is not None:
self.input_pack_name = configs.input_pack_name
self.output_pack_name = configs.output_pack_name
self.max_decoding_length = configs.max_decoding_length
self.temperature = configs.temperature
self.top_k = configs.top_k
self.top_p = configs.top_p
self.model = tx.modules.GPT2Decoder(configs.pretrained_model_name)
self.device = torch.device("cuda" if torch.cuda.is_available()
else "cpu")
self.model.to(device=self.device)
resources.update(model=self.model)
self.word_processor = tx.data.GPT2Tokenizer(
pretrained_model_name=configs.pretrained_model_name)
end_token = self.word_processor.map_token_to_id("<|endoftext|>")
def _get_helper(start_tokens):
if self.top_p:
helper = tx.modules.TopPSampleEmbeddingHelper(
start_tokens=start_tokens,
end_token=end_token,
p=self.top_p,
softmax_temperature=self.temperature,
)
else:
helper = tx.modules.TopKSampleEmbeddingHelper(
start_tokens=start_tokens,
end_token=end_token,
top_k=self.top_k,
softmax_temperature=self.temperature,
)
return helper
self._get_helper = _get_helper
@torch.no_grad()
def predict(self, data_batch: Dict):
preds: Dict = {
# "srcSentId": data_batch["tid"],
# We may use this field if we want to add links on the fly
"input_sents_tids": [],
"output_sents": [],
}
preds['input_sents_tids'] += data_batch['tid']
context, context_length = self.get_batch_tensor(
data_batch["context"], device=self.device)
start_tokens = context[:, 0]
max_decoding_length = self.max_decoding_length
helper = self._get_helper(start_tokens)
output, _ = self.model(
context=context, context_sequence_length=context_length,
max_decoding_length=max_decoding_length, helper=helper)
sample_id = output.sample_id
instance_num = len(sample_id)
sentences = []
complete_sentences = []
for i in range(instance_num):
si = sample_id[i][context_length[i]:]
sentences.append(self.word_processor.map_id_to_text(si.tolist()))
si = sample_id[i]
complete_sentences.append(
self.word_processor.map_id_to_text(si.tolist()))
preds["output_sents"] += complete_sentences
return preds
def pack(self, data_pack: MultiPack, output_dict):
"""
        Write the generated sentences back to the output pack of the MultiPack,
        and add a MultiPackLink from each input sentence to the sentence that was
        generated from it.
"""
assert output_dict is not None
output_pack = data_pack.get_pack(self.output_pack_name)
input_sent_tids = output_dict["input_sents_tids"]
output_sentences = output_dict["output_sents"]
text = output_pack.text
input_pack = data_pack.get_pack(self.input_pack_name)
for input_id, output_sentence in zip(input_sent_tids, output_sentences):
offset = len(output_pack.text)
sent = Sentence(output_pack, offset, offset + len(output_sentence))
text += output_sentence + "\n"
input_sent = input_pack.get_entry(input_id)
cross_link = MultiPackLink(data_pack, input_sent, sent)
data_pack.add_entry(cross_link)
# We may also consider adding two link with opposite directions
# Here the unidirectional link indicates the generation dependency
output_pack.set_text(text)

    def get_batch_tensor(self, data: List, device):
        """
        Convert a batch of sentences into padded id tensors.

        Args:
            data: A list of strings (sentences).
            device: The torch device the resulting tensors are placed on.

        Returns:
            A tuple ``(words, lengths)``: ``words`` holds the padded token ids
            and ``lengths`` the true token count of each sentence.
        """
batch_size = len(data)
batch_tokens = [self.word_processor.map_text_to_token(sent)
for sent in data]
batch_length = max([len(d) for d in batch_tokens])
wid_inputs = np.empty([batch_size, batch_length], dtype=np.int64)
lengths = np.empty(batch_size, dtype=np.int64)
for i, inst in enumerate(batch_tokens):
wids = inst
inst_size = len(wids)
lengths[i] = inst_size
# word ids
wid_inputs[i, :inst_size] = \
self.word_processor.map_token_to_id(wids)
wid_inputs[i, inst_size:] = 0
# The existence of length will mask these padding positions out
# So we just set the padding value as 0,
# which could be any in-range integers
words = torch.from_numpy(wid_inputs).to(device)
lengths = torch.from_numpy(lengths).to(device)
return words, lengths
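
    # Illustrative example for get_batch_tensor above (not in the original
    # file): for data = ["Hello world", "Hi"], both sentences are tokenized
    # with the GPT-2 tokenizer, the shorter one is right-padded with zeros up
    # to the length of the longer one, and `lengths` records the true token
    # counts so downstream code can mask the padding out.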

    @classmethod
def default_configs(cls):
config = super().default_configs()
config.update(
{
'max_decoding_length': 128,
'temperature': 0.7,
'top_p': None,
'top_k': 40,
'pretrained_model_name': "117M",
'checkpoint': None,
'input_pack_name': None,
'output_pack_name': None,
'selector': {
'type': 'forte.data.selector.DummySelector',
'args': None,
'kwargs': {}
},
}
)
return config
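
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, illustrative way to
# plug this processor into a Forte MultiPack pipeline. The reader class and the
# processor's class name are assumptions here, so the snippet is kept as a
# commented-out sketch rather than executable code.
#
# from forte.pipeline import Pipeline
#
# pipeline = Pipeline()
# pipeline.set_reader(SomeMultiPackReader())           # hypothetical reader
# pipeline.add(TextGenerationProcessor(), config={     # assumed class name
#     "input_pack_name": "input_src",
#     "output_pack_name": "generated",
#     "max_decoding_length": 128,
#     "temperature": 0.7,
#     "top_k": 40,
#     "pretrained_model_name": "117M",
# })
# pipeline.initialize()
# for multi_pack in pipeline.process_dataset("path/to/data"):
#     print(multi_pack.get_pack("generated").text)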
|
{"hexsha": "d1506fa0db5e872b7dc14e6af47dc1ca5e844a86", "size": 8401, "ext": "py", "lang": "Python", "max_stars_repo_path": "forte/processors/text_generation_processor.py", "max_stars_repo_name": "swapnull7/forte", "max_stars_repo_head_hexsha": "737a72afd440d40c3826c3a7c5e4e44235c0f701", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-01T12:07:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T03:57:18.000Z", "max_issues_repo_path": "forte/processors/text_generation_processor.py", "max_issues_repo_name": "swapnull7/forte", "max_issues_repo_head_hexsha": "737a72afd440d40c3826c3a7c5e4e44235c0f701", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "forte/processors/text_generation_processor.py", "max_forks_repo_name": "swapnull7/forte", "max_forks_repo_head_hexsha": "737a72afd440d40c3826c3a7c5e4e44235c0f701", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2897959184, "max_line_length": 80, "alphanum_fraction": 0.6138554934, "include": true, "reason": "import numpy", "num_tokens": 1695}
|
// Copyright (C) 2016-2018 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//[ lazy_vector
// Defining this allows the assignment below of an expression to a double
// without writing any specific code to do so.
#include <boost/yap/expression.hpp>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
template <boost::yap::expr_kind Kind, typename Tuple>
struct lazy_vector_expr;
// This transform turns a terminal of std::vector<double> into a terminal
// containing the nth double in that vector. Think of it as turning our
// expression of vectors into an expression of scalars.
struct take_nth
{
boost::yap::terminal<lazy_vector_expr, double>
operator() (boost::yap::terminal<lazy_vector_expr, std::vector<double>> const & expr);
std::size_t n;
};
// A custom expression template that defines lazy + and - operators that
// produce expressions, and an eager [] operator that returns the nth element
// of the expression.
//[ lazy_vector_decl
template <boost::yap::expr_kind Kind, typename Tuple>
struct lazy_vector_expr
{
static const boost::yap::expr_kind kind = Kind;
Tuple elements;
// Note that this does not return an expression; it is greedily evaluated.
auto operator[] (std::size_t n) const;
};
BOOST_YAP_USER_BINARY_OPERATOR(plus, lazy_vector_expr, lazy_vector_expr)
BOOST_YAP_USER_BINARY_OPERATOR(minus, lazy_vector_expr, lazy_vector_expr)
//]
template <boost::yap::expr_kind Kind, typename Tuple>
auto lazy_vector_expr<Kind, Tuple>::operator[] (std::size_t n) const
{ return boost::yap::evaluate(boost::yap::transform(*this, take_nth{n})); }
boost::yap::terminal<lazy_vector_expr, double>
take_nth::operator() (boost::yap::terminal<lazy_vector_expr, std::vector<double>> const & expr)
{
double x = boost::yap::value(expr)[n];
// This move is something of a hack; we're forcing Yap to take a copy of x
// by using std::move(). The move indicates that the terminal should keep
// the value of x (since, being an rvalue, it may be a temporary), rather
// than a reference to x. See the "How Expression Operands Are Treated"
// section of the tutorial for details.
return boost::yap::make_terminal<lazy_vector_expr, double>(std::move(x));
}
// In order to define the += operator with the semantics we want, it's
// convenient to derive a terminal type from a terminal instantiation of
// lazy_vector_expr. Note that we could have written a template
// specialization here instead -- either one would work. That would of course
// have required more typing.
struct lazy_vector :
lazy_vector_expr<
boost::yap::expr_kind::terminal,
boost::hana::tuple<std::vector<double>>
>
{
lazy_vector () {}
explicit lazy_vector (std::vector<double> && vec)
{ elements = boost::hana::tuple<std::vector<double>>(std::move(vec)); }
template <boost::yap::expr_kind Kind, typename Tuple>
lazy_vector & operator+= (lazy_vector_expr<Kind, Tuple> const & rhs)
{
std::vector<double> & this_vec = boost::yap::value(*this);
for (int i = 0, size = (int)this_vec.size(); i < size; ++i) {
this_vec[i] += rhs[i];
}
return *this;
}
};
int main ()
{
lazy_vector v1{std::vector<double>(4, 1.0)};
lazy_vector v2{std::vector<double>(4, 2.0)};
lazy_vector v3{std::vector<double>(4, 3.0)};
double d1 = (v2 + v3)[2];
std::cout << d1 << "\n";
v1 += v2 - v3;
std::cout << '{' << v1[0] << ',' << v1[1]
<< ',' << v1[2] << ',' << v1[3] << '}' << "\n";
// This expression is disallowed because it does not conform to the
// implicit grammar. operator+= is only defined on terminals, not
// arbitrary expressions.
// (v2 + v3) += v1;
return 0;
}
//]
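// Expected output of the example above (the values are deterministic, so a
// build against Boost.YAP as included should print exactly this):
//
//   5
//   {0,0,0,0}
//
// (v2 + v3)[2] lazily evaluates 2.0 + 3.0 at index 2, and v1 += v2 - v3
// updates each element of v1 in place: 1.0 + (2.0 - 3.0) == 0.0.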
|
{"hexsha": "70c89d440de7bec636b50da60ed0012a82312efd", "size": 4022, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/yap/example/lazy_vector.cpp", "max_stars_repo_name": "Talustus/boost_src", "max_stars_repo_head_hexsha": "ffe074de008f6e8c46ae1f431399cf932164287f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "third_party/boost/libs/yap/example/lazy_vector.cpp", "max_issues_repo_name": "avplayer/cxxrpc", "max_issues_repo_head_hexsha": "7049b4079fac78b3828e68f787d04d699ce52f6d", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-04-04T18:00:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-04T18:00:00.000Z", "max_forks_repo_path": "third_party/boost/libs/yap/example/lazy_vector.cpp", "max_forks_repo_name": "avplayer/cxxrpc", "max_forks_repo_head_hexsha": "7049b4079fac78b3828e68f787d04d699ce52f6d", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 34.9739130435, "max_line_length": 96, "alphanum_fraction": 0.658378916, "num_tokens": 1036}
|
[STATEMENT]
lemma WS_silent_move:
assumes "S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')" and "((ms\<^sub>1,s\<^sub>1),(ms\<^sub>2,s\<^sub>2)) \<in> WS S"
shows "((ms\<^sub>1',s\<^sub>1'),(ms\<^sub>2,s\<^sub>2)) \<in> WS S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>((ms\<^sub>1,s\<^sub>1),(ms\<^sub>2,s\<^sub>2)) \<in> WS S\<close>
[PROOF STATE]
proof (chain)
picking this:
((ms\<^sub>1, s\<^sub>1), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
obtain msx mx
where WSE:"\<forall>m \<in> set ms\<^sub>1. valid_node m" "\<forall>m \<in> set ms\<^sub>2. valid_node m"
"length ms\<^sub>1 = length s\<^sub>1" "length ms\<^sub>2 = length s\<^sub>2" "s\<^sub>1 \<noteq> []" "s\<^sub>2 \<noteq> []"
"ms\<^sub>1 = msx@mx#tl ms\<^sub>2" "get_proc mx = get_proc (hd ms\<^sub>2)"
"\<forall>m \<in> set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
"msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)"
"\<forall>m \<in> set (tl ms\<^sub>1). return_node m"
"\<forall>i < length ms\<^sub>2. snd (s\<^sub>1!(length msx + i)) = snd (s\<^sub>2!i)"
"\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V"
"obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
((ms\<^sub>1, s\<^sub>1), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (1 subgoal):
1. (\<And>msx mx. \<lbrakk>Ball (set ms\<^sub>1) valid_node; Ball (set ms\<^sub>2) valid_node; length ms\<^sub>1 = length s\<^sub>1; length ms\<^sub>2 = length s\<^sub>2; s\<^sub>1 \<noteq> []; s\<^sub>2 \<noteq> []; ms\<^sub>1 = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms\<^sub>1)) return_node; \<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce elim:WS.cases)
[PROOF STATE]
proof (state)
this:
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
assume "\<forall>m \<in> set (tl ms\<^sub>1'). return_node m"
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
have "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
proof(cases "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
2. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
goal (2 subgoals):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
2. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
with \<open>S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')\<close>
[PROOF STATE]
proof (chain)
picking this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
[PROOF STEP]
have "obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}"
[PROOF STATE]
proof (prove)
using this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
goal (1 subgoal):
1. obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
[PROOF STEP]
by(rule silent_move_empty_obs_slice)
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
goal (2 subgoals):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
2. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
with \<open>obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}\<close>
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {}
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {}
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
from this \<open>\<forall>m \<in> set (tl ms\<^sub>1'). return_node m\<close>
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {}
Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
obtain ms' where "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}"
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {}
Ball (set (tl ms\<^sub>1')) return_node
goal (1 subgoal):
1. (\<And>ms'. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'} \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce dest:obs_singleton_element)
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
hence "ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
goal (1 subgoal):
1. ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
from \<open>S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')\<close> \<open>ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>m \<in> set (tl ms\<^sub>1'). return_node m\<close>
[PROOF STATE]
proof (chain)
picking this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
have "ms' \<in> obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
ms' \<in> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node
goal (1 subgoal):
1. ms' \<in> obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by(fastforce intro:silent_move_obs_slice)
[PROOF STATE]
proof (state)
this:
ms' \<in> obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
from this \<open>\<forall>m \<in> set (tl ms\<^sub>1). return_node m\<close>
[PROOF STATE]
proof (chain)
picking this:
ms' \<in> obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
[PROOF STEP]
have "obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}"
[PROOF STATE]
proof (prove)
using this:
ms' \<in> obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
goal (1 subgoal):
1. obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
[PROOF STEP]
by(rule obs_singleton_element)
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<noteq> {} \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
with \<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}\<close>
\<open>obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = {ms'}
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')\<close> WSE
[PROOF STATE]
proof (chain)
picking this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
S,kind \<turnstile> (ms\<^sub>1,s\<^sub>1) -a\<rightarrow>\<^sub>\<tau> (ms\<^sub>1',s\<^sub>1')
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
proof(induct S f\<equiv>"kind" ms\<^sub>1 s\<^sub>1 a ms\<^sub>1' s\<^sub>1' rule:silent_move.induct)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case (silent_move_intra a s\<^sub>1 s\<^sub>1' ms\<^sub>1 S ms\<^sub>1')
[PROOF STATE]
proof (state)
this:
pred (kind a) s\<^sub>1
transfer (kind a) s\<^sub>1 = s\<^sub>1'
valid_edge a
intra_kind (kind a)
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
length s\<^sub>1' = length s\<^sub>1
length ms\<^sub>1 = length s\<^sub>1
hd ms\<^sub>1 = sourcenode a
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
note obs_eq = \<open>\<forall>a\<in>set (tl ms\<^sub>1'). return_node a \<Longrightarrow>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>s\<^sub>1 \<noteq> []\<close> \<open>s\<^sub>2 \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
[PROOF STEP]
obtain cf\<^sub>1 cfs\<^sub>1 cf\<^sub>2 cfs\<^sub>2 where [simp]:"s\<^sub>1 = cf\<^sub>1#cfs\<^sub>1"
and [simp]:"s\<^sub>2 = cf\<^sub>2#cfs\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
goal (1 subgoal):
1. (\<And>cf\<^sub>1 cfs\<^sub>1 cf\<^sub>2 cfs\<^sub>2. \<lbrakk>s\<^sub>1 = cf\<^sub>1 # cfs\<^sub>1; s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases s\<^sub>1,auto,cases s\<^sub>2,fastforce+)
[PROOF STATE]
proof (state)
this:
s\<^sub>1 = cf\<^sub>1 # cfs\<^sub>1
s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>transfer (kind a) s\<^sub>1 = s\<^sub>1'\<close> \<open>intra_kind (kind a)\<close>
[PROOF STATE]
proof (chain)
picking this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
intra_kind (kind a)
[PROOF STEP]
obtain cf\<^sub>1' where [simp]:"s\<^sub>1' = cf\<^sub>1'#cfs\<^sub>1"
[PROOF STATE]
proof (prove)
using this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
intra_kind (kind a)
goal (1 subgoal):
1. (\<And>cf\<^sub>1'. s\<^sub>1' = cf\<^sub>1' # cfs\<^sub>1 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases cf\<^sub>1,cases "kind a",auto simp:intra_kind_def)
[PROOF STATE]
proof (state)
this:
s\<^sub>1' = cf\<^sub>1' # cfs\<^sub>1
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>m \<in> set ms\<^sub>1. valid_node m\<close> \<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close> \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1) valid_node
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
valid_edge a
[PROOF STEP]
have "\<forall>m \<in> set ms\<^sub>1'. valid_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1) valid_node
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
valid_edge a
goal (1 subgoal):
1. Ball (set ms\<^sub>1') valid_node
[PROOF STEP]
by(cases ms\<^sub>1) auto
[PROOF STATE]
proof (state)
this:
Ball (set ms\<^sub>1') valid_node
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
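The step just closed follows a common list pattern: a property of every element of ms\<^sub>1 is transferred to targetnode a # tl ms\<^sub>1 by a case split on ms\<^sub>1, with the new head discharged separately (here presumably via valid_edge a). A minimal standalone sketch of that pattern, using hypothetical names xs, y and predicate P rather than the locale constants, is:
lemma "\<lbrakk>\<forall>m \<in> set xs. P m; P y\<rbrakk> \<Longrightarrow> \<forall>m \<in> set (y # tl xs). P m"
  (* both the empty and the non-empty case of xs are closed by auto *)
  by (cases xs) auto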
[PROOF STEP]
from \<open>length ms\<^sub>1 = length s\<^sub>1\<close> \<open>length s\<^sub>1' = length s\<^sub>1\<close>
\<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
length ms\<^sub>1 = length s\<^sub>1
length s\<^sub>1' = length s\<^sub>1
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
[PROOF STEP]
have "length ms\<^sub>1' = length s\<^sub>1'"
[PROOF STATE]
proof (prove)
using this:
length ms\<^sub>1 = length s\<^sub>1
length s\<^sub>1' = length s\<^sub>1
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
goal (1 subgoal):
1. length ms\<^sub>1' = length s\<^sub>1'
[PROOF STEP]
by(cases ms\<^sub>1) auto
[PROOF STATE]
proof (state)
this:
length ms\<^sub>1' = length s\<^sub>1'
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>m \<in> set (tl ms\<^sub>1). return_node m\<close> \<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set (tl ms\<^sub>1)) return_node
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
[PROOF STEP]
have "\<forall>m \<in> set (tl ms\<^sub>1'). return_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set (tl ms\<^sub>1)) return_node
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
goal (1 subgoal):
1. Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from obs_eq[OF this]
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
by(cases ms\<^sub>2) auto
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
goal (3 subgoals):
1. \<And>a s s' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; intra_kind (kind a); (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length s' = length s; length ms = length s; hd ms = sourcenode a; ms' = targetnode a # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
3. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
proof(cases msx)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
msx = []
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
[PROOF STEP]
have [simp]:"mx = sourcenode a" and [simp]:"tl ms\<^sub>1 = tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
goal (1 subgoal):
1. mx = sourcenode a &&& tl ms\<^sub>1 = tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx = sourcenode a
tl ms\<^sub>1 = tl ms\<^sub>2
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
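The decomposition above is plain list computation once msx = [] collapses the prefix; a minimal standalone sketch with hypothetical names xs, y, zs, v is:
lemma "\<lbrakk>xs = [] @ y # zs; hd xs = v\<rbrakk> \<Longrightarrow> y = v \<and> tl xs = zs"
  (* simp rewrites xs to y # zs, after which hd and tl compute directly *)
  by simp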
[PROOF STEP]
from \<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or>
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
hd ms\<^sub>1 = sourcenode a
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
hd ms\<^sub>1 = sourcenode a
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>intra_kind(kind a)\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
intra_kind (kind a)
[PROOF STEP]
have "get_proc (sourcenode a) = get_proc (targetnode a)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
intra_kind (kind a)
goal (1 subgoal):
1. get_proc (sourcenode a) = get_proc (targetnode a)
[PROOF STEP]
by(rule get_proc_intra)
[PROOF STATE]
proof (state)
this:
get_proc (sourcenode a) = get_proc (targetnode a)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close>
[PROOF STATE]
proof (chain)
picking this:
get_proc mx = get_proc (hd ms\<^sub>2)
get_proc (sourcenode a) = get_proc (targetnode a)
[PROOF STEP]
have "get_proc (targetnode a) = get_proc (hd ms\<^sub>2)"
[PROOF STATE]
proof (prove)
using this:
get_proc mx = get_proc (hd ms\<^sub>2)
get_proc (sourcenode a) = get_proc (targetnode a)
goal (1 subgoal):
1. get_proc (targetnode a) = get_proc (hd ms\<^sub>2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
get_proc (targetnode a) = get_proc (hd ms\<^sub>2)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>transfer (kind a) s\<^sub>1 = s\<^sub>1'\<close> \<open>intra_kind (kind a)\<close>
[PROOF STATE]
proof (chain)
picking this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
intra_kind (kind a)
[PROOF STEP]
have "snd cf\<^sub>1' = snd cf\<^sub>1"
[PROOF STATE]
proof (prove)
using this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
intra_kind (kind a)
goal (1 subgoal):
1. snd cf\<^sub>1' = snd cf\<^sub>1
[PROOF STEP]
by(auto simp:intra_kind_def)
[PROOF STATE]
proof (state)
this:
snd cf\<^sub>1' = snd cf\<^sub>1
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close> Nil
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = []
snd cf\<^sub>1' = snd cf\<^sub>1
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = []
snd cf\<^sub>1' = snd cf\<^sub>1
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)
[PROOF STEP]
by auto (case_tac i,auto)
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
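The case_tac i step above separates the top stack frame (index 0, where snd cf\<^sub>1' = snd cf\<^sub>1 is used) from the untouched remaining frames, assuming, as in the surrounding context, that s\<^sub>1' differs from s\<^sub>1 only in its head. A standalone sketch of that index split, with hypothetical x, x', xs, is:
lemma "snd x' = snd x \<Longrightarrow> snd ((x' # xs) ! i) = snd ((x # xs) ! i)"
  (* i = 0 uses the assumption on the heads; i = Suc j reduces both sides to xs ! j *)
  by (cases i) auto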
[PROOF STEP]
have "\<forall>V \<in> rv S (CFG_node (targetnode a)). fst cf\<^sub>1' V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node (targetnode a)). fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
fix V
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
assume "V \<in> rv S (CFG_node (targetnode a))"
[PROOF STATE]
proof (state)
this:
V \<in> rv S (CFG_node (targetnode a))
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>intra_kind (kind a)\<close> \<open>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
intra_kind (kind a)
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "obs_intra (targetnode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> =
obs_intra (sourcenode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
intra_kind (kind a)
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs_intra (targetnode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs_intra (sourcenode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by(rule edge_obs_intra_slice_eq)
[PROOF STATE]
proof (state)
this:
obs_intra (targetnode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs_intra (sourcenode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
hence "rv S (CFG_node (targetnode a)) = rv S (CFG_node (sourcenode a))"
[PROOF STATE]
proof (prove)
using this:
obs_intra (targetnode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs_intra (sourcenode a) \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. rv S (CFG_node (targetnode a)) = rv S (CFG_node (sourcenode a))
[PROOF STEP]
by(rule closed_eq_obs_eq_rvs')
[PROOF STATE]
proof (state)
this:
rv S (CFG_node (targetnode a)) = rv S (CFG_node (sourcenode a))
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>V \<in> rv S (CFG_node (targetnode a))\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node (targetnode a))
rv S (CFG_node (targetnode a)) = rv S (CFG_node (sourcenode a))
[PROOF STEP]
have "V \<in> rv S (CFG_node (sourcenode a))"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node (targetnode a))
rv S (CFG_node (targetnode a)) = rv S (CFG_node (sourcenode a))
goal (1 subgoal):
1. V \<in> rv S (CFG_node (sourcenode a))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
V \<in> rv S (CFG_node (sourcenode a))
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node (sourcenode a))
[PROOF STEP]
obtain as n' where "sourcenode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
and "n' \<in> HRB_slice S" and "V \<in> Use\<^bsub>SDG\<^esub> n'"
and "\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as)
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node (sourcenode a))
goal (1 subgoal):
1. (\<And>as n'. \<lbrakk>sourcenode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce elim:rvE)
[PROOF STATE]
proof (state)
this:
sourcenode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close> \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
valid_edge a
sourcenode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
[PROOF STEP]
have "V \<notin> Def\<^bsub>SDG\<^esub> (CFG_node (sourcenode a))"
[PROOF STATE]
proof (prove)
using this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
valid_edge a
sourcenode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (1 subgoal):
1. V \<notin> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a)
[PROOF STEP]
apply(clarsimp simp:intra_path_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; valid_edge a; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''; sourcenode a -as\<rightarrow>* parent_node n'; \<forall>a\<in>set as. intra_kind (kind a); V \<in> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a)\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply(erule path.cases)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; valid_edge a; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''; \<forall>a\<in>set as. intra_kind (kind a); V \<in> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a); sourcenode a = n; as = []; parent_node n' = n; valid_node n\<rbrakk> \<Longrightarrow> False
2. \<And>n'' asa n'a a n. \<lbrakk>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; valid_edge a; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''; \<forall>a\<in>set as. intra_kind (kind a); V \<in> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a); sourcenode a = n; as = a # asa; parent_node n' = n'a; n'' -asa\<rightarrow>* n'a; valid_edge a; sourcenode a = n; targetnode a = n''\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
by(auto dest:valid_SDG_node_in_slice_parent_node_in_slice
simp:sourcenodes_def SDG_to_CFG_set_def)
[PROOF STATE]
proof (state)
this:
V \<notin> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
from \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
[PROOF STEP]
have "valid_node (sourcenode a)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
goal (1 subgoal):
1. valid_node (sourcenode a)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
valid_node (sourcenode a)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>V \<notin> Def\<^bsub>SDG\<^esub> (CFG_node (sourcenode a))\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<notin> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a)
valid_node (sourcenode a)
[PROOF STEP]
have "V \<notin> Def (sourcenode a)"
[PROOF STATE]
proof (prove)
using this:
V \<notin> Def\<^bsub>SDG\<^esub> CFG_node (sourcenode a)
valid_node (sourcenode a)
goal (1 subgoal):
1. V \<notin> Def (sourcenode a)
[PROOF STEP]
by(fastforce intro:CFG_Def_SDG_Def valid_SDG_CFG_node)
[PROOF STATE]
proof (state)
this:
V \<notin> Def (sourcenode a)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>valid_edge a\<close> \<open>intra_kind (kind a)\<close> \<open>pred (kind a) s\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
intra_kind (kind a)
pred (kind a) s\<^sub>1
V \<notin> Def (sourcenode a)
[PROOF STEP]
have "state_val (transfer (kind a) s\<^sub>1) V = state_val s\<^sub>1 V"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
intra_kind (kind a)
pred (kind a) s\<^sub>1
V \<notin> Def (sourcenode a)
goal (1 subgoal):
1. state_val (transfer (kind a) s\<^sub>1) V = state_val s\<^sub>1 V
[PROOF STEP]
by(fastforce intro:CFG_intra_edge_no_Def_equal)
[PROOF STATE]
proof (state)
this:
state_val (transfer (kind a) s\<^sub>1) V = state_val s\<^sub>1 V
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>transfer (kind a) s\<^sub>1 = s\<^sub>1'\<close>
[PROOF STATE]
proof (chain)
picking this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
state_val (transfer (kind a) s\<^sub>1) V = state_val s\<^sub>1 V
[PROOF STEP]
have "fst cf\<^sub>1' V = fst cf\<^sub>1 V"
[PROOF STATE]
proof (prove)
using this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
state_val (transfer (kind a) s\<^sub>1) V = state_val s\<^sub>1 V
goal (1 subgoal):
1. fst cf\<^sub>1' V = fst cf\<^sub>1 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst cf\<^sub>1' V = fst cf\<^sub>1 V
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
from \<open>V \<in> rv S (CFG_node (sourcenode a))\<close> \<open>msx = []\<close>
\<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node (sourcenode a))
msx = []
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
have "fst cf\<^sub>1 V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node (sourcenode a))
msx = []
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. fst cf\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a)) \<Longrightarrow> fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>fst cf\<^sub>1' V = fst cf\<^sub>1 V\<close>
[PROOF STATE]
proof (chain)
picking this:
fst cf\<^sub>1' V = fst cf\<^sub>1 V
fst cf\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
show "fst cf\<^sub>1' V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
fst cf\<^sub>1' V = fst cf\<^sub>1 V
fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst cf\<^sub>1' V = fst cf\<^sub>2 V
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node (targetnode a)). fst cf\<^sub>1' V = fst cf\<^sub>2 V
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)).
(fst (s\<^sub>1 ! (length msx + i))) V = (fst (s\<^sub>2 ! i)) V\<close> Nil
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = []
\<forall>V\<in>rv S (CFG_node (targetnode a)). fst cf\<^sub>1' V = fst cf\<^sub>2 V
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a # tl ms\<^sub>2) ! i)).
(fst (s\<^sub>1' ! (length [] + i))) V = (fst (s\<^sub>2 ! i)) V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = []
\<forall>V\<in>rv S (CFG_node (targetnode a)). fst cf\<^sub>1' V = fst cf\<^sub>2 V
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [] + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
by auto (case_tac i,auto)
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [] + i)) V = fst (s\<^sub>2 ! i) V
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>m \<in> set ms\<^sub>1'. valid_node m\<close> \<open>\<forall>m \<in> set ms\<^sub>2. valid_node m\<close>
\<open>length ms\<^sub>1' = length s\<^sub>1'\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
\<open>ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2\<close>
\<open>get_proc (targetnode a) = get_proc (hd ms\<^sub>2)\<close>
\<open>\<forall>m \<in> set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>m \<in> set (tl ms\<^sub>1). return_node m\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2
get_proc (targetnode a) = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [] + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = [] @ targetnode a # tl ms\<^sub>2
get_proc (targetnode a) = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! i) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [] + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by(auto intro!:WSI)
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case (Cons mx' msx')
[PROOF STATE]
proof (state)
this:
msx = mx' # msx'
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
[PROOF STEP]
have [simp]:"mx' = sourcenode a" and [simp]:"tl ms\<^sub>1 = msx'@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
goal (1 subgoal):
1. mx' = sourcenode a &&& tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx' = sourcenode a
tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
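As in the Nil case, this decomposition is pure list computation, now with a non-empty prefix; a standalone sketch with hypothetical names w, ws, y, zs, v is:
lemma "\<lbrakk>xs = (w # ws) @ y # zs; hd xs = v\<rbrakk> \<Longrightarrow> w = v \<and> tl xs = ws @ y # zs"
  (* the head of the appended list is the head of the non-empty prefix *)
  by simp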
[PROOF STEP]
from \<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = ((targetnode a)#msx')@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
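Pushing targetnode a onto the unchanged tail restores the prefix shape that the WS introduction rule expects; a standalone sketch of this re-assembly, again with hypothetical names, is:
lemma "\<lbrakk>xs' = x # tl xs; tl xs = ws @ y # zs\<rbrakk> \<Longrightarrow> xs' = (x # ws) @ y # zs"
  (* unfolding both equations makes the two sides syntactically equal *)
  by simp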
[PROOF STEP]
from \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
[PROOF STEP]
have rv:"\<forall>V\<in>rv S (CFG_node mx).
(fst (s\<^sub>1' ! length (targetnode a#msx'))) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # msx')) V = state_val s\<^sub>2 V
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # msx')) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> Cons \<open>ms\<^sub>1' = targetnode a # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
msx = mx' # msx'
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = ((targetnode a)#msx')@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
msx = mx' # msx'
ms\<^sub>1' = targetnode a # tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1' ! length msx)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2)!i)).
(fst (s\<^sub>1'!(length (targetnode a # msx') + i))) V = (fst (s\<^sub>2!i)) V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # msx') + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
by clarsimp
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # msx') + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
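(* All ingredients of the invariant defining WS S have now been re-established for
   (ms\<^sub>1', s\<^sub>1'), so the introduction rule WSI closes this case. *)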
with \<open>\<forall>m\<in>set ms\<^sub>1'. valid_node m\<close> \<open>\<forall>m\<in>set ms\<^sub>2. valid_node m\<close>
\<open>length ms\<^sub>1' = length s\<^sub>1'\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
\<open>ms\<^sub>1' = ((targetnode a)#msx')@mx#tl ms\<^sub>2\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>m \<in> set (tl ms\<^sub>1'). return_node m\<close> \<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close>
\<open>msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close> Cons
\<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node
get_proc mx = get_proc (hd ms\<^sub>2)
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx = mx' # msx'
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # msx') + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = (targetnode a # msx') @ mx # tl ms\<^sub>2
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node
get_proc mx = get_proc (hd ms\<^sub>2)
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx = mx' # msx'
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # msx') + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by -(rule WSI,clarsimp+,fastforce,clarsimp+)
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
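(* Next case: a silent move along a call edge a with kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs.  Here the
   node stack grows to targetnode a # targetnode a' # tl ms\<^sub>1, where a' is one of the
   matching return edges, and the state gains a fresh call frame. *)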
case (silent_move_call a s\<^sub>1 s\<^sub>1' Q r p fs a' ms\<^sub>1 S ms\<^sub>1')
[PROOF STATE]
proof (state)
this:
pred (kind a) s\<^sub>1
transfer (kind a) s\<^sub>1 = s\<^sub>1'
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
valid_edge a'
a' \<in> get_return_edges a
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
length ms\<^sub>1 = length s\<^sub>1
length s\<^sub>1' = Suc (length s\<^sub>1)
hd ms\<^sub>1 = sourcenode a
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
note obs_eq = \<open>\<forall>a\<in>set (tl ms\<^sub>1'). return_node a \<Longrightarrow>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
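(* Both states are non-empty, so their top frames can be named explicitly. *)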
from \<open>s\<^sub>1 \<noteq> []\<close> \<open>s\<^sub>2 \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
[PROOF STEP]
obtain cf\<^sub>1 cfs\<^sub>1 cf\<^sub>2 cfs\<^sub>2 where [simp]:"s\<^sub>1 = cf\<^sub>1#cfs\<^sub>1"
and [simp]:"s\<^sub>2 = cf\<^sub>2#cfs\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
goal (1 subgoal):
1. (\<And>cf\<^sub>1 cfs\<^sub>1 cf\<^sub>2 cfs\<^sub>2. \<lbrakk>s\<^sub>1 = cf\<^sub>1 # cfs\<^sub>1; s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases s\<^sub>1,auto,cases s\<^sub>2,fastforce+)
[PROOF STATE]
proof (state)
this:
s\<^sub>1 = cf\<^sub>1 # cfs\<^sub>1
s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
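(* A valid call edge targets a procedure registered in procs together with its
   formal input and output parameters (via callee_in_procs). *)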
from \<open>valid_edge a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
[PROOF STEP]
obtain ins outs where "(p,ins,outs) \<in> set procs"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
goal (1 subgoal):
1. (\<And>ins outs. (p, ins, outs) \<in> set procs \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce dest!:callee_in_procs)
[PROOF STATE]
proof (state)
this:
(p, ins, outs) \<in> set procs
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
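(* Unfolding transfer for the call kind: the new top frame
   (Map.empty(ins [:=] params fs (fst cf\<^sub>1)), r) maps the formals ins to the actual
   parameter values and is pushed on top of the caller's frames. *)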
with \<open>transfer (kind a) s\<^sub>1 = s\<^sub>1'\<close> \<open>valid_edge a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close>
[PROOF STATE]
proof (chain)
picking this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
(p, ins, outs) \<in> set procs
[PROOF STEP]
have [simp]:"s\<^sub>1' = (Map.empty(ins [:=] params fs (fst cf\<^sub>1)), r) # cf\<^sub>1 # cfs\<^sub>1"
[PROOF STATE]
proof (prove)
using this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
(p, ins, outs) \<in> set procs
goal (1 subgoal):
1. s\<^sub>1' = (Map.empty(ins [:=] params fs (fst cf\<^sub>1)), r) # cf\<^sub>1 # cfs\<^sub>1
[PROOF STEP]
by simp(unfold formal_in_THE,simp)
[PROOF STATE]
proof (state)
this:
s\<^sub>1' = (Map.empty(ins [:=] params fs (fst cf\<^sub>1)), r) # cf\<^sub>1 # cfs\<^sub>1
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>length ms\<^sub>1 = length s\<^sub>1\<close> \<open>ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
length ms\<^sub>1 = length s\<^sub>1
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
[PROOF STEP]
have "length ms\<^sub>1' = length s\<^sub>1'"
[PROOF STATE]
proof (prove)
using this:
length ms\<^sub>1 = length s\<^sub>1
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
goal (1 subgoal):
1. length ms\<^sub>1' = length s\<^sub>1'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length ms\<^sub>1' = length s\<^sub>1'
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
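(* Edges delivered by get_return_edges are themselves valid (get_return_edges_valid). *)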
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
a' \<in> get_return_edges a
[PROOF STEP]
have "valid_edge a'"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
a' \<in> get_return_edges a
goal (1 subgoal):
1. valid_edge a'
[PROOF STEP]
by(rule get_return_edges_valid)
[PROOF STATE]
proof (state)
this:
valid_edge a'
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>m\<in>set ms\<^sub>1. valid_node m\<close> \<open>valid_edge a\<close>
\<open>ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1) valid_node
valid_edge a
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
valid_edge a'
[PROOF STEP]
have "\<forall>m\<in>set ms\<^sub>1'. valid_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1) valid_node
valid_edge a
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
valid_edge a'
goal (1 subgoal):
1. Ball (set ms\<^sub>1') valid_node
[PROOF STEP]
by(cases ms\<^sub>1) auto
[PROOF STATE]
proof (state)
this:
Ball (set ms\<^sub>1') valid_node
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
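(* targetnode a' is a return node, being the target of an edge in get_return_edges a
   for the valid call edge a (return_node_def). *)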
from \<open>valid_edge a'\<close> \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a'
valid_edge a
a' \<in> get_return_edges a
[PROOF STEP]
have "return_node (targetnode a')"
[PROOF STATE]
proof (prove)
using this:
valid_edge a'
valid_edge a
a' \<in> get_return_edges a
goal (1 subgoal):
1. return_node (targetnode a')
[PROOF STEP]
by(fastforce simp:return_node_def)
[PROOF STATE]
proof (state)
this:
return_node (targetnode a')
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
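(* Consequently targetnode a' is the return node belonging to the call at
   sourcenode a (call_of_return_node_def). *)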
with \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close> \<open>valid_edge a'\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
a' \<in> get_return_edges a
valid_edge a'
return_node (targetnode a')
[PROOF STEP]
have "call_of_return_node (targetnode a') (sourcenode a)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
a' \<in> get_return_edges a
valid_edge a'
return_node (targetnode a')
goal (1 subgoal):
1. call_of_return_node (targetnode a') (sourcenode a)
[PROOF STEP]
by(simp add:call_of_return_node_def) blast
[PROOF STATE]
proof (state)
this:
call_of_return_node (targetnode a') (sourcenode a)
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
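(* The new tail targetnode a' # tl ms\<^sub>1 consists of return nodes only; this
   discharges the premise of obs_eq, which is applied next. *)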
from \<open>\<forall>m \<in> set (tl ms\<^sub>1). return_node m\<close> \<open>return_node (targetnode a')\<close>
\<open>ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set (tl ms\<^sub>1)) return_node
return_node (targetnode a')
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
[PROOF STEP]
have "\<forall>m \<in> set (tl ms\<^sub>1'). return_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set (tl ms\<^sub>1)) return_node
return_node (targetnode a')
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
goal (1 subgoal):
1. Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from obs_eq[OF this]
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
by(erule_tac x="0" in allE) auto
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
goal (2 subgoals):
1. \<And>a s s' Q r p fs a' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; valid_edge a'; a' \<in> get_return_edges a; (\<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s' = Suc (length s); hd ms = sourcenode a; ms' = targetnode a # targetnode a' # tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
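(* Informal note, not part of the original proof text: the argument now splits on the
   untracked stack prefix msx.  If msx is empty, the call node sourcenode a is itself the
   distinguished node mx shared with ms\<^sub>2; otherwise the call occurs inside the prefix and
   the ms\<^sub>2-part of the stack is left untouched. *)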
proof(cases msx)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
msx = []
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
[PROOF STEP]
have [simp]:"mx = sourcenode a" and [simp]:"tl ms\<^sub>1 = tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
goal (1 subgoal):
1. mx = sourcenode a &&& tl ms\<^sub>1 = tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx = sourcenode a
tl ms\<^sub>1 = tl ms\<^sub>2
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or>
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
(\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>) \<or> hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
hd ms\<^sub>1 = sourcenode a
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
hd ms\<^sub>1 = sourcenode a
hd ms\<^sub>1 \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
a' \<in> get_return_edges a
[PROOF STEP]
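(* Informal note: a call edge with a matching return edge in get_return_edges induces an
   intraprocedural edge from the call source to the return target; this is the edge a''
   obtained below via call_return_node_edge. *)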
obtain a'' where "valid_edge a''" and "sourcenode a'' = sourcenode a"
and "targetnode a'' = targetnode a'" and "intra_kind(kind a'')"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
a' \<in> get_return_edges a
goal (1 subgoal):
1. (\<And>a''. \<lbrakk>valid_edge a''; sourcenode a'' = sourcenode a; targetnode a'' = targetnode a'; intra_kind (kind a'')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by -(drule call_return_node_edge,auto simp:intra_kind_def)
[PROOF STATE]
proof (state)
this:
valid_edge a''
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
intra_kind (kind a'')
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>valid_edge a''\<close> \<open>intra_kind(kind a'')\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a''
intra_kind (kind a'')
[PROOF STEP]
have "get_proc (sourcenode a'') = get_proc (targetnode a'')"
[PROOF STATE]
proof (prove)
using this:
valid_edge a''
intra_kind (kind a'')
goal (1 subgoal):
1. get_proc (sourcenode a'') = get_proc (targetnode a'')
[PROOF STEP]
by(rule get_proc_intra)
[PROOF STATE]
proof (state)
this:
get_proc (sourcenode a'') = get_proc (targetnode a'')
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>sourcenode a'' = sourcenode a\<close> \<open>targetnode a'' = targetnode a'\<close>
\<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close>
[PROOF STATE]
proof (chain)
picking this:
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
get_proc mx = get_proc (hd ms\<^sub>2)
get_proc (sourcenode a'') = get_proc (targetnode a'')
[PROOF STEP]
have "get_proc (targetnode a') = get_proc (hd ms\<^sub>2)"
[PROOF STATE]
proof (prove)
using this:
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
get_proc mx = get_proc (hd ms\<^sub>2)
get_proc (sourcenode a'') = get_proc (targetnode a'')
goal (1 subgoal):
1. get_proc (targetnode a') = get_proc (hd ms\<^sub>2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
get_proc (targetnode a') = get_proc (hd ms\<^sub>2)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close> \<open>a' \<in> get_return_edges a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
a' \<in> get_return_edges a
[PROOF STEP]
have "CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
a' \<in> get_return_edges a
goal (1 subgoal):
1. CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
[PROOF STEP]
by(fastforce intro:sum_SDG_call_summary_edge)
[PROOF STATE]
proof (state)
this:
CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
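(* Informal note: the return target cannot lie in the backward slice.  Otherwise the summary
   edge just established would pull CFG_node (sourcenode a) into HRB_slice S, contradicting
   sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; this is shown by the contradiction proof below. *)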
have "targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<Longrightarrow> False
[PROOF STEP]
assume "targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (state)
this:
targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<Longrightarrow> False
[PROOF STEP]
hence "CFG_node (targetnode a') \<in> HRB_slice S"
[PROOF STATE]
proof (prove)
using this:
targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. CFG_node (targetnode a') \<in> HRB_slice S
[PROOF STEP]
by(simp add:SDG_to_CFG_set_def)
[PROOF STATE]
proof (state)
this:
CFG_node (targetnode a') \<in> HRB_slice S
goal (1 subgoal):
1. targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<Longrightarrow> False
[PROOF STEP]
hence "CFG_node (sourcenode a) \<in> HRB_slice S"
[PROOF STATE]
proof (prove)
using this:
CFG_node (targetnode a') \<in> HRB_slice S
goal (1 subgoal):
1. CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
proof(induct "CFG_node (targetnode a')" rule:HRB_slice_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>nx. \<lbrakk>CFG_node (targetnode a') \<in> sum_SDG_slice1 nx; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
2. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
case (phase1 nx)
[PROOF STATE]
proof (state)
this:
CFG_node (targetnode a') \<in> sum_SDG_slice1 nx
nx \<in> S
goal (2 subgoals):
1. \<And>nx. \<lbrakk>CFG_node (targetnode a') \<in> sum_SDG_slice1 nx; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
2. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
with \<open>CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')\<close>
[PROOF STATE]
proof (chain)
picking this:
CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
CFG_node (targetnode a') \<in> sum_SDG_slice1 nx
nx \<in> S
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
CFG_node (targetnode a') \<in> sum_SDG_slice1 nx
nx \<in> S
goal (1 subgoal):
1. CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
by(fastforce intro:combine_SDG_slices.combSlice_refl sum_slice1
simp:HRB_slice_def)
[PROOF STATE]
proof (state)
this:
CFG_node (sourcenode a) \<in> HRB_slice S
goal (1 subgoal):
1. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
case (phase2 nx n' n'' p')
[PROOF STATE]
proof (state)
this:
n' \<in> sum_SDG_slice1 nx
n'' s-p'\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')
CFG_node (targetnode a') \<in> sum_SDG_slice2 n'
nx \<in> S
goal (1 subgoal):
1. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
from \<open>CFG_node (targetnode a') \<in> sum_SDG_slice2 n'\<close>
\<open>CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')\<close> \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
CFG_node (targetnode a') \<in> sum_SDG_slice2 n'
CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
valid_edge a
[PROOF STEP]
have "CFG_node (sourcenode a) \<in> sum_SDG_slice2 n'"
[PROOF STATE]
proof (prove)
using this:
CFG_node (targetnode a') \<in> sum_SDG_slice2 n'
CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')
valid_edge a
goal (1 subgoal):
1. CFG_node (sourcenode a) \<in> sum_SDG_slice2 n'
[PROOF STEP]
by(fastforce intro:sum_slice2)
[PROOF STATE]
proof (state)
this:
CFG_node (sourcenode a) \<in> sum_SDG_slice2 n'
goal (1 subgoal):
1. \<And>nx n' n'' p. \<lbrakk>n' \<in> sum_SDG_slice1 nx; n'' s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n'); CFG_node (targetnode a') \<in> sum_SDG_slice2 n'; nx \<in> S\<rbrakk> \<Longrightarrow> CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
with \<open>n' \<in> sum_SDG_slice1 nx\<close> \<open>n'' s-p'\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')\<close>
\<open>nx \<in> S\<close>
[PROOF STATE]
proof (chain)
picking this:
n' \<in> sum_SDG_slice1 nx
n'' s-p'\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')
nx \<in> S
CFG_node (sourcenode a) \<in> sum_SDG_slice2 n'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
n' \<in> sum_SDG_slice1 nx
n'' s-p'\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')
nx \<in> S
CFG_node (sourcenode a) \<in> sum_SDG_slice2 n'
goal (1 subgoal):
1. CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
by(fastforce intro:combine_SDG_slices.combSlice_Return_parent_node
simp:HRB_slice_def)
[PROOF STATE]
proof (state)
this:
CFG_node (sourcenode a) \<in> HRB_slice S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
CFG_node (sourcenode a) \<in> HRB_slice S
goal (1 subgoal):
1. targetnode a' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> \<Longrightarrow> False
[PROOF STEP]
with \<open>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
CFG_node (sourcenode a) \<in> HRB_slice S
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
CFG_node (sourcenode a) \<in> HRB_slice S
goal (1 subgoal):
1. False
[PROOF STEP]
by(simp add:SDG_to_CFG_set_def HRB_slice_def)
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close> Nil
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = []
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = []
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
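(* Informal note: every variable relevant at the return target targetnode a' is also relevant
   at sourcenode a, since the intra edge a'' prolongs the witnessing intra path and a call
   source defines no variables (call_source_Def_empty); hence its value carries over from
   the shared node mx = sourcenode a. *)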
have "\<forall>V\<in>rv S (CFG_node (targetnode a')). (fst (s\<^sub>1' ! 1)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node (targetnode a')). fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
fix V
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
assume "V \<in> rv S (CFG_node (targetnode a'))"
[PROOF STATE]
proof (state)
this:
V \<in> rv S (CFG_node (targetnode a'))
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>a' \<in> get_return_edges a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
a' \<in> get_return_edges a
[PROOF STEP]
obtain a'' where edge:"valid_edge a''" "sourcenode a'' = sourcenode a"
"targetnode a'' = targetnode a'" "intra_kind(kind a'')"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
a' \<in> get_return_edges a
goal (1 subgoal):
1. (\<And>a''. \<lbrakk>valid_edge a''; sourcenode a'' = sourcenode a; targetnode a'' = targetnode a'; intra_kind (kind a'')\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by -(drule call_return_node_edge,auto simp:intra_kind_def)
[PROOF STATE]
proof (state)
this:
valid_edge a''
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
intra_kind (kind a'')
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
from \<open>V \<in> rv S (CFG_node (targetnode a'))\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node (targetnode a'))
[PROOF STEP]
obtain as n' where "targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
and "n' \<in> HRB_slice S" and "V \<in> Use\<^bsub>SDG\<^esub> n'"
and "\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as)
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node (targetnode a'))
goal (1 subgoal):
1. (\<And>as n'. \<lbrakk>targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce elim:rvE)
[PROOF STATE]
proof (state)
this:
targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
from \<open>targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'\<close> edge
[PROOF STATE]
proof (chain)
picking this:
targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'
valid_edge a''
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
intra_kind (kind a'')
[PROOF STEP]
have "sourcenode a -a''#as\<rightarrow>\<^sub>\<iota>* parent_node n'"
[PROOF STATE]
proof (prove)
using this:
targetnode a' -as\<rightarrow>\<^sub>\<iota>* parent_node n'
valid_edge a''
sourcenode a'' = sourcenode a
targetnode a'' = targetnode a'
intra_kind (kind a'')
goal (1 subgoal):
1. sourcenode a -a'' # as\<rightarrow>\<^sub>\<iota>* parent_node n'
[PROOF STEP]
by(fastforce intro:Cons_path simp:intra_path_def)
[PROOF STATE]
proof (state)
this:
sourcenode a -a'' # as\<rightarrow>\<^sub>\<iota>* parent_node n'
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
[PROOF STEP]
have "V \<notin> Def (sourcenode a)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs
goal (1 subgoal):
1. V \<notin> Def (sourcenode a)
[PROOF STEP]
by(fastforce dest:call_source_Def_empty)
[PROOF STATE]
proof (state)
this:
V \<notin> Def (sourcenode a)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
with \<open>\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as)
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''\<close> \<open>sourcenode a'' = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
sourcenode a'' = sourcenode a
V \<notin> Def (sourcenode a)
[PROOF STEP]
have "\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (a''#as))
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''"
[PROOF STATE]
proof (prove)
using this:
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
sourcenode a'' = sourcenode a
V \<notin> Def (sourcenode a)
goal (1 subgoal):
1. \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (a'' # as)) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
[PROOF STEP]
by(fastforce dest:SDG_Def_parent_Def simp:sourcenodes_def)
[PROOF STATE]
proof (state)
this:
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (a'' # as)) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
with \<open>sourcenode a -a''#as\<rightarrow>\<^sub>\<iota>* parent_node n'\<close> \<open>n' \<in> HRB_slice S\<close>
\<open>V \<in> Use\<^bsub>SDG\<^esub> n'\<close>
[PROOF STATE]
proof (chain)
picking this:
sourcenode a -a'' # as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (a'' # as)) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
[PROOF STEP]
have "V \<in> rv S (CFG_node (sourcenode a))"
[PROOF STATE]
proof (prove)
using this:
sourcenode a -a'' # as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (a'' # as)) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (1 subgoal):
1. V \<in> rv S (CFG_node (sourcenode a))
[PROOF STEP]
by(fastforce intro:rvI)
[PROOF STATE]
proof (state)
this:
V \<in> rv S (CFG_node (sourcenode a))
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
from \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close> Nil
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = []
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node (sourcenode a)). fst cf\<^sub>1 V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = []
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node (sourcenode a)). fst cf\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node (sourcenode a)). fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
with \<open>V \<in> rv S (CFG_node (sourcenode a))\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node (sourcenode a))
\<forall>V\<in>rv S (CFG_node (sourcenode a)). fst cf\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
have "fst cf\<^sub>1 V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node (sourcenode a))
\<forall>V\<in>rv S (CFG_node (sourcenode a)). fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. fst cf\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node (targetnode a')) \<Longrightarrow> fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
thus "(fst (s\<^sub>1' ! 1)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
fst cf\<^sub>1 V = fst cf\<^sub>2 V
goal (1 subgoal):
1. fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node (targetnode a')). fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> Nil
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = []
\<forall>V\<in>rv S (CFG_node (targetnode a')). fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a' # tl ms\<^sub>2)!i)).
(fst (s\<^sub>1'!(length [targetnode a] + i))) V = (fst (s\<^sub>2!i)) V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = []
\<forall>V\<in>rv S (CFG_node (targetnode a')). fst (s\<^sub>1' ! 1) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a' # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [targetnode a] + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
by clarsimp(case_tac i,auto)
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a' # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [targetnode a] + i)) V = fst (s\<^sub>2 ! i) V
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>m\<in>set ms\<^sub>1'. valid_node m\<close> \<open>\<forall>m\<in>set ms\<^sub>2. valid_node m\<close>
\<open>length ms\<^sub>1' = length s\<^sub>1'\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2\<close>
\<open>targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close> \<open>return_node (targetnode a')\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>get_proc (targetnode a') = get_proc (hd ms\<^sub>2)\<close>
\<open>\<forall>m \<in> set (tl ms\<^sub>1'). return_node m\<close> \<open>sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>call_of_return_node (targetnode a') (sourcenode a)\<close>
\<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2
targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
return_node (targetnode a')
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
get_proc (targetnode a') = get_proc (hd ms\<^sub>2)
Ball (set (tl ms\<^sub>1')) return_node
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
call_of_return_node (targetnode a') (sourcenode a)
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a' # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [targetnode a] + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
ms\<^sub>1' = [targetnode a] @ targetnode a' # tl ms\<^sub>2
targetnode a' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
return_node (targetnode a')
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
get_proc (targetnode a') = get_proc (hd ms\<^sub>2)
Ball (set (tl ms\<^sub>1')) return_node
sourcenode a \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
call_of_return_node (targetnode a') (sourcenode a)
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length [targetnode a] + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((targetnode a' # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length [targetnode a] + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by(auto intro!:WSI)
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
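(* Informal note: in the Cons case the call happens inside the untracked prefix msx, so the
   correspondence between mx # tl ms\<^sub>2 and the tail of the stack merely shifts by the two new
   frames targetnode a and targetnode a'. *)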
case (Cons mx' msx')
[PROOF STATE]
proof (state)
this:
msx = mx' # msx'
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
[PROOF STEP]
have [simp]:"mx' = sourcenode a" and [simp]:"tl ms\<^sub>1 = msx'@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
goal (1 subgoal):
1. mx' = sourcenode a &&& tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx' = sourcenode a
tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = (targetnode a # targetnode a' # msx')@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1' = targetnode a # targetnode a' # tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = (targetnode a # targetnode a' # msx') @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = (targetnode a # targetnode a' # msx') @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2.
snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx).
(fst (s\<^sub>1' ! length(targetnode a # targetnode a' # msx'))) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # targetnode a' # msx')) V = state_val s\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # targetnode a' # msx')) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # targetnode a' # msx')) V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2)!i)).
(fst (s\<^sub>1'!(length (targetnode a # targetnode a' # msx') + i))) V =
(fst (s\<^sub>2!i)) V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length (targetnode a # targetnode a' # msx')) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
by clarsimp
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>m\<in>set ms\<^sub>1'. valid_node m\<close> \<open>\<forall>m\<in>set ms\<^sub>2. valid_node m\<close>
\<open>length ms\<^sub>1' = length s\<^sub>1'\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
\<open>ms\<^sub>1' = (targetnode a # targetnode a' # msx')@mx#tl ms\<^sub>2\<close>
\<open>return_node (targetnode a')\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close> Cons
\<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close> \<open>\<forall>m \<in> set (tl ms\<^sub>1'). return_node m\<close>
\<open>\<forall>i<length ms\<^sub>2.
snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = (targetnode a # targetnode a' # msx') @ mx # tl ms\<^sub>2
return_node (targetnode a')
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx = mx' # msx'
get_proc mx = get_proc (hd ms\<^sub>2)
Ball (set (tl ms\<^sub>1')) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = (targetnode a # targetnode a' # msx') @ mx # tl ms\<^sub>2
return_node (targetnode a')
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx = mx' # msx'
get_proc mx = get_proc (hd ms\<^sub>2)
Ball (set (tl ms\<^sub>1')) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length (targetnode a # targetnode a' # msx') + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by -(rule WSI,clarsimp+,fastforce,clarsimp+)
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
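(* Informal note: the remaining subgoal treats silent return moves, where the top stack node
   is popped and its values are folded into the caller frame via the return function f'. *)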
case (silent_move_return a s\<^sub>1 s\<^sub>1' Q p f' ms\<^sub>1 S ms\<^sub>1')
[PROOF STATE]
proof (state)
this:
pred (kind a) s\<^sub>1
transfer (kind a) s\<^sub>1 = s\<^sub>1'
valid_edge a
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1)) return_node
length ms\<^sub>1 = length s\<^sub>1
length s\<^sub>1 = Suc (length s\<^sub>1')
s\<^sub>1' \<noteq> []
hd ms\<^sub>1 = sourcenode a
hd (tl ms\<^sub>1) = targetnode a
ms\<^sub>1' = tl ms\<^sub>1
Ball (set ms\<^sub>1) valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1 = length s\<^sub>1
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>1 \<noteq> []
s\<^sub>2 \<noteq> []
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1)) return_node
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
obs ms\<^sub>1 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
note obs_eq = \<open>\<forall>a\<in>set (tl ms\<^sub>1'). return_node a \<Longrightarrow>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node \<Longrightarrow> obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>transfer (kind a) s\<^sub>1 = s\<^sub>1'\<close> \<open>kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'\<close> \<open>s\<^sub>1 \<noteq> []\<close> \<open>s\<^sub>1' \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
s\<^sub>1 \<noteq> []
s\<^sub>1' \<noteq> []
[PROOF STEP]
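(* Informal note: since length s\<^sub>1 = Suc (length s\<^sub>1') and s\<^sub>1' \<noteq> [], the call stack s\<^sub>1 has at
   least two frames; the return transfer combines the two topmost frames, as made explicit
   by the obtained decomposition below. *)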
obtain cf\<^sub>1 cfx\<^sub>1 cfs\<^sub>1 cf\<^sub>1' where [simp]:"s\<^sub>1 = cf\<^sub>1#cfx\<^sub>1#cfs\<^sub>1"
and "s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1),snd cfx\<^sub>1)#cfs\<^sub>1"
[PROOF STATE]
proof (prove)
using this:
transfer (kind a) s\<^sub>1 = s\<^sub>1'
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
s\<^sub>1 \<noteq> []
s\<^sub>1' \<noteq> []
goal (1 subgoal):
1. (\<And>cf\<^sub>1 cfx\<^sub>1 cfs\<^sub>1. \<lbrakk>s\<^sub>1 = cf\<^sub>1 # cfx\<^sub>1 # cfs\<^sub>1; s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1), snd cfx\<^sub>1) # cfs\<^sub>1\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases s\<^sub>1,auto,case_tac list,fastforce+)
[PROOF STATE]
proof (state)
this:
s\<^sub>1 = cf\<^sub>1 # cfx\<^sub>1 # cfs\<^sub>1
s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1), snd cfx\<^sub>1) # cfs\<^sub>1
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>s\<^sub>2 \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
s\<^sub>2 \<noteq> []
[PROOF STEP]
obtain cf\<^sub>2 cfs\<^sub>2 where [simp]:"s\<^sub>2 = cf\<^sub>2#cfs\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
s\<^sub>2 \<noteq> []
goal (1 subgoal):
1. (\<And>cf\<^sub>2 cfs\<^sub>2. s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(cases s\<^sub>2) auto
[PROOF STATE]
proof (state)
this:
s\<^sub>2 = cf\<^sub>2 # cfs\<^sub>2
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>length ms\<^sub>1 = length s\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
length ms\<^sub>1 = length s\<^sub>1
[PROOF STEP]
have "ms\<^sub>1 \<noteq> []" and "tl ms\<^sub>1 \<noteq> []"
[PROOF STATE]
proof (prove)
using this:
length ms\<^sub>1 = length s\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1 \<noteq> [] &&& tl ms\<^sub>1 \<noteq> []
[PROOF STEP]
by(cases ms\<^sub>1,auto)+
[PROOF STATE]
proof (state)
this:
ms\<^sub>1 \<noteq> []
tl ms\<^sub>1 \<noteq> []
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>valid_edge a\<close> \<open>kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
[PROOF STEP]
obtain a' Q' r' fs' where "valid_edge a'" and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'"
and "a \<in> get_return_edges a'"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
goal (1 subgoal):
1. (\<And>a' Q' r' fs'. \<lbrakk>valid_edge a'; kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'; a \<in> get_return_edges a'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by -(drule return_needs_call,auto)
[PROOF STATE]
proof (state)
this:
valid_edge a'
kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'
a \<in> get_return_edges a'
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
valid_edge a'
kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'
a \<in> get_return_edges a'
[PROOF STEP]
obtain ins outs where "(p,ins,outs) \<in> set procs"
[PROOF STATE]
proof (prove)
using this:
valid_edge a'
kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'
a \<in> get_return_edges a'
goal (1 subgoal):
1. (\<And>ins outs. (p, ins, outs) \<in> set procs \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce dest!:callee_in_procs)
[PROOF STATE]
proof (state)
this:
(p, ins, outs) \<in> set procs
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>valid_edge a\<close> \<open>kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
(p, ins, outs) \<in> set procs
[PROOF STEP]
have "f' (fst cf\<^sub>1) (fst cfx\<^sub>1) =
(fst cfx\<^sub>1)(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'
(p, ins, outs) \<in> set procs
goal (1 subgoal):
1. f' (fst cf\<^sub>1) (fst cfx\<^sub>1) = fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs)
[PROOF STEP]
by(rule CFG_return_edge_fun)
[PROOF STATE]
proof (state)
this:
f' (fst cf\<^sub>1) (fst cfx\<^sub>1) = fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs)
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1),snd cfx\<^sub>1)#cfs\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1), snd cfx\<^sub>1) # cfs\<^sub>1
f' (fst cf\<^sub>1) (fst cfx\<^sub>1) = fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs)
[PROOF STEP]
have [simp]:"s\<^sub>1' = ((fst cfx\<^sub>1)
(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs),snd cfx\<^sub>1)#cfs\<^sub>1"
[PROOF STATE]
proof (prove)
using this:
s\<^sub>1' = (f' (fst cf\<^sub>1) (fst cfx\<^sub>1), snd cfx\<^sub>1) # cfs\<^sub>1
f' (fst cf\<^sub>1) (fst cfx\<^sub>1) = fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs)
goal (1 subgoal):
1. s\<^sub>1' = (fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs), snd cfx\<^sub>1) # cfs\<^sub>1
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
s\<^sub>1' = (fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs), snd cfx\<^sub>1) # cfs\<^sub>1
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>m\<in>set ms\<^sub>1. valid_node m\<close> \<open>ms\<^sub>1' = tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1) valid_node
ms\<^sub>1' = tl ms\<^sub>1
[PROOF STEP]
have "\<forall>m\<in>set ms\<^sub>1'. valid_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1) valid_node
ms\<^sub>1' = tl ms\<^sub>1
goal (1 subgoal):
1. Ball (set ms\<^sub>1') valid_node
[PROOF STEP]
by(cases ms\<^sub>1) auto
[PROOF STATE]
proof (state)
this:
Ball (set ms\<^sub>1') valid_node
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>length ms\<^sub>1 = length s\<^sub>1\<close> \<open>ms\<^sub>1' = tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
length ms\<^sub>1 = length s\<^sub>1
ms\<^sub>1' = tl ms\<^sub>1
[PROOF STEP]
have "length ms\<^sub>1' = length s\<^sub>1'"
[PROOF STATE]
proof (prove)
using this:
length ms\<^sub>1 = length s\<^sub>1
ms\<^sub>1' = tl ms\<^sub>1
goal (1 subgoal):
1. length ms\<^sub>1' = length s\<^sub>1'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length ms\<^sub>1' = length s\<^sub>1'
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>m\<in>set (tl ms\<^sub>1). return_node m\<close> \<open>ms\<^sub>1' = tl ms\<^sub>1\<close> \<open>ms\<^sub>1 \<noteq> []\<close> \<open>tl ms\<^sub>1 \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set (tl ms\<^sub>1)) return_node
ms\<^sub>1' = tl ms\<^sub>1
ms\<^sub>1 \<noteq> []
tl ms\<^sub>1 \<noteq> []
[PROOF STEP]
have "\<forall>m\<in>set (tl ms\<^sub>1'). return_node m"
[PROOF STATE]
proof (prove)
using this:
Ball (set (tl ms\<^sub>1)) return_node
ms\<^sub>1' = tl ms\<^sub>1
ms\<^sub>1 \<noteq> []
tl ms\<^sub>1 \<noteq> []
goal (1 subgoal):
1. Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
by(cases ms\<^sub>1)(auto,cases ms\<^sub>1',auto)
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from obs_eq[OF this]
[PROOF STATE]
proof (chain)
picking this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
have "obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. \<And>a s s' Q p f' ms S ms'. \<lbrakk>pred (kind a) s; transfer (kind a) s = s'; valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f'; \<exists>m\<in>set (tl ms). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms)) return_node; length ms = length s; length s = Suc (length s'); s' \<noteq> []; hd ms = sourcenode a; hd (tl ms) = targetnode a; ms' = tl ms; Ball (set ms) valid_node; Ball (set ms\<^sub>2) valid_node; length ms = length s; length ms\<^sub>2 = length s\<^sub>2; s \<noteq> []; s\<^sub>2 \<noteq> []; ms = msx @ mx # tl ms\<^sub>2; get_proc mx = get_proc (hd ms\<^sub>2); \<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>); Ball (set (tl ms)) return_node; \<forall>i<length ms\<^sub>2. snd (s ! (length msx + i)) = snd (s\<^sub>2 ! i); \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s ! (length msx + i)) V = fst (s\<^sub>2 ! i) V; obs ms \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>; Ball (set (tl ms')) return_node \<Longrightarrow> obs ms' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> ((ms', s'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
proof(cases msx)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
msx = []
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
[PROOF STEP]
have "mx = sourcenode a" and "tl ms\<^sub>1 = tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = []
goal (1 subgoal):
1. mx = sourcenode a &&& tl ms\<^sub>1 = tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx = sourcenode a
tl ms\<^sub>1 = tl ms\<^sub>2
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
mx = sourcenode a
tl ms\<^sub>1 = tl ms\<^sub>2
[PROOF STEP]
have False
[PROOF STATE]
proof (prove)
using this:
\<exists>m\<in>set (tl ms\<^sub>1). \<exists>m'. call_of_return_node m m' \<and> m' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
mx = sourcenode a
tl ms\<^sub>1 = tl ms\<^sub>2
goal (1 subgoal):
1. False
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
False
goal (2 subgoals):
1. msx = [] \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
2. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
False
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
case (Cons mx' msx')
[PROOF STATE]
proof (state)
this:
msx = mx' # msx'
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>hd ms\<^sub>1 = sourcenode a\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
[PROOF STEP]
have [simp]:"mx' = sourcenode a" and [simp]:"tl ms\<^sub>1 = msx'@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
hd ms\<^sub>1 = sourcenode a
msx = mx' # msx'
goal (1 subgoal):
1. mx' = sourcenode a &&& tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
mx' = sourcenode a
tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>ms\<^sub>1' = tl ms\<^sub>1\<close>
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1' = tl ms\<^sub>1
[PROOF STEP]
have "ms\<^sub>1' = msx'@mx#tl ms\<^sub>2"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1' = tl ms\<^sub>1
goal (1 subgoal):
1. ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>ms\<^sub>1 = msx@mx#tl ms\<^sub>2\<close> \<open>\<forall>m\<in>set (tl ms\<^sub>1). return_node m\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
Ball (set (tl ms\<^sub>1)) return_node
msx = mx' # msx'
ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
[PROOF STEP]
have "\<forall>m\<in>set (tl ms\<^sub>1'). return_node m"
[PROOF STATE]
proof (prove)
using this:
ms\<^sub>1 = msx @ mx # tl ms\<^sub>2
Ball (set (tl ms\<^sub>1)) return_node
msx = mx' # msx'
ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
goal (1 subgoal):
1. Ball (set (tl ms\<^sub>1')) return_node
[PROOF STEP]
by(cases msx') auto
[PROOF STATE]
proof (state)
this:
Ball (set (tl ms\<^sub>1')) return_node
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1 ! (length msx + i)) = snd (s\<^sub>2 ! i)
msx = mx' # msx'
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)
[PROOF STEP]
by auto(case_tac i,auto,cases msx',auto)
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
from \<open>\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)).
(fst (s\<^sub>1 ! (length msx + i))) V = (fst (s\<^sub>2 ! i)) V\<close>
\<open>length ms\<^sub>2 = length s\<^sub>2\<close> \<open>s\<^sub>2 \<noteq> []\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>2 \<noteq> []
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
length ms\<^sub>2 = length s\<^sub>2
s\<^sub>2 \<noteq> []
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1' ! length msx')) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
proof(cases msx')
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
msx' = []
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
with \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close>
\<open>msx = mx'#msx'\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
msx' = []
[PROOF STEP]
have rv:"\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1 V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
msx' = []
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1 V = fst cf\<^sub>2 V
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1 V = fst cf\<^sub>2 V
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
from Nil \<open>tl ms\<^sub>1 = msx'@mx#tl ms\<^sub>2\<close> \<open>hd (tl ms\<^sub>1) = targetnode a\<close>
[PROOF STATE]
proof (chain)
picking this:
msx' = []
tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
hd (tl ms\<^sub>1) = targetnode a
[PROOF STEP]
have [simp]:"mx = targetnode a"
[PROOF STATE]
proof (prove)
using this:
msx' = []
tl ms\<^sub>1 = msx' @ mx # tl ms\<^sub>2
hd (tl ms\<^sub>1) = targetnode a
goal (1 subgoal):
1. mx = targetnode a
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
mx = targetnode a
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
from Cons
\<open>msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)\<close>
[PROOF STATE]
proof (chain)
picking this:
msx = mx' # msx'
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
[PROOF STEP]
obtain mx'' where "call_of_return_node mx mx''" and "mx'' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
msx = mx' # msx'
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
goal (1 subgoal):
1. (\<And>mx''. \<lbrakk>call_of_return_node mx mx''; mx'' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
call_of_return_node mx mx''
mx'' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
hence "mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>"
[PROOF STATE]
proof (prove)
using this:
call_of_return_node mx mx''
mx'' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
by(rule call_node_notin_slice_return_node_neither)
[PROOF STATE]
proof (state)
this:
mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx).
(fst cfx\<^sub>1)(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node mx) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
fix V
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node mx) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
assume "V\<in>rv S (CFG_node mx)"
[PROOF STATE]
proof (state)
this:
V \<in> rv S (CFG_node mx)
goal (1 subgoal):
1. \<And>V. V \<in> rv S (CFG_node mx) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
show "(fst cfx\<^sub>1)(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V =
fst cf\<^sub>2 V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
proof(cases "V \<in> set (ParamDefs (targetnode a))")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
V \<in> set (ParamDefs (targetnode a))
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
V \<in> set (ParamDefs (targetnode a))
[PROOF STEP]
have "V \<in> Def (targetnode a)"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
V \<in> set (ParamDefs (targetnode a))
goal (1 subgoal):
1. V \<in> Def (targetnode a)
[PROOF STEP]
by(fastforce intro:ParamDefs_in_Def)
[PROOF STATE]
proof (state)
this:
V \<in> Def (targetnode a)
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>valid_edge a\<close>
[PROOF STATE]
proof (chain)
picking this:
valid_edge a
V \<in> Def (targetnode a)
[PROOF STEP]
have "V \<in> Def\<^bsub>SDG\<^esub> (CFG_node (targetnode a))"
[PROOF STATE]
proof (prove)
using this:
valid_edge a
V \<in> Def (targetnode a)
goal (1 subgoal):
1. V \<in> Def\<^bsub>SDG\<^esub> CFG_node (targetnode a)
[PROOF STEP]
by(auto intro!:CFG_Def_SDG_Def)
[PROOF STATE]
proof (state)
this:
V \<in> Def\<^bsub>SDG\<^esub> CFG_node (targetnode a)
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
from \<open>V\<in>rv S (CFG_node mx)\<close>
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node mx)
[PROOF STEP]
obtain as n'
where "targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
and "n' \<in> HRB_slice S" "V \<in> Use\<^bsub>SDG\<^esub> n'"
and "\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as)
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''"
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node mx)
goal (1 subgoal):
1. (\<And>as n'. \<lbrakk>targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'; n' \<in> HRB_slice S; V \<in> Use\<^bsub>SDG\<^esub> n'; \<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(fastforce elim:rvE)
[PROOF STATE]
proof (state)
this:
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
V \<in> Use\<^bsub>SDG\<^esub> n'
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
from \<open>targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'\<close> \<open>n' \<in> HRB_slice S\<close>
\<open>mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
[PROOF STATE]
proof (chain)
picking this:
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
[PROOF STEP]
obtain ax asx where "as = ax#asx"
[PROOF STATE]
proof (prove)
using this:
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
n' \<in> HRB_slice S
mx \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
goal (1 subgoal):
1. (\<And>ax asx. as = ax # asx \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(auto simp:intra_path_def)(erule path.cases,
auto dest:valid_SDG_node_in_slice_parent_node_in_slice
simp:SDG_to_CFG_set_def)
[PROOF STATE]
proof (state)
this:
as = ax # asx
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'\<close>
[PROOF STATE]
proof (chain)
picking this:
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
as = ax # asx
[PROOF STEP]
have "targetnode a = sourcenode ax" and "valid_edge ax"
[PROOF STATE]
proof (prove)
using this:
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n'
as = ax # asx
goal (1 subgoal):
1. targetnode a = sourcenode ax &&& valid_edge ax
[PROOF STEP]
by(auto elim:path.cases simp:intra_path_def)
[PROOF STATE]
proof (state)
this:
targetnode a = sourcenode ax
valid_edge ax
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as)
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''\<close> \<open>as = ax#asx\<close> \<open>V \<in> Def\<^bsub>SDG\<^esub> (CFG_node (targetnode a))\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
as = ax # asx
V \<in> Def\<^bsub>SDG\<^esub> CFG_node (targetnode a)
targetnode a = sourcenode ax
valid_edge ax
[PROOF STEP]
have False
[PROOF STATE]
proof (prove)
using this:
\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes as) \<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n''
as = ax # asx
V \<in> Def\<^bsub>SDG\<^esub> CFG_node (targetnode a)
targetnode a = sourcenode ax
valid_edge ax
goal (1 subgoal):
1. False
[PROOF STEP]
by(fastforce simp:sourcenodes_def)
[PROOF STATE]
proof (state)
this:
False
goal (2 subgoals):
1. V \<in> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
2. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
False
goal (1 subgoal):
1. fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
goal (1 subgoal):
1. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
V \<notin> set (ParamDefs (targetnode a))
goal (1 subgoal):
1. V \<notin> set (ParamDefs (targetnode a)) \<Longrightarrow> fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
with \<open>V\<in>rv S (CFG_node mx)\<close> rv
[PROOF STATE]
proof (chain)
picking this:
V \<in> rv S (CFG_node mx)
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1 V = fst cf\<^sub>2 V
V \<notin> set (ParamDefs (targetnode a))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
V \<in> rv S (CFG_node mx)
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1 V = fst cf\<^sub>2 V
V \<notin> set (ParamDefs (targetnode a))
goal (1 subgoal):
1. fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
by(fastforce dest:fun_upds_notin[of _ _ "fst cfx\<^sub>1"])
[PROOF STATE]
proof (state)
this:
fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
goal (2 subgoals):
1. msx' = [] \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
2. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
with Nil \<open>msx = mx'#msx'\<close>
[PROOF STATE]
proof (chain)
picking this:
msx' = []
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
msx' = []
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst cfx\<^sub>1(ParamDefs (targetnode a) [:=] map (fst cf\<^sub>1) outs) V = fst cf\<^sub>2 V
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
case Cons
[PROOF STATE]
proof (state)
this:
msx' = a_ # list_
goal (1 subgoal):
1. \<And>a list. msx' = a # list \<Longrightarrow> \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
with \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close>
\<open>msx = mx'#msx'\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
msx' = a_ # list_
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
msx' = a_ # list_
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1 ! length msx)) V = state_val s\<^sub>2 V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>V\<in>rv S (CFG_node mx). (fst (s\<^sub>1' ! length msx')) V = state_val s\<^sub>2 V"
[PROOF STATE]
proof (prove)
using this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1 ! length msx) V = state_val s\<^sub>2 V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
by(cases msx') auto
[PROOF STATE]
proof (state)
this:
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>i < length ms\<^sub>2. \<forall>V \<in> rv S (CFG_node ((mx#tl ms\<^sub>2)!i)).
(fst (s\<^sub>1!(length msx + i))) V = (fst (s\<^sub>2!i)) V\<close> Cons
[PROOF STATE]
proof (chain)
picking this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
[PROOF STEP]
have "\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)).
(fst (s\<^sub>1' ! (length msx' + i))) V = (fst (s\<^sub>2 ! i)) V"
[PROOF STATE]
proof (prove)
using this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1 ! (length msx + i)) V = fst (s\<^sub>2 ! i) V
msx = mx' # msx'
\<forall>V\<in>rv S (CFG_node mx). fst (s\<^sub>1' ! length msx') V = state_val s\<^sub>2 V
goal (1 subgoal):
1. \<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length msx' + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
by clarsimp(case_tac i,auto)
[PROOF STATE]
proof (state)
this:
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length msx' + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. \<And>a list. msx = a # list \<Longrightarrow> ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
with \<open>\<forall>m\<in>set ms\<^sub>1'. valid_node m\<close> \<open>\<forall>m\<in>set ms\<^sub>2. valid_node m\<close>
\<open>length ms\<^sub>1' = length s\<^sub>1'\<close> \<open>length ms\<^sub>2 = length s\<^sub>2\<close>
\<open>ms\<^sub>1' = msx'@mx#tl ms\<^sub>2\<close> \<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>1'). return_node m\<close> Cons \<open>get_proc mx = get_proc (hd ms\<^sub>2)\<close>
\<open>\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>\<close>
\<open>\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)\<close>
[PROOF STATE]
proof (chain)
picking this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1')) return_node
msx = mx' # msx'
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length msx' + i)) V = fst (s\<^sub>2 ! i) V
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ball (set ms\<^sub>1') valid_node
Ball (set ms\<^sub>2) valid_node
length ms\<^sub>1' = length s\<^sub>1'
length ms\<^sub>2 = length s\<^sub>2
ms\<^sub>1' = msx' @ mx # tl ms\<^sub>2
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
msx \<noteq> [] \<longrightarrow> (\<exists>mx'. call_of_return_node mx mx' \<and> mx' \<notin> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>)
Ball (set (tl ms\<^sub>1')) return_node
msx = mx' # msx'
get_proc mx = get_proc (hd ms\<^sub>2)
\<forall>m\<in>set (tl ms\<^sub>2). \<exists>m'. call_of_return_node m m' \<and> m' \<in> \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
obs ms\<^sub>1' \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub> = obs ms\<^sub>2 \<lfloor>HRB_slice S\<rfloor>\<^bsub>CFG\<^esub>
\<forall>i<length ms\<^sub>2. snd (s\<^sub>1' ! (length msx' + i)) = snd (s\<^sub>2 ! i)
\<forall>i<length ms\<^sub>2. \<forall>V\<in>rv S (CFG_node ((mx # tl ms\<^sub>2) ! i)). fst (s\<^sub>1' ! (length msx' + i)) V = fst (s\<^sub>2 ! i) V
goal (1 subgoal):
1. ((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
[PROOF STEP]
by(auto intro!:WSI)
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
((ms\<^sub>1', s\<^sub>1'), ms\<^sub>2, s\<^sub>2) \<in> WS S
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 121726, "file": "HRB-Slicing_StaticInter_WeakSimulation", "length": 439}
|
"""
Fourier Reconstruction of RR-Lyrae Templates
--------------------------------------------
This figure demonstrates Fourier decomposition using RR-Lyrae templates
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_rrlyrae_templates
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Load the RR Lyrae template
templates = fetch_rrlyrae_templates()
x, y = templates['115r'].T
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0)
kvals = [1, 3, 8]
subplots = [311, 312, 313]
for (k, subplot) in zip(kvals, subplots):
ax = fig.add_subplot(subplot)
# Use FFT to fit a truncated Fourier series
y_fft = np.fft.fft(y)
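# np.fft.fft places the DC term at index 0, the k lowest positive frequencies at
# indices 1..k and their complex conjugates in the last k slots, so zeroing
# y_fft[k + 1:-k] keeps exactly k harmonics and the inverse transform is (numerically) real.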
y_fft[k + 1:-k] = 0
y_fit = np.fft.ifft(y_fft).real
# plot the true value and the k-term reconstruction
ax.plot(np.concatenate([x, 1 + x]),
np.concatenate([y, y]), '--k', lw=2)
ax.plot(np.concatenate([x, 1 + x]),
np.concatenate([y_fit, y_fit]), color='gray')
label = "%i mode" % k
if k > 1:
label += 's'
ax.text(0.02, 0.1, label, ha='left', va='bottom',
transform=ax.transAxes)
if subplot == subplots[-1]:
ax.set_xlabel('phase')
else:
ax.xaxis.set_major_formatter(plt.NullFormatter())
if subplot == subplots[1]:
ax.set_ylabel('amplitude')
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlim(0, 2)
ax.set_ylim(1.1, -0.1)
plt.show()
|
{"hexsha": "d25d5d1b639ddf47bee65ea99d2b1306982647f2", "size": 2324, "ext": "py", "lang": "Python", "max_stars_repo_path": "book_figures/chapter10/fig_rrlyrae_reconstruct.py", "max_stars_repo_name": "larsmans/astroML", "max_stars_repo_head_hexsha": "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-03T12:22:22.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-03T12:22:22.000Z", "max_issues_repo_path": "book_figures/chapter10/fig_rrlyrae_reconstruct.py", "max_issues_repo_name": "larsmans/astroML", "max_issues_repo_head_hexsha": "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "book_figures/chapter10/fig_rrlyrae_reconstruct.py", "max_forks_repo_name": "larsmans/astroML", "max_forks_repo_head_hexsha": "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-03T12:22:24.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-03T12:22:24.000Z", "avg_line_length": 31.8356164384, "max_line_length": 79, "alphanum_fraction": 0.6101549053, "include": true, "reason": "import numpy", "num_tokens": 587}
|
import os
import sys
import numpy as np
import pandas as pd
from pprint import pprint
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils import model_methods
from utils.data_utils import *
from utils.arguments import Arguments
from utils.mappings import Mappings, Labellers
from utils.samplers import SeqSamplerDataset
from va_transformers.va_transformers import TransformerWrapper, Decoder
from va_transformers.finetuning_wrapper import FinetuningWrapper
def main(args):
print('*' * 17, f'va-transformer summoned for {args.mode} with the following settings:', sep='\n')
pprint(vars(args), indent=2)
print('*' * 17)
# paths
d_items_path = os.path.join(args.data_root, "D_LABITEMS.csv")
train_path = os.path.join(args.data_root, "train_data.pkl")
val_path = os.path.join(args.data_root, "val_data.pkl")
mapping_path = os.path.join(args.data_root, "mappings.pkl")
ckpt_path = os.path.join(args.save_root, args.model_name + ".pt")
logs_path = os.path.join(args.logs_root, args.model_name)
train_tgt_path = os.path.join(args.data_root, "train_targets.pkl")
val_tgt_path = os.path.join(args.data_root, "val_targets.pkl")
params_path = os.path.join(args.model_root, args.pretrained_model)
# device
device = torch.device(args.device)
# fetch mappings
mappings_dict = fetch_mappings(mapping_path)
len_t_dict = len(mappings_dict['itemid2token'])
len_q_dict = len(mappings_dict['qname2qtoken'])
pad_token = args.pad_token
pad_quant_token = args.pad_quant_token if args.with_values else None
sos_token = sos_quant_token = eos_token = eos_quant_token = None
if args.specials == 'SOS':
sos_token = len_t_dict
sos_quant_token = len_q_dict if args.with_values else None
elif args.specials == 'EOS':
eos_token = len_t_dict
eos_quant_token = len_q_dict if args.with_values else None
elif args.specials == 'both':
sos_token = len_t_dict
sos_quant_token = len_q_dict if args.with_values else None
eos_token = len_t_dict + 1
eos_quant_token = (len_q_dict + 1) if args.with_values else None
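# Special tokens are assigned ids just past the existing vocabularies: [SOS] takes the
# next free token id and, when both specials are requested, [EOS] takes the one after it;
# the quant vocabulary is extended the same way whenever values are used.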
mappings = Mappings(mappings_dict,
pad_token=pad_token,
sos_token=sos_token,
eos_token=eos_token,
pad_quant_token=pad_quant_token,
sos_quant_token=sos_quant_token,
eos_quant_token=eos_quant_token
)
print(f"[PAD] token is {mappings.pad_token}",
f"[SOS] token is {mappings.sos_token}",
f"[EOS] token is {mappings.eos_token}",
f"[PAD] quant token is {mappings.pad_quant_token}",
f"[SOS] quant token is {mappings.sos_quant_token}",
f"[EOS] quant token is {mappings.eos_quant_token}",
sep="\n")
# labellers
d_items_df = pd.read_csv(d_items_path, index_col='ITEMID', dtype={'ITEMID': str})
labeller = Labellers(mappings, d_items_df)
# fetch targets
with open(train_tgt_path, 'rb') as f:
x = pickle.load(f)
train_targets = {k: v[args.targets] for k, v in x['train_targets'].items()}
del x
with open(val_tgt_path, 'rb') as f:
x = pickle.load(f)
val_targets = {k: v[args.targets] for k, v in x['val_targets'].items()}
del x
# get tokens
data_train = fetch_data_as_torch(train_path, 'train_tokens')
data_val = fetch_data_as_torch(val_path, 'val_tokens')
# get quants
if bool(args.with_values):
quants_train = fetch_data_as_torch(train_path, 'train_quants')
quants_val = fetch_data_as_torch(val_path, 'val_quants')
else:
quants_train = None
quants_val = None
train_dataset = SeqSamplerDataset(data_train, args.seq_len, mappings, device,
quants=quants_train, targets=train_targets,
specials=args.specials,
align_sample_at=args.align_sample_at
)
val_dataset = SeqSamplerDataset(data_val, args.seq_len, mappings, device,
quants=quants_val, targets=val_targets,
specials=args.specials,
align_sample_at=args.align_sample_at
)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size_tr, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size_val, shuffle=True)
# for quick test run
if bool(args.toy_run):
train_loader = make_toy_loader(train_loader)
val_loader = make_toy_loader(val_loader)
# weighting with target propensities if necessary
weights = None
if args.clf_or_reg == 'clf':
def propensity_from(targets_dict):
return sum(targets_dict.values()) / len(targets_dict)
p = propensity_from(train_targets)
print(f"Train set positive class propensity is {p}")
if bool(args.weighted_loss):
weights = torch.tensor([p, 1 - p]).to(device)
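# Note (assumption, not stated in this script): the wrapper is expected to apply these as
# per-class loss weights indexed by label, so [p, 1 - p] down-weights the majority class
# and up-weights the rarer one whenever the positive-class propensity p differs from 0.5.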
# fetch model params
pretrained_ckpt = torch.load(params_path, map_location=device)
state_dict = pretrained_ckpt['model_state_dict']
# initialisation of model
model = TransformerWrapper(
num_tokens=mappings.num_tokens,
with_values=bool(args.with_values),
num_quant_tokens=mappings.num_quant_tokens,
max_seq_len=args.seq_len,
attn_layers=Decoder(
dim=args.attn_dim,
depth=args.attn_depth,
heads=args.attn_heads,
attn_dropout=args.attn_dropout,
ff_dropout=args.ff_dropout,
use_rezero=bool(args.use_rezero),
rotary_pos_emb=bool(args.rotary_pos_emb)
),
token_emb_dim=args.token_emb_dim,
quant_emb_dim=args.quant_emb_dim,
logit_head=args.logit_head,
va_transformer=bool(args.va_transformer)
)
# wrap model for finetuning
fit_model = FinetuningWrapper(model,
seq_len=args.seq_len,
load_from=args.load_from,
state_dict=state_dict,
clf_or_reg=args.clf_or_reg,
num_classes=args.num_classes,
clf_style=args.clf_style,
clf_dropout=args.clf_dropout,
clf_depth=args.clf_depth,
weight=weights)
fit_model.to(device)
print("base transformer specification:", fit_model.net, sep="\n")
print("clf specification:", fit_model.clf,
"clf style:", fit_model.clf_style,
sep="\n")
if bool(args.freeze_base):
print("Freezing base transformer parameters...")
for name, param in fit_model.named_parameters():
if 'clf' not in name:
param.requires_grad = False
else:
print("Base transformer parameters remaining unfrozen...")
if args.mode == "finetuning":
# initialise optimiser
optimizer = torch.optim.Adam(fit_model.parameters(), lr=args.learning_rate)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.scheduler_decay)
writer = SummaryWriter(log_dir=logs_path, flush_secs=args.writer_flush_secs)
training = model_methods.FinetuningMethods(fit_model, writer, clf_or_reg=args.clf_or_reg)
# write initial embeddings
if bool(args.write_initial_embeddings):
training.write_embeddings(-1, mappings, labeller, args.seq_len, device)
# training loop
best_val_loss = np.inf
early_stopping_counter = 0
for epoch in range(args.num_epochs):
# training and evaluation
training.train(train_loader, optimizer, epoch, grad_accum_every=args.grad_accum_every)
val_loss = training.evaluate(val_loader, epoch)
# tracking model classification metrics
if bool(args.predict_on_train):
training.predict(train_loader, epoch, device, prefix="train")
_, _, metrics = training.predict(val_loader, epoch, device, prefix="val")
# whether to checkpoint model
if val_loss < best_val_loss:
print("Saving checkpoint because best val_loss attained...")
torch.save({
'epoch': epoch,
'val_loss': val_loss,
'metrics': metrics,
'args': vars(args),
'model_state_dict': fit_model.state_dict(),
'optim_state_dict': optimizer.state_dict()
}, ckpt_path)
# track checkpoint's embeddings
if bool(args.write_best_val_embeddings):
training.write_embeddings(epoch, mappings, labeller, args.seq_len, device)
print("Checkpoint saved!\n")
best_val_loss = min(val_loss, best_val_loss)
early_stopping_counter = 0
else:
early_stopping_counter += 1
if early_stopping_counter == args.early_stopping_threshold:
print('early stopping threshold hit! ending training...')
break
scheduler.step()
# flushing writer
print(f'epoch {epoch} completed!', '\n')
print('flushing writer...')
writer.flush()
# write final embeddings
if bool(args.write_final_embeddings):
training.write_embeddings(args.num_epochs, mappings, labeller, args.seq_len, device)
writer.close()
print("training finished and writer closed!")
if bool(args.WARNING_TESTING):
print("\nWARNING TEST set in use!\n")
# load test set data
test_path = os.path.join(args.data_root, "test_data.pkl")
test_tgt_path = os.path.join(args.data_root, "test_targets.pkl")
data_test = fetch_data_as_torch(test_path, 'test_tokens')
if bool(args.with_values):
quants_test = fetch_data_as_torch(test_path, 'test_quants')
else:
quants_test = None
with open(test_tgt_path, 'rb') as f:
x = pickle.load(f)
test_targets = {k: v[args.targets] for k, v in x['test_targets'].items()}
del x
test_dataset = SeqSamplerDataset(data_test, args.seq_len, mappings, device,
targets=test_targets,
quants=quants_test,
specials=args.specials,
align_sample_at=args.align_sample_at)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size_tr, shuffle=True)
if bool(args.toy_run):
test_loader = make_toy_loader(test_loader)
# test the model at the checkpoint
params_path = os.path.join(args.model_root, args.model_name + '.pt')
print(f"loading state dict from checkpoint at {params_path}...")
checkpoint = torch.load(params_path, map_location=device)
states = checkpoint['model_state_dict']
fit_model.load_state_dict(states)
print(f"checkpoint loaded!")
writer = SummaryWriter(log_dir=logs_path, flush_secs=args.writer_flush_secs)
testing = model_methods.FinetuningMethods(fit_model, writer=writer, clf_or_reg=args.clf_or_reg)
# test the model...
val_losses = testing.evaluate(val_loader, epoch=0, prefix='re-val')
_, _, val_metrics = testing.predict(val_loader, epoch=0, device=device, prefix="re-val")
test_losses = testing.evaluate(test_loader, epoch=0, prefix='test')
_, _, test_metrics = testing.predict(test_loader, epoch=0, device=device, prefix="test")
# write results to auxiliary logs file for convenience
print("writing finetuning logs to central csv for convenience!")
central_logs_name = f'finetuning_{args.targets}_logs.csv'
central_logs_path = os.path.join(args.logs_root, central_logs_name)
if not os.path.isfile(central_logs_path):
with open(central_logs_path, 'w') as f:
if args.clf_or_reg == "clf":
f.write(f"model_name,pretrained_model,"
f"val_loss,test_loss,bal_acc_val,bal_acc_tst,roc_val,roc_tst\n")
else:
f.write(f"model_name,pretrained_model,"
f"val_loss,test_loss,mse_val,mse_tst,r2_val,r2_tst\n")
with open(central_logs_path, 'a') as f:
if args.clf_or_reg == "clf":
f.write(f"{args.model_name},{args.pretrained_model},{val_losses:.4f},{test_losses:.4f}"
f",{val_metrics['bal_acc']:.4f},{test_metrics['bal_acc']:.4f}"
f",{val_metrics['roc_auc']:.4f},{test_metrics['roc_auc']:.4f}\n")
else:
f.write(f"{args.model_name},{args.pretrained_model},{val_losses:.4f},{test_losses:.4f},"
f"{val_metrics['mse']:.4f},{test_metrics['mse']:.4f},"
f"{val_metrics['r2']:.4f},{test_metrics['r2']:.4f}\n")
print(f"metrics written to {central_logs_path}")
if __name__ == "__main__":
arguments = Arguments(mode='finetuning').parse()
# check output roots exist; if not, create...
if not os.path.exists(arguments.save_root):
os.mkdir(arguments.save_root)
if not os.path.exists(arguments.logs_root):
os.mkdir(arguments.logs_root)
# check that arguments are well-specified
if arguments.clf_or_reg == 'reg':
assert arguments.num_classes == 1, "if doing regression, num_classes for the clf_head must be 1!"
# run finetuning
print(f"mode is {arguments.mode}")
main(arguments)
|
{"hexsha": "6f573ac0cce8e0f857d4eb5e6f8f55851bbfb03d", "size": 14133, "ext": "py", "lang": "Python", "max_stars_repo_path": "finetuning.py", "max_stars_repo_name": "jfcann/va-transformer", "max_stars_repo_head_hexsha": "bbf04612770c95d38915f41045cf9f9acb5dad21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "finetuning.py", "max_issues_repo_name": "jfcann/va-transformer", "max_issues_repo_head_hexsha": "bbf04612770c95d38915f41045cf9f9acb5dad21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "finetuning.py", "max_forks_repo_name": "jfcann/va-transformer", "max_forks_repo_head_hexsha": "bbf04612770c95d38915f41045cf9f9acb5dad21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9338842975, "max_line_length": 105, "alphanum_fraction": 0.6161466072, "include": true, "reason": "import numpy", "num_tokens": 2996}
|
# -*-coding: utf-8-*-
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from bayes_opt import BayesianOptimization
import numpy as np
def parm_format(parms, intdeal, middledeal, maxdeal):
'''
Normalize the format of model parameters: intdeal lists the integer-valued parameters,
middledeal lists float parameters constrained to [0, 1], and maxdeal lists float
parameters that may be greater than 1.
For example, for lightgbm:
# intdeal = ['max_bin','max_depth','max_drop','min_child_samples',
# 'min_child_weight','n_estimators','num_leaves','scale_pos_weight',
# 'subsample_for_bin','subsample_freq'] # integer parameters
# middledeal = ['colsample_bytree','drop_rate','learning_rate',
# 'min_split_gain','skip_drop','subsample',''] # float, restricted to [0, 1]
# maxdeal = ['reg_alpha','reg_lambda','sigmoid'] # float, may be greater than 1
'''
for k in parms:
if k in intdeal:
parms[k] = int(parms[k])
elif k in maxdeal:
parms[k] = max(parms[k], 0)
elif k in middledeal:
parms[k] = max(min(parms[k], 1), 0)
return parms
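# Minimal usage sketch (hypothetical values, not part of the original module):
# parm_format({'max_depth': 5.7, 'learning_rate': 1.3, 'reg_alpha': -0.2},
#             intdeal=['max_depth'], middledeal=['learning_rate'], maxdeal=['reg_alpha'])
# -> {'max_depth': 5, 'learning_rate': 1, 'reg_alpha': 0}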
class bayes_ops(object):
'''
Object that implements Bayesian optimization using the BayesianOptimization package
(https://github.com/fmfn/BayesianOptimization).
'''
def __init__(self, estimator, param_grid, cv, intdeal, middledeal, maxdeal, score_func,baseparms={}, num_iter=100, init_points=15, n_iter=25, acq='ucb', kappa=2.576, xi=0.0, gp_params={"alpha": 1e-5, "n_restarts_optimizer": 2}, others={}):
'''
The estimator class must provide a scikit-learn style fit method.
'''
self.estimator = estimator
self.baseparms = baseparms
self.parms = param_grid
self.cv = cv
self.intdeal = intdeal
self.middledeal = middledeal
self.maxdeal = maxdeal
self.score_func = score_func
self.num_iter = num_iter
self.init_points = init_points
self.acq = acq
self.kappa = kappa
self.xi = xi
self.gp_params = gp_params
self.others = others
def est_eval(self, **parms):
parms = parm_format(parms, self.intdeal, self.middledeal, self.maxdeal)
for p in self.baseparms:
parms[p] = self.baseparms[p]
if len(self.others)>0:
for f in self.others:
parms[f] = self.others[f]
estmr = self.estimator(**parms)
score = cross_val_score(estmr, X=self.X, y=self.Y, scoring=self.score_func, cv=self.cv, verbose=0, pre_dispatch=1)
return np.array(score).mean()
def run(self, X,Y):
self.X = X
self.Y = Y
self.estmrBO = BayesianOptimization(self.est_eval,
self.parms
)
self.estmrBO.maximize(init_points=self.init_points, n_iter=self.num_iter, acq=self.acq, kappa=self.kappa, xi=self.xi,**self.gp_params)
self.baseparms = parm_format(self.estmrBO.max['params'], self.intdeal, self.middledeal, self.maxdeal)
if len(self.others)>0:
for f in self.others:
self.baseparms[f] = self.others[f]
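# Hedged usage sketch (parameter bounds and scorer are illustrative, not from this file):
# opt = bayes_ops(estimator=LGBMClassifier,
#                 param_grid={'learning_rate': (0.01, 0.3), 'num_leaves': (15, 255)},
#                 cv=3, intdeal=['num_leaves'], middledeal=['learning_rate'],
#                 maxdeal=[], score_func='roc_auc')
# opt.run(X, Y)
# best_params = opt.baseparms   # formatted best parameters found by the optimizer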
|
{"hexsha": "74122080abbd045eeb2d4286a6c790d0499ce1c5", "size": 3111, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/optimize.py", "max_stars_repo_name": "kaiwang0112006/project_demo", "max_stars_repo_head_hexsha": "4067d245be5139aec236adf2179b5880df507cf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/optimize.py", "max_issues_repo_name": "kaiwang0112006/project_demo", "max_issues_repo_head_hexsha": "4067d245be5139aec236adf2179b5880df507cf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/optimize.py", "max_forks_repo_name": "kaiwang0112006/project_demo", "max_forks_repo_head_hexsha": "4067d245be5139aec236adf2179b5880df507cf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9342105263, "max_line_length": 243, "alphanum_fraction": 0.612343298, "include": true, "reason": "import numpy", "num_tokens": 828}
|
[STATEMENT]
lemma ground_head: "ground s \<Longrightarrow> is_Sym (head s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ground s \<Longrightarrow> is_Sym (head s)
[PROOF STEP]
by (cases s rule: tm_exhaust_apps) (auto simp: is_Var_def)
|
{"llama_tokens": 92, "file": "Lambda_Free_RPOs_Lambda_Free_Term", "length": 1}
|
__pytorch_version__ = "1.3.0"
import logging
from datetime import timedelta
import numpy as np
import torch
import torch.distributed as dist
from ftlib.commlib.basic_commlib import BasicCommLib
from ftlib.commlib.commlib_status import CommLibStatus
class PyTorch(BasicCommLib):
def __init__(
self, grad_sync_timeout=10, max_try=30, port=12355, backend="gloo"
):
self.type = "pytorch"
self.grad_sync_timeout = grad_sync_timeout
self._max_try = max_try
self._port = port
self._backend = backend
self._timeout = timedelta(minutes=1)
@BasicCommLib.register_api
def grad_sync_done(self, *args, **kwargs):
model = None
if "model" in kwargs.keys():
model = kwargs["model"]
elif len(args) > 0:
model = args[0]
if model is None:
return CommLibStatus.FAIL
try:
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
except Exception as e:
logging.error(str(e))
return CommLibStatus.FAIL
return CommLibStatus.SUCCESS
@BasicCommLib.register_api
def allreduce(self, data, op="MEAN", *args, **kwargs):
data = torch.from_numpy(data) if isinstance(data, np.ndarray) else data
# torch.distributed.ReduceOp has no option for 'MEAN'
# so far, we only implemented 'MEAN'
reduce_op = dist.reduce_op.SUM
dist.all_reduce(data, op=reduce_op)
if op == "MEAN":
size = float(dist.get_world_size())
data /= size
@BasicCommLib.register_api
def broadcast(self, data, root_rank, *args, **kwargs):
data = torch.from_numpy(data) if isinstance(data, np.ndarray) else data
dist.broadcast(data, root_rank)
@BasicCommLib.register_api
def barrier(self, *args, **kwargs):
dist.barrier()
def rebuild(self, rank, size, master_addr):
if dist.is_initialized():
logging.info("aborting communicator")
self.abort_communicator()
assert not dist.is_initialized()
init_method = f"tcp://{master_addr}:{self._port}"
logging.info(
"initializing process group with "
+ f"backend={self._backend}, rank={rank}, world_size={size}, "
+ f"master_port={self._port}, master_addr={master_addr}"
)
dist.init_process_group(
backend=self._backend,
timeout=self._timeout,
world_size=size,
rank=rank,
init_method=init_method,
)
return dist.is_initialized()
def abort_communicator(self):
if dist.is_initialized():
dist.destroy_process_group()
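# Hedged usage sketch (addresses and ranks are illustrative; run once per worker):
# comm = PyTorch(port=12355, backend="gloo")
# comm.rebuild(rank=worker_rank, size=world_size, master_addr="127.0.0.1")
# buf = np.ones(4, dtype=np.float32)
# comm.allreduce(buf, op="MEAN")   # in-place mean of buf across all workers
# comm.abort_communicator()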
|
{"hexsha": "07d1ba8e4c11b4c4b521bad0b92fd3d9c2f47e69", "size": 2861, "ext": "py", "lang": "Python", "max_stars_repo_path": "ftlib/commlib/pytorch/impl.py", "max_stars_repo_name": "terrytangyuan/ftlib", "max_stars_repo_head_hexsha": "7d2862dafe9d338d733300047b03c514d1893201", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ftlib/commlib/pytorch/impl.py", "max_issues_repo_name": "terrytangyuan/ftlib", "max_issues_repo_head_hexsha": "7d2862dafe9d338d733300047b03c514d1893201", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ftlib/commlib/pytorch/impl.py", "max_forks_repo_name": "terrytangyuan/ftlib", "max_forks_repo_head_hexsha": "7d2862dafe9d338d733300047b03c514d1893201", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4395604396, "max_line_length": 79, "alphanum_fraction": 0.6165676337, "include": true, "reason": "import numpy", "num_tokens": 655}
|
import numpy as np
import napari
from .utils import *
# Shift, Control, Alt, Meta, Up, Down, Left, Right, PageUp, PageDown, Insert,
# Delete, Home, End, Escape, Backspace, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
# F11, F12, Space, Enter, Tab
KEYS = {"focus_next": "]",
"focus_previous": "[",
"hide_others": "Control-Shift-A",
"reslice": "/",
"to_front": "Control-Shift-F",
"reset_view": "Control-Shift-R",
}
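# KEYS maps command names (the function names defined below) to napari keyboard
# shortcuts; bind_key looks up each shortcut via the decorated function's __name__.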
# Overwrite napari shortcut
@napari.Viewer.bind_key("Alt-Up", overwrite=True)
def z_up(viewer:"napari.Viewer"):
axes = "".join(viewer.dims.axis_labels)
i = axes.find("z")
if i < 0:
return None
else:
step = list(viewer.dims.current_step)
step[i] = min(step[i]+1, viewer.dims.nsteps[i]-1)
viewer.dims.current_step = step
return None
@napari.Viewer.bind_key("Alt-Down", overwrite=True)
def z_down(viewer:"napari.Viewer"):
axes = "".join(viewer.dims.axis_labels)
i = axes.find("z")
if i < 0:
return None
else:
step = list(viewer.dims.current_step)
step[i] = max(step[i]-1, 0)
viewer.dims.current_step = step
return None
__all__ = list(KEYS.keys())
def bind_key(func):
return napari.Viewer.bind_key(KEYS[func.__name__])(func)
@bind_key
def focus_next(viewer:"napari.Viewer"):
_change_focus(viewer, 1)
return None
@bind_key
def focus_previous(viewer:"napari.Viewer"):
_change_focus(viewer, -1)
return None
def _change_focus(viewer:"napari.Viewer", ind:int):
# assert one Shapes or Points layer is selected
selected_layer = get_a_selected_layer(viewer)
if not isinstance(selected_layer, (napari.layers.Shapes, napari.layers.Points)):
return None
# check if one shape/point is selected
selected_data = list(selected_layer.selected_data)
if len(selected_data) != 1:
return None
selected_data = selected_data[0]
# determine next/previous index/data to select
ndata = len(selected_layer.data)
next_to_select = (selected_data + ind) % ndata
next_data = np.atleast_2d(selected_layer.data[next_to_select])
# update camera
scale = selected_layer.scale
next_center = np.mean(next_data, axis=0) * scale
viewer.dims.current_step = list(next_data[0, :].astype(np.int64))
viewer.camera.center = next_center
zoom = viewer.camera.zoom
viewer.camera.events.zoom() # Here events are emitted and zoom changes automatically.
viewer.camera.zoom = zoom
selected_layer.selected_data = {next_to_select}
selected_layer._set_highlight()
return None
@bind_key
def hide_others(viewer:"napari.Viewer"):
"""
Make selected layers visible and others invisible.
"""
selected = viewer.layers.selection
visibility_old = [layer.visible for layer in viewer.layers]
visibility_new = [layer in selected for layer in viewer.layers]
if visibility_old != visibility_new:
for layer, vis in zip(viewer.layers, visibility_new):
layer.visible = vis
else:
for layer in viewer.layers:
layer.visible = True
@bind_key
def to_front(viewer:"napari.Viewer"):
"""
Move the selected layers to the front.
"""
not_selected_index = [i for i, l in enumerate(viewer.layers)
if l not in viewer.layers.selection]
viewer.layers.move_multiple(not_selected_index, 0)
@bind_key
def reset_view(viewer:"napari.Viewer"):
"""
Reset translate/scale parameters to the initial value.
"""
for layer in viewer.layers.selection:
layer.translate -= (layer.translate - layer.metadata["init_translate"])
layer.scale = layer.metadata["init_scale"]
@bind_key
def reslice(viewer:"napari.Viewer"):
"""
2D Reslice with currently selected lines/paths and images.
"""
if viewer.dims.ndisplay == 3:
viewer.status = "Cannot reslice in 3D mode."
imglist = list(iter_selected_layer(viewer, "Image"))
ndim = np.unique([shape_layer.ndim for shape_layer
in iter_selected_layer(viewer, "Shapes")])
if len(ndim) > 1:
viewer.status = "Cannot crop using Shapes layers with different number of dimensions."
else:
ndim = ndim[0]
if ndim == viewer.dims.ndim == 3:
active_plane = [-3, -2, -1]
else:
active_plane = [-2, -1]
if len(imglist) == 0:
imglist = [front_image(viewer)]
paths = []
for shape_layer in iter_selected_layer(viewer, "Shapes"):
for shape, type_ in zip(shape_layer.data, shape_layer.shape_type):
if type_ in ("line", "path"):
paths.append((shape, shape_layer.scale)) # shape = float pixel
out = []
for path, shape_layer_scale in paths:
for layer in imglist:
factor = layer.scale[active_plane]/shape_layer_scale[active_plane]
dr = layer.translate[active_plane] / layer.scale[active_plane]
out_ = layer.data.reslice(path[:,active_plane]/factor - dr)
out.append(out_)
viewer.window._results.append(out)
return None
|
{"hexsha": "f89b74609a13d5d07a33d234f32d494938fafd39", "size": 5206, "ext": "py", "lang": "Python", "max_stars_repo_path": "impy/viewer/keybinds.py", "max_stars_repo_name": "hanjinliu/impy", "max_stars_repo_head_hexsha": "d35b21be7739c3073ae87486673af68b1cdb2853", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-07-19T13:42:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T13:32:15.000Z", "max_issues_repo_path": "impy/viewer/keybinds.py", "max_issues_repo_name": "hanjinliu/impy", "max_issues_repo_head_hexsha": "d35b21be7739c3073ae87486673af68b1cdb2853", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-10T11:58:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T12:42:47.000Z", "max_forks_repo_path": "impy/viewer/keybinds.py", "max_forks_repo_name": "hanjinliu/impy", "max_forks_repo_head_hexsha": "d35b21be7739c3073ae87486673af68b1cdb2853", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-19T13:34:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T09:51:29.000Z", "avg_line_length": 31.743902439, "max_line_length": 94, "alphanum_fraction": 0.6413753362, "include": true, "reason": "import numpy", "num_tokens": 1309}
|
"""Module with logic for a 1-D signal dataset."""
from typing import Callable, NamedTuple, Union
import matplotlib.pyplot as plt
import numpy as np
import torch
from .fourier_feature_models import FourierFeatureMLP
class SignalData(NamedTuple("FunctionData", [("x", torch.FloatTensor),
("y", torch.FloatTensor)])):
"""1-D Signal data with x and corresponding y values."""
def _get_limits(vals: Union[np.ndarray, torch.Tensor], stretch=1.1):
min_x, max_x = vals.min().item(), vals.max().item()
mid_x = 0.5 * (min_x + max_x)
min_x = mid_x + stretch * (min_x - mid_x)
max_x = mid_x + stretch * (max_x - mid_x)
return min_x, max_x
class SignalDataset:
"""Dataset consisting of 1-d signal data."""
def __init__(self, train_data: SignalData, val_data: SignalData):
"""Constructor.
Args:
train_data (SignalData): The x/y values for training
val_data (SignalData): The x/y values for validation
"""
self.train_x, self.train_y = train_data
self.val_x, self.val_y = val_data
self.x_lim = _get_limits(self.val_x)
self.y_lim = _get_limits(self.val_y)
@staticmethod
def create(signal: Callable[[np.ndarray], np.ndarray],
num_samples: int, sample_rate: int) -> "SignalDataset":
"""Creates a signal data using the provided signal function.
Description:
The signal function should handle values ranging from 0 to 2.
Args:
signal (Callable[[np.ndarray], np.ndarray]): This is a 1D signal
function.
num_samples (int): The number of samples to use for training.
sample_rate (int): The rate at which training samples are taken.
For example, if a rate of 8 is used then every
8th point will be used for training.
Returns:
SignalDataset: The constructed dataset
"""
x = np.linspace(0, 2,
num_samples * sample_rate,
endpoint=False).astype(np.float32)
y = signal(x)
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
train_data = SignalData(torch.from_numpy(x[::sample_rate]),
torch.from_numpy(y[::sample_rate]))
val_data = SignalData(torch.from_numpy(x), torch.from_numpy(y))
return SignalDataset(train_data, val_data)
def plot(self, space_ax: plt.Axes, hidden_ax: plt.Axes,
model: FourierFeatureMLP, num_points: int,
colors: np.ndarray, max_hidden: int):
"""Plots a visualization of the model to the given axes.
Description:
This method will plot the top N activations from the
final layer of the MLP (as indicated by `max_hidden`)
multiplied by the slopes and shifted by the bias
appropriate to the final regressor, on `hidden_ax`. On
`space_ax` it will plot the reconstructed sample points
and the ground truth function.
Args:
space_ax (plt.Axes): A matplotlib Axes for the reconstruction
hidden_ax (plt.Axes): A matplotlib Axes for the hidden basis chart
model (FourierFeatureMLP): The model to visualize
num_points (int): The number of points to visualize
colors (np.ndarray): The colors to use per point
max_hidden (int): The maximum number of hidden units to display
"""
x_vals = torch.linspace(self.val_x[0, 0], self.val_x[-1, 0], num_points)
model.eval()
model.keep_activations = True
with torch.no_grad():
y_vals = model(x_vals.reshape(-1, 1)).reshape(-1)
y_vals = y_vals.cpu().numpy()
model.keep_activations = False
model.train()
slope = model.layers[-1].weight.data.detach().cpu().numpy().reshape(-1)
bias = model.layers[-1].bias.data.item()
activation = model.activations[-1]
activation_values = activation * slope[np.newaxis, :] + bias
activation_range = activation_values.max(0) - activation_values.min(0)
index = np.argsort(activation_range)[::-1]
index = index[:max_hidden]
cmap = plt.get_cmap("jet")
for rank, i in enumerate(index):
on_index = activation[:, i] > 0
act_x = x_vals
act_y = activation_values[:, i]
hidden_ax.plot(act_x, act_y, color=cmap(rank / max_hidden)[:3], zorder=1, label="h{:02d}".format(i))
act_x = x_vals[on_index]
act_y = act_y[on_index]
hidden_ax.scatter(act_x, act_y, color=colors[on_index], marker=".", zorder=2)
activation_values = activation_values[activation > 0]
hidden_ax.set_ylim(*_get_limits(activation_values))
hidden_ax.legend(loc="upper right", ncol=2)
space_ax.set_xlim(*self.x_lim)
space_ax.set_ylim(*self.y_lim)
space_ax.plot(self.val_x.numpy(), self.val_y.numpy(), "r-", label="val", zorder=1)
space_ax.plot(self.train_x.numpy(), self.train_y.numpy(), "go", label="train", zorder=2)
space_ax.scatter(x_vals.numpy(), y_vals, color=colors,
marker="P", label="pred", zorder=3)
space_ax.legend()
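# Hedged usage sketch (the signal function and sizes are illustrative):
# dataset = SignalDataset.create(signal=lambda x: np.sin(2 * np.pi * x),
#                                num_samples=32, sample_rate=8)
# dataset.train_x.shape  -> torch.Size([32, 1])
# dataset.val_x.shape    -> torch.Size([256, 1])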
|
{"hexsha": "01bd169e170e801d3a808449d900f8ffc6ce6845", "size": 5406, "ext": "py", "lang": "Python", "max_stars_repo_path": "fourier_feature_nets/signal_dataset.py", "max_stars_repo_name": "matajoh/fourier_feature_nets", "max_stars_repo_head_hexsha": "784140f01464e34a0dd4b813c50d20c4c15a8a59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 88, "max_stars_repo_stars_event_min_datetime": "2021-11-24T09:22:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T20:34:51.000Z", "max_issues_repo_path": "fourier_feature_nets/signal_dataset.py", "max_issues_repo_name": "matajoh/fourier_feature_nets", "max_issues_repo_head_hexsha": "784140f01464e34a0dd4b813c50d20c4c15a8a59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-04T18:13:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T09:36:52.000Z", "max_forks_repo_path": "fourier_feature_nets/signal_dataset.py", "max_forks_repo_name": "matajoh/fourier_feature_nets", "max_forks_repo_head_hexsha": "784140f01464e34a0dd4b813c50d20c4c15a8a59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-27T13:48:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T20:35:24.000Z", "avg_line_length": 42.234375, "max_line_length": 112, "alphanum_fraction": 0.6011838698, "include": true, "reason": "import numpy", "num_tokens": 1233}
|
from __future__ import print_function
import argparse, os, copy
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import prismnet.model as arch
from prismnet.utils import log_print, metrics, datautils
def train(args, model, device, train_loader, criterion, optimizer):
model.train()
met = metrics.MLMetrics(objective='binary')
for batch_idx, (x0, y0) in enumerate(train_loader):
x, y = x0.float().to(device), y0.to(device).float()
if y0.sum() ==0 or y0.sum() ==args.batch_size:
continue
optimizer.zero_grad()
output = model(x)
loss = criterion(output, y)
prob = torch.sigmoid(output)
y_np = y.to(device='cpu', dtype=torch.long).detach().numpy()
p_np = prob.to(device='cpu').detach().numpy()
met.update(y_np, p_np,[loss.item()])
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
return met
def validate(args, model, device, test_loader, criterion):
model.eval()
y_all = []
p_all = []
l_all = []
with torch.no_grad():
for batch_idx, (x0, y0) in enumerate(test_loader):
x, y = x0.float().to(device), y0.to(device).float()
#if y0.sum() ==0:
# import pdb; pdb.set_trace()
output = model(x)
loss = criterion(output, y)
prob = torch.sigmoid(output)
y_np = y.to(device='cpu', dtype=torch.long).numpy()
p_np = prob.to(device='cpu').numpy()
l_np = loss.item()
y_all.append(y_np)
p_all.append(p_np)
l_all.append(l_np)
y_all = np.concatenate(y_all)
p_all = np.concatenate(p_all)
l_all = np.array(l_all)
met = metrics.MLMetrics(objective='binary')
met.update(y_all, p_all,[l_all.mean()])
return met, y_all, p_all
def inference(args, model, device, test_loader):
model.eval()
p_all = []
with torch.no_grad():
for batch_idx, (x0, y0) in enumerate(test_loader):
x, y = x0.float().to(device), y0.to(device).float()
output = model(x)
prob = torch.sigmoid(output)
p_np = prob.to(device='cpu').numpy()
p_all.append(p_np)
p_all = np.concatenate(p_all)
return p_all
def compute_saliency(args, model, device, test_loader, identity):
from prismnet.model import GuidedBackpropSmoothGrad
model.eval()
saliency_dir = datautils.make_directory(args.out_dir, "out/saliency")
saliency_path = os.path.join(saliency_dir, identity+'.sal')
# sgrad = SmoothGrad(model, device=device)
sgrad = GuidedBackpropSmoothGrad(model, device=device)
sal = ""
for batch_idx, (x0, y0) in enumerate(test_loader):
X, Y = x0.float().to(device), y0.to(device).float()
output = model(X)
prob = torch.sigmoid(output)
p_np = prob.to(device='cpu').detach().numpy().squeeze()
guided_saliency = sgrad.get_batch_gradients(X, Y)
# import pdb; pdb.set_trace()
N, NS, _, _ = guided_saliency.shape # (N, 101, 1, 5)
for i in range(N):
inr = batch_idx*args.batch_size + i
str_sal = datautils.mat2str(np.squeeze(guided_saliency[i]))
sal += "{}\t{:.6f}\t{}\n".format(inr, p_np[i], str_sal)
f = open(saliency_path,"w")
f.write(sal)
f.close()
print(saliency_path)
def compute_saliency_img(args, model, device, test_loader, identity):
from prismnet.model import GuidedBackpropSmoothGrad
from prismnet.utils import visualize
def saliency_img(X, mul_saliency, outdir="results"):
"""generate saliency image
Args:
X ([np.ndarray]): raw input(L x 5/4)
mul_saliency ([np.ndarray]): [description]
outdir (str, optional): [description]. Defaults to "results".
"""
if X.shape[-1]==5:
x_str = X[:,4:]
str_null = np.zeros_like(x_str)
ind =np.where(x_str == -1)[0]
str_null[ind,0]=1
ss = mul_saliency[:,:]
s_str = mul_saliency[:,4:]
s_str = (s_str - s_str.min())/(s_str.max() - s_str.min())
ss[:,4:] = s_str * (1-str_null)
str_null=np.squeeze(str_null).T
else:
str_null = None
ss = mul_saliency[:,:]
visualize.plot_saliency(
X.T,
ss.T,
nt_width=100,
norm_factor=3,
str_null=str_null,
outdir=outdir
)
prefix_n = len(str(len(test_loader.dataset)))
datautils.make_directory(args.out_dir, "out/imgs/")
imgs_dir = datautils.make_directory(args.out_dir, "out/imgs/"+identity)
imgs_path = imgs_dir+'/{:0'+str(prefix_n)+'d}_{:.3f}.pdf'
saliency_path = os.path.join(imgs_dir, 'all.sal')
# sgrad = SmoothGrad(model, device=device)
sgrad = GuidedBackpropSmoothGrad(model, device=device, magnitude=1)
for batch_idx, (x0, y0) in enumerate(test_loader):
X, Y = x0.float().to(device), y0.to(device).float()
output = model(X)
prob = torch.sigmoid(output)
p_np = prob.to(device='cpu').detach().numpy().squeeze()
guided_saliency = sgrad.get_batch_gradients(X, Y)
mul_saliency = copy.deepcopy(guided_saliency)
mul_saliency[:,:,:,:4] = guided_saliency[:,:,:,:4] * X[:,:,:,:4]
N, NS, _, _ = guided_saliency.shape # (N, 101, 1, 5)
sal = ""
for i in tqdm(range(N)):
inr = batch_idx*args.batch_size + i
str_sal = datautils.mat2str(np.squeeze(guided_saliency[i]))
sal += "{}\t{:.6f}\t{}\n".format(inr, p_np[i], str_sal)
img_path = imgs_path.format(inr, p_np[i])
# import pdb; pdb.set_trace()
saliency_img(
X[i,0].to(device='cpu').detach().numpy(),
mul_saliency[i,0].to(device='cpu').numpy(),
outdir=img_path)
if not os.path.exists(saliency_path):
f = open(saliency_path,"w")
f.write(sal)
f.close()
print(saliency_path)
def compute_high_attention_region(args, model, device, test_loader, identity):
from prismnet.model import GuidedBackpropSmoothGrad
har_dir = datautils.make_directory(args.out_dir, "out/har")
har_path = os.path.join(har_dir, identity+'.har')
L = 20
har = ""
# sgrad = SmoothGrad(model, device=device)
sgrad = GuidedBackpropSmoothGrad(model, device=device)
for batch_idx, (x0, y0) in enumerate(test_loader):
X, Y = x0.float().to(device), y0.to(device).float()
output = model(X)
prob = torch.sigmoid(output)
p_np = prob.to(device='cpu').detach().numpy().squeeze()
guided_saliency = sgrad.get_batch_gradients(X, Y)
attention_region = guided_saliency.sum(dim=3)[:,0,:].to(device='cpu').numpy() # (N, 101, 1)
N,NS = attention_region.shape # (N, 101)
for i in range(N):
inr = batch_idx*args.batch_size + i
iar = attention_region[i]
ar_score = np.array([ iar[j:j+L].sum() for j in range(NS-L+1)])
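# ar_score[j] is the total saliency inside the length-L window starting at position j;
# the window with the largest sum is reported as the high-attention region below.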
# import pdb; pdb.set_trace()
highest_ind = np.argmax(ar_score)
har += "{}\t{:.6f}\t{}\t{}\n".format(inr, p_np[i], highest_ind, highest_ind+L)
f = open(har_path,"w")
f.write(har)
f.close()
print(har_path)
|
{"hexsha": "67a78795ffee76d555d412ce07330f5f261efeee", "size": 7493, "ext": "py", "lang": "Python", "max_stars_repo_path": "prismnet/engine/train_loop.py", "max_stars_repo_name": "kuixu/PrismNet", "max_stars_repo_head_hexsha": "aef6f0bdfef765c2fd431762e27a35625d0bd2d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2020-06-17T13:31:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T12:02:39.000Z", "max_issues_repo_path": "prismnet/engine/train_loop.py", "max_issues_repo_name": "zhangqf-lab/PrismNet", "max_issues_repo_head_hexsha": "aef6f0bdfef765c2fd431762e27a35625d0bd2d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-15T12:03:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-30T00:28:30.000Z", "max_forks_repo_path": "prismnet/engine/train_loop.py", "max_forks_repo_name": "zhangqf-lab/PrismNet", "max_forks_repo_head_hexsha": "aef6f0bdfef765c2fd431762e27a35625d0bd2d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-06-17T13:37:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T19:57:07.000Z", "avg_line_length": 34.5299539171, "max_line_length": 99, "alphanum_fraction": 0.5820098759, "include": true, "reason": "import numpy", "num_tokens": 1983}
|
from copy import deepcopy
from pathlib import Path
import itertools
import time
import numpy as np
import tempfile
import os
from python.solver import *
from python.config import TORCHSCRIPT_MODEL_PATH
from python.deploy_model import *
SVCOMP_PATH = ""
SATCOMP18_PATH = ""
BENCHMARKS = [SVCOMP_PATH, SATCOMP18_PATH]
def mk_cadical_options():
RESULT = []
refocuses = [True]
cpu_lims = [5000]
query_intervals = [50000]
refocus_bases = [1000]
refocus_exps = [2]
refocus_ceils = [250000]
refocus_init_times = [15]
refocus_glue_suckss = [False]
refocus_glue_sucks_margins = [20]
refocus_reluctants = [False]
refocus_scales = [10000.0]
irrlims = [10e6]
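# Each list above currently holds a single value, so the itertools.product below yields
# exactly one neuro-guided configuration; append extra values to sweep a grid of options.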
BASELINE_OPTIONS = []
BASELINE_OPTIONS.append({
"refocus": False, "cpu_lim": 5000, "config": "sat"
})
for bm, clim, qi, rs, irrlim, refocus_base, refocus_exp, refocus_init_time, refocus_ceil, glue_sucks, glue_sucks_margin, refocus_reluctant in itertools.product(
refocuses, cpu_lims, query_intervals, refocus_scales, irrlims, refocus_bases, refocus_exps,
refocus_init_times, refocus_ceils, refocus_glue_suckss, refocus_glue_sucks_margins, refocus_reluctants):
mopts = {
"refocus": bm, "cpu_lim": clim, "query_interval": qi, "refocus_scale": rs, "irrlim": irrlim,
"refocus_base": refocus_base, "refocus_exp": refocus_exp, "refocus_init_time": refocus_init_time,
"refocus_ceil": refocus_ceil, "config": "sat", "refocus_glue_sucks": glue_sucks,
"refocus_glue_sucks_margin": glue_sucks_margin, "refocus_reluctant": refocus_reluctant
}
RESULT.append(mopts)
mopts2 = deepcopy(mopts)
mopts2["random_refocus"] = True
BASELINE_OPTIONS.append(mopts2)
return RESULT, BASELINE_OPTIONS
CADICAL_SOLVER_OPTIONS, CADICAL_BASELINE_OPTIONS = mk_cadical_options()
def get_name(path):
ckpt_name = Path(path).stem
model_name = Path(os.path.dirname(path)).stem
return model_name + "-" + ckpt_name
def _parse_main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--benchmarks", action = "store", dest = "benchmarks", nargs = "*", default = None)
parser.add_argument("cfg", action = "store", type = str)
parser.add_argument("ckpt", action = "store", type = str)
parser.add_argument("--n-workers", dest = "n_workers", action = "store", type = int, default = 8)
parser.add_argument("--root-dir", dest = "log_dir", action = "store", type = str)
parser.add_argument("--baseline", action = "store_true")
parser.add_argument("--no-neuro", dest = "no_neuro", action = "store_true")
parser.add_argument("--timeout", type = int, dest = "timeout", action = "store", default = 5000)
parser.add_argument("--mem-per-worker", type = int, dest = "mem_per_worker", action = "store", default = 1)
parser.add_argument("--glue-sucks", dest = "glue_sucks", action = "store_true")
parser.add_argument("--glue-sucks-margin", dest = "glue_sucks_margin", action = "store", default = 20)
parser.add_argument("--config", dest = "config", default = None, action = "store")
parser.add_argument("--refocus-reluctant", dest = "refocus_reluctant", action = "store_true")
parser.add_argument("--refocus-rebump", dest = "refocus_rebump", action = "store_true")
parser.add_argument("--refocus-restart", dest = "refocus_restart", action = "store_true")
parser.add_argument("--query-interval", dest = "query_interval", action = "store", type = float, default = 50000)
parser.add_argument("--refocus-ceil", dest = "refocus_ceil", action = "store", type = float, default = 250000)
parser.add_argument("--refocus-init-time", dest = "refocus_init_time", action = "store", type = int, default = 15)
parser.add_argument("--refocus-base", dest = "refocus_base", action = "store", type = int, default = 1000)
parser.add_argument("--refocus-exp", dest = "refocus_exp", action = "store", type = int, default = 2)
parser.add_argument("--elim-rel-eff", dest = "elim_rel_eff", action = "store", default = 1000, type = int)
parser.add_argument("--subsume-rel-eff", dest = "subsume_rel_eff", action = "store", default = 1000, type = int)
parser.add_argument("--rephase", dest = "rephase", action = "store", type = int, default = 1)
parser.add_argument("--stabilize-only", dest = "stabilize_only", default = 0, type = int)
parser.add_argument("--walk", dest = "walk", action = "store", default = 1, type = int)
opts = parser.parse_args()
root_dir = os.path.join(opts.log_dir, time.strftime("%Y%m%d-%H%M", time.localtime()))
opts.log_dir = os.path.join(root_dir, 'logs')
try:
assert opts.config == "sat" or opts.config == "unsat" or opts.config is None
except AssertionError as e:
print(e)
raise Exception("must specify --config as sat, unsat, or None")
return opts
def is_baseline(mopts):
return mopts.get("random_refocus", False) or mopts.get("branch_mode", 1) == 0 or (not mopts.get("refocus", True))
def _main(cfg, ckpt, n_workers, log_dir, benchmarks, baseline, no_neuro, timeout, mem_per_worker = None,
glue_sucks = False, config = None, refocus_reluctant = False, refocus_rebump = False, query_interval = 50000,
refocus_ceil = 250000, refocus_restart = True, glue_sucks_margin = 20, refocus_init_time = 15,
refocus_base = 1000, refocus_exp = 2, elim_rel_eff = 1000, subsume_rel_eff = 1000, rephase = True,
stabilize_only = False, walk = True):
seed = np.random.choice(int(1e4)) # use fixed seed throughout
if mem_per_worker == 0:
mem_per_worker = None
if benchmarks is None:
benchmarks = BENCHMARKS
name = get_name(ckpt)
DEPLOY_PATH = "/tmp/"
with tempfile.TemporaryDirectory(dir = DEPLOY_PATH) as tmpdir:
model_drat_path = os.path.join(tmpdir, name + "_drat.pt")
deploy_GNN1_drat(model_cfg_path = cfg, ckpt_path = ckpt, save_path = model_drat_path)
for cnfdir in benchmarks:
OPTIONS = CADICAL_SOLVER_OPTIONS if not baseline else CADICAL_SOLVER_OPTIONS + CADICAL_BASELINE_OPTIONS
if no_neuro:
OPTIONS = [{
"refocus": False, "cpu_lim": timeout, "config": config
}]
TMPOPTIONS = []
for mopt in OPTIONS:
mopt["refocus_base"] = refocus_base
mopt["refocus_exp"] = refocus_exp
mopt["refocus_restart"] = refocus_restart
mopt["query_interval"] = query_interval
mopt["refocus_ceil"] = refocus_ceil
mopt["refocus_rebump"] = refocus_rebump
mopt["refocus_reluctant"] = refocus_reluctant
mopt["refocus_glue_sucks"] = glue_sucks
mopt["refocus_glue_sucks_margin"] = glue_sucks_margin
mopt["refocus_init_time"] = refocus_init_time
mopt["subsume_rel_eff"] = subsume_rel_eff
mopt["elim_rel_eff"] = elim_rel_eff
mopt["rephase"] = bool(rephase)
mopt["stabilize_only"] = bool(stabilize_only)
mopt["walk"] = bool(walk)
# mopt["gpu"] = False
mopt["cpu_lim"] = timeout
mopt["seed"] = seed
mopt["config"] = config
mopt_drat = deepcopy(mopt)
mopt_drat["model"] = model_drat_path
TMPOPTIONS.append(mopt_drat)
OPTIONS = TMPOPTIONS
if mem_per_worker is not None:
worker_kwargs = {"num_cpus": 1, "num_gpus": 0, "memory": (mem_per_worker * 1024 * 1024 * 1024)}
else:
worker_kwargs = {"num_cpus": 1, "num_gpus": 0}
workers = [ray.remote(**worker_kwargs)(DummyWorker).remote() for _ in range(n_workers)]
for worker in workers:
pin_to_core(worker)
pool = ActorPool(workers)
for k, w in enumerate(workers):
ray.get(w.run_fn.remote((lambda: print(f"worker {k} online"))))
for i, mopts in enumerate(OPTIONS):
config_string = str(i)
benchmark_name = Path(os.path.dirname(os.path.dirname(cnfdir))).stem + "-" + Path(cnfdir).stem
shopts = {
"program_name": "cadical", "prog_alias": (f"n-cdl-{i}"), "benchmark": benchmark_name,
"prog_args": ""
}
solver_name = "cadical"
OUT_NAME = f"{solver_name}-{name}-run-{i}.json" if not is_baseline(
mopts) else f"{solver_name}-{name}-BASELINE-run-{i}.json"
out_path = os.path.join(log_dir, benchmark_name, OUT_NAME)
print("OUT PATH", out_path)
deploy_GNN1_drat(model_cfg_path = cfg, ckpt_path = ckpt, save_path = model_drat_path)
test_harness(cnfdir, mopts, shopts, out_path, pool = pool, exts = ["cnf", "gz"],
forbidden = ["bz2", "xz"], solver = solver_name)
for w in workers:
del w
del pool
if __name__ == "__main__":
cfg_dict = vars(_parse_main())
num_cpus = cfg_dict["n_workers"] + 1
try:
ray.init(address = "auto", redis_password = '5241590000000000')
except:
print("[WARNING] FALLING BACK ON SINGLE MACHINE RAY CLUSTER")
ray.init()
try:
_main(**cfg_dict)
except Exception as e:
print("CAUGHT EXCEPTION IN MAIN")
print(e)
pass
|
{"hexsha": "896d4a6cbd45a19e784166a1ac167e705396e3d8", "size": 9606, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/solver_eval.py", "max_stars_repo_name": "negotiatorvivian/neuro-cadical", "max_stars_repo_head_hexsha": "ea7d052a5e03b33d3fb8e4a47bcecf7b9f99551d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/solver_eval.py", "max_issues_repo_name": "negotiatorvivian/neuro-cadical", "max_issues_repo_head_hexsha": "ea7d052a5e03b33d3fb8e4a47bcecf7b9f99551d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/solver_eval.py", "max_forks_repo_name": "negotiatorvivian/neuro-cadical", "max_forks_repo_head_hexsha": "ea7d052a5e03b33d3fb8e4a47bcecf7b9f99551d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7428571429, "max_line_length": 164, "alphanum_fraction": 0.6275244639, "include": true, "reason": "import numpy", "num_tokens": 2539}
|
import pandas as pd
import numpy as np
DATA_PATH = "./data/EVconsumption/"
d1 = pd.read_csv(DATA_PATH + "data_1_selected.csv")
print(d1.head())
ids = np.unique(d1['trip_id'])
N = len(ids)
N_train = int(N * 0.7)
N_val = int(N * 0.8)
ids_train = ids[:N_train]
ids_val = ids[N_train:N_val]
ids_test = ids[N_val:]
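# Split whole trips 70/10/20 into train/val/test by trip_id, so segments from the same
# trip never leak across splits.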
data_train = d1[d1['trip_id'].isin(ids_train)]
data_val = d1[d1['trip_id'].isin(ids_val)]
data_test = d1[d1['trip_id'].isin(ids_test)]
print(data_train.shape, data_val.shape, data_test.shape)
X_labels = ['speed', 'speed_limit', 'speed_avg_week', 'speed_avg_time', 'speed_avg_week_time', 'speed_avg', 'seconds', 'air_temperature',
'wind_direction', 'wind_speed_ms', 'segangle',
'time', 'weekend',
'drifting', 'dry', 'fog', 'freezing', 'none', 'snow', 'thunder', 'wet',
'living_street', 'motorway', 'motorway_link', 'primary', 'residential',
'secondary', 'secondary_link', 'service', 'tertiary',
'track', 'trunk', 'trunk_link', 'unclassified', 'unpaved']
y_labels = ['trip_id', 'trip_segmentno', 'segmentkey',
'segmentid', 'ev_kwh']
data_train[X_labels].to_csv(DATA_PATH + "X_train.csv", index=False)
data_val[X_labels].to_csv(DATA_PATH + "X_val.csv", index=False)
data_test[X_labels].to_csv(DATA_PATH + "X_test.csv", index=False)
data_train[y_labels].to_csv(DATA_PATH + "y_train.csv", index=False)
data_val[y_labels].to_csv(DATA_PATH + "y_val.csv", index=False)
data_test[y_labels].to_csv(DATA_PATH + "y_test.csv", index=False)
|
{"hexsha": "c8ece8236f88b31e77bdf2a070b4f5c75359cf71", "size": 1498, "ext": "py", "lang": "Python", "max_stars_repo_path": "prep_scripts/2_data_split.py", "max_stars_repo_name": "linas-p/EVDPEP", "max_stars_repo_head_hexsha": "2062e20ef784a76eebaf71ebbe4f9006cde5bbd5", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-10-05T14:02:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T07:59:06.000Z", "max_issues_repo_path": "prep_scripts/2_data_split.py", "max_issues_repo_name": "patrickiswwgp/EVDPEP", "max_issues_repo_head_hexsha": "2062e20ef784a76eebaf71ebbe4f9006cde5bbd5", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-31T14:41:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T16:23:45.000Z", "max_forks_repo_path": "prep_scripts/2_data_split.py", "max_forks_repo_name": "patrickiswwgp/EVDPEP", "max_forks_repo_head_hexsha": "2062e20ef784a76eebaf71ebbe4f9006cde5bbd5", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-23T07:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T09:09:03.000Z", "avg_line_length": 33.2888888889, "max_line_length": 137, "alphanum_fraction": 0.6842456609, "include": true, "reason": "import numpy", "num_tokens": 447}
|
# -*- coding: utf-8 -*-
import datetime
import pandas as pd
import numpy as np
from rqdatac.services.calendar import get_previous_trading_date
from rqdatac.validators import (
ensure_string_in,
ensure_order_book_id,
ensure_order_book_ids,
ensure_date_range,
ensure_list_of_string
)
from rqdatac.utils import (
int8_to_datetime_v,
int14_to_datetime_v,
int17_to_datetime_v,
int17_to_datetime,
today_int,
date_to_int8,
)
from rqdatac.client import get_client
from rqdatac.decorators import export_as_api
DAYBAR_FIELDS = [
"close", "volume", "total_turnover"
]
TICKBAR_FIELDS = [
"datetime", "close", "volume", "total_turnover", "bid_vol", "ask_vol"
]
def convert_bar_to_multi_df(data, dt_name, fields, convert_dt):
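    # Assemble per-instrument bar dicts into a single DataFrame indexed by an
    # (order_book_id, dt_name) MultiIndex with one column per requested field.
    # `data` is an iterable of (order_book_id, {field: array}) pairs and
    # `convert_dt` turns the raw integer datetimes into pandas-friendly values.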
line_no = 0
dt_set = set()
obid_level = []
obid_slice_map = {}
for obid, d in data:
dts = d[dt_name]
dts_len = len(dts)
if dts_len == 0:
continue
obid_slice_map[obid] = slice(line_no, line_no + dts_len, None)
dt_set.update(dts)
line_no += dts_len
obid_level.append(obid)
if line_no == 0:
return
obid_idx_map = {o: i for i, o in enumerate(obid_level)}
obid_label = np.empty(line_no, dtype=object)
dt_label = np.empty(line_no, dtype=object)
arr = np.full((line_no, len(fields)), np.nan)
r_map_fields = {f: i for i, f in enumerate(fields)}
dt_arr_sorted = np.array(sorted(dt_set))
dt_level = convert_dt(dt_arr_sorted)
for obid, d in data:
dts = d[dt_name]
if len(dts) == 0:
continue
slice_ = obid_slice_map[obid]
for f, value in d.items():
if f == dt_name:
dt_label[slice_] = dt_arr_sorted.searchsorted(dts, side='left')
else:
arr[slice_, r_map_fields[f]] = value
obid_label[slice_] = [obid_idx_map[obid]] * len(dts)
try:
        # 'is_datetime_with_singletz_array' is the most time-consuming part of constructing
        # the MultiIndex and is not needed for our data, so patch it out temporarily.
func_is_singletz = getattr(pd._libs.lib, 'is_datetime_with_singletz_array')
setattr(pd._libs.lib, 'is_datetime_with_singletz_array', lambda *args: True)
except AttributeError:
func_is_singletz = None
multi_idx = pd.MultiIndex(levels=[obid_level, dt_level], labels=[obid_label, dt_label],
names=('order_book_id', dt_name))
if func_is_singletz is not None:
# recovery
setattr(pd._libs.lib, 'is_datetime_with_singletz_array', func_is_singletz)
df = pd.DataFrame(data=arr, index=multi_idx, columns=fields)
return df
def get_ksh_daybar(order_book_ids, start_date, end_date, fields, duration=1, market="cn"):
data = get_client().execute(
"get_ksh_daybar", order_book_ids, start_date, end_date, fields, duration, market
)
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
res = convert_bar_to_multi_df(data, 'date', fields, int8_to_datetime_v)
return res
def get_today_ksh_minbar(order_book_ids, date, fields, duration, market="cn"):
data = get_client().execute("get_today_ksh_minbar", order_book_ids, date, fields, duration, market)
return convert_bar_to_multi_df(data, "datetime", fields, int14_to_datetime_v)
def get_ksh_minbar(order_book_ids, start_date, end_date, fields, duration, market):
data = get_client().execute(
"get_ksh_minbar", order_book_ids, start_date, end_date, fields, duration, market
)
if data:
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
df = convert_bar_to_multi_df(data, 'datetime', fields, int14_to_datetime_v)
else:
df = None
today = today_int()
if df is None:
history_latest_date = date_to_int8(get_previous_trading_date(today, market=market))
else:
history_latest_date = date_to_int8(df.index.get_level_values(1).max())
if history_latest_date >= end_date or start_date > today or history_latest_date >= today:
return df
live_df = get_today_ksh_minbar(order_book_ids, today, fields, duration, market)
if live_df is None:
return df
if df is None:
return live_df
df = pd.concat([df, live_df])
df.sort_index(inplace=True)
return df
def get_today_ksh_tick(order_book_id, date, fields, market="cn"):
data = get_client().execute("get_today_ksh_tick", order_book_id, date, market)
df = pd.DataFrame(data[0])
if df.empty:
return None
df = df[fields]
df.datetime = df.datetime.apply(int17_to_datetime)
df.set_index("datetime", inplace=True)
return df
def get_ksh_tickbar(order_book_id, start_date, end_date, fields, market):
order_book_id = ensure_order_book_id(order_book_id)
start_date, end_date = ensure_date_range(start_date, end_date, datetime.timedelta(days=3))
data = get_client().execute(
"get_ksh_tickbar", order_book_id, start_date, end_date, fields, market
)
today = today_int()
if data:
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
df_list = []
for obid, d in data:
df = pd.DataFrame(d)
df_list.append(df)
df = pd.concat(df_list) # type: pd.DataFrame
df["datetime"] = int17_to_datetime_v(df["datetime"].values)
history_latest_date = date_to_int8(df.iloc[-1]["datetime"])
df.set_index("datetime", inplace=True)
else:
df = None
history_latest_date = date_to_int8(get_previous_trading_date(today, market=market))
if history_latest_date >= end_date or start_date > today or history_latest_date >= today:
return df
live_df = get_today_ksh_tick(order_book_id, today, fields, market=market)
if live_df is None:
return df
if df is None:
return live_df
return pd.concat([df, live_df])
@export_as_api
def get_ksh_auction_info(order_book_ids, start_date=None, end_date=None, frequency="1d", market="cn"):
"""获取科创板盘后数据
:param order_book_ids: 股票代码or股票代码列表, 如'000001.XSHE'
:param start_date: 开始日期
:param end_date: 结束日期
:param frequency: 默认为日线。日线使用 '1d', 分钟线 '1m' 快照 'tick' (Default value = "1d"),
:param market: (Default value = "cn")
:returns: pandas.DataFrame or None
"""
ensure_string_in(frequency, ("1d", "1m", "tick"), "frequency")
if frequency == "tick":
return get_ksh_tickbar(order_book_ids, start_date, end_date, TICKBAR_FIELDS, market)
order_book_ids = ensure_order_book_ids(order_book_ids)
start_date, end_date = ensure_date_range(start_date, end_date)
if frequency == "1d":
return get_ksh_daybar(order_book_ids, start_date, end_date, DAYBAR_FIELDS, 1, market)
return get_ksh_minbar(order_book_ids, start_date, end_date, DAYBAR_FIELDS, 1, market)
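# Usage sketch (illustrative only; assumes an initialized rqdatac session and reuses the
# example order book id from the docstring — the dates are hypothetical):
#
#   daily = get_ksh_auction_info('000001.XSHE', start_date='2019-08-01', end_date='2019-08-31', frequency='1d')
#   ticks = get_ksh_auction_info('000001.XSHE', start_date='2019-08-01', end_date='2019-08-02', frequency='tick')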
|
{"hexsha": "d18d9128f5bddf6e126d45b4dc9c87806f88a75e", "size": 6938, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/lib/python3.7/site-packages/rqdatac/services/ksh_auction_info.py", "max_stars_repo_name": "CatTiger/vnpy", "max_stars_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "venv/lib/python3.7/site-packages/rqdatac/services/ksh_auction_info.py", "max_issues_repo_name": "CatTiger/vnpy", "max_issues_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-21T02:42:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-21T02:42:32.000Z", "max_forks_repo_path": "venv/lib/python3.7/site-packages/rqdatac/services/ksh_auction_info.py", "max_forks_repo_name": "CatTiger/vnpy", "max_forks_repo_head_hexsha": "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.864321608, "max_line_length": 109, "alphanum_fraction": 0.6742577112, "include": true, "reason": "import numpy", "num_tokens": 1893}
|
import numpy as np
import pandas as pd
import gc
from sklearn import metrics
from tqdm import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import train_test_split
from datasets import WakeWordDataset, get_loaders
from model import SimpleRNN, SimpleCNN
def train(model, train_loader, loss_fn, optimizer, scheduler, epoch, tensorboard_writer, device):
epoch_train_loss = 0
train_metrics = 0.0
model.train()
for x, y in tqdm(train_loader, desc='training'):
x,y = x.to(device), y.to(device)
x = x.squeeze(1)
optimizer.zero_grad()
predictions = model(x)
# print(predictions)
predictions = torch.sigmoid(predictions)
# predictions = predictions.squeeze()
# y = y.squeeze()
loss = loss_fn(predictions, y)
# predictions = torch.sigmoid(predictions)
loss.backward()
optimizer.step()
if scheduler:
scheduler.step()
        # predictions already went through sigmoid above; round once for the F1 metric
        predictions = torch.round(predictions)
predictions = predictions.detach().cpu().numpy()
y = y.detach().cpu().numpy()
f_score = metrics.f1_score(y, predictions)
train_metrics += f_score
tensorboard_writer.add_scalar("LR/train", optimizer.param_groups[0]["lr"], epoch)
tensorboard_writer.add_scalar("Loss/train", loss, epoch)
tensorboard_writer.add_scalar("F1Score/train", f_score, epoch)
epoch_train_loss += loss.item()
tqdm.write(f'Epoch {epoch}')
tqdm.write(f'Train Epoch loss {epoch_train_loss / len(train_loader)}')
tqdm.write(f'Train f1 score {train_metrics / len(train_loader)}')
return train_metrics / len(train_loader), epoch_train_loss / len(train_loader)
def evaluate(model, test_loader, loss_fn, epoch, tensorboard_writer, device):
model.eval()
test_metrics = 0.0
epoch_test_loss = 0.0
for x, y in tqdm(test_loader, desc='eval'):
x,y = x.to(device), y.to(device)
x = x.squeeze(1)
predictions = model(x)
# print(predictions)
# predictions = predictions.squeeze()
predictions = torch.sigmoid(predictions)
loss = loss_fn(predictions, y)
epoch_test_loss += loss.item()
tensorboard_writer.add_scalar("Loss/eval", loss, epoch)
predictions = torch.round(predictions)
predictions = predictions.detach().cpu().numpy()
y = y.detach().cpu().numpy()
f_score = metrics.f1_score(y, predictions)
tensorboard_writer.add_scalar("F1Score/eval", f_score, epoch)
test_metrics += f_score
tqdm.write(f'Epoch {epoch}')
tqdm.write(f'Eval Epoch loss {epoch_test_loss / len(test_loader)}')
tqdm.write(f'Eval f1 score {test_metrics / len(test_loader)}')
return test_metrics / len(test_loader), epoch_test_loss / len(test_loader)
def run_training(model, train_loader, test_loader, loss_fn, optimizer, scheduler, early_stop_patience: int = None, EPOCHS=100):
eval_f1_score = 0.0
max_loss = 1000
early_stopping = 0
writer = SummaryWriter()
device = torch.device('cuda')
# model.to(device)
for epoch in range(EPOCHS):
train_f1, train_loss = train(model, train_loader, loss_fn, optimizer, scheduler, epoch, writer, device)
test_f1, test_loss = evaluate(model, test_loader, loss_fn, epoch, writer, device)
if test_f1 > eval_f1_score:
eval_f1_score = test_f1
torch.save(model, 'wake_model_cnn.pth')
if test_loss < max_loss:
max_loss = test_loss
early_stopping = 0
else:
print('Early stopping', early_stopping)
early_stopping += 1
if early_stop_patience and early_stop_patience == early_stopping:
print('Early stopping stopped')
break
return eval_f1_score
if __name__ == '__main__':
torch.cuda.empty_cache()
gc.collect()
import time
time.sleep(1.0)
model = SimpleCNN()
df = pd.read_csv('data/upsampled_data.csv')
train_loader, test_loader = get_loaders(df, batch_size=64)
device = torch.device('cuda')
model.to(device)
loss_fn = torch.nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
run_training(model, train_loader, test_loader, loss_fn, optimizer, scheduler=scheduler, early_stop_patience=50, EPOCHS=30)
|
{"hexsha": "c7ce6e7f94f69bdde5afd6d8207387688afb048f", "size": 4584, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "streamride/wakeworddetection", "max_stars_repo_head_hexsha": "da162a93c90d0c139293f4479da6ed44897f492e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-29T07:13:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T11:00:34.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "streamride/wakeworddetection", "max_issues_repo_head_hexsha": "da162a93c90d0c139293f4479da6ed44897f492e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "streamride/wakeworddetection", "max_forks_repo_head_hexsha": "da162a93c90d0c139293f4479da6ed44897f492e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9677419355, "max_line_length": 127, "alphanum_fraction": 0.6570680628, "include": true, "reason": "import numpy", "num_tokens": 1062}
|
from pathlib import Path
import cv2
import numpy as np
from medhack.dataset import CovidImageDataset
import albumentations as A
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch.utils.data import DataLoader
import torch.distributed as dist
from medhack.distributed_sampler import WeightedDistributedRandomSampler
mean = 0.8184206354986654 # Gimme for Gregor
std = 0.03884859786640268 # Wait for Gregor
def gamma_transform(img, gamma, epsilon=1e-7):
_min = img.min()
_max = img.max()
_range = _max - _min
return np.power(((img - _min) / float(_range + epsilon)), gamma) * _range + _min
class OurGammaTransform(A.RandomGamma):
def apply(self, img, gamma=1, **params):
return gamma_transform(img, gamma=gamma)
class BasicDataModule(pl.LightningDataModule):
def __init__(self, root_dir: Path, gpu_num: int, num_workers: int = 8, batch_size: int = 32,
):
super(BasicDataModule, self).__init__()
train_csv = "train.csv"
val_csv = "valid.csv"
# Hyperparameters of Dataloader
self.gpu_num: int = gpu_num
self.batch_size: int = batch_size
self.shuffle: bool = True
self.drop_last: bool = False
self.persistent_workers: bool = True # Probably useful
self.num_workers = num_workers
self.pin_memory = True
train_transforms = A.Compose(
[
# A.Normalize(mean, std, max_pixel_value=1.0),
A.HorizontalFlip(),
A.OneOf(
[
A.Affine(scale=(0.75, 1.30), # 0.5 == 50% zoomed out
rotate=30,
shear=(10, 10),
interpolation=cv2.INTER_CUBIC,
mode=cv2.BORDER_REFLECT,
p=1.0,
),
A.RandomResizedCrop(
224, 224, scale=(0.2, 1.0), ratio=(0.75, 1.33), p=1.0,
),
A.NoOp(),
]
),
# old params
# A.Affine(scale=(0.75, 1.30), # 0.5 == 50% zoomed out
# rotate=30,
# shear=(10, 10),
# interpolation=cv2.INTER_CUBIC,
# mode=cv2.BORDER_REFLECT,
# p=0.9,
# ),
# albumentations.augmentations.transforms.ImageCompression
# A.ImageCompression(
# quality_lower=80,
# quality_upper=100,
# p=0.1,
# ),
# A.GridDropout(
# ratio=0.5,
# p=1.0,
# holes_number_x=4,
# holes_number_y=4,
# random_offset=True,
# ),
A.RandomBrightnessContrast(p=0.2, contrast_limit=0.1, brightness_limit=0.1, brightness_by_max=False),
# A.GaussNoise(var_limit=0.01, p=0.2),
# # # Insane Aug
OurGammaTransform(gamma_limit=(80, 120), p=0.2),
A.GaussianBlur(),
]
)
val_transforms = A.Compose(
[
# A.Normalize(mean, std, max_pixel_value=1.0),
# A.VerticalFlip(),
]
)
self.train_dataset = CovidImageDataset(train_csv, root_dir, train_transforms)
self.valid_dataset = CovidImageDataset(val_csv, root_dir, val_transforms)
if self.gpu_num == 1:
rank = 0
else:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = None
self.training_data_sampler = WeightedDistributedRandomSampler(
weights=self.train_dataset.get_data_weights(),
num_samples=len(self.train_dataset),
replacement=True,
rank=rank
)
def train_dataloader(self) -> TRAIN_DATALOADERS:
return DataLoader(
self.train_dataset,
self.batch_size,
shuffle=False,
sampler=self.training_data_sampler,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
drop_last=False,
persistent_workers=self.persistent_workers,
)
def test_dataloader(self) -> EVAL_DATALOADERS:
pass
def val_dataloader(self) -> EVAL_DATALOADERS:
return DataLoader(
self.valid_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
drop_last=False,
persistent_workers=self.persistent_workers,
)
def predict_dataloader(self) -> EVAL_DATALOADERS:
pass
|
{"hexsha": "9fc70199894edf1cd92023cdc4fd7e2a95e0e319", "size": 5071, "ext": "py", "lang": "Python", "max_stars_repo_path": "medhack/data_loading.py", "max_stars_repo_name": "mibaumgartner/hackathon_health", "max_stars_repo_head_hexsha": "e3ab4971ecb4efd0e43c583104b8485c548320d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "medhack/data_loading.py", "max_issues_repo_name": "mibaumgartner/hackathon_health", "max_issues_repo_head_hexsha": "e3ab4971ecb4efd0e43c583104b8485c548320d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "medhack/data_loading.py", "max_forks_repo_name": "mibaumgartner/hackathon_health", "max_forks_repo_head_hexsha": "e3ab4971ecb4efd0e43c583104b8485c548320d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8066666667, "max_line_length": 117, "alphanum_fraction": 0.5249457701, "include": true, "reason": "import numpy", "num_tokens": 1117}
|
def run_pyexocross():
import numpy as np
import argparse
import os
from .pyexocross import PyExocross
from .util import create_grid_res, convert_to_wavenumber
parser = argparse.ArgumentParser()
parser.add_argument("--linelist",type=str,dest="linelist",required=True)
parser.add_argument("--path",type=str,dest="path",required=True)
parser.add_argument("-T",type=float,dest="T",required=True)
parser.add_argument("-P",type=float,required=True)
parser.add_argument("--pressure-unit","-U",type=str,dest="U",default="bar",help="Units defined for pressure")
parser.add_argument("-b","--broadeners",dest="broadeners",nargs="+",help='Broadeners to include')
parser.add_argument("-r","--ratios",dest="ratios",nargs="+",type=float,help="corresponding ratios for each broadener. Default equally weighs them")
parser.add_argument("-n","--nworkers",dest="nworkers",type=int,default=2,help="Number of worker threads to spin up for voigt calculation")
parser.add_argument("-c","--chunk",type=int,default=100000,dest="chunk",help='How many transitions to read at a time')
parser.add_argument("-o",type=str,dest="output",help="Output filename")
parser.add_argument("-s",type=float,nargs="+",default=[0.1,10000], help='Spectral range')
parser.add_argument("--thresh",type=float,default=1e-30, help='Threshold for intensities (default: %(default)s cm/molecule)')
parser.add_argument("--wing",type=float,default=25.0, help='Voigt wing cutoff (default: %(default)s cm-1)')
parser.add_argument("-u","--spectra-units",type=str,default="k",dest="spectra_unit",help="Spectral units, must be parsable by astropy (Default: cm-1)")
parser.add_argument("-R","--resolution",dest="res",type=float,default=10000,help='Resolution (default: R=%(default)s)')
parser.add_argument("--plot", action='store_true', default=False)
args = parser.parse_args()
linelist = args.linelist.lower()
linelist_klass = None
if linelist in ('exomol',):
from .exomol import ExomolLinelist
linelist_klass = ExomolLinelist
print('Using ExoMol linelist')
elif linelist in ('hitran',):
from .hitran import HITRANLinelist
linelist_klass = HITRANLinelist
print('Using HITRAN linelist')
else:
raise ValueError(f'Unknown linelist {args.linelist}')
ll = linelist_klass(args.path)
print(f'Detected molecule is {ll.molecule}')
temperature = args.T
print(f'Temperature selected = {temperature} K')
pressure_unit = args.U
pressure_value = float(args.P)
if pressure_unit != 'bar':
from .util import conversion_factor
factor = conversion_factor(pressure_unit,'bar')
pressure_value*= factor
print(f'Pressure selected {args.P} {pressure_unit} -> {pressure_value} bar')
else:
print(f'Pressure selected {pressure_value} bar')
broadeners = args.broadeners
ratios = args.ratios
if broadeners is not None:
if ratios is None:
ratios = [1.0]*len(broadeners)
if linelist in ('hitran',):
for b,r in zip(broadeners,ratios):
if b.lower() == 'self':
ll.add_self_broadener(ratio=r)
elif b.lower() =='air':
ll.add_air_broadener(ratio=r)
else:
raise ValueError(f'HITRAN does not support broadener type {b}')
print(f'Including {b} broadener at ratio={r}')
if linelist in ('exomol',):
for b,r in zip(broadeners,ratios):
if b.lower() == 'default':
ll.add_default_broadener(ratio=r)
elif b in ll.availableBroadeners:
ll.add_available_broadener(b, ratio=r)
else:
raise ValueError(f'Exomol: Unknown {b} or file not found in linelist path')
print(f'Including {b} broadener at ratio={r}')
max_jobs = args.nworkers*10
R = args.res
grid = convert_to_wavenumber(create_grid_res(R,min(args.s),max(args.s))[:,0],args.spectra_unit)
grid = np.sort(grid)
print(f'Running on wavenumber grid {grid.min()}--{grid.max()} cm-1 at R={R}')
pyexo = PyExocross(ll)
wn,xsec = pyexo.compute_xsec_parallel(grid,temperature,pressure_value, chunksize=args.chunk, threshold=args.thresh, wing_cutoff=args.wing,
max_workers=args.nworkers,max_jobs=max_jobs)
# wn,xsec = pyexo.compute_xsec(grid,temperature,pressure_value, chunksize=args.chunk, threshold=args.thresh, wing_cutoff=args.wing)
output_folder = args.output
filename = f'{ll.molecule}_{temperature}K_{pressure_value}bar_R={R}.xsec'
out_filename = filename
if output_folder is not None and os.path.isdir(output_folder):
out_filename = os.path.join(output_folder,filename)
print(f'Writing output to {out_filename}')
np.savetxt(out_filename, np.vstack((wn,xsec)).T)
if args.plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(wn,xsec)
plt.xlabel(r'Wavenumber cm$^{-1}$')
plt.ylabel(r'Cross-section cm$^{2}$/molecule')
plt.show()
|
{"hexsha": "d566975a1b2e77dcad44a85a5e6cfa12a093ccb2", "size": 5287, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyexocross/run.py", "max_stars_repo_name": "ucl-exoplanets/pyexocross", "max_stars_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyexocross/run.py", "max_issues_repo_name": "ucl-exoplanets/pyexocross", "max_issues_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyexocross/run.py", "max_forks_repo_name": "ucl-exoplanets/pyexocross", "max_forks_repo_head_hexsha": "703341cd0fddafcbb04e935c89ddc9d02dda9f59", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-15T12:54:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T12:54:04.000Z", "avg_line_length": 41.9603174603, "max_line_length": 155, "alphanum_fraction": 0.6438433894, "include": true, "reason": "import numpy", "num_tokens": 1314}
|
import matplotlib.pyplot as plt
import glob
import numpy as np
def parse_RMSF_file(rmsf_file: str):
if glob.glob(rmsf_file):
return np.genfromtxt(rmsf_file,skip_header=1,usecols=1,dtype=float)
else:
print("File not found: ",rmsf_file)
return None
def rmsf(temp_rmsf_array,ax=None,title="",plotrange=0,num_res=0,barcolor="blue",threshold=0,res_offset=0):
"""
description
Parameters
----------
rmsf_array : NumPy array or str filename of array.
either the array itself or a filename containing the array
ax : matplotlib ax
matplotlib.Axes object to plot data on.
title : str
Title of the plot.
plotrange : int
x-axis maximum limit, usually the number of residues in the array.
num_res : int
number of residues in the array
barcolor : str
matplotlib.colors color name
Returns
-------
None
"""
if type(temp_rmsf_array) == str:
if glob.glob(temp_rmsf_array):
data = np.genfromtxt(temp_rmsf_array,skip_header=1,usecols=1,dtype=float)
else:
print("File not found: " + temp_rmsf_array)
return
elif type(temp_rmsf_array) != str:
data = temp_rmsf_array.copy()
if ax == None:
ax = plt.gca()
if plotrange != 0:
ax.set_ylim(0,plotrange)
if num_res != 0:
data = data[:num_res]
ax.set_title(title)
ax.set_xlim(0+res_offset,len(data)+1+res_offset)
ax.set_xlabel("Residue")
ax.set_ylabel(r"RMSF ($\AA$)")
if threshold != 0:
ax.bar(np.arange(1+res_offset,len(data)+1+res_offset,1),data,align="center",color="grey",alpha=0.5)
for i in range(len(data)):
if (data[i] < threshold) and (data[i] > -threshold):
data[i] = 0
    ax.bar(np.arange(1+res_offset,len(data)+1+res_offset,1),data,align="center",color=barcolor)
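
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): "rmsf.dat" is a hypothetical RMSF file with one
    # header line followed by residue-number / RMSF columns, as expected by parse_RMSF_file.
    fig, ax = plt.subplots(figsize=(8, 3))
    rmsf("rmsf.dat", ax=ax, title="Backbone RMSF", barcolor="steelblue", threshold=1.5)
    fig.tight_layout()
    fig.savefig("rmsf_example.png", dpi=300)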
|
{"hexsha": "ff0b335680c085268fe696fa707f2b3655f323c0", "size": 1916, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydrachem/Subplots/rmsf.py", "max_stars_repo_name": "markahix/pydrachem", "max_stars_repo_head_hexsha": "56a55260bbcbb3759629a36625920f4094a49202", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pydrachem/Subplots/rmsf.py", "max_issues_repo_name": "markahix/pydrachem", "max_issues_repo_head_hexsha": "56a55260bbcbb3759629a36625920f4094a49202", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydrachem/Subplots/rmsf.py", "max_forks_repo_name": "markahix/pydrachem", "max_forks_repo_head_hexsha": "56a55260bbcbb3759629a36625920f4094a49202", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0344827586, "max_line_length": 107, "alphanum_fraction": 0.610125261, "include": true, "reason": "import numpy", "num_tokens": 520}
|
# pylint: disable=C0103,W0102,R0914
import numpy as np
from omnizart.feature.cfp import extract_cfp
from omnizart.utils import get_logger
logger = get_logger("HCFP Feature")
def fetch_harmonic(data, cenf, ith_har, start_freq=27.5, num_per_octave=48, is_reverse=False):
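    # Shift the CFP representation so that bin 0 aligns with the ith harmonic of `start_freq`
    # (or the ith sub-harmonic when is_reverse=True, as used for the cepstrum), cropping the
    # result to the 88 piano notes.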
ith_har += 1
if ith_har != 0 and is_reverse:
ith_har = 1 / ith_har
# harmonic_series = [12, 19, 24, 28, 31]
bins_per_note = int(num_per_octave / 12)
total_bins = int(bins_per_note * 88)
hid = min(range(len(cenf)), key=lambda i: abs(cenf[i] - ith_har*start_freq)) # noqa: E226
harmonic = np.zeros((total_bins, data.shape[1]))
upper_bound = min(len(cenf) - 1, hid + total_bins)
harmonic[:(upper_bound - hid)] = data[hid:upper_bound]
return harmonic
def extract_hcfp(
filename,
hop=0.02, # in seconds
win_size=7939,
fr=2.0,
g=[0.24, 0.6, 1],
bin_per_octave=48,
down_fs=44100,
max_sample=2000,
harmonic_num=6,
):
_, spec, gcos, ceps, cenf = extract_cfp(
filename,
hop=hop,
win_size=win_size,
fr=fr,
fc=1.0,
tc=1 / 22050,
g=g,
bin_per_octave=bin_per_octave,
down_fs=down_fs,
max_sample=max_sample,
)
har = []
logger.debug("Fetching harmonics of spectrum")
for i in range(harmonic_num + 1):
har.append(fetch_harmonic(spec, cenf, i))
har_s = np.transpose(np.array(har), axes=(2, 1, 0))
# Harmonic GCoS
har = []
logger.debug("Fetching harmonics of GCoS")
for i in range(harmonic_num + 1):
har.append(fetch_harmonic(gcos, cenf, i))
har_g = np.transpose(np.array(har), axes=(2, 1, 0))
# Harmonic cepstrum
har = []
logger.debug("Fetching harmonics of cepstrum")
for i in range(harmonic_num + 1):
har.append(fetch_harmonic(ceps, cenf, i, is_reverse=True))
har_c = np.transpose(np.array(har), axes=(2, 1, 0))
return har_s, har_g, har_c, cenf
|
{"hexsha": "cfd5d318799a7c14d55d91a92d2b0b50e2dab8c5", "size": 1972, "ext": "py", "lang": "Python", "max_stars_repo_path": "omnizart/feature/hcfp.py", "max_stars_repo_name": "nicolasanjoran/omnizart", "max_stars_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1145, "max_stars_repo_stars_event_min_datetime": "2020-11-13T10:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T17:35:36.000Z", "max_issues_repo_path": "omnizart/feature/hcfp.py", "max_issues_repo_name": "nicolasanjoran/omnizart", "max_issues_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2020-12-29T04:51:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T06:52:04.000Z", "max_forks_repo_path": "omnizart/feature/hcfp.py", "max_forks_repo_name": "nicolasanjoran/omnizart", "max_forks_repo_head_hexsha": "b0e74af39b2e3a312ef32dbf0837626b2e043cb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2020-12-19T09:09:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T01:26:22.000Z", "avg_line_length": 26.2933333333, "max_line_length": 94, "alphanum_fraction": 0.6293103448, "include": true, "reason": "import numpy", "num_tokens": 639}
|
import numpy as np
import time
import torch
import torch.nn as nn
# from flair.parser.modules.dropout import SharedDropout
from torch.nn.modules.rnn import apply_permutation
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import (pack_padded_sequence, pad_packed_sequence,
pad_sequence)
import flair.ner_dp_utils as utils
from flair.ner_dp_utils import get_shape, train_dataloader, eval_dataloader
import pdb
class BiaffineNERModel(nn.Module):
def __init__(self, config, model_sizes):
super().__init__()
self.config = config
self.device = torch.device(config['device'])
# self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
self.char_dict = utils.load_char_dict(config["char_vocab_path"])
self.char_emb_size = config["char_embedding_size"]
self.char_filter_widths = config['filter_widths']
self.char_filter_size = config['filter_size']
self.char_wordemb_size = len(self.char_filter_widths)*self.char_filter_size
self.char_embbedings = torch.nn.Embedding(num_embeddings=len(self.char_dict), embedding_dim=self.char_emb_size)
self.context_embeddings_size = model_sizes[0] + self.char_wordemb_size
self.eval_data = None # Load eval data lazily.
self.ner_types = self.config['ner_types']
self.ner_maps = {ner: (i + 1) for i, ner in enumerate(self.ner_types)}
self.num_types = len(self.ner_types)
self.dropout = self.config["dropout_rate"]
# self.lexical_Dropout = torch.nn.Dropout(p=config['lexical_dropout_rate'])
self.lstm_dropout = self.config["lstm_dropout_rate"]
self.lexical_Dropout = nn.Dropout(p=config['lexical_dropout_rate'])
self.lstm_input_size = self.context_embeddings_size
self.lstm_output_size = 2*self.config["contextualization_size"]
self.mlpx = projection(emb_size=self.context_embeddings_size,
output_size=self.lstm_output_size)
#char emb
self.char_emb_cnn = cnn(emb_size=self.char_emb_size, kernel_sizes=self.char_filter_widths, num_filter=self.char_filter_size)
self.rnn = BiLSTM_1(input_size=self.lstm_input_size,
hidden_size=self.config["contextualization_size"],
num_layers=config['contextualization_layers'],
dropout=self.lstm_dropout)
self.start_project = projection(emb_size=self.lstm_output_size,
output_size=self.config["ffnn_size"])
self.end_project = projection(emb_size=self.lstm_output_size,
output_size=self.config['ffnn_size'])
self.bilinear = bilinear_classifier(dropout=self.dropout,
input_size_x=self.config["ffnn_size"],
input_size_y=self.config["ffnn_size"],
output_size=self.num_types+1,
)
self.criterion = torch.nn.CrossEntropyLoss(reduction='none')
self.global_step = 0
self.batch_len = None
self.to(self.device)
def sequence_mask(self, lengths, maxlen, dtype=torch.bool):
if maxlen is None:
maxlen = lengths.max()
        row = torch.arange(maxlen, device=self.device)
        matrix = torch.as_tensor(lengths, device=self.device).view(-1, 1)
        mask = matrix > row
        return mask.to(dtype)
def forward(self, batch, is_train=False):
"""compute score for each step"""
batch_tensors = batch[0]
tokens, context_word_emb, char_index, text_len, gold_labels = batch_tensors
n_sentences, max_sentence_length = tokens.shape[0], tokens.shape[1]
text_len_mask = self.sequence_mask(lengths=text_len, maxlen=max_sentence_length)
context_emb_list = []
context_emb_list.append(context_word_emb)
#TODO add char_emb
# pdb.set_trace()
char_emb = self.char_embbedings(torch.as_tensor(char_index, device=self.device, dtype=torch.int64))
_, _, max_char_len, self.char_emb_size = char_emb.shape
flattened_char_emb = char_emb.reshape([n_sentences * max_sentence_length, max_char_len, self.char_emb_size]).transpose_(1,2) # n_words, max_word_len, char_emb_size (N, L, C)->(N, C, L)
flattened_aggregated_char_emb = self.char_emb_cnn(flattened_char_emb)
aggregated_char_emb = flattened_aggregated_char_emb.reshape(n_sentences, max_sentence_length, flattened_aggregated_char_emb.shape[1])
context_emb_list.append(aggregated_char_emb)
# pdb.set_trace()
context_emb = torch.cat(context_emb_list, 2)
context_emb = self.lexical_Dropout(context_emb)
candidate_scores_mask = torch.logical_and(torch.unsqueeze(text_len_mask,dim=1),torch.unsqueeze(text_len_mask,dim=2))
candidate_scores_mask = torch.triu(candidate_scores_mask, diagonal=0)
flattened_candidate_scores_mask = candidate_scores_mask.view(-1)
# pdb.set_trace()
#----------through rnn------------
pack = pack_padded_sequence(context_emb, text_len, batch_first=True, enforce_sorted=False)
pack, _ = self.rnn(pack)
context_outputs, _ = pad_packed_sequence(pack, batch_first=True, total_length=context_emb.shape[1])
# context_outputs = self.mlpx(context_emb)
#--------biaffine----------------
candidate_starts_emb = self.start_project(context_outputs)
candidate_end_emb = self.end_project(context_outputs)
candidate_ner_scores = self.bilinear(candidate_starts_emb, candidate_end_emb)
candidate_ner_scores = candidate_ner_scores.reshape(-1,self.num_types+1)[flattened_candidate_scores_mask==True]
# pdb.set_trace()
if is_train:
loss = self.criterion(input=candidate_ner_scores, target=gold_labels)
loss = loss.sum()
else:
loss = 0
return loss, candidate_ner_scores
def get_pred_ner(self, sentences, span_scores, is_flat_ner): # span_scores: shape [num_sentence, max_sentence_length,max_sentence_length,types+1]
candidates = []
span_scores = span_scores.detach().cpu().numpy()
for sid,sent in enumerate(sentences):
for s in range(len(sent)):
for e in range(s,len(sent)):
candidates.append((sid,s,e))
top_spans = [[] for _ in range(len(sentences))]
        for i, type in enumerate(np.argmax(span_scores,axis=1)): # span_scores: (num_candidates, num_types+1); type: predicted label index, i: candidate span index
if type > 0:
sid, s,e = candidates[i]
top_spans[sid].append((s,e,type,span_scores[i,type]))
        top_spans = [sorted(top_span,reverse=True,key=lambda x:x[3]) for top_span in top_spans] # for each sentence, sort its candidate spans by score in descending order
sent_pred_mentions = [[] for _ in range(len(sentences))]
for sid, top_span in enumerate(top_spans):
for ns,ne,t,_ in top_span:
                for ts,te,_ in sent_pred_mentions[sid]: # ts, te are start/end positions of spans already kept; ns, ne must be compatible with all of them
                    if ns < ts <= ne < te or ts < ns <= te < ne: # partial overlap (clash): ns, ne is incompatible, move on to the next span
#for both nested and flat ner no clash is allowed
break
if is_flat_ner and (ns <= ts <= te <= ne or ts <= ns <= ne <= te):
#for flat ner nested mentions are not allowed
break
else:
sent_pred_mentions[sid].append((ns,ne,t))
# pdb.set_trace()
pred_mentions = set((sid,s,e,t) for sid, spr in enumerate(sent_pred_mentions) for s,e,t in spr)
return pred_mentions
#------------------------------------------------------------
# for training
# def load_datasets(self, datatype='train'):
# if datatype=='train':
# self.train_dataloader = train_dataloader(config)
# self.batch_len = len(self.train_dataloader)
# elif datatype=='eval':
# self.eval_dataloader = eval_dataloader(config)
# else:
# pdb.set_trace()
# def step(self):
# if self.batch_len is not None:
# batch = self.train_dataloader[self.global_step%self.batch_len]
# loss_step = self.forward(batch)
# else:
# pdb.set_trace()
# self.global_step += 1
def evaluate(self, eval_dataloader, is_final_test=False):
# self.load_eval_data()
self.eval()
tp,fn,fp = 0,0,0
start_time = time.time()
num_words = 0
sub_tp,sub_fn,sub_fp = [0] * self.num_types,[0]*self.num_types, [0]*self.num_types
is_flat_ner = 'flat_ner' in self.config and self.config['flat_ner']
for batch_num, batch in enumerate(eval_dataloader.batches):
batch_tensor, batch_data = batch
# tokens, context_word_emb, char_index, text_len, gold_labels = batch_tensor
# pdb.set_trace()
_, candidate_ner_scores = self.forward(batch, is_train=False) # (439, 8)
num_words += sum(len(tok) for tok in batch_data['sentences'])
gold_ners = set([(sid,s,e, self.ner_maps[t]) for sid, ner in enumerate(batch_data['ners']) for s,e,t in ner]) # {(1, 3, 3, 4), (0, 0, 0, 4), (1, 10, 14, 6), (0, 3, 3, 3), (1, 20, 25, 3), (0, 5, 5, 3), (1, 20, 20, 3), (1, 22, 22, 3)}
pred_ners = self.get_pred_ner(batch_data["sentences"], candidate_ner_scores,is_flat_ner)
tp += len(gold_ners & pred_ners)
fn += len(gold_ners - pred_ners)
fp += len(pred_ners - gold_ners)
if is_final_test:
for i in range(self.num_types):
sub_gm = set((sid,s,e) for sid,s,e,t in gold_ners if t ==i+1)
sub_pm = set((sid,s,e) for sid,s,e,t in pred_ners if t == i+1)
sub_tp[i] += len(sub_gm & sub_pm)
sub_fn[i] += len(sub_gm - sub_pm)
sub_fp[i] += len(sub_pm - sub_gm)
if batch_num % 10 == 0:
print("Evaluated {}/{} examples.".format(batch_num + 1, len(eval_dataloader.batches)))
used_time = time.time() - start_time
print("Time used: %d second, %.2f w/s " % (used_time, num_words*1.0/used_time))
m_r = 0 if tp == 0 else float(tp)/(tp+fn)
m_p = 0 if tp == 0 else float(tp)/(tp+fp)
m_f1 = 0 if m_p == 0 else 2.0*m_r*m_p/(m_r+m_p)
print("Mention F1: {:.2f}%".format(m_f1*100))
print("Mention recall: {:.2f}%".format(m_r*100))
print("Mention precision: {:.2f}%".format(m_p*100))
if is_final_test:
print("****************SUB NER TYPES********************")
for i in range(self.num_types):
sub_r = 0 if sub_tp[i] == 0 else float(sub_tp[i]) / (sub_tp[i] + sub_fn[i])
sub_p = 0 if sub_tp[i] == 0 else float(sub_tp[i]) / (sub_tp[i] + sub_fp[i])
sub_f1 = 0 if sub_p == 0 else 2.0 * sub_r * sub_p / (sub_r + sub_p)
print("{} F1: {:.2f}%".format(self.ner_types[i],sub_f1 * 100))
print("{} recall: {:.2f}%".format(self.ner_types[i],sub_r * 100))
print("{} precision: {:.2f}%".format(self.ner_types[i],sub_p * 100))
summary_dict = {}
summary_dict["Mention F1"] = m_f1
summary_dict["Mention recall"] = m_r
summary_dict["Mention precision"] = m_p
return utils.make_summary(summary_dict), m_f1
#-----------modules------------------
#-----------bilinear-----------------
class Sparse_dropout(nn.Module):
def __init__(self, p):
super(Sparse_dropout, self).__init__()
self.dropout_rate = p
def forward(self, input, noise_shape):
if not self.training:
return input
shapes = input.shape
noise_shape = list(noise_shape)
broadcast_dims = []
# pdb.set_trace()
for idx, dim_pair in enumerate(zip(shapes, noise_shape)):
if dim_pair[1]>1:
broadcast_dims.append((idx, dim_pair[0]))
mask_dims = []
for dim in broadcast_dims:
mask_dims.append(dim[1])
mask = torch.bernoulli((torch.ones(mask_dims, device=input.device)*(1-self.dropout_rate)).reshape(noise_shape))*(1/(1-self.dropout_rate))
mask.to(input.dtype)
return input*mask
class bilinear_classifier(nn.Module):
def __init__(self, dropout, input_size_x, input_size_y, output_size, bias_x=True, bias_y=True):
super(bilinear_classifier, self).__init__()
# self.batch_size = batch_size
# self.bucket_size = bucket_size
# self.input_size = input_size
# pdb.set_trace()
# self.dropout_rate = 0
self.dropout_rate = dropout
self.output_size = output_size
self.dropout = Sparse_dropout(p=self.dropout_rate)
self.biaffine = biaffine_mapping(
input_size_x, input_size_y,
output_size, bias_x, bias_y,
)
def forward(self, x_bnv, y_bnv):
batch_size, input_size_x = x_bnv.shape[0], x_bnv.shape[-1]
input_size_y = y_bnv.shape[-1]
noise_shape_x = [batch_size, 1, input_size_x]
noise_shape_y = [batch_size, 1, input_size_y]
x = self.dropout(x_bnv, noise_shape_x)
y = self.dropout(y_bnv, noise_shape_y)
output = self.biaffine(x, y)
#TODO reshape output
if self.output_size == 1:
output = output.squeeze(-1)
return output
class biaffine_mapping(nn.Module):
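    # Computes biaffine scores s[b, i, j, r] = [x_i; 1]^T W_r [y_j; 1] for every pair of
    # positions (i, j) and every output label r, i.e. the raw (start, end, label) span scores.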
def __init__(self, input_size_x, input_size_y, output_size, bias_x, bias_y, initializer=None):
super(biaffine_mapping, self).__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.output_size = output_size
self.initilizer = None
if self.bias_x:
input_size1 = input_size_x + 1
input_size2 = input_size_y + 1
self.biaffine_map = nn.Parameter(torch.Tensor(input_size1, output_size, input_size2))
self.initialize()
def initialize(self):
if self.initilizer == None:
torch.nn.init.orthogonal_(self.biaffine_map)
else:
self.initilizer(self.biaffine_map)
def forward(self, x, y):
batch_size, bucket_size = x.shape[0], x.shape[1]
if self.bias_x:
x = torch.cat([x, torch.ones([batch_size, bucket_size, 1], device=x.device)], axis=2)
if self.bias_y:
y = torch.cat([y, torch.ones([batch_size, bucket_size, 1], device=y.device)], axis=2)
#reshape
x_set_size, y_set_size = x.shape[-1], y.shape[-1]
# b,n,v1 -> b*n, v1
x = x.reshape(-1, x_set_size)
# # b,n,v2 -> b*n, v2
# y = y.reshape(-1, y_set_size)
biaffine_map = self.biaffine_map.reshape(x_set_size, -1) # v1, r, v2 -> v1, r*v2
# b, n, r*v2 -> b, n*r, v2
biaffine_mapping = (torch.matmul(x, biaffine_map)).reshape(batch_size, -1, y_set_size)
# (b, n*r, v2) bmm (b, n, v2) -> (b, n*r, n) -> (b, n, r, n)
biaffine_mapping = (biaffine_mapping.bmm(torch.transpose(y, 1, 2))).reshape(batch_size, bucket_size, self.output_size, bucket_size)
# (b, n, r, n) -> (b, n, n, r)
biaffine_mapping = biaffine_mapping.transpose(2, 3)
return biaffine_mapping
#------------ linear --------------------------------------
def projection(emb_size, output_size, initializer=None):
return ffnn(emb_size, 0, -1, output_size, dropout=0, output_weights_initializer=initializer)
class ffnn(nn.Module):
def __init__(self, emb_size, num_layers, hidden_size, output_size, dropout, output_weights_initializer=None):
super(ffnn, self).__init__()
self.dropout = torch.nn.Dropout(p=dropout)
self.weights = nn.Parameter(torch.Tensor(emb_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size))
self.activation = torch.nn.ReLU()
self.num_layers = num_layers
self.emb_size = emb_size
self.hidden_size = hidden_size
self.output_size = output_size
self.initializer = output_weights_initializer
self.initialize()
def initialize(self):
if self.initializer == None:
torch.nn.init.xavier_uniform_(self.weights, gain=1)
else:
# pdb.set_trace()
self.initializer(self.weights, gain=1)
nn.init.zeros_(self.bias)
def forward(self, inputs):
# pdb.set_trace()
current_inputs = inputs
if len(get_shape(inputs))==3:
batch_size, seqlen, emb_size = get_shape(inputs)
current_inputs = inputs.reshape(batch_size*seqlen, emb_size)
emb_size = get_shape(current_inputs)[-1]
# if emb_size != self.emb_size:
# pdb.set_trace()
assert emb_size==self.emb_size,'last dim of input does not match this layer'
# if self.dropout is not None or self.dropout > 0:
# output = self.dropout(current_inputs)
#TODO num_layers>0 case.
outputs = current_inputs.matmul(self.weights) + self.bias
if len(get_shape(inputs))==3:
outputs = outputs.reshape(batch_size, seqlen, self.output_size)
return outputs
#--------------lstm ---------------------
class BiLSTM_1(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, dropout=None):
super(BiLSTM_1, self).__init__()
self.input_size = input_size #emb_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout_rate = dropout
self.f_cells = nn.ModuleList()
self.b_cells = nn.ModuleList()
for _ in range(self.num_layers):
self.f_cells.append(LstmCell(input_size, hidden_size, dropout))
self.b_cells.append(LstmCell(input_size, hidden_size, dropout))
input_size = 2*hidden_size
self.dropout = torch.nn.Dropout(p=dropout)
self.mlp = projection(emb_size=input_size, output_size=input_size)
# self.initialize()
def __repr__(self):
s = self.__class__.__name__ + '('
s += f"{self.input_size}, {self.hidden_size}"
if self.num_layers > 1:
s += f", num_layers={self.num_layers}"
if self.dropout_rate > 0:
s += f", dropout={self.dropout_rate}"
s += ')'
return s
def permute_hidden(self, hx, permutation):
if permutation is None:
return hx
h = apply_permutation(hx[0], permutation)
c = apply_permutation(hx[1], permutation)
return h, c
def layer_forward(self, x, hx, cell, batch_sizes, reverse=False):
hx_0 = hx_i = hx
hx_n, output = [], []
steps = reversed(range(len(x))) if reverse else range(len(x))
# if self.training:
# hid_mask = SharedDropout.get_mask(hx_0[0], self.dropout)
for t in steps:
last_batch_size, batch_size = len(hx_i[0]), batch_sizes[t]
if last_batch_size < batch_size:
hx_i = [torch.cat((h, ih[last_batch_size:batch_size]))
for h, ih in zip(hx_i, hx_0)]
else:
hx_n.append([h[batch_size:] for h in hx_i])
hx_i = [h[:batch_size] for h in hx_i]
# pdb.set_trace()
hx_i = [h for h in cell(x[t], hx_i)]
output.append(hx_i[0])
# if self.training:
# hx_i[0] = hx_i[0] * hid_mask[:batch_size]
if reverse:
hx_n = hx_i
output.reverse()
else:
hx_n.append(hx_i)
hx_n = [torch.cat(h) for h in zip(*reversed(hx_n))]
# pdb.set_trace()
output = torch.cat(output)
return output, hx_n
def forward(self, sequence, hx=None):
# pdb.set_trace()
x, batch_sizes = sequence.data, sequence.batch_sizes.tolist()
batch_size = batch_sizes[0]
h_n, c_n = [], []
if hx is None:
# pdb.set_trace()
h = self.f_cells[0].initial_state[0].repeat([batch_size, 1])
c = self.f_cells[0].initial_state[1].repeat([batch_size, 1])
h = torch.unsqueeze(torch.unsqueeze(h, 0), 0).repeat([self.num_layers, 2, 1, 1])
c = torch.unsqueeze(torch.unsqueeze(c, 0), 0).repeat([self.num_layers, 2, 1, 1])
else:
h, c = self.permute_hidden(hx, sequence.sorted_indices)
h = h.view(self.num_layers, 2, batch_size, self.hidden_size)
c = c.view(self.num_layers, 2, batch_size, self.hidden_size)
for i in range(self.num_layers):
current_input = x
x = torch.split(x, batch_sizes)
# if self.training:
# mask = SharedDropout.get_mask(x[0], self.dropout)
# x = [i * mask[:len(i)] for i in x]
x_f, (h_f, c_f) = self.layer_forward(x=x,
hx=(h[i,0], c[i,0]),
cell=self.f_cells[i],
batch_sizes=batch_sizes
)
x_b, (h_b, c_b) = self.layer_forward(x=x,
hx=(h[i, 1], c[i, 1]),
cell=self.b_cells[i],
batch_sizes=batch_sizes,
reverse=True)
h_n.append(torch.stack((h_f, h_b)))
c_n.append(torch.stack((c_f, c_b)))
text_outputs = torch.cat((x_f, x_b), -1)
text_outputs = self.dropout(text_outputs)
if i > 0:
# pdb.set_trace()
highway_gates = torch.sigmoid(self.mlp(text_outputs))
text_outputs = highway_gates*text_outputs + (1-highway_gates)*current_input
x = text_outputs
x = PackedSequence(x,
sequence.batch_sizes,
sequence.sorted_indices,
sequence.unsorted_indices)
hx = torch.cat(h_n, 0), torch.cat(c_n, 0)
hx = self.permute_hidden(hx, sequence.unsorted_indices)
return x, hx
class LstmCell(nn.Module):
def __init__(self, input_size, hidden_size, dropout=0):
super(LstmCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.dropout = torch.nn.Dropout(p=dropout)
self.mlp = projection(emb_size=input_size+hidden_size, output_size=3*hidden_size,
initializer=self._block_orthonormal_initializer(output_sizes=[hidden_size] * 3)
)
self.initial_cell_state = nn.Parameter(torch.Tensor(1, hidden_size))
self.initial_hidden_state = nn.Parameter(torch.Tensor(1, hidden_size))
self.initialize()
self._initial_state = (self.initial_cell_state, self.initial_hidden_state)
def initialize(self):
torch.nn.init.xavier_uniform_(self.initial_cell_state, gain=1)
torch.nn.init.xavier_uniform_(self.initial_hidden_state, gain=1)
def forward(self, inputs, states):
batch_size = get_shape(inputs)[0]
_dropout_mask = self.dropout(torch.ones(batch_size, self.hidden_size, device=inputs.device))
h, c = states
if self.training:
h *= _dropout_mask
concat = self.mlp(inputs=torch.cat([inputs, h], axis=1))
i, j, o = torch.chunk(input=concat, chunks=3, dim=1)
i = torch.sigmoid(i)
new_c = (1-i)*c + i*torch.tanh(j)
new_h = torch.tanh(new_c) * torch.sigmoid(o)
new_state = (new_h, new_c)
return new_state
@property
def initial_state(self):
return self._initial_state
def _orthonormal_initializer(self, weights, gain=1.0):
if len(weights.shape)>2:
pdb.set_trace()
device = weights.device
dtype = weights.dtype
# pdb.set_trace()
shape0, shape1 = get_shape(weights)
M1 = torch.randn(size=(shape0, shape0), dtype=dtype, device=device)
M2 = torch.randn(size=(shape1, shape1), dtype=dtype, device=device)
Q1, R1 = torch.qr(M1) # let weights.shape= (s0,s1) and sm = min(s0, s1), then Q1:(s0,sm), R1:(sm,s1)
Q2, R2 = torch.qr(M2)
Q1 = Q1 * torch.sign(torch.diag(R1))
Q2 = Q2 * torch.sign(torch.diag(R2))
n_min = min(shape0, shape1)
with torch.no_grad():
q = torch.matmul(Q1[:, :n_min], Q2[:n_min, :])
weights.view_as(q).copy_(q)
weights.mul_(gain)
return weights
def _block_orthonormal_initializer(self, output_sizes):
def _initializer(weights, gain=1.0):
shape = get_shape(weights)
assert len(shape) == 2
assert sum(output_sizes) == shape[1]
initializer = self._orthonormal_initializer
with torch.no_grad():
# pdb.set_trace()
q_list = [initializer(a, gain) for a in torch.split(weights,split_size_or_sections=output_sizes, dim=1)]
q = torch.cat(q_list, axis=1)
weights.view_as(q).copy_(q)
return weights
return _initializer
#---------character embedding-----------------------
class cnn(nn.Module):
def __init__(self, emb_size, kernel_sizes, num_filter):
super(cnn, self).__init__()
self.emb_size = emb_size
self.num_layers = len(kernel_sizes)
self.conv_layers = nn.ModuleList()
# self.weights = nn.ModuleList()
# self.biases = nn.ModuleList()
for i, filter_size in enumerate(kernel_sizes):
self.conv_layers.append(cnn_layer(in_channels=emb_size, out_channels=num_filter,
kernel_size=kernel_sizes[i], stride=1,
padding=0, bias=True))
def forward(self, input):
outputs = []
# pdb.set_trace()
for i in range(self.num_layers):
output = self.conv_layers[i](input) # (n_words, n_chars-filter_size+1, n_filters)
pooled = torch.max(output, dim=2)[0] # channel is dim1.
outputs.append(pooled)
return torch.cat(outputs, 1)
class cnn_layer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
super(cnn_layer, self).__init__()
self.conv = torch.nn.Conv1d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias)
self.relu = torch.nn.ReLU()
def forward(self, input):
return self.relu(self.conv(input))
|
{"hexsha": "183b7cf6737730be32eb70c4b37b94fc5d059621", "size": 23168, "ext": "py", "lang": "Python", "max_stars_repo_path": "flair/models/biaffine_dp.py", "max_stars_repo_name": "db-bionlp/CLNER", "max_stars_repo_head_hexsha": "77910311acf0411252b9fea8c3e6efb7175eb21f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2021-05-29T05:37:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T02:35:25.000Z", "max_issues_repo_path": "flair/models/biaffine_dp.py", "max_issues_repo_name": "db-bionlp/CLNER", "max_issues_repo_head_hexsha": "77910311acf0411252b9fea8c3e6efb7175eb21f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-07-06T15:46:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T04:03:01.000Z", "max_forks_repo_path": "flair/models/biaffine_dp.py", "max_forks_repo_name": "Alibaba-NLP/KB-NER", "max_forks_repo_head_hexsha": "d8ddc6dbee17251622584b894dbb5765850b0add", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-08-04T05:23:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T07:11:33.000Z", "avg_line_length": 33.2395982783, "max_line_length": 236, "alphanum_fraction": 0.6909962017, "include": true, "reason": "import numpy", "num_tokens": 6717}
|
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import numpy as np
from PIL import Image
import argparse
import os
def imload(filename: str, im_width: int, im_height: int, datatype: str):
"""Converts an image to a numpy array and resizes.
Args:
filename (str): Image filename.
im_width (int): Image width.
im_height (int): Image height.
datatype (str): Datatype to convert to (float/uint8). Float scales to <0;1> range.
Returns:
np.array: Image as a numpy array.
"""
img = Image.open(filename)
img = img.resize((im_width, im_height))
img_rgb = img.convert('RGB')
numpy_img_rgb = np.array(img_rgb)
if datatype == "float":
numpy_img_rgb = numpy_img_rgb.astype('f') / 255.0
elif datatype == "uint8":
numpy_img_rgb = numpy_img_rgb.astype(np.uint8)
else:
raise Exception("Unsupported datatype.")
return numpy_img_rgb
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Converts an image to a resized NumPy array and saves it as an .npy file.')
parser.add_argument('--image', action='store', dest='image_file',
help='Input image.')
parser.add_argument('--output', action='store', dest='output_npy',
help='Output NPY file.')
parser.add_argument('--width', action='store', dest='image_width',
help='Image width.')
parser.add_argument('--height', action='store', dest='image_height',
help='Image height.')
parser.add_argument('--datatype', action='store', dest='datatype', default="float",
help='Type of data (float, uint8).')
args = parser.parse_args()
np_arr = imload(args.image_file, int(args.image_width), int(args.image_height), args.datatype)
output_filename = args.output_npy
filename_base = os.path.basename(args.image_file)
filename, ext = os.path.splitext(filename_base)
if output_filename is None:
output_filename = os.path.join(os.getcwd(), filename)
else:
dir_path = os.path.dirname(output_filename)
# path does not exist
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# dir and not file is specified
if os.path.isdir(output_filename):
output_filename = os.path.join(output_filename, filename)
np.save(output_filename, np_arr)
|
{"hexsha": "e4c654a10561d3d96cc4d39bf2f4902033512a7c", "size": 2476, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/pyarmnn/scripts/image_to_npy.py", "max_stars_repo_name": "PetervdPerk-NXP/pyarmnn-release", "max_stars_repo_head_hexsha": "2008c270f7c7c84a930842c845138628c8b95713", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-02-27T07:45:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-25T12:07:12.000Z", "max_issues_repo_path": "python/pyarmnn/scripts/image_to_npy.py", "max_issues_repo_name": "MitchellTesla/PyArmNN", "max_issues_repo_head_hexsha": "cbe37a0364b00f32ac2a8ced74eed5d576a0d52c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-28T15:01:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T18:24:02.000Z", "max_forks_repo_path": "python/pyarmnn/scripts/image_to_npy.py", "max_forks_repo_name": "MitchellTesla/PyArmNN", "max_forks_repo_head_hexsha": "cbe37a0364b00f32ac2a8ced74eed5d576a0d52c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-31T11:41:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-06T07:58:39.000Z", "avg_line_length": 36.4117647059, "max_line_length": 99, "alphanum_fraction": 0.6223747981, "include": true, "reason": "import numpy", "num_tokens": 544}
|
from pprint import pprint
import numpy as np
from skimage.data import camera
from skimage.exposure import rescale_intensity
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
from aydin.io.datasets import add_noise
from aydin.it.transforms.padding import PaddingTransform
from aydin.it.transforms.range import RangeTransform
from aydin.restoration.denoise.noise2selffgr import Noise2SelfFGR
def test_configure():
implementations = Noise2SelfFGR().implementations
pprint(implementations)
configurable_arguments = Noise2SelfFGR().configurable_arguments
pprint(configurable_arguments)
implementations_description = Noise2SelfFGR().implementations_description
pprint(implementations_description)
def test_run_n2s_fgr():
# Prepare the noisy classic_denoisers camera image
image = camera().astype(np.float32)
image = rescale_intensity(image, in_range='image', out_range=(0, 1))
noisy_image = add_noise(image)
# Call the Noise2Self restoration
transforms = [
{"class": RangeTransform, "kwargs": {}},
{"class": PaddingTransform, "kwargs": {}},
]
n2s = Noise2SelfFGR(variant="fgr-cb", it_transforms=transforms)
n2s.train(noisy_image)
denoised_image = n2s.denoise(noisy_image).clip(0, 1)
# Check if denoised image satisfies some checks
assert psnr(denoised_image, image) >= 20.0
assert ssim(denoised_image, image) >= 0.7
|
{"hexsha": "13d2842b2c14045cbc80ad866454c9831edab4fc", "size": 1481, "ext": "py", "lang": "Python", "max_stars_repo_path": "aydin/restoration/denoise/test/test_n2s_fgr.py", "max_stars_repo_name": "royerloic/aydin", "max_stars_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 78, "max_stars_repo_stars_event_min_datetime": "2021-11-08T16:11:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T17:51:04.000Z", "max_issues_repo_path": "aydin/restoration/denoise/test/test_n2s_fgr.py", "max_issues_repo_name": "royerloic/aydin", "max_issues_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-11-08T17:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T17:46:55.000Z", "max_forks_repo_path": "aydin/restoration/denoise/test/test_n2s_fgr.py", "max_forks_repo_name": "royerloic/aydin", "max_forks_repo_head_hexsha": "f9c61a24030891d008c318b250da5faec69fcd7d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-11-09T17:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T00:37:57.000Z", "avg_line_length": 32.9111111111, "max_line_length": 77, "alphanum_fraction": 0.7643484132, "include": true, "reason": "import numpy", "num_tokens": 361}
|
from __future__ import print_function
from __future__ import division
from sklearn.utils import check_random_state
from sklearn import preprocessing as prep
from utils.data import load_data, show_data_splits, shape_data
from utils.evaluation import evaluate
from utils.profiles import select_model, show_design, train, fit, compute_scores
import theano
import lasagne as lg
import numpy as np
import argparse
import os
'''
Hybrid music playlist continuation based on a song-to-playlist classifier.
We learn a classifier that takes song features as inputs and predicts the
playlists songs belong to. Once it is learned, such classifier can be
used to populate a matrix of song-playlist scores describing how well a song
and a playlist fit together. Thus, a playlist can be extended by selecting
the songs with highest score. This approach is "hybrid" in the usual sense in
the recommender systems literature, i.e., it combines content (given by the
song features) and collaborative-filtering information (given by playlist examples).
As it is, this approach only works in the so-called weak generalization setting.
That is, the model is trained on the same playlists that will be extended.
'''
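# --- Illustrative sketch (added here for clarity; not part of the original script) ---
# The description above says that, once the classifier has populated a matrix of
# song-playlist scores, a playlist is extended by picking the songs with the
# highest score. A minimal version of that ranking step could look as follows;
# the names `scores`, `playlist_idx`, `query_mask` and `top_k` are hypothetical,
# and `scores` is assumed to be a dense (songs x playlists) array.
def _extend_playlist_sketch(scores, playlist_idx, query_mask, top_k=10):
    song_scores = np.asarray(scores[:, playlist_idx], dtype=float).ravel()
    # never recommend songs that already appear in the playlist's query part
    song_scores[query_mask] = -np.inf
    # return the indices of the top_k highest-scoring remaining songs
    return np.argsort(-song_scores)[:top_k]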
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hybrid music playlist continuation based on a song-to-playlist classifier.')
parser.add_argument('--model', type=str, help='path to the model specification file', metavar='')
parser.add_argument('--dataset', type=str, help='path to the playlists dataset directory', metavar='')
parser.add_argument('--msd', type=str, help='path to the MSD directory', metavar='')
    parser.add_argument('--train', action='store_true', help='train the song-to-playlist classifier with monitoring')
parser.add_argument('--fit', action='store_true', help='fit the song-to-playlist classifier')
parser.add_argument('--test', action='store_true', help='evaluate the playlist continuations')
parser.add_argument('--ci', action='store_true', help='compute confidence intervals if True')
parser.add_argument('--song_occ', type=int, help='test on songs observed song_occ times during training', nargs='+', metavar='')
parser.add_argument('--metrics_file', type=str, help='file name to save metrics', metavar='')
parser.add_argument('--seed', type=int, help='set random behavior', metavar='')
args = parser.parse_args()
# set random behavior
rng = check_random_state(args.seed)
lg.random.set_rng(rng)
# set model configuration
model = select_model(args.model)
# prepare output directory
data_name = os.path.basename(os.path.normpath(args.dataset))
out_dir = os.path.join('params', 'profiles', model.name + '_' + data_name + '_weak')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# load data: playlists, splits, features and artist info
data = load_data(args.dataset, args.msd, model)
playlists_coo, split_weak, _, features, song2artist = data
# playlists_coo are the playlists stored in coordinate format
playlists_idx, songs_idx, _, idx2song = playlists_coo
# each playlist is split into a "query" of ~80% of the songs (train_idx +
# valid_idx) and a "continuation" of ~20% of the songs (test_idx)
train_idx, valid_idx, test_idx = split_weak
# define splits for this experiment
# train model on the training queries
# validate model on the validation queries
# fit the model on the full queries
# extend all the playlists, using all queries and continuations
train_idx = train_idx
valid_idx = valid_idx
fit_idx = np.hstack((train_idx, valid_idx))
query_idx = fit_idx
cont_idx = test_idx
# provide data information
show_data_splits(playlists_idx, songs_idx, idx2song, song2artist,
train_idx, valid_idx, fit_idx, query_idx, cont_idx)
# provide model information
print('\nNetwork:')
show_design(model)
if args.train:
#
# train the hybrid model while validating on withheld playlists
#
# prepare input song features and playlist targets at training
X_train, Y_train = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=train_idx
)
# prepare input song features and playlist targets at validation
X_valid, Y_valid = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=valid_idx
)
# preprocess input features if required
# use the training song features to standardize the validation data
if model.standardize:
scaler = prep.RobustScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
if model.normalize:
X_train = prep.normalize(X_train, norm=model.normalize)
X_valid = prep.normalize(X_valid, norm=model.normalize)
# train the classifier
train(
model=model,
train_input=X_train.astype(theano.config.floatX),
train_target=Y_train.astype(np.int8),
valid_input=X_valid.astype(theano.config.floatX),
valid_target=Y_valid.astype(np.int8),
out_dir=out_dir,
random_state=rng
)
if args.fit:
#
# fit the hybrid model
#
# prepare input song features and playlist targets at training
X_fit, Y_fit = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=fit_idx
)
# preprocess input features if required
if model.standardize:
X_fit = prep.robust_scale(X_fit)
if model.normalize:
X_fit = prep.normalize(X_fit, norm=model.normalize)
# fit the classifier
fit(
model=model,
fit_input=X_fit.astype(theano.config.floatX),
fit_target=Y_fit.astype(np.int8),
out_dir=out_dir,
random_state=rng
)
if args.test:
#
# extend the playlists in the query split and evaluate the
# continuations by comparing them to actual withheld continuations
#
# prepare input song features and playlist targets at test
X_cont, Y_cont = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=cont_idx
)
# preprocess input features if required
# use the training song features to standardize the test data
if model.standardize:
X_fit, _ = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='train', subset=fit_idx
)
scaler = prep.RobustScaler()
scaler.fit(X_fit)
X_cont = scaler.transform(X_cont)
if model.normalize:
X_cont = prep.normalize(X_cont, norm=model.normalize)
# songs in the "query" playlists need to be masked to make sure that
# they are not recommended as continuations
_, Y_query = shape_data(
playlists_idx, songs_idx, idx2song, features,
mode='test', subset=query_idx
)
# get number of song occurrences when fitting for cold-start analysis
# Y_fit = Y_query
train_occ = np.asarray(Y_query.sum(axis=1)).flatten()
# compute the song-playlist scores
cont_output = compute_scores(
model=model,
params_dir=out_dir,
cont_input=X_cont.astype(theano.config.floatX),
cont_target=Y_cont.astype(np.int8)
)
# evaluate the continuations
evaluate(
scores=[cont_output.T],
targets=[Y_cont.T.tocsr()],
queries=[Y_query.T.tocsr()],
train_occ=[train_occ],
k_list=[10, 30, 100],
ci=args.ci,
song_occ=args.song_occ,
metrics_file=args.metrics_file
)
|
{"hexsha": "844aff8b757e567eab04101d17c08cb3e245797f", "size": 8032, "ext": "py", "lang": "Python", "max_stars_repo_path": "profiles_weak.py", "max_stars_repo_name": "andreuvall/HybridPlaylistContinuation", "max_stars_repo_head_hexsha": "6e31e50050c61a2c3ae55183e18b665fd54c7250", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-06-04T11:42:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-19T12:16:01.000Z", "max_issues_repo_path": "profiles_weak.py", "max_issues_repo_name": "andreuvall/HybridPlaylistContinuation", "max_issues_repo_head_hexsha": "6e31e50050c61a2c3ae55183e18b665fd54c7250", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "profiles_weak.py", "max_forks_repo_name": "andreuvall/HybridPlaylistContinuation", "max_forks_repo_head_hexsha": "6e31e50050c61a2c3ae55183e18b665fd54c7250", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-08-27T17:02:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-09T01:21:09.000Z", "avg_line_length": 37.8867924528, "max_line_length": 132, "alphanum_fraction": 0.6624750996, "include": true, "reason": "import numpy,import theano", "num_tokens": 1715}
|
import numpy as np
class Kinematics:
def __init__(self):
self._l = 0.14
self._w = 0.075
self._hip = 0.04
self._leg = 0.1
self._foot = 0.1
self.y_dist = 0.11
self.x_dist = self._l
self.height = 0.15
# frame vectors
self._hip_front_right_v = np.array([self._l / 2, -self._w / 2, 0])
self._hip_front_left_v = np.array([self._l / 2, self._w / 2, 0])
self._hip_rear_right_v = np.array([-self._l / 2, -self._w / 2, 0])
self._hip_rear_left_v = np.array([-self._l / 2, self._w / 2, 0])
self._foot_front_right_v = np.array([self.x_dist / 2, -self.y_dist / 2, -self.height])
self._foot_front_left_v = np.array([self.x_dist / 2, self.y_dist / 2, -self.height])
self._foot_rear_right_v = np.array([-self.x_dist / 2, -self.y_dist / 2, -self.height])
self._foot_rear_left_v = np.array([-self.x_dist / 2, self.y_dist / 2, -self.height])
self._frames = np.asmatrix([[self.x_dist / 2, -self.y_dist / 2, -self.height],
[self.x_dist / 2, self.y_dist / 2, -self.height],
[-self.x_dist / 2, -self.y_dist / 2, -self.height],
[-self.x_dist / 2, self.y_dist / 2, -self.height]])
@staticmethod
def get_Rx(x):
return np.asmatrix([[1, 0, 0, 0],
[0, np.cos(x), -np.sin(x), 0],
[0, np.sin(x), np.cos(x), 0],
[0, 0, 0, 1]])
@staticmethod
def get_Ry(y):
return np.asmatrix([[np.cos(y), 0, np.sin(y), 0],
[0, 1, 0, 0],
[-np.sin(y), 0, np.cos(y), 0],
[0, 0, 0, 1]])
@staticmethod
def get_Rz(z):
return np.asmatrix([[np.cos(z), -np.sin(z), 0, 0],
[np.sin(z), np.cos(z), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def get_Rxyz(self, x, y, z):
if x != 0 or y != 0 or z != 0:
R = self.get_Rx(x) * self.get_Ry(y) * self.get_Rz(z)
return R
else:
return np.identity(4)
def get_RT(self, orientation, position):
roll = orientation[0]
pitch = orientation[1]
yaw = orientation[2]
x0 = position[0]
y0 = position[1]
z0 = position[2]
translation = np.asmatrix([[1, 0, 0, x0],
[0, 1, 0, y0],
[0, 0, 1, z0],
[0, 0, 0, 1]])
rotation = self.get_Rxyz(roll, pitch, yaw)
return rotation * translation
def transform(self, coord, rotation, translation):
vector = np.array([[coord[0]],
[coord[1]],
[coord[2]],
[1]])
transform_vector = self.get_RT(rotation, translation) * vector
return np.array([transform_vector[0, 0], transform_vector[1, 0], transform_vector[2, 0]])
@staticmethod
def check_domain(domain):
if domain > 1 or domain < -1:
if domain > 1:
domain = 0.99
else:
domain = -0.99
return domain
    def _solve_IK(self, coord, hip, leg, foot, right_side):
        # law-of-cosines term for the knee joint; clamp it into a valid cosine range
        domain = (coord[1] ** 2 + (-coord[2]) ** 2 - hip ** 2 + (-coord[0]) ** 2 - leg ** 2 - foot ** 2) / (2 * foot * leg)
        domain = self.check_domain(domain)
        gamma = np.arctan2(-np.sqrt(1 - domain ** 2), domain)
        # squared distance from the hip joint to the foot in the leg plane;
        # guard against a negative radicand caused by numerical noise
        sqrt_value = coord[1] ** 2 + (-coord[2]) ** 2 - hip ** 2
        if sqrt_value < 0.0:
            sqrt_value = 0.0
        alpha = np.arctan2(-coord[0], np.sqrt(sqrt_value)) - np.arctan2(foot * np.sin(gamma), leg + foot * np.cos(gamma))
        # mirror the hip offset for the right-hand-side legs
        hip_val = hip
        if right_side:
            hip_val = -hip
        theta = -np.arctan2(coord[2], coord[1]) - np.arctan2(np.sqrt(sqrt_value), hip_val)
        # joint angles: hip roll (theta), hip pitch (alpha) and knee (gamma)
        angles = np.array([theta, -alpha, -gamma])
        return angles
def solve(self, orientation, position, frames=None):
if frames is not None:
self._frames = frames
foot_front_right = np.asarray([self._frames[0, 0], self._frames[0, 1], self._frames[0, 2]])
foot_front_left = np.asarray([self._frames[1, 0], self._frames[1, 1], self._frames[1, 2]])
foot_rear_right = np.asarray([self._frames[2, 0], self._frames[2, 1], self._frames[2, 2]])
foot_rear_left = np.asarray([self._frames[3, 0], self._frames[3, 1], self._frames[3, 2]])
# rotation vertices
hip_front_right_vertex = self.transform(self._hip_front_right_v, orientation, position)
hip_front_left_vertex = self.transform(self._hip_front_left_v, orientation, position)
hip_rear_right_vertex = self.transform(self._hip_rear_right_v, orientation, position)
hip_rear_left_vertex = self.transform(self._hip_rear_left_v, orientation, position)
# leg vectors
front_right_coord = foot_front_right - hip_front_right_vertex
front_left_coord = foot_front_left - hip_front_left_vertex
rear_right_coord = foot_rear_right - hip_rear_right_vertex
rear_left_coord = foot_rear_left - hip_rear_left_vertex
# leg vectors transformation
inv_orientation = -orientation
inv_position = -position
t_front_right_coord = self.transform(front_right_coord, inv_orientation, inv_position)
t_front_left_coord = self.transform(front_left_coord, inv_orientation, inv_position)
t_rear_right_coord = self.transform(rear_right_coord, inv_orientation, inv_position)
t_rear_left_coord = self.transform(rear_left_coord, inv_orientation, inv_position)
# solve IK
front_right_angles = self._solve_IK(t_front_right_coord, self._hip, self._leg, self._foot, True)
front_left_angles = self._solve_IK(t_front_left_coord, self._hip, self._leg, self._foot, False)
rear_right_angles = self._solve_IK(t_rear_right_coord, self._hip, self._leg, self._foot, True)
rear_left_angles = self._solve_IK(t_rear_left_coord, self._hip, self._leg, self._foot, False)
t_front_right = hip_front_right_vertex + t_front_right_coord
t_front_left = hip_front_left_vertex + t_front_left_coord
t_rear_right = hip_rear_right_vertex + t_rear_right_coord
t_rear_left = hip_rear_left_vertex + t_rear_left_coord
t_frames = np.asmatrix([[t_front_right[0], t_front_right[1], t_front_right[2]],
[t_front_left[0], t_front_left[1], t_front_left[2]],
[t_rear_right[0], t_rear_right[1], t_rear_right[2]],
[t_rear_left[0], t_rear_left[1], t_rear_left[2]]])
return front_right_angles, front_left_angles, rear_right_angles, rear_left_angles, t_frames
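# Illustrative usage (added as an example; not part of the original module).
# The orientation/position values below are arbitrary placeholders.
if __name__ == '__main__':
    kinematics = Kinematics()
    orientation = np.array([0.0, 0.0, 0.0])  # roll, pitch, yaw in radians
    position = np.array([0.0, 0.0, 0.0])     # body translation in metres
    fr, fl, rr, rl, frames = kinematics.solve(orientation, position)
    print("front right leg angles:", fr)
    print("front left  leg angles:", fl)
    print("rear  right leg angles:", rr)
    print("rear  left  leg angles:", rl)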
|
{"hexsha": "2022875ea045399b33707d0016f65a84a383b92f", "size": 6936, "ext": "py", "lang": "Python", "max_stars_repo_path": "rex_gym/model/kinematics.py", "max_stars_repo_name": "elvinaqa/rex-gym", "max_stars_repo_head_hexsha": "a57b5df5f356e228e47d4e0b778617a7f74834c4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rex_gym/model/kinematics.py", "max_issues_repo_name": "elvinaqa/rex-gym", "max_issues_repo_head_hexsha": "a57b5df5f356e228e47d4e0b778617a7f74834c4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rex_gym/model/kinematics.py", "max_forks_repo_name": "elvinaqa/rex-gym", "max_forks_repo_head_hexsha": "a57b5df5f356e228e47d4e0b778617a7f74834c4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T12:49:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T12:49:19.000Z", "avg_line_length": 48.5034965035, "max_line_length": 123, "alphanum_fraction": 0.5671856978, "include": true, "reason": "import numpy", "num_tokens": 1945}
|
\chapter{Constructing a formula}
\label{chapter:constructingaformula}
The class \formulaClass represents SMT formulas, which are
defined according to the following abstract grammar
\[
\begin{array}{rccccccccccccc}
p &\quad ::=\quad & a & | & b & | & x & | & (p + p) & | & (p \cdot p) & | & (p^e) \\
v &\quad ::=\quad & u & | & x \\
s &\quad ::=\quad & f(v,\ldots,v) & | & u & | & x \\
e &\quad ::=\quad & s = s \\
c &\quad ::=\quad & p = 0 & | & p < 0 & | & p \leq 0 & | & p > 0 & | & p \geq 0 & | & p \neq 0 \\
\varphi &\quad ::=\quad & c & | & (\neg \varphi) & | &
(\varphi\land\varphi) & | &
(\varphi\lor\varphi) & | &
(\varphi\rightarrow\varphi) & | \\ &&
(\varphi\leftrightarrow\varphi) & | &
(\varphi\oplus\varphi)
\end{array}
\]
where $a$ is a rational number, $e$ is a natural number greater than one, $b$ is a \emph{Boolean variable} and the \emph{arithmetic variable} $x$ is inherently existentially quantified and either real- or integer-valued. We call $p$ a \emph{polynomial} and use a \carl multivariate polynomial with \cln rationals as coefficients to represent it. The \emph{uninterpreted function} $f$ is of a certain \emph{order} $o(f)$ and each of its $o(f)$ arguments is either an arithmetic variable or an \emph{uninterpreted variable} $u$, which is also inherently existentially quantified, but has no domain specified. An \emph{uninterpreted equation} $e$ then has either an uninterpreted function, an uninterpreted variable or an arithmetic variable as its left-hand and right-hand sides. A \emph{constraint} $c$ compares a polynomial to zero, using a \emph{relation symbol}. Furthermore, we keep constraints in a normalized representation to be able to distinguish them more easily.
\section{Normalized constraints}
A normalized constraint has the form
\[a_1\overbrace{x_{1,1}^{e_{1,1}}\cdot\ldots\cdot x_{1,k_1}^{e_{1,k_1}}}^{m_1}+\ldots+a_n\overbrace{x_{n,1}^{e_{n,1}}\cdot\ldots\cdot x_{n,k_n}^{e_{n,k_n}}}^{m_n}\ + \ d\ \sim \ 0\]
with $n\geq0$, the \emph{$i$th coefficient} $a_i$ being an integral number ($\neq 0$), $d$ being an integral number, $x_{i,j_i}$ being a real- or integer-valued variable and $e_{i,j_i}$ being a natural number greater than zero (for all $1\leq i\leq n$ and $1\leq j_i\leq k_i$). Furthermore, it holds that
$x_{i,j_i}\neq x_{i,l_i}$ if $j_i\neq l_i$ (for all $1\leq i\leq n$ and $1\leq j_i, l_i\leq k_i$) and $m_{i_1}\neq m_{i_2}$ if $i_1\neq i_2$ (for all $1\leq i_1,i_2\leq n$). If $n$ is $0$ then $d$ is $0$ and $\sim$ is either $=$ or $<$. In the former case we have the normalized representation of any variable-free consistent constraint, which semantically equals \true, and in the latter case we have the normalized representation of any variable-free inconsistent constraint, which semantically equals \false. Note that the monomials and the variables in them are ordered according to the \polynomialOrder of \carl.
Moreover, the first coefficient of a normalized constraint (with respect to this order) is always positive and the greatest common divisor of $a_1,\ldots,\ a_n,\ d$ is $1$. If all variables are integer-valued, the constraint is further simplified to
\[\frac{a_1}{g}\cdot m_1\ +\ \ldots\ +\ \frac{a_n}{g}\cdot m_n\ + \ d'\ \sim' \ 0,\]
where $g$ is the greatest common divisor of $a_1,\ldots,\ a_n$,
\[\sim'=\left\{
\begin{array}{ll}
\leq, &\text{ if }\sim\text{ is }< \\
\geq, &\text{ if }\sim\text{ is }> \\
\sim, &\text{ otherwise }
\end{array}
\right.\]
and
\[
d' = \left\{
\begin{array}{ll}
\lceil\frac{d}{g}\rceil &\text{ if }\sim'\text{ is }\leq \\[1.5ex]
\lfloor\frac{d}{g}\rfloor &\text{ if }\sim'\text{ is }\geq \\[1.5ex]
\frac{d}{g} &\text{ otherwise }
\end{array}
\right.\]
If additionally $\frac{d}{g}$ is not integral and $\sim'$ is $=$, the constraint is simplified to $0<0$, or, if $\sim'$ is $\neq$,
the constraint is simplified to $0=0$.
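For illustration, consider the constraint $2x+2y-1<0$ over integer-valued variables $x$ and $y$: its coefficients are already integral with greatest common divisor $1$ and a positive leading coefficient, and the integer simplification yields $g=2$, $\sim'\,=\,\leq$ and $d'=\lceil-\tfrac{1}{2}\rceil=0$, so the constraint is stored as $x+y\leq 0$, which is indeed equivalent over the integers.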
We apply some further simplifications, such as the elimination of multiple roots of the left-hand sides in equations and in inequalities with the relation symbol $\neq$, e.g., $x^3=0$ is simplified to $x=0$. We also simplify constraints whose left-hand sides are obviously positive or negative (semi-)definite, e.g., $x^2\leq 0$ is simplified to $x^2=0$, which in turn can be simplified to $x=0$ according to the first simplification rule.
\section{Boolean combinations of constraints and Boolean variables}
A formula is stored as a directed acyclic graph, where the intermediate nodes represent the Boolean operations on the sub-formulas represented by the successors of this node. The leaves (nodes without successor) contain either a Boolean variable, a constraint or an uninterpreted equality. Equal formulas, that is formulas being leaves and containing the same element or formulas representing the same operation on the same sub-formulas, are stored only once.
The construction of formulas, which are represented by the \formulaClass, is mainly based on the presented abstract grammar. A formula being a leaf wraps the corresponding object representing a Boolean variable, a constraint or an uninterpreted equality. A Boolean combination of Boolean variables, constraints and uninterpreted equalities consists of a Boolean operator and the sub-formulas it interconnects. For this purpose we either first create a set of formulas containing all sub-formulas and then construct the formula from it, or (if the formula shall not have more than three sub-formulas) construct the formula directly, passing the operator and the sub-formulas. Formulas, constraints and uninterpreted equalities are immutable once they are constructed. %TODO: explain mutable member of formulas for information storage
We give a small example constructing the formula \[(\neg b\ \land\ x^2-y<0\ \land\ 4x+y-8y^7=0 )\ \rightarrow\ (\neg(x^2-y<0)\ \lor\ b ),\] with the Boolean variable $b$ and the real-valued variables $x$ and $y$, for demonstration. Furthermore, we construct the UF formula
\[v = f(u,u)\ \oplus\ w \neq u\]
with $u$ and $w$ being uninterpreted variables of the unspecified domain $S$, $v$ being an uninterpreted variable of the unspecified domain $T$,
and $f$ being an uninterpreted function of the unspecified domain $T^{S\times S}$.
First, we show how to create real-valued (integer-valued analogously with \texttt{VT\_INT}), Boolean and uninterpreted variables:
\scriptsize
\begin{verbatim}
carl::Variable x = smtrat::newVariable( "x", carl::VariableType::VT_REAL );
carl::Variable y = smtrat::newVariable( "y", carl::VariableType::VT_REAL );
carl::Variable b = smtrat::newVariable( "b", carl::VariableType::VT_BOOL );
carl::Variable u = smtrat::newVariable( "u", carl::VariableType::VT_UNINTERPRETED );
carl::Variable v = smtrat::newVariable( "v", carl::VariableType::VT_UNINTERPRETED );
carl::Variable w = smtrat::newVariable( "w", carl::VariableType::VT_UNINTERPRETED );
\end{verbatim}
\normalsize
Uninterpreted variables, functions and function instances, combined in equations or inequalities comparing them, are constructed in the following way.
\scriptsize
\begin{verbatim}
carl::Sort sortS = smtrat::newSort( "S" );
carl::Sort sortT = smtrat::newSort( "T" );
carl::UVariable uu( u, sortS );
carl::UVariable uv( v, sortT );
carl::UVariable uw( w, sortS );
carl::UninterpretedFunction f = smtrat::newUF( "f", sortS, sortS, sortT );
carl::UFInstance f1 = smtrat::newUFInstance( f, uu, uw );
carl::UEquality ueqA( uv, f1, false );
carl::UEquality ueqB( uw, uu, true );
\end{verbatim}
\normalsize
Next we see an example how to create polynomials, which form the left-hand sides of the constraints:
\scriptsize
\begin{verbatim}
smtrat::Poly px( x );
smtrat::Poly py( y );
smtrat::Poly lhsA = px.pow(2) - py;
smtrat::Poly lhsB = smtrat::Rational(4) * px + py - smtrat::Rational(8) * py.pow(7);
\end{verbatim}
\normalsize
Constraints can then be constructed as follows:
\scriptsize
\begin{verbatim}
smtrat::ConstraintT constraintA( lhsA, carl::Relation::LESS );
smtrat::ConstraintT constraintB( lhsB, carl::Relation::EQ );
\end{verbatim}
\normalsize
Now, we can construct the atoms of the Boolean formula
\scriptsize
\begin{verbatim}
smtrat::FormulaT atomA( constraintA );
smtrat::FormulaT atomB( constraintB );
smtrat::FormulaT atomC( b );
smtrat::FormulaT atomD( ueqA );
smtrat::FormulaT atomE( ueqB );
\end{verbatim}
\normalsize
and the formulas itself (either with a set of arguments or directly):
\scriptsize
\begin{verbatim}
smtrat::FormulasT subformulasA;
subformulasA.insert( smtrat::FormulaT( carl::FormulaType::NOT, atomC ) );
subformulasA.insert( atomA );
subformulasA.insert( atomB );
smtrat::FormulaT phiA( carl::FormulaType::AND, std::move(subformulasA) );
smtrat::FormulaT phiB( carl::FormulaType::NOT, atomA );
smtrat::FormulaT phiC( carl::FormulaType::OR, phiB, atomC );
smtrat::FormulaT phiD( carl::FormulaType::IMPLIES, phiA, phiC );
smtrat::FormulaT phiE( carl::FormulaType::XOR, atomD, atomE );
\end{verbatim}
\normalsize
Note that $\land$ and $\lor$ are $n$-ary constructors, $\neg$ is a unary constructor, and all the other Boolean operators are binary.
|
{"hexsha": "a386713f08ee5608914bf385bda64818ef350a2e", "size": 8958, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/constructingformulas.tex", "max_stars_repo_name": "minemebarsha/smtrat", "max_stars_repo_head_hexsha": "eaada50cdf9bbfe4dd4f6a54776387484c37b0f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manual/constructingformulas.tex", "max_issues_repo_name": "minemebarsha/smtrat", "max_issues_repo_head_hexsha": "eaada50cdf9bbfe4dd4f6a54776387484c37b0f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual/constructingformulas.tex", "max_forks_repo_name": "minemebarsha/smtrat", "max_forks_repo_head_hexsha": "eaada50cdf9bbfe4dd4f6a54776387484c37b0f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.9076923077, "max_line_length": 961, "alphanum_fraction": 0.7234873856, "num_tokens": 2732}
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 4 21:34:01 2018
@author: 大茄茄
"""
# Apply a fourth-order Butterworth band-pass filter to the raw data
from scipy.signal import butter, lfilter
import pandas as pd
import os
from sklearn.externals.joblib import Parallel, delayed
SampFreq = 256
ChannelNum = 22
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='bandpass')
return b, a
def filterX_onset(path, savepath):
for per_file in os.listdir(path):
filename, suffix = os.path.splitext(per_file)
filepath = os.path.join(path, per_file)
data = pd.read_csv(filepath)
data = data.iloc[:, 1:]
filterData = []
for per_channel in range(ChannelNum):
X = data.iloc[per_channel, :]
filterX = butter_bandpass_filter(X, 0.01, 32, SampFreq, 4)
filterData.append(filterX)
filterData = pd.DataFrame(filterData)
filterData.to_csv(savepath + '\\{}.csv'.format(filename))
def filterX_interItcal(file, path, savepath):
filename, suffix = os.path.splitext(file)
if suffix == '.csv':
data = pd.read_csv(os.path.join(path, file))
data = data.iloc[:, 1:]
filterData = []
for per_channel in range(ChannelNum):
X = data.iloc[per_channel, :]
filterX = butter_bandpass_filter(X, 0.01, 32, SampFreq, 4)
filterData.append(filterX)
filterData = pd.DataFrame(filterData)
filterData.to_csv(savepath + '\\{}.csv'.format(filename))
def multiprocess(path, savepath):
Parallel(n_jobs=1)(delayed(filterX_interItcal)(i, path, savepath) for i in os.listdir(path))
if __name__ == '__main__':
pass
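    # Illustrative invocation (added as an example; the directory paths below are
    # hypothetical placeholders, not part of the original script):
    # multiprocess(r'D:\eeg\raw_csv', r'D:\eeg\filtered_csv')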
|
{"hexsha": "a520116991dd80361b958218c340e7a654118bc7", "size": 2069, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_process/band_filter.py", "max_stars_repo_name": "yolle103/eeg-lstm", "max_stars_repo_head_hexsha": "24a236a3ffa4af02b81a5a772f9a1f3130817ad4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-05T16:26:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-05T16:26:09.000Z", "max_issues_repo_path": "data_process/band_filter.py", "max_issues_repo_name": "yolle103/eeg-lstm", "max_issues_repo_head_hexsha": "24a236a3ffa4af02b81a5a772f9a1f3130817ad4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_process/band_filter.py", "max_forks_repo_name": "yolle103/eeg-lstm", "max_forks_repo_head_hexsha": "24a236a3ffa4af02b81a5a772f9a1f3130817ad4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9855072464, "max_line_length": 97, "alphanum_fraction": 0.5872402127, "include": true, "reason": "from scipy", "num_tokens": 555}
|