| text (stringlengths 0-1.25M) | meta (stringlengths 47-1.89k) |
|---|---|
import os
import random
import numpy as np
import h5py
import torch
from PIL import Image, ImageOps
from torch.utils import data
def correspondences_collate(batch):
r"""Puts each data field into a tensor with outer dimension batch size, except for correspondence points which will be a list of tensors"""
ims1 = torch.stack([b[0] for b in batch], 0)
ims2 = torch.stack([b[1] for b in batch], 0)
pts1 = [torch.from_numpy(b[2]) for b in batch]
pts2 = [torch.from_numpy(b[3]) for b in batch]
weights = [torch.from_numpy(b[4]) for b in batch]
return [ims1, ims2, pts1, pts2, weights]
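# Returned structure: [ims1, ims2, pts1, pts2, weights]. The images are stacked
# into (B, C, H, W) tensors, while the points and weights stay as length-B lists
# because the number of correspondences differs between image pairs.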
def refine_correspondence_sample(ref_probmaps, other_probmaps, ref_inds,
other_inds, weights, remove_same_class=True, remove_classes=None):
probmap_ref_flat = ref_probmaps.view(
[ref_probmaps.size(0), ref_probmaps.size(1), -1])
probmap_other_flat = other_probmaps.view(
[other_probmaps.size(0), other_probmaps.size(1), -1])
ref_inds_out = list()
other_inds_out = list()
weights_out = list()
batch_inds_to_keep = torch.zeros(
[len(ref_inds)], dtype=torch.uint8, device=ref_inds[0].device)
for b in range(ref_probmaps.size(0)):
# get variables for this batch
inds_ref_this = ref_inds[b].type(torch.int64)
inds_other_this = other_inds[b].type(torch.int64)
weights_this = weights[b]
# convert to linear indices
        lin_inds_ref = ref_probmaps.size(2) * inds_ref_this[1, :] + inds_ref_this[0, :]
        lin_inds_other = other_probmaps.size(2) * inds_other_this[1, :] + inds_other_this[0, :]
# get predictions
        ref_pred_flat = probmap_ref_flat[b, :, lin_inds_ref].max(0)[1].squeeze_(0)
        other_pred_flat = probmap_other_flat[b, :, lin_inds_other].max(0)[1].squeeze_(0)
        # start from an all-True keep mask (argmax predictions are always >= 0)
        inds_to_keep = torch.ones_like(ref_pred_flat, dtype=torch.bool)
        # remove samples with non-stationary classes
        if remove_classes is not None:
            for class_to_remove in remove_classes:
                inds_to_keep = inds_to_keep & (ref_pred_flat != class_to_remove)
        # remove samples where correspondences have the same class
        if remove_same_class:
            inds_to_keep = inds_to_keep & (ref_pred_flat != other_pred_flat)
        # keep only the surviving correspondences
ref_inds_out.append(inds_ref_this[:, inds_to_keep])
other_inds_out.append(inds_other_this[:, inds_to_keep])
weights_out.append(weights_this[inds_to_keep, :])
if inds_to_keep.sum() > 0:
batch_inds_to_keep[b] = 1
return ref_inds_out, other_inds_out, weights_out, batch_inds_to_keep
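# Expected shapes (for reference; inferred from the indexing above):
# ref_probmaps/other_probmaps are (B, C, H, W) class-score maps,
# ref_inds/other_inds are length-B lists of (2, N_b) pixel-index tensors, and
# weights are length-B lists of 2-D (N_b, k) tensors, matching the collate output.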
def make_dataset(corr_path):
items = []
f_name_list = [fn for fn in os.listdir(corr_path) if fn.endswith('mat')]
for f_name in f_name_list:
item = (os.path.join(corr_path, f_name))
items.append(item)
return items
class Correspondences(data.Dataset):
def __init__(self, corr_path, im_path, input_size=(713, 713), mean_std=(
[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), input_transform=None, joint_transform=None):
self.data = make_dataset(corr_path)
if len(self.data) == 0:
raise RuntimeError('Found 0 images, please check the data set')
self.input_size = input_size
self.root_imgs = im_path
self.transform = input_transform
self.joint_transform = joint_transform
    def __getitem__(self, index):
        # Load data from one sample (the .mat files here are HDF5-based, so read via h5py)
        mat_content = {}
        with h5py.File(self.data[index], 'r') as f:
            for k, v in f.items():
                mat_content[k] = np.array(v)
        # paths are stored as arrays of character codes; convert them to strings
        im1name = ''.join(chr(a) for a in mat_content['im_i_path'])
        im2name = ''.join(chr(a) for a in mat_content['im_j_path'])
mat_content['pt_i'] = np.swapaxes(mat_content['pt_i'], 0, 1)
mat_content['pt_j'] = np.swapaxes(mat_content['pt_j'], 0, 1)
mat_content['dist_from_center'] = np.swapaxes(
mat_content['dist_from_center'], 0, 1)
img1path = os.path.join(self.root_imgs, im1name)
img2path = os.path.join(self.root_imgs, im2name)
img1 = Image.open(img1path).convert('RGB')
img2 = Image.open(img2path).convert('RGB')
pts1 = mat_content['pt_i']
pts2 = mat_content['pt_j']
dists = mat_content['dist_from_center']
weights = np.exp((-1 * dists))
if self.joint_transform is not None:
img1, img2, pts1, pts2, weights = self.joint_transform(
img1, img2, pts1, pts2, weights)
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
return img1, img2, (pts1 + .5).astype(np.int32), (pts2 + .5).astype(
np.int32), weights.astype(np.float32)
def __len__(self):
return len(self.data)
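# Minimal usage sketch (hypothetical paths and transform):
# loader = data.DataLoader(
#     Correspondences('corrs/', 'imgs/', input_transform=some_tensor_transform),
#     batch_size=4, shuffle=True, collate_fn=correspondences_collate)
# for ims1, ims2, pts1, pts2, weights in loader:
#     ...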
|
{"hexsha": "953e8c173a1cf4d4385160072874c979730fc143", "size": 5161, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/correspondences.py", "max_stars_repo_name": "Triocrossing/cross-season-segmentation", "max_stars_repo_head_hexsha": "9cb4ff95065533845c21f418bf5b701248c92b41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/correspondences.py", "max_issues_repo_name": "Triocrossing/cross-season-segmentation", "max_issues_repo_head_hexsha": "9cb4ff95065533845c21f418bf5b701248c92b41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/correspondences.py", "max_forks_repo_name": "Triocrossing/cross-season-segmentation", "max_forks_repo_head_hexsha": "9cb4ff95065533845c21f418bf5b701248c92b41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8642857143, "max_line_length": 143, "alphanum_fraction": 0.6223600078, "include": true, "reason": "import numpy", "num_tokens": 1324}
|
[STATEMENT]
lemma less_dag_set_of: "x < y \<Longrightarrow> set_of x \<subseteq> set_of y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x < y \<Longrightarrow> set_of x \<subseteq> set_of y
[PROOF STEP]
by (unfold less_dag_def, induct y, auto)
|
{"llama_tokens": 98, "file": "BDD_BinDag", "length": 1}
|
// Copyright (c) 2016-2017 Hypha
#include <hypha/core/database/database.h>
#include <hypha/core/database/databasegenerator.h>
#include <hypha/core/database/userdatabase.h>
#include <hypha/core/exceptions/configfilenotfound.h>
#include <hypha/core/settings/configgenerator.h>
#include <hypha/core/settings/databasesettings.h>
#include <hypha/core/settings/hyphasettings.h>
#include <hypha/core/settings/pluginsettings.h>
#include <gmock/gmock.h>
#include <boost/filesystem.hpp>
class database_test : public testing::Test {
public:
};
TEST_F(database_test, CheckHyphaDatabase) {
try {
std::string configFile = "database_test_config.xml";
hypha::settings::HyphaSettings hs(configFile);
hs.load(true);
hypha::settings::DatabaseSettings dbs(&hs);
hypha::database::Database db(&dbs);
db.connect();
hypha::database::DatabaseGenerator dbg;
try {
dbg.generateExampleDatabase(&dbs);
} catch (std::exception &e) {
std::cout << e.what() << std::endl;
}
hypha::settings::PluginSettings ps(&db);
ASSERT_TRUE(ps.getAllPluginIds().size() > 0);
ASSERT_TRUE(boost::filesystem::exists(configFile));
  } catch (...) {
    // swallow setup failures (e.g. missing config) so the suite keeps running
  }
}
TEST_F(database_test, CheckHyphaUserDatabase) {
try {
std::string configFile = "database_test_config.xml";
hypha::settings::HyphaSettings hs(configFile);
hs.load(true);
hypha::settings::DatabaseSettings dbs(&hs);
hypha::database::Database db(&dbs);
db.connect();
hypha::settings::UserDatabaseSettings udbs(&hs);
hypha::database::UserDatabase *udb =
hypha::database::UserDatabase::factoreInstance(&udbs);
udb->connect();
hypha::database::DatabaseGenerator dbg;
try {
dbg.generateExampleUserDatabase(&udbs);
} catch (std::exception &e) {
std::cout << e.what() << std::endl;
}
hypha::settings::PluginSettings ps(&db);
ASSERT_TRUE(ps.getAllPluginIds().size() > 0);
ASSERT_TRUE(boost::filesystem::exists(configFile));
delete udb;
  } catch (...) {
    // swallow setup failures (e.g. missing config) so the suite keeps running
  }
}
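// To run only these cases (hypothetical binary name; assumes a gtest main is
// linked elsewhere in the build):
//   ./core-test --gtest_filter=database_test.*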
|
{"hexsha": "920cddfaea4b1b943686b4a1d456b12b0ec14d9a", "size": 2027, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/tests/core-test/database_test.cpp", "max_stars_repo_name": "hyphaproject/hypha", "max_stars_repo_head_hexsha": "2ab878529e859928dce0515c742368ad30a48dab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/tests/core-test/database_test.cpp", "max_issues_repo_name": "hyphaproject/hypha", "max_issues_repo_head_hexsha": "2ab878529e859928dce0515c742368ad30a48dab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/tests/core-test/database_test.cpp", "max_forks_repo_name": "hyphaproject/hypha", "max_forks_repo_head_hexsha": "2ab878529e859928dce0515c742368ad30a48dab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3246753247, "max_line_length": 62, "alphanum_fraction": 0.683275777, "num_tokens": 513}
|
""" Utilities for dealing with NRRD files. So far, this is mostly
a nifti library, so these will probably just be conversion
utilities. The existence of this library should imply the creation
of an array_util, as many of the functions in nifti_util are not
specific to niftis.
"""
import nibabel as nib
import numpy as np
import nrrd
def nrrd_2_numpy(input_nrrd, return_header=False):
""" Loads nrrd data and optionally return a nrrd header
in pynrrd's format. If array is 4D, swaps axes so
that time dimension is last to match nifti standard.
"""
nrrd_data, nrrd_options = nrrd.read(input_nrrd)
if nrrd_data.ndim == 4:
nrrd_data = np.rollaxis(nrrd_data, 0, 4)
if return_header:
return nrrd_data, nrrd_options
else:
return nrrd_data
def save_numpy_2_nrrd(input_numpy, reference_nrrd=None, output_filepath=''):
    # minimal implementation: reuse the header of a reference nrrd if one is given
    header = nrrd.read(reference_nrrd)[1] if reference_nrrd else None
    nrrd.write(output_filepath, input_numpy, header)
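# Example usage (hypothetical file paths):
# volume = nrrd_2_numpy('scan.nrrd')  # time axis moved last if 4D
# volume, header = nrrd_2_numpy('scan.nrrd', return_header=True)
# save_numpy_2_nrrd(volume, reference_nrrd='scan.nrrd', output_filepath='copy.nrrd')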
|
{"hexsha": "63c66732064eaa3a382cfefa12f6dfdc60fc268c", "size": 846, "ext": "py", "lang": "Python", "max_stars_repo_path": "qtim_tools/qtim_utilities/nrrd_util.py", "max_stars_repo_name": "QTIM-Lab/qtim_tools", "max_stars_repo_head_hexsha": "92bd15ec7a81c5eda70d11a015f74538f3c41e22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-03-29T18:17:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-19T05:28:56.000Z", "max_issues_repo_path": "qtim_tools/qtim_utilities/nrrd_util.py", "max_issues_repo_name": "QTIM-Lab/qtim_tools", "max_issues_repo_head_hexsha": "92bd15ec7a81c5eda70d11a015f74538f3c41e22", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-03-08T21:06:01.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-21T19:01:58.000Z", "max_forks_repo_path": "qtim_tools/qtim_utilities/nrrd_util.py", "max_forks_repo_name": "QTIM-Lab/qtim_tools", "max_forks_repo_head_hexsha": "92bd15ec7a81c5eda70d11a015f74538f3c41e22", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-03-02T09:08:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-26T05:37:39.000Z", "avg_line_length": 28.2, "max_line_length": 74, "alphanum_fraction": 0.7588652482, "include": true, "reason": "import numpy", "num_tokens": 249}
|
###############################################################################
#
# Update carbon and water fluxes
#
# TODO: work more to make this function more customized
#
###############################################################################
"""
layer_fluxes!(
node::SPACMono{FT};
updating::Bool = false
) where {FT<:AbstractFloat}
Run carbon, water, energy, and SIF fluxes for all canopy layers, given
- `node` [`SPACMono`](@ref) type struct
- `updating` If true, update cavitation history
"""
function layer_fluxes!(
node::SPACMono{FT};
updating::Bool = false
) where {FT<:AbstractFloat}
# 0.1 unpack data
@unpack angles, can_opt, can_rad, canopy_rt, envirs, f_SL, ga, in_rad,
leaves_rt, n_canopy, photo_set, plant_hs, plant_ps, rt_con, rt_dim,
soil_opt, stomata_model, wl_set = node;
@unpack nAzi, nIncl = canopy_rt;
nSL = nAzi * nIncl;
canopy_geometry!(canopy_rt, angles, can_opt, rt_con);
canopy_matrices!(leaves_rt, can_opt);
short_wave!(canopy_rt, can_opt, can_rad, in_rad, soil_opt, rt_con);
canopy_fluxes!(canopy_rt, can_opt, can_rad, in_rad, soil_opt, leaves_rt,
wl_set, rt_con);
    # initialize canopy-level totals for water, GPP, and NPP fluxes
f_H₂O = 0;
f_GPP = 0;
f_NPP = 0;
for i_can in 1:n_canopy
iEN = envirs[i_can];
iHS = plant_hs.leaves[i_can];
iPS = plant_ps[i_can];
iRT = n_canopy + 1 - i_can;
iPS.T = can_rad.T_sun[i_can];
update_leaf_TP!(photo_set, iPS, iHS, iEN);
temperature_effects!(iHS, iPS.T);
# calculate the fraction of sunlit and shaded leaves
f_view = (can_opt.Ps[iRT] + can_opt.Ps[iRT+1]) / 2;
for iLF in 1:nSL
iPS.APAR[iLF] = can_rad.absPAR_sunCab[(iRT-1)*nSL+iLF] * FT(1e6);
iPS.LAIx[iLF] = f_view * f_SL[iLF];
end
iPS.APAR[end] = can_rad.absPAR_shadeCab[iRT] * FT(1e6);
iPS.LAIx[end] = 1 - f_view;
        # iterate until sum(iPS.Ag) stops changing (at most 1000 iterations)
sum_ag_last = sum(iPS.Ag);
count = 0;
while true
# calculate the photosynthetic rates
count += 1;
gas_exchange!(photo_set, iPS, iEN, GswDrive());
if typeof(stomata_model) <: EmpiricalStomatalModel
                # TODO: determine which β function is used here, i.e. whether
                # it tunes g1 or Vcmax
prognostic_gsw!(iPS, iEN, stomata_model, FT(1), FT(120));
else
prognostic_gsw!(photo_set, iPS, iHS, iEN, stomata_model,
FT(120));
end
gsw_control!(photo_set, iPS, iEN);
sum_ag_curr = sum(iPS.Ag);
if (abs(sum_ag_curr - sum_ag_last) < 0.01) || count > 1000
break
else
sum_ag_last = sum_ag_curr;
end
end
# update the fluorescence quantum yield from leaf level calculation
can_rad.ϕ_sun[:,:,iRT] .= reshape(view(iPS.ϕs,1:nSL), nIncl, nAzi);
can_rad.ϕ_shade[iRT] = iPS.ϕs[end];
# update the flow rates
for iLF in 1:(nSL+1)
f_GPP += iPS.Ag[iLF] * iPS.LAIx[iLF] * iPS.LA;
f_NPP += iPS.An[iLF] * iPS.LAIx[iLF] * iPS.LA;
f_H₂O += iPS.g_lw[iLF] * (iPS.p_sat - iEN.p_H₂O) / iEN.p_atm *
iPS.LAIx[iLF] * iPS.LA;
end
end
# do SIF simulation
SIF_fluxes!(leaves_rt, can_opt, can_rad, canopy_rt, soil_opt,
wl_set, rt_con, rt_dim);
# update flow profile and pressure history along the tree
if updating
for i_can in 1:n_canopy
iEN = envirs[i_can];
iLF = plant_hs.leaves[i_can];
iPS = plant_ps[i_can];
iST = plant_hs.branch[i_can];
iLF.flow = sum(iPS.g_lw .* iPS.LAIx) *
(iPS.p_sat - iEN.p_H₂O) / iEN.p_atm;
iST.flow = iLF.flow * iPS.LA;
end
plant_hs.trunk.flow = sum([iST.flow for iST in plant_hs.branch]);
for iRT in plant_hs.roots
iRT.flow = plant_hs.trunk.flow / length(plant_hs.roots);
end
pressure_profile!(plant_hs, SteadyStateMode(); update=true);
end
# update the flows in SPACMono
node.f_gpp = f_GPP / ga;
node.f_npp = f_NPP / ga;
node.f_H₂O = f_H₂O / ga;
return nothing
end
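# Usage sketch (assumes `node` is a fully initialized SPACMono struct):
#     layer_fluxes!(node; updating=true);
#     @show node.f_gpp node.f_npp node.f_H₂O;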
|
{"hexsha": "04673b71e294ae07271feb160163dea6ab0491bc", "size": 4582, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SoilPlantAirContinuum/layers/layer_fluxes.jl", "max_stars_repo_name": "bbuman/Land", "max_stars_repo_head_hexsha": "b0f3a390eb17330abbfe1a6ddffefdad2c7353ff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2020-05-06T19:15:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T23:17:26.000Z", "max_issues_repo_path": "src/SoilPlantAirContinuum/layers/layer_fluxes.jl", "max_issues_repo_name": "bbuman/Land", "max_issues_repo_head_hexsha": "b0f3a390eb17330abbfe1a6ddffefdad2c7353ff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2020-05-07T16:15:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-23T04:04:16.000Z", "max_forks_repo_path": "src/SoilPlantAirContinuum/layers/layer_fluxes.jl", "max_forks_repo_name": "bbuman/Land", "max_forks_repo_head_hexsha": "b0f3a390eb17330abbfe1a6ddffefdad2c7353ff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-09-28T17:50:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T21:24:34.000Z", "avg_line_length": 34.4511278195, "max_line_length": 79, "alphanum_fraction": 0.5264076822, "num_tokens": 1309}
|
'''
Created on Dec, 2016
@author: hugo
'''
from __future__ import absolute_import
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import f1_score, confusion_matrix, classification_report
def softmax_network(input_size, n_class):
model = Sequential()
model.add(Dense(n_class, activation='softmax', kernel_initializer='glorot_normal', input_dim=input_size))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def sigmoid_network(input_size, n_class):
model = Sequential()
model.add(Dense(n_class, activation='sigmoid', kernel_initializer='glorot_normal', input_dim=input_size))
    # binary crossentropy matches independent sigmoid outputs for multi-label targets
    model.compile(loss='binary_crossentropy', optimizer='adam')
return model
def multiclass_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7):
clf = softmax_network(X_train.shape[1], Y_train.shape[1])
clf.fit(X_train, Y_train,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_data=(X_val, Y_val),
callbacks=[
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'),
]
)
acc = clf.test_on_batch(X_test, Y_test)[1]
# confusion matrix and precision-recall
true = np.argmax(Y_test,axis=1)
pred = np.argmax(clf.predict(X_test), axis=1)
print(confusion_matrix(true, pred))
print(classification_report(true, pred))
return acc
def multilabel_classifier(X_train, Y_train, X_val, Y_val, X_test, Y_test, nb_epoch=200, batch_size=10, seed=7):
clf = sigmoid_network(X_train.shape[1], Y_train.shape[1])
clf.fit(X_train, Y_train,
            epochs=nb_epoch,  # Keras 2 argument name, as in multiclass_classifier
batch_size=batch_size,
shuffle=True,
validation_data=(X_val, Y_val),
callbacks=[
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=0, mode='auto'),
]
)
pred = clf.predict(X_test)
pred = (pred > .5) * 1
macro_f1 = f1_score(Y_test, pred, average='macro')
micro_f1 = f1_score(Y_test, pred, average='micro')
return [macro_f1, micro_f1]
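# Example usage (hypothetical arrays; the Y_* matrices are expected to be
# one-hot encoded for multiclass and multi-hot for multilabel):
# acc = multiclass_classifier(X_tr, Y_tr, X_va, Y_va, X_te, Y_te, nb_epoch=50)
# macro_f1, micro_f1 = multilabel_classifier(X_tr, Y_tr, X_va, Y_va, X_te, Y_te)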
|
{"hexsha": "fea36004821c7a70c31a5887c7c1b97e8eb03a31", "size": 2761, "ext": "py", "lang": "Python", "max_stars_repo_path": "autoencoder/testing/classifier.py", "max_stars_repo_name": "qmeeus/KATE", "max_stars_repo_head_hexsha": "012a1c6b671b5eb4c26470d8bca4f277fff1ec74", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autoencoder/testing/classifier.py", "max_issues_repo_name": "qmeeus/KATE", "max_issues_repo_head_hexsha": "012a1c6b671b5eb4c26470d8bca4f277fff1ec74", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autoencoder/testing/classifier.py", "max_forks_repo_name": "qmeeus/KATE", "max_forks_repo_head_hexsha": "012a1c6b671b5eb4c26470d8bca4f277fff1ec74", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-01T15:18:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T15:18:58.000Z", "avg_line_length": 41.8333333333, "max_line_length": 122, "alphanum_fraction": 0.6171676929, "include": true, "reason": "import numpy", "num_tokens": 630}
|
[STATEMENT]
lemma sorted_augmentum [simp]: "0 \<notin> set ns \<Longrightarrow> sorted (augmentum ns)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<notin> set ns \<Longrightarrow> sorted (augmentum ns)
[PROOF STEP]
by (induction ns) auto
|
{"llama_tokens": 88, "file": "Khovanskii_Theorem_Khovanskii", "length": 1}
|
#!/usr/bin/env python
import sys
if sys.version_info[0] >= 3:
import PySimpleGUI as sg
else:
import PySimpleGUI27 as sg
import os
from sys import exit as exit
from PIL import Image
import io
import numpy as np
thumbnails = {}
ROWS = 8
COLUMNS = 8
sg.SetOptions(border_width=0)
# Get the folder containing the images from the user
# folder = 'A:/TEMP/pdfs'
folder = sg.PopupGetFolder('Image folder to open')
if folder is None:
sg.PopupCancel('Cancelling')
exit(0)
def image_file_to_bytes(filename, size):
try:
image = Image.open(filename)
image.thumbnail(size, Image.ANTIALIAS)
bio = io.BytesIO() # a binary memory resident stream
image.save(bio, format='PNG') # save image as png to it
imgbytes = bio.getvalue()
    except Exception:  # don't swallow KeyboardInterrupt and friends
        imgbytes = None
return imgbytes
def set_image_to_blank(key):
img = Image.new('RGB', (100, 100), (255, 255, 255))
img.thumbnail((1, 1), Image.ANTIALIAS)
bio = io.BytesIO()
img.save(bio, format='PNG')
imgbytes = bio.getvalue()
window.FindElement(key).Update(image_data=imgbytes)
# get list of PNG files in folder
png_files = [os.path.join(folder, f) for f in os.listdir(folder) if f.lower().endswith('.png')]
filenames_only = [f for f in os.listdir(folder) if f.lower().endswith('.png')]
if len(png_files) == 0:
sg.Popup('No PNG images in folder')
exit(0)
# define menu layout
menu = [['&File', ['&Open Folder', 'E&xit']], ['&Help', ['&About',]]]
buttons = []
for display_index in range(ROWS):
row = []
for j in range(COLUMNS):
row.append(sg.Button('',border_width=0,button_color=sg.COLOR_SYSTEM_DEFAULT, key=(display_index, j)))
buttons.append(row)
col_buttons = [[]]
# define layout, show and read the window
col = [[sg.Text(png_files[0], size=(80, 3), key='filename')],
[sg.Image(data=image_file_to_bytes(png_files[0], (500,500)), key='image')],]
layout = [[sg.Menu(menu)], [sg.Column(buttons), sg.Column([[sg.Slider((len(png_files),0),default_value=0,size=(38,20),orientation='v', key='_slider_', change_submits=True)]]), sg.Column(col)]]
window = sg.Window('Image Browser',
return_keyboard_events=True,
use_default_focus=False ).Layout(layout).Finalize()
# -------========= Event Loop =========--------
display_index=0
while True:
for x in range(ROWS): # update thumbnails
for y in range(COLUMNS):
            cur_index = display_index + (x * COLUMNS) + y
if cur_index < len(png_files):
filename = png_files[cur_index]
if filename not in thumbnails:
imgbytes = image_file_to_bytes(filename, (100,100))
thumbnails[filename] = imgbytes
else:
imgbytes = thumbnails[filename]
button_elem = window.FindElement(key=(x,y))
button_elem.Update(image_data=imgbytes)
else:
set_image_to_blank((x,y))
event, values = window.Read()
    display_index = int(values['_slider_'])  # slider values come back as floats
# --------------------- Button & Keyboard ---------------------
if event in (None, 'Exit'):
break
    elif event in ('MouseWheel:Down', 'Down:40',) and display_index < len(png_files)-1:
        display_index += COLUMNS            # scroll down one row of thumbnails
    elif event in ('MouseWheel:Up', 'Up:38',) and display_index > 0:
        display_index -= COLUMNS            # scroll up one row
    elif event in ('Prior:33', 'Prev'):
        display_index -= ROWS * COLUMNS     # page up
    elif event in ('Next:34', 'Next'):
        display_index += ROWS * COLUMNS     # page down
window.FindElement('_slider_').Update(display_index)
# ----------------- Menu choices -----------------
if event == 'Open Folder':
newfolder = sg.PopupGetFolder('New folder', no_window=True)
if newfolder is None:
continue
folder = newfolder
        png_files = [os.path.join(folder, f) for f in os.listdir(folder) if f.lower().endswith('.png')]
        filenames_only = [f for f in os.listdir(folder) if f.lower().endswith('.png')]
display_index = 0
        thumbnails = {}  # clear the thumbnail cache for the new folder
for j in range(ROWS):
for i in range(COLUMNS):
set_image_to_blank((i,j))
elif event == 'About':
sg.Popup('Demo PNG Viewer Program', 'Please give PySimpleGUI a try!')
elif type(event) is tuple:
x, y = event
        image_index = display_index + (x * COLUMNS) + y
if image_index < len(png_files):
filename = png_files[image_index]
imgbytes = image_file_to_bytes(filename, (500, 500))
window.FindElement('image').Update(data=imgbytes)
window.FindElement('filename').Update(filename)
|
{"hexsha": "945f6ce32f019feccbeee952d59100f2da9feb82", "size": 4599, "ext": "py", "lang": "Python", "max_stars_repo_path": "legacy/Python Pipe Tuner/ImageViewer.py", "max_stars_repo_name": "Iarumas/BagPypeTuner", "max_stars_repo_head_hexsha": "7926ae63031e072fa61e8db845ed845348b7ea44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-15T06:25:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-15T06:25:31.000Z", "max_issues_repo_path": "legacy/Python Pipe Tuner/ImageViewer.py", "max_issues_repo_name": "Iarumas/BagPypeTuner", "max_issues_repo_head_hexsha": "7926ae63031e072fa61e8db845ed845348b7ea44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "legacy/Python Pipe Tuner/ImageViewer.py", "max_forks_repo_name": "Iarumas/BagPypeTuner", "max_forks_repo_head_hexsha": "7926ae63031e072fa61e8db845ed845348b7ea44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-15T06:25:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-15T06:25:32.000Z", "avg_line_length": 34.8409090909, "max_line_length": 192, "alphanum_fraction": 0.6001304631, "include": true, "reason": "import numpy", "num_tokens": 1143}
|
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from nose.plugins.skip import SkipTest
import torch
from cleverhans.devtools.checks import CleverHansTest
from cleverhans.future.torch.attacks.fast_gradient_method import fast_gradient_method
from cleverhans.future.torch.attacks.projected_gradient_descent import projected_gradient_descent
from cleverhans.future.torch.attacks.spsa import spsa
from cleverhans.future.torch.attacks.hop_skip_jump_attack import hop_skip_jump_attack
class SimpleModel(torch.nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.w1 = torch.tensor([[1.5, .3], [-2, .3]])
self.w2 = torch.tensor([[-2.4, 1.2], [.5, -2.3]])
def forward(self, x):
x = torch.matmul(x, self.w1)
x = torch.sigmoid(x)
x = torch.matmul(x, self.w2)
return x
class CommonAttackProperties(CleverHansTest):
def setUp(self):
super(CommonAttackProperties, self).setUp()
self.model = SimpleModel()
self.x = torch.randn(100, 2)
    self.normalized_x = torch.rand(100, 2)  # uniform in [0, 1)
self.red_ind = list(range(1, len(self.x.size())))
self.ord_list = [1, 2, np.inf]
def help_adv_examples_success_rate(self, **kwargs):
x_adv = self.attack(model_fn=self.model, x=self.normalized_x, **kwargs)
_, ori_label = self.model(self.normalized_x).max(1)
_, adv_label = self.model(x_adv).max(1)
adv_acc = (
adv_label.eq(ori_label).sum().to(torch.float)
/ self.normalized_x.size(0))
self.assertLess(adv_acc, .5)
def help_targeted_adv_examples_success_rate(self, **kwargs):
y_target = torch.randint(low=0, high=2, size=(self.normalized_x.size(0),))
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x,
y=y_target, targeted=True, **kwargs)
_, adv_label = self.model(x_adv).max(1)
adv_success = (
adv_label.eq(y_target).sum().to(torch.float)
/ self.normalized_x.size(0))
self.assertGreater(adv_success, .7)
class TestFastGradientMethod(CommonAttackProperties):
def setUp(self):
super(TestFastGradientMethod, self).setUp()
self.attack = fast_gradient_method
self.eps_list = [0, .1, .3, 1., 3]
self.attack_param = {
'eps' : .5,
'clip_min' : -5,
'clip_max' : 5
}
def test_invalid_input(self):
x = torch.tensor([[-2., 3.]])
for norm in self.ord_list:
self.assertRaises(
AssertionError, self.attack, model_fn=self.model, x=x, eps=.1,
norm=norm, clip_min=-1., clip_max=1., sanity_checks=True
)
def test_invalid_eps(self):
for norm in self.ord_list:
self.assertRaises(
ValueError, self.attack, model_fn=self.model,
x=self.x, eps=-.1, norm=norm)
def test_eps_equals_zero(self):
for norm in self.ord_list:
self.assertClose(
self.attack(model_fn=self.model, x=self.x, eps=0, norm=norm),
self.x)
def test_eps(self):
# test if the attack respects the norm constraint
# NOTE this has been tested with the optimize_linear function in
# test_utils, so duplicate tests are not needed here.
    # If the engine of the FGM method ever switches to a function other
    # than optimize_linear, this test should be added.
raise SkipTest()
def test_clips(self):
clip_min = -1.
clip_max = 1.
for norm in self.ord_list:
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.3, norm=norm,
clip_min=clip_min, clip_max=clip_max
)
self.assertTrue(torch.all(x_adv <= clip_max))
self.assertTrue(torch.all(x_adv >= clip_min))
def test_invalid_clips(self):
clip_min = .5
clip_max = -.5
for norm in self.ord_list:
self.assertRaises(
ValueError, self.attack, model_fn=self.model, x=self.x, eps=.1,
norm=norm, clip_min=clip_min, clip_max=clip_max
)
def test_adv_example_success_rate_linf(self):
# use normalized_x to make sure the same eps gives uniformly high attack
# success rate across randomized tests
self.help_adv_examples_success_rate(
norm=np.inf, **self.attack_param)
def test_targeted_adv_example_success_rate_linf(self):
self.help_targeted_adv_examples_success_rate(
norm=np.inf, **self.attack_param)
def test_adv_example_success_rate_l1(self):
self.help_adv_examples_success_rate(
norm=1, **self.attack_param)
def test_targeted_adv_example_success_rate_l1(self):
self.help_targeted_adv_examples_success_rate(
norm=1, **self.attack_param)
def test_adv_example_success_rate_l2(self):
self.help_adv_examples_success_rate(
norm=2, **self.attack_param)
def test_targeted_adv_example_success_rate_l2(self):
self.help_targeted_adv_examples_success_rate(
norm=2, **self.attack_param)
class TestProjectedGradientMethod(CommonAttackProperties):
def setUp(self):
super(TestProjectedGradientMethod, self).setUp()
self.attack = projected_gradient_descent
self.attack_param = {
'eps' : .5,
'clip_min' : -5,
'clip_max' : 5,
'eps_iter' : .05,
'nb_iter' : 20,
}
def test_invalid_input(self):
x = torch.tensor([[-2., 3.]])
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model, x=x, eps=.1,
nb_iter=1, eps_iter=.05, norm=norm, clip_min=-1., clip_max=1.,
sanity_checks=True)
else:
self.assertRaises(
AssertionError, self.attack, model_fn=self.model, x=x, eps=.1,
nb_iter=1, eps_iter=.05, norm=norm, clip_min=-1., clip_max=1.,
sanity_checks=True)
def test_invalid_eps(self):
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model,
x=self.x, eps=-.1, norm=norm, nb_iter=1, eps_iter=.01)
else:
self.assertRaises(
ValueError, self.attack, model_fn=self.model,
x=self.x, eps=-.1, norm=norm, nb_iter=1, eps_iter=.01)
def test_invalid_eps_iter(self):
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model,
x=self.x, eps=.1, norm=norm, nb_iter=1, eps_iter=-.01)
else:
self.assertRaises(
ValueError, self.attack, model_fn=self.model,
x=self.x, eps=.1, norm=norm, nb_iter=1, eps_iter=-.01)
def test_eps_equals_zero(self):
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model,
x=self.x, eps=0, norm=norm, nb_iter=10, eps_iter=.01)
else:
self.assertClose(
self.attack(
model_fn=self.model, x=self.x, eps=0, norm=norm, nb_iter=10,
eps_iter=.01),
self.x)
def test_eps_iter_equals_zero(self):
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model, x=self.x,
eps=.5, norm=norm, nb_iter=10, eps_iter=0)
else:
self.assertClose(
self.attack(
model_fn=self.model, x=self.x, eps=.5, norm=norm, nb_iter=10,
eps_iter=0),
self.x)
def test_invalid_clips(self):
clip_min = .5
clip_max = -.5
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model, x=self.x, eps=.1,
norm=norm, clip_min=clip_min, clip_max=clip_max, nb_iter=10,
eps_iter=.01)
else:
self.assertRaises(
ValueError, self.attack, model_fn=self.model, x=self.x, eps=.1,
norm=norm, clip_min=clip_min, clip_max=clip_max, nb_iter=10,
eps_iter=.01)
def test_adv_example_success_rate_linf(self):
# use normalized_x to make sure the same eps gives uniformly high attack
# success rate across randomized tests
self.help_adv_examples_success_rate(
norm=np.inf, **self.attack_param)
def test_targeted_adv_example_success_rate_linf(self):
self.help_targeted_adv_examples_success_rate(
norm=np.inf, **self.attack_param)
def test_adv_example_success_rate_l1(self):
self.assertRaises(
NotImplementedError, self.help_adv_examples_success_rate, norm=1,
**self.attack_param)
# TODO uncomment the actual test below after we have implemented the L1 attack
# self.help_adv_examples_success_rate(
# norm=1, **self.attack_param)
def test_targeted_adv_example_success_rate_l1(self):
self.assertRaises(
NotImplementedError, self.help_targeted_adv_examples_success_rate,
norm=1, **self.attack_param)
# TODO uncomment the actual test below after we have implemented the L1 attack
# self.help_targeted_adv_examples_success_rate(
# norm=1, **self.attack_param)
def test_adv_example_success_rate_l2(self):
self.help_adv_examples_success_rate(
norm=2, **self.attack_param)
def test_targeted_adv_example_success_rate_l2(self):
self.help_targeted_adv_examples_success_rate(
norm=2, **self.attack_param)
def test_do_not_reach_lp_boundary(self):
for norm in self.ord_list:
if norm == 1:
self.assertRaises(
NotImplementedError, self.attack, model_fn=self.model,
x=self.normalized_x, eps=.5, nb_iter=10, norm=norm, eps_iter=.01)
continue
else:
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.5, nb_iter=10,
norm=norm, eps_iter=.01)
if norm == np.inf:
delta, _ = torch.abs(x_adv - self.normalized_x).max(dim=1)
elif norm == 1:
delta = torch.abs(x_adv - self.normalized_x).sum(dim=1)
elif norm == 2:
delta = torch.pow(x_adv - self.normalized_x, 2).sum(dim=1).pow(.5)
diff = torch.max(.5 - delta)
self.assertTrue(diff > .25)
def test_attack_strength(self):
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=1.,
eps_iter=.05, norm=np.inf, clip_min=.5, clip_max=.7, nb_iter=5,
sanity_checks=False)
_, ori_label = self.model(self.normalized_x).max(1)
_, adv_label = self.model(x_adv).max(1)
adv_acc = (
adv_label.eq(ori_label).sum().to(torch.float)
/ self.normalized_x.size(0))
self.assertLess(adv_acc, .1)
def test_eps(self):
# test if the attack respects the norm constraint
# NOTE clip_eta makes sure that at each step, adv_x respects the eps
# norm constraint. Therefore, this is essentially a test on clip_eta,
# which is implemented in a separate test_clip_eta
raise SkipTest()
def test_clip_eta(self):
# NOTE: this has been tested with test_clip_eta in test_utils
raise SkipTest()
def test_clips(self):
clip_min = -1.
clip_max = 1.
for norm in self.ord_list:
if norm == 1:
        self.assertRaises(
          NotImplementedError, self.attack, model_fn=self.model,
          x=self.normalized_x, eps=.3, eps_iter=.03, norm=norm, nb_iter=10,
          clip_min=clip_min, clip_max=clip_max)
continue
else:
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.3, eps_iter=.03,
norm=norm, nb_iter=10, clip_min=clip_min, clip_max=clip_max)
self.assertTrue(torch.all(x_adv <= clip_max))
self.assertTrue(torch.all(x_adv >= clip_min))
def test_attack_does_not_cache_graph_computation_for_nb_iter(self):
# TODO not sure what the original test does in tests_tf/test_attacks
pass
def test_multiple_initial_random_step(self):
_, ori_label = self.model(self.normalized_x).max(1)
new_label_multi = ori_label.clone().detach()
for _ in range(10):
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.5, eps_iter=.05,
norm=np.inf, clip_min=.5, clip_max=.7, nb_iter=2, sanity_checks=False)
_, new_label = self.model(x_adv).max(1)
# examples for which we have not found adversarial examples
i = ori_label.eq(new_label_multi)
new_label_multi[i] = new_label[i]
failed_attack = (
ori_label.eq(new_label_multi).sum().to(torch.float)
/ self.normalized_x.size(0))
self.assertLess(failed_attack, .5)
class TestSPSA(CommonAttackProperties):
def setUp(self):
super(TestSPSA, self).setUp()
self.attack = spsa
self.attack_param = {
'eps': .5,
'clip_min': -5,
'clip_max': 5,
'nb_iter': 50,
}
def test_invalid_input(self):
x = torch.tensor([[-20., 30.]])
self.assertRaises(AssertionError, self.attack, model_fn=self.model, x=x, eps=.1,
nb_iter=1, clip_min=-1., clip_max=1., sanity_checks=True)
def test_invalid_eps(self):
self.assertRaises(
ValueError, self.attack, model_fn=self.model, x=self.x, eps=-.1, nb_iter=1)
def test_eps_equals_zero(self):
self.assertClose(
self.attack(model_fn=self.model, x=self.x, eps=0, nb_iter=10),
self.x)
def test_invalid_clips(self):
self.assertRaises(
ValueError, self.attack, model_fn=self.model, x=self.x, eps=.1,
clip_min=.5, clip_max=-.5, nb_iter=10)
def test_adv_example_success_rate_linf(self):
# use normalized_x to make sure the same eps gives uniformly high attack
# success rate across randomized tests
self.help_adv_examples_success_rate(**self.attack_param)
def test_targeted_adv_example_success_rate_linf(self):
self.help_targeted_adv_examples_success_rate(**self.attack_param)
def test_adv_example_success_rate_l1(self):
self.assertRaises(
NotImplementedError, self.help_adv_examples_success_rate, norm=1,
**self.attack_param)
# TODO uncomment the actual test below after we have implemented the L1 attack
# self.help_adv_examples_success_rate(
# norm=1, **self.attack_param)
def test_targeted_adv_example_success_rate_l1(self):
self.assertRaises(
NotImplementedError, self.help_targeted_adv_examples_success_rate,
norm=1, **self.attack_param)
# TODO uncomment the actual test below after we have implemented the L1 attack
# self.help_targeted_adv_examples_success_rate(
# norm=1, **self.attack_param)
def test_adv_example_success_rate_l2(self):
self.help_adv_examples_success_rate(
norm=2, **self.attack_param)
def test_targeted_adv_example_success_rate_l2(self):
self.help_targeted_adv_examples_success_rate(
norm=2, **self.attack_param)
def test_attack_strength(self):
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=1.,
clip_min=.5, clip_max=.7, nb_iter=20,
sanity_checks=False)
_, ori_label = self.model(self.normalized_x).max(1)
_, adv_label = self.model(x_adv).max(1)
adv_acc = (
adv_label.eq(ori_label).sum().to(torch.float)
/ self.normalized_x.size(0))
self.assertLess(adv_acc, .1)
def test_eps(self):
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.5, nb_iter=10)
delta, _ = torch.abs(x_adv - self.normalized_x).max(dim=1)
self.assertTrue(torch.all(delta <= .5 + 1e-6))
def test_clips(self):
clip_min = -1.
clip_max = 1.
x_adv = self.attack(
model_fn=self.model, x=self.normalized_x, eps=.3,
nb_iter=10, clip_min=clip_min, clip_max=clip_max)
self.assertTrue(torch.all(x_adv <= clip_max))
self.assertTrue(torch.all(x_adv >= clip_min))
class TestHopSkipJumpAttack(CommonAttackProperties):
def setUp(self):
super(TestHopSkipJumpAttack, self).setUp()
self.attack = hop_skip_jump_attack
def test_generate_np_untargeted_l2(self):
x_val = torch.rand(50, 2)
bapp_params = {
'norm': 2,
'stepsize_search': 'geometric_progression',
'num_iterations': 10,
'verbose': True,
}
x_adv = self.attack(model_fn=self.model, x=x_val, **bapp_params)
_, ori_label = self.model(x_val).max(1)
_, adv_label = self.model(x_adv).max(1)
adv_acc = (
adv_label.eq(ori_label).sum().to(torch.float)
/ x_val.size(0))
self.assertLess(adv_acc, .1)
def test_generate_untargeted_linf(self):
x_val = torch.rand(50, 2)
bapp_params = {
'norm': np.inf,
'stepsize_search': 'grid_search',
'num_iterations': 10,
'verbose': True,
}
x_adv = self.attack(model_fn=self.model, x=x_val, **bapp_params)
_, ori_label = self.model(x_val).max(1)
_, adv_label = self.model(x_adv).max(1)
adv_acc = (
adv_label.eq(ori_label).sum().to(torch.float)
/ x_val.size(0))
self.assertLess(adv_acc, .1)
def test_generate_np_targeted_linf(self):
x_val = torch.rand(200, 2)
_, ori_label = self.model(x_val).max(1)
x_val_pos = x_val[ori_label == 1]
x_val_neg = x_val[ori_label == 0]
x_val_under_attack = torch.cat(
(x_val_pos[:25], x_val_neg[:25]), dim=0)
y_target = torch.cat([torch.zeros(25, dtype=torch.int64), torch.ones(25, dtype=torch.int64)])
image_target = torch.cat((x_val_neg[25:50], x_val_pos[25:50]), dim=0)
bapp_params = {
'norm': np.inf,
'stepsize_search': 'geometric_progression',
'num_iterations': 10,
'verbose': True,
'y_target': y_target,
'image_target': image_target,
}
x_adv = self.attack(model_fn=self.model, x=x_val_under_attack, **bapp_params)
_, new_labs = self.model(x_adv).max(1)
adv_acc = (
new_labs.eq(y_target).sum().to(torch.float)
/ y_target.size(0))
self.assertGreater(adv_acc, .9)
def test_generate_targeted_l2(self):
    # Create input data (torch tensors).
x_val = torch.rand(200, 2)
_, ori_label = self.model(x_val).max(1)
x_val_pos = x_val[ori_label == 1]
x_val_neg = x_val[ori_label == 0]
x_val_under_attack = torch.cat(
(x_val_pos[:25], x_val_neg[:25]), dim=0)
y_target = torch.cat([torch.zeros(25, dtype=torch.int64), torch.ones(25, dtype=torch.int64)])
image_target = torch.cat((x_val_neg[25:50], x_val_pos[25:50]), dim=0)
    # Set up attack parameters.
bapp_params = {
      'norm': 2,  # numeric norm, consistent with the other tests in this file
'stepsize_search': 'grid_search',
'num_iterations': 10,
'verbose': True,
'y_target': y_target,
'image_target': image_target,
}
x_adv = self.attack(model_fn=self.model, x=x_val_under_attack, **bapp_params)
_, new_labs = self.model(x_adv).max(1)
adv_acc = (
new_labs.eq(y_target).sum().to(torch.float)
/ y_target.size(0))
self.assertGreater(adv_acc, .9)
|
{"hexsha": "d9c5eec89d9bfd30a520208a12d375cce1b21769", "size": 18989, "ext": "py", "lang": "Python", "max_stars_repo_path": "cleverhans/future/torch/tests/test_attacks.py", "max_stars_repo_name": "iamgroot42/cleverhans", "max_stars_repo_head_hexsha": "53da9cd6daf9d7457800831c3eaa75f729a39145", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-22T07:30:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T07:30:38.000Z", "max_issues_repo_path": "cleverhans/future/torch/tests/test_attacks.py", "max_issues_repo_name": "iamgroot42/cleverhans", "max_issues_repo_head_hexsha": "53da9cd6daf9d7457800831c3eaa75f729a39145", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cleverhans/future/torch/tests/test_attacks.py", "max_forks_repo_name": "iamgroot42/cleverhans", "max_forks_repo_head_hexsha": "53da9cd6daf9d7457800831c3eaa75f729a39145", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-29T14:13:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-29T14:13:21.000Z", "avg_line_length": 34.5254545455, "max_line_length": 97, "alphanum_fraction": 0.6615935542, "include": true, "reason": "import numpy", "num_tokens": 4897}
|
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import torchvision
import torchvision.transforms as transforms
ap = argparse.ArgumentParser()
ap.add_argument("input", help="path to the input file")
ap.add_argument("output", help="path to output file")
# ap.add_argument("-o", "--output", required=True, help="path to the output video file")
# ap.add_argument("-m", "--mask-rcnn", required=True, help="path to the mask-rcnn directory")
# ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
# ap.add_argument("-t", "--threshold", type=float, default=0.3, help="minimum threshold for pixel-wise mask segmentation")
args = ap.parse_args()
capture = cv2.VideoCapture(args.input)
writer = None
capture.set(cv2.CAP_PROP_FPS, 1)
classes = [
'background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'street sign',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack',
'umbrella', 'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'hair brush']
# load the model
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# set the model in evaluation model
model.eval()
# read the file:
# trying to determine the total number of frames in the video:
#try:
# prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
# else cv2.CAP_PROP_FRAME_COUNT
# total = int(capture.get(prop))
# print("[INFO] {} total frames in video".format(total))
#except:
# print("[INFO] could not determine # of frames in the video")
# total = -1
# frame processing loop (throttled to at most one processed frame per fpsLimit seconds):
fpsLimit = 1
startTime = 0
while True:
    # read the next frame from the file
    (grabbed, frame) = capture.read()
    # if the frame was not grabbed, we have reached the end of the stream:
    if not grabbed:
        break
    # skip frames until at least fpsLimit seconds have passed
    nowTime = time.time()
    if nowTime - startTime < fpsLimit:
        continue
    startTime = nowTime
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
nn_input = transform(frame)
output = model([nn_input])
# let's iterate over the network output for all boxes
processing_time = time.time()
for mask, box, score in zip(output[0]['masks'].detach().numpy(),
output[0]['boxes'].detach().numpy(),
output[0]['scores'].detach().numpy()):
if score > 0.5:
box = [(box[0], box[1]), (box[2], box[3])]
# overlay the segmentation mask on the image with random color
frame[(mask > 0.5).squeeze(), :] = np.random.uniform(0, 255, size=3)
# draw the bounding boxes
cv2.rectangle(img=frame,
pt1=box[0],
pt2=box[1],
color=(255, 255, 255),
thickness=2)
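    # Optional sketch (assumption: args.output is meant to receive an annotated
    # copy of the video; 30 fps and MJPG are placeholder choices):
    if writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args.output, fourcc, 30,
                                 (frame.shape[1], frame.shape[0]), True)
    writer.write(frame)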
cv2.imshow("object detection", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
capture.release()
if writer is not None:
    writer.release()
|
{"hexsha": "c0f2ff9b233d221e6c22f5f3c5d91880b424be6a", "size": 3805, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/mask_rcnn_model.py", "max_stars_repo_name": "Hira63S/DeepLearningResearch", "max_stars_repo_head_hexsha": "b6e8298a88fbc81de06d8e202603a80af8bbdaa2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scripts/mask_rcnn_model.py", "max_issues_repo_name": "Hira63S/DeepLearningResearch", "max_issues_repo_head_hexsha": "b6e8298a88fbc81de06d8e202603a80af8bbdaa2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/mask_rcnn_model.py", "max_forks_repo_name": "Hira63S/DeepLearningResearch", "max_forks_repo_head_hexsha": "b6e8298a88fbc81de06d8e202603a80af8bbdaa2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9417475728, "max_line_length": 122, "alphanum_fraction": 0.6252299606, "include": true, "reason": "import numpy", "num_tokens": 1029}
|
import numpy as np
from scipy.stats import (wasserstein_distance, ks_2samp, energy_distance,
                         anderson_ksamp, epps_singleton_2samp, ttest_ind)
def frobenius_norm(data1, data2):
    # norm of the element-wise difference (equal-shaped arrays assumed)
    fro = np.linalg.norm(np.asarray(data1) - np.asarray(data2))
    return str(fro)
def l2_norm(data1, data2):
    l2 = np.linalg.norm(np.asarray(data1).ravel() - np.asarray(data2).ravel())
    return str(l2)
def frechet_inception_distance(data1, data2):
    # TODO: needs an Inception embedding of both samples before FID can be computed
    pass
def t_test(data1, data2):
    t = ttest_ind(data1, data2)  # independent two-sample t-test
    return str(t)
def energy_dist(data1, data2):
# data1 = data1.flatten()
# data2 = data2.flatten()
ene = energy_distance(data1, data2)
print(ene)
return str(ene)
def ks_test(data1, data2):
# data1 = data1.flatten()
# data2 = data2.flatten()
ks = ks_2samp(data1, data2)
print(ks)
return str(ks)
def shapiro_wilk_test(data1, data2):
    # TODO: scipy's shapiro() is a one-sample normality test; it is unclear
    # how the two samples should be combined here, so this stays a stub
    pass
def anderson_darling_test(data1, data2):
# data1 = data1.flatten()
# data2 = data2.flatten()
ander = anderson_ksamp([data1, data2])
print(ander)
return str(ander)
def wass_distance(data1, data2):
# data1 = data1.flatten()
# data2 = data2.flatten()
wass = wasserstein_distance(data1, data2)
print(wass)
return str(wass)
def epps_singleton_test(data1, data2):
# data1 = data1.flatten()
# data2 = data2.flatten()
epps = epps_singleton_2samp(data1, data2)
print(epps)
return str(epps)
def calculateDistance(data1,data2,metrics):
results = {}
for metric in metrics:
if metric == 'Wasserstein Distance':
results[metric] = wass_distance(data1,data2)
elif metric == 'Frobenius Norm':
            results[metric] = frobenius_norm(data1, data2)
elif metric == 'L2 Norm':
results[metric] = l2_norm(data1, data2)
elif metric == 'Energy Distance':
results[metric] = energy_dist(data1, data2)
elif metric == 'Frechet Inception Distance':
results[metric] = frechet_inception_distance(data1, data2)
elif metric == 'Students T-test':
results[metric] = t_test(data1, data2)
elif metric == 'KS Test':
results[metric] = ks_test(data1, data2)
elif metric == 'Shapiro Wil Test':
            results[metric] = shapiro_wilk_test(data1, data2)
elif metric == 'Anderson Darling Test':
results[metric] = anderson_darling_test(data1, data2)
elif metric == 'Epps Singleton Test':
results[metric] = epps_singleton_test(data1, data2)
return results
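# Example usage (hypothetical 1-D samples):
# a, b = np.random.randn(100), np.random.randn(100)
# print(calculateDistance(a, b, ['Wasserstein Distance', 'KS Test']))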
|
{"hexsha": "b64cc9800079938fc9ea71372855ab46b63c9385", "size": 2308, "ext": "py", "lang": "Python", "max_stars_repo_path": "EvaluationMetrics/General_Diseases/distance_calculator.py", "max_stars_repo_name": "vampypandya/HealthGAN-Pre", "max_stars_repo_head_hexsha": "94a1f5c5672849ad3bad5791efdba74b79252a5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EvaluationMetrics/General_Diseases/distance_calculator.py", "max_issues_repo_name": "vampypandya/HealthGAN-Pre", "max_issues_repo_head_hexsha": "94a1f5c5672849ad3bad5791efdba74b79252a5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EvaluationMetrics/General_Diseases/distance_calculator.py", "max_forks_repo_name": "vampypandya/HealthGAN-Pre", "max_forks_repo_head_hexsha": "94a1f5c5672849ad3bad5791efdba74b79252a5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5287356322, "max_line_length": 87, "alphanum_fraction": 0.6403812825, "include": true, "reason": "from scipy", "num_tokens": 633}
|
import os
import numpy as np
import theano
import theano.tensor as T
from lib.utils.theano_utils import *
from lib.utils.lasagne_utils import *
from lib.utils.data_utils import *
from lib.utils.dr_utils import *
from lib.utils.attack_utils import *
from lib.utils.plot_utils import *
from lib.utils.model_utils import *
def gradient_calc(rd, model_dict, X_train, y_train, X_test, y_test, X_val, y_val):
# Parameters
rev_flag = None
dim_red = model_dict['dim_red']
data_dict, test_prediction, dr_alg, X_test, input_var, target_var = \
model_setup(model_dict, X_train, y_train, X_test, y_test, X_val, y_val,
rd, rev=rev_flag)
test_len = data_dict['test_len']
no_of_features = data_dict['no_of_features']
X_test_dr = X_test.reshape((test_len, no_of_features))
var_array = np.sqrt(np.var(X_test, axis=0))
var_list = list(var_array)
gradient_comp = avg_grad_calc(input_var, target_var, test_prediction,
X_test, y_test)
gradient_list = list(gradient_comp)
return zip(var_list, gradient_list)
def main():
# Create model_dict from arguments
model_dict = model_dict_create()
# Reduced dimensions used
# rd_list = [784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]
rd_list = [784, 331]
# Load dataset specified in model_dict
print('Loading data...')
dataset = model_dict['dataset']
if (dataset == 'MNIST') or (dataset == 'GTSRB'):
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)
    elif dataset == 'HAR':
        # the HAR loader returns no validation split; reuse the test set so the
        # gradient_calc call below stays well-defined
        X_train, y_train, X_test, y_test = load_dataset(model_dict)
        X_val, y_val = X_test, y_test
    no_of_dims = len(rd_list)
    gradient_var_list = []
for rd in rd_list:
gradient_var_list.append(gradient_calc(rd, model_dict, X_train, y_train,
X_test, y_test, X_val, y_val))
mag_var_scatter(gradient_var_list, no_of_dims)
if __name__ == "__main__":
main()
|
{"hexsha": "0c42784a1fd04988f873d760521827cf7d491a6d", "size": 1975, "ext": "py", "lang": "Python", "max_stars_repo_path": "dr_theory.py", "max_stars_repo_name": "inspire-group/ml_defense", "max_stars_repo_head_hexsha": "e7e8944d617885389a013061c320fa3553e779f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2017-04-27T12:54:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-23T23:40:01.000Z", "max_issues_repo_path": "dr_theory.py", "max_issues_repo_name": "HangJie720/ml_defense", "max_issues_repo_head_hexsha": "e7e8944d617885389a013061c320fa3553e779f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dr_theory.py", "max_forks_repo_name": "HangJie720/ml_defense", "max_forks_repo_head_hexsha": "e7e8944d617885389a013061c320fa3553e779f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-09-12T01:45:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T20:18:33.000Z", "avg_line_length": 30.859375, "max_line_length": 82, "alphanum_fraction": 0.6764556962, "include": true, "reason": "import numpy,import theano", "num_tokens": 517}
|
"""
Model contributed by: MITRE Corporation
Adapted from: https://github.com/craston/MARS
"""
import logging
from art.classifiers import PyTorchClassifier
import numpy as np
from PIL import Image
import torch
from torch import optim
from MARS.opts import parse_opts
from MARS.models.model import generate_model
from MARS.dataset import preprocess_data
logger = logging.getLogger(__name__)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MEAN = np.array([114.7748, 107.7354, 99.4750], dtype=np.float32)
STD = np.array([1, 1, 1], dtype=np.float32)
def preprocessing_fn_numpy(inputs):
"""
Inputs is comprised of one or more videos, where each video
is given as an ndarray with shape (1, time, height, width, 3).
Preprocessing resizes the height and width to 112 x 112 and reshapes
each video to (n_stack, 3, 16, height, width), where n_stack = int(time/16).
Outputs is a list of videos, each of shape (n_stack, 3, 16, 112, 112)
"""
sample_duration = 16 # expected number of consecutive frames as input to the model
outputs = []
if inputs.dtype == np.uint8: # inputs is a single video, i.e., batch size == 1
inputs = [inputs]
# else, inputs is an ndarray (of type object) of ndarrays
    for input in inputs:  # each input is (1, time, height, width, 3) from the same video
        input = np.squeeze(input)
# select a fixed number of consecutive frames
total_frames = input.shape[0]
if total_frames <= sample_duration: # cyclic pad if not enough frames
input_fixed = np.vstack(
(input, input[: sample_duration - total_frames, ...])
)
assert input_fixed.shape[0] == sample_duration
else:
input_fixed = input
# apply MARS preprocessing: scaling, cropping, normalizing
opt = parse_opts(arguments=[])
opt.modality = "RGB"
opt.sample_size = 112
input_Image = [] # convert each frame to PIL Image
for f in input_fixed:
input_Image.append(Image.fromarray(f))
input_mars_preprocessed = preprocess_data.scale_crop(input_Image, 0, opt)
# reshape
input_reshaped = []
for ns in range(int(total_frames / sample_duration)):
np_frames = input_mars_preprocessed[
:, ns * sample_duration : (ns + 1) * sample_duration, :, :
].numpy()
input_reshaped.append(np_frames)
outputs.append(np.array(input_reshaped, dtype=np.float32))
return outputs
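# Example (hypothetical input): a (1, 40, 240, 320, 3) uint8 video yields a list
# holding one (2, 3, 16, 112, 112) float32 array; 40 frames form two 16-frame
# stacks and the trailing 8 frames are dropped.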
def preprocessing_fn_torch(
batch, consecutive_frames=16, scale_first=True, align_corners=False
):
"""
inputs - batch of videos each with shape (frames, height, width, channel)
outputs - batch of videos each with shape (n_stack, channel, stack_frames, new_height, new_width)
frames = n_stack * stack_frames (after padding)
new_height = new_width = 112
consecutive_frames - number of consecutive frames (stack_frames)
After resizing, a center crop is performed to make the image square
This is a differentiable alternative to MARS' PIL-based preprocessing.
    There are some small numerical differences from the PIL-based pipeline
    (e.g., in interpolation), controlled by scale_first and align_corners.
"""
if not isinstance(batch, torch.Tensor):
logger.warning(f"batch {type(batch)} is not a torch.Tensor. Casting")
batch = torch.from_numpy(batch).to(DEVICE)
# raise ValueError(f"batch {type(batch)} is not a torch.Tensor")
if batch.dtype != torch.float32:
raise ValueError(f"batch {batch.dtype} should be torch.float32")
if batch.shape[0] != 1:
raise ValueError(f"Batch size {batch.shape[0]} != 1")
video = batch[0]
if video.ndim != 4:
raise ValueError(
f"video dims {video.ndim} != 4 (frames, height, width, channel)"
)
if video.shape[0] < 1:
raise ValueError("video must have at least one frame")
if tuple(video.shape[1:]) != (240, 320, 3):
raise ValueError(f"frame shape {tuple(video.shape[1:])} != (240, 320, 3)")
if video.max() > 1.0 or video.min() < 0.0:
raise ValueError("input should be float32 in [0, 1] range")
if not isinstance(consecutive_frames, int):
raise ValueError(f"consecutive_frames {consecutive_frames} must be an int")
if consecutive_frames < 1:
raise ValueError(f"consecutive_frames {consecutive_frames} must be positive")
    # Select an integer multiple of consecutive frames
while len(video) < consecutive_frames:
# cyclic pad if insufficient for a single stack
video = torch.cat([video, video[: consecutive_frames - len(video)]])
if len(video) % consecutive_frames != 0:
# cut trailing frames
video = video[: len(video) - (len(video) % consecutive_frames)]
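    # e.g., a 10-frame video is cyclically padded to 16 frames (one stack),
    # while a 37-frame video is trimmed to 32 frames (two stacks of 16)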
if scale_first:
# Attempts to directly follow MARS approach
# (frames, height, width, channel) to (frames, channel, height, width)
video = video.permute(0, 3, 1, 2)
sample_width, sample_height = 149, 112
video = torch.nn.functional.interpolate(
video,
size=(sample_height, sample_width),
mode="bilinear",
align_corners=align_corners,
)
crop_left = 18 # round((149 - 112)/2.0)
video = video[:, :, :, crop_left : crop_left + sample_height]
else:
# More efficient, but not MARS approach
# Center crop
sample_size = 112
upsample, downsample = 7, 15
assert video.shape[1] * upsample / downsample == sample_size
crop_width = 40
assert crop_width == (video.shape[2] - video.shape[1]) / 2
assert video.shape[1] + 2 * crop_width == video.shape[2]
video = video[:, :, crop_width : video.shape[2] - crop_width, :]
assert video.shape[1] == video.shape[2] == 240
# Downsample to (112, 112) frame size
# (frames, height, width, channel) to (frames, channel, height, width)
video = video.permute(0, 3, 1, 2)
video = torch.nn.functional.interpolate(
video,
size=(sample_size, sample_size),
mode="bilinear",
align_corners=align_corners,
)
if video.max() > 1.0:
raise ValueError("Video exceeded max after interpolation")
if video.min() < 0.0:
raise ValueError("Video under min after interpolation")
# reshape into stacks of frames
video = torch.reshape(video, (-1, consecutive_frames) + video.shape[1:])
# transpose to (stacks, channel, stack_frames, height, width)
video = video.permute(0, 2, 1, 3, 4)
    # normalize per channel: move channels to the last axis, subtract MEAN,
    # divide by STD, then move channels back
video = torch.transpose(video, 1, 4)
video = ((video * 255) - torch.from_numpy(MEAN).to(DEVICE)) / torch.from_numpy(
STD
).to(DEVICE)
video = torch.transpose(video, 4, 1)
return video
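# Shape sketch (illustrative): a (1, 32, 240, 320, 3) float32 batch in [0, 1]
# becomes a (2, 3, 16, 112, 112) tensor of normalized 16-frame stacks, e.g.:
#
#   batch = torch.rand(1, 32, 240, 320, 3)
#   assert preprocessing_fn_torch(batch).shape == (2, 3, 16, 112, 112)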
def fit_preprocessing_fn_numpy(batch):
"""
Randomly sample a single stack from each video
"""
x = preprocessing_fn_numpy(batch)
x = np.stack([x_i[np.random.randint(x_i.shape[0])] for x_i in x])
return x
preprocessing_fn = fit_preprocessing_fn_numpy
def make_model(model_status="ucf101_trained", weights_path=None):
statuses = ("ucf101_trained", "kinetics_pretrained")
if model_status not in statuses:
raise ValueError(f"model_status {model_status} not in {statuses}")
trained = model_status == "ucf101_trained"
if not trained and weights_path is None:
raise ValueError("weights_path cannot be None for 'kinetics_pretrained'")
opt = parse_opts(arguments=[])
opt.dataset = "UCF101"
opt.only_RGB = True
opt.log = 0
opt.batch_size = 1
opt.arch = f"{opt.model}-{opt.model_depth}"
if trained:
opt.n_classes = 101
else:
opt.n_classes = 400
opt.n_finetune_classes = 101
opt.batch_size = 32
opt.ft_begin_index = 4
opt.pretrain_path = weights_path
logger.info(f"Loading model... {opt.model} {opt.model_depth}")
model, parameters = generate_model(opt)
if trained and weights_path is not None:
checkpoint = torch.load(weights_path, map_location=DEVICE)
model.load_state_dict(checkpoint["state_dict"])
# Initializing the optimizer
if opt.pretrain_path:
opt.weight_decay = 1e-5
opt.learning_rate = 0.001
if opt.nesterov:
dampening = 0
else:
dampening = opt.dampening
optimizer = optim.SGD(
parameters,
lr=opt.learning_rate,
momentum=opt.momentum,
dampening=dampening,
weight_decay=opt.weight_decay,
nesterov=opt.nesterov,
)
return model, optimizer
class OuterModel(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x):
if self.training:
# Use preprocessing_fn_numpy in dataset preprocessing
return self.model(x)
else:
x = preprocessing_fn_torch(x)
stack_outputs = self.model(x)
            output = stack_outputs.mean(dim=0, keepdim=True)
return output
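# Note (sketch): at inference a single (1, frames, 240, 320, 3) video expands
# into n_stack clips via preprocessing_fn_torch, and averaging the per-clip
# logits yields one prediction for the whole video.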
def get_art_model(model_kwargs, wrapper_kwargs, weights_path):
inner_model, optimizer = make_model(weights_path=weights_path, **model_kwargs)
inner_model.to(DEVICE)
model = OuterModel(inner_model)
model.to(DEVICE)
wrapped_model = PyTorchClassifier(
model,
loss=torch.nn.CrossEntropyLoss(),
optimizer=optimizer,
input_shape=(None, 240, 320, 3),
nb_classes=101,
clip_values=(0.0, 1.0),
**wrapper_kwargs,
)
return wrapped_model
|
{"hexsha": "5ff974c582ad9053e19a0b57819b663fe2305df1", "size": 9716, "ext": "py", "lang": "Python", "max_stars_repo_path": "armory/baseline_models/pytorch/ucf101_mars.py", "max_stars_repo_name": "paperwhite/armory", "max_stars_repo_head_hexsha": "3868cf5dd86578b58105f5901139a2f0b939ab15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "armory/baseline_models/pytorch/ucf101_mars.py", "max_issues_repo_name": "paperwhite/armory", "max_issues_repo_head_hexsha": "3868cf5dd86578b58105f5901139a2f0b939ab15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "armory/baseline_models/pytorch/ucf101_mars.py", "max_forks_repo_name": "paperwhite/armory", "max_forks_repo_head_hexsha": "3868cf5dd86578b58105f5901139a2f0b939ab15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5765124555, "max_line_length": 101, "alphanum_fraction": 0.6423425278, "include": true, "reason": "import numpy", "num_tokens": 2397}
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for symbolic.mutators."""
import copy
import itertools
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax
from symbolic_functionals.syfes.symbolic import enhancement_factors
from symbolic_functionals.syfes.symbolic import instructions
from symbolic_functionals.syfes.symbolic import mutators
from symbolic_functionals.syfes.symbolic import xc_functionals
jax.config.update('jax_enable_x64', True)
class EnhancementFactorMutatorTest(parameterized.TestCase):
# tests on initialization
def test_initialization_with_unknown_instruction(self):
with self.assertRaisesRegex(
ValueError, 'Unknown instruction: UnknownInstruction'):
mutators.EnhancementFactorMutator(
instruction_pool={'UnknownInstruction': 1.})
def test_initialization_with_unnormalized_probability_for_instructions(self):
with self.assertRaisesRegex(
ValueError, 'Instruction probabilities are not normalized to 1'):
mutators.EnhancementFactorMutator(
instruction_pool={'AdditionInstruction': 1., 'Power2Instruction': 1.})
def test_initialization_with_negative_probability_for_instructions(self):
with self.assertRaisesRegex(
ValueError, 'Instruction pool contains negative probabilities'):
mutators.EnhancementFactorMutator(
instruction_pool={
'AdditionInstruction': -0.2,
'Power2Instruction': 0.6,
'MultiplicationInstruction': 0.6})
def test_initialization_with_unknown_mutation(self):
with self.assertRaisesRegex(
ValueError, 'Unknown mutation type: apply_unknown_mutation'):
mutators.EnhancementFactorMutator(
mutation_pool={'apply_unknown_mutation': 1.})
def test_initialization_with_unnormalized_probability_for_mutations(self):
with self.assertRaisesRegex(
ValueError, 'Mutation probabilities are not normalized to 1'):
mutators.EnhancementFactorMutator(
mutation_pool={'insert_instruction': 1., 'remove_instruction': 1.})
def test_initialization_with_negative_probability_for_mutations(self):
with self.assertRaisesRegex(
ValueError, 'Mutation pool contains negative probabilities'):
mutators.EnhancementFactorMutator(
mutation_pool={
'insert_instruction': -0.2,
'replace_instruction': 0.6,
'change_argument': 0.6})
# tests on helper functions
def test_get_random_mutation_type(self):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={
'insert_instruction': 0.25,
'remove_instruction': 0.25,
'replace_instruction': 0.25,
'change_argument': 0.25},
seed=3)
mutation_types = [mutator.get_random_mutation_type() for _ in range(10)]
self.assertEqual(mutation_types, [
'replace_instruction',
'replace_instruction',
'remove_instruction',
'replace_instruction',
'change_argument',
'change_argument',
'insert_instruction',
'insert_instruction',
'insert_instruction',
'remove_instruction'])
@parameterized.parameters(0, 1, 2)
def test_get_random_instruction_name(self, max_num_bound_parameters):
mutator = mutators.EnhancementFactorMutator(
max_num_bound_parameters=max_num_bound_parameters)
instruction_class = instructions.INSTRUCTION_CLASSES[
mutator.get_random_instruction_name(existing_bound_parameters=[])]
self.assertLessEqual(
instruction_class.get_num_bound_parameters(), max_num_bound_parameters)
@parameterized.parameters(
(1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)
)
def test_get_random_instruction_name_num_inputs(self,
num_inputs,
max_num_bound_parameters):
mutator = mutators.EnhancementFactorMutator(
max_num_bound_parameters=max_num_bound_parameters)
instruction_class = instructions.INSTRUCTION_CLASSES[
mutator.get_random_instruction_name(
existing_bound_parameters=[], num_inputs=num_inputs)]
self.assertEqual(instruction_class.get_num_inputs(), num_inputs)
self.assertLessEqual(
instruction_class.get_num_bound_parameters(), max_num_bound_parameters)
def test_get_random_instruction_name_existing_bound_parameters(self):
mutator = mutators.EnhancementFactorMutator(
instruction_pool={'UTransformInstruction': 0.5, 'B88XInstruction': 0.5},
max_num_bound_parameters=1)
instruction_name = mutator.get_random_instruction_name(
existing_bound_parameters=['gamma_utransform'], num_inputs=1)
self.assertEqual(instruction_name, 'UTransformInstruction')
def test_get_random_instruction_name_wrong_instruction_pool(self):
mutator = mutators.EnhancementFactorMutator(
instruction_pool={'PBECInstruction': 1.0},
max_num_bound_parameters=2)
with self.assertRaisesRegex(
ValueError, 'No instruction in instruction pool satisfies conditions: '
r"num_inputs = 2, existing_bound_parameters = \['gamma_utransform'\]"):
mutator.get_random_instruction_name(
existing_bound_parameters=['gamma_utransform'], num_inputs=2)
with self.assertRaisesRegex(
ValueError, 'No instruction in instruction pool satisfies conditions: '
r'num_inputs = 1, existing_bound_parameters = \[\]'):
mutator.get_random_instruction_name(
existing_bound_parameters=[], num_inputs=1)
def test_get_random_instruction_name_wrong_bound_parameter(self):
mutator = mutators.EnhancementFactorMutator(max_num_bound_parameters=0)
with self.assertRaisesRegex(
ValueError, 'No instruction in instruction pool satisfies conditions: '
r"num_inputs = 2, existing_bound_parameters = \['gamma_utransform'\]"):
mutator.get_random_instruction_name(
existing_bound_parameters=['gamma_utransform'], num_inputs=2)
@parameterized.parameters(
*instructions.Instruction.__subclasses__()
)
def test_get_random_instruction(self, instruction_class):
mutator = mutators.EnhancementFactorMutator(
instruction_pool={instruction_class.__name__: 1.0})
new_instruction = mutator.get_random_instruction(
enhancement_factor=enhancement_factors.f_empty)
self.assertIsInstance(new_instruction, instruction_class)
# tests on mutations
def test_mutate_yield_correct_length_of_instruction_list(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
side_effect=['insert_instruction', 'remove_instruction',
'replace_instruction', 'change_argument']):
mutator = mutators.EnhancementFactorMutator()
enhancement_factor, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factors.f_x_wb97mv_short,
verbose=False)
self.assertEqual(mutation_type, 'insert_instruction')
self.assertEqual(enhancement_factor.num_instructions, 7)
enhancement_factor, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factor, verbose=False)
self.assertEqual(mutation_type, 'remove_instruction')
self.assertEqual(enhancement_factor.num_instructions, 6)
enhancement_factor, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factor, verbose=False)
self.assertEqual(mutation_type, 'replace_instruction')
self.assertEqual(enhancement_factor.num_instructions, 6)
enhancement_factor, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factor, verbose=False)
self.assertEqual(mutation_type, 'change_argument')
self.assertEqual(enhancement_factor.num_instructions, 6)
def test_mutate_with_empty_instruction_list(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
return_value='remove_instruction'):
mutator = mutators.EnhancementFactorMutator()
_, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factors.f_empty, verbose=False)
self.assertEqual(mutation_type, 'insert_instruction')
def test_mutate_with_empty_instruction_list_no_insertion(self):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'remove_instruction': 1.0})
with self.assertRaisesRegex(
ValueError,
'Mutation cannot proceed on empty instruction list with '
'zero insertion probability'):
mutator.mutate(
enhancement_factor=enhancement_factors.f_empty, verbose=False)
def test_mutate_until_maximum_number_of_instructions(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
side_effect=['insert_instruction', 'replace_instruction']):
mutator = mutators.EnhancementFactorMutator(max_num_instructions=6)
new_enhancement_factor, mutation_type, _, _ = mutator.mutate(
enhancement_factor=enhancement_factors.f_x_wb97mv_short,
verbose=False)
self.assertEqual(mutation_type, 'replace_instruction')
self.assertEqual(new_enhancement_factor.num_instructions, 6)
def test_mutate_until_maximum_number_of_instructions_only_insertion(self):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'insert_instruction': 1.0},
max_num_instructions=6)
with self.assertRaisesRegex(
ValueError,
'Mutation cannot proceed on max_num_instructions with '
'only insertions allowed'):
mutator.mutate(
enhancement_factor=enhancement_factors.f_x_wb97mv_short,
verbose=False)
def test_mutate_beyond_maximum_number_of_instructions(self):
mutator = mutators.EnhancementFactorMutator(max_num_instructions=5)
with self.assertRaisesRegex(
ValueError,
'Mutation cannot proceed with instruction_list longer '
'than max_num_instructions'):
mutator.mutate(
enhancement_factor=enhancement_factors.f_x_wb97mv_short,
verbose=False)
def test_mutate_beyond_maximum_number_of_bound_parameters(self):
mutator = mutators.EnhancementFactorMutator(max_num_bound_parameters=0)
with self.assertRaisesRegex(
ValueError,
'Mutation cannot proceed with number of bound parameters greater '
'than max_num_bound_parameters'):
mutator.mutate(
enhancement_factor=enhancement_factors.f_x_wb97mv_short,
verbose=False)
def test_insert_instruction(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
return_value='insert_instruction'):
mutator = mutators.EnhancementFactorMutator()
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_enhancement_factor, mutation_type, instruction_index, change = (
mutator.mutate(enhancement_factor=enhancement_factor, verbose=False))
self.assertEqual(enhancement_factor, enhancement_factors.f_x_wb97mv_short)
self.assertEqual(mutation_type, 'insert_instruction')
self.assertIsNone(change[0])
self.assertEqual(
change[1],
new_enhancement_factor.instruction_list[instruction_index])
new_enhancement_factor.instruction_list.pop(instruction_index)
self.assertEqual(
new_enhancement_factor, enhancement_factors.f_x_wb97mv_short)
def test_remove_instruction(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
return_value='remove_instruction'):
mutator = mutators.EnhancementFactorMutator()
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_enhancement_factor, mutation_type, instruction_index, change = (
mutator.mutate(enhancement_factor=enhancement_factor, verbose=False))
self.assertEqual(enhancement_factor, enhancement_factors.f_x_wb97mv_short)
self.assertEqual(mutation_type, 'remove_instruction')
self.assertEqual(
change[0],
enhancement_factors.f_x_wb97mv_short.instruction_list[
instruction_index])
self.assertIsNone(change[1])
new_enhancement_factor.instruction_list.insert(
instruction_index, change[0])
self.assertEqual(
new_enhancement_factor, enhancement_factors.f_x_wb97mv_short)
def test_replace_instruction(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
return_value='replace_instruction'):
mutator = mutators.EnhancementFactorMutator()
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_enhancement_factor, mutation_type, instruction_index, change = (
mutator.mutate(enhancement_factor=enhancement_factor, verbose=False))
self.assertEqual(enhancement_factor, enhancement_factors.f_x_wb97mv_short)
self.assertEqual(mutation_type, 'replace_instruction')
self.assertEqual(
change[0],
enhancement_factors.f_x_wb97mv_short.instruction_list[
instruction_index])
self.assertEqual(
change[1],
new_enhancement_factor.instruction_list[instruction_index])
self.assertEqual(change[0].args, change[1].args)
new_enhancement_factor.instruction_list[instruction_index] = change[0]
self.assertEqual(
new_enhancement_factor, enhancement_factors.f_x_wb97mv_short)
def test_change_argument(self):
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_mutation_type',
return_value='change_argument'):
mutator = mutators.EnhancementFactorMutator()
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_enhancement_factor, mutation_type, instruction_index, change = (
mutator.mutate(enhancement_factor=enhancement_factor, verbose=False))
self.assertEqual(enhancement_factor, enhancement_factors.f_x_wb97mv_short)
self.assertEqual(mutation_type, 'change_argument')
self.assertEqual(
change[0],
enhancement_factors.f_x_wb97mv_short.instruction_list[
instruction_index])
self.assertEqual(
change[1],
new_enhancement_factor.instruction_list[instruction_index])
self.assertEqual(type(change[0]), type(change[1]))
new_enhancement_factor.instruction_list[instruction_index] = change[0]
self.assertEqual(
new_enhancement_factor, enhancement_factors.f_x_wb97mv_short)
def test_randomize_instruction_list(self):
expected_instruction_list = [
instructions.UTransformInstruction('u', 'x2'),
instructions.AdditionInstruction(
'enhancement_factor', 'c00', 'enhancement_factor'),
instructions.MultiplicationInstruction(
'linear_term', 'c10', 'w'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
instructions.MultiplicationInstruction(
'linear_term', 'c01', 'u'),
instructions.AdditionInstruction(
'enhancement_factor', 'enhancement_factor', 'linear_term'),
]
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_instruction',
side_effect=expected_instruction_list):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'randomize_instruction_list': 1.})
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_enhancement_factor, mutation_type, _, _ = (
mutator.mutate(enhancement_factor=enhancement_factor, verbose=False))
self.assertEqual(enhancement_factor, enhancement_factors.f_x_wb97mv_short)
self.assertEqual(mutation_type, 'randomize_instruction_list')
self.assertEqual(
new_enhancement_factor.instruction_list, expected_instruction_list)
def test_randomize_instruction_list_fixed_num_instructions(self):
expected_instruction_list = [
instructions.AdditionInstruction('enhancement_factor', 'c10', 'u'),
instructions.MultiplicationInstruction(
'enhancement_factor', 'c10', 'u'),
instructions.DivisionInstruction('enhancement_factor', 'c10', 'u')]
with mock.patch.object(
mutators.EnhancementFactorMutator, 'get_random_instruction',
side_effect=expected_instruction_list):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'randomize_instruction_list': 1.})
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_instruction_list, _, _, _ = mutator.randomize_instruction_list(
enhancement_factor, num_instructions=2)
self.assertEqual(new_instruction_list, expected_instruction_list[:2])
@parameterized.parameters(0, 2, 4)
def test_randomize_instruction_list_max_num_bound_parameters(
self, max_num_bound_parameters):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'randomize_instruction_list': 1.},
max_num_instructions=10,
max_num_bound_parameters=max_num_bound_parameters)
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
new_instruction_list, _, _, _ = mutator.randomize_instruction_list(
enhancement_factor)
self.assertLessEqual(
len(set(itertools.chain(
*[instruction.get_bound_parameters()
for instruction in new_instruction_list]))),
max_num_bound_parameters)
def test_randomize_instruction_list_with_fixed_instructions(self):
mutator = mutators.EnhancementFactorMutator(
mutation_pool={'randomize_instruction_list': 1.},
num_fixed_instructions=1)
enhancement_factor = copy.deepcopy(enhancement_factors.f_x_wb97mv_short)
with self.assertRaisesRegex(
ValueError, 'randomize_instruction_list cannot be applied with '
'fixed instructions'):
mutator.randomize_instruction_list(enhancement_factor)
class XCFunctionalMutatorTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.mutator_x = mutators.EnhancementFactorMutator()
self.mutator_css = mutators.EnhancementFactorMutator()
self.mutator_cos = mutators.EnhancementFactorMutator()
def test_initialization_with_wrong_probability_length(self):
with self.assertRaisesRegex(
ValueError,
'Wrong length for component_mutation_probabilities. Expected 3, got 2'):
mutators.XCFunctionalMutator(
self.mutator_x, self.mutator_css, self.mutator_cos,
component_mutation_probabilities=[0.5, 0.5])
def test_initialization_with_unnormalized_probability(self):
with self.assertRaisesRegex(
ValueError, 'component_mutation_probabilities not normalized to 1'):
mutators.XCFunctionalMutator(
self.mutator_x, self.mutator_css, self.mutator_cos,
component_mutation_probabilities=[0.4, 0.4, 0.4])
def test_initialization_with_negative_probability(self):
with self.assertRaisesRegex(
ValueError,
'component_mutation_probabilities contains negative probabilities'):
mutators.XCFunctionalMutator(
self.mutator_x, self.mutator_css, self.mutator_cos,
component_mutation_probabilities=[-0.2, 0.6, 0.6])
def test_get_random_component(self):
mutator = mutators.XCFunctionalMutator(
self.mutator_x, self.mutator_css, self.mutator_cos, seed=1)
self.assertEqual(
[mutator.get_random_component() for _ in range(10)],
['f_css', 'f_cos', 'f_x', 'f_x', 'f_x',
'f_x', 'f_x', 'f_css', 'f_css', 'f_css'])
@parameterized.parameters('f_x', 'f_css', 'f_cos')
def test_mutate(self, component):
with mock.patch.object(
mutators.XCFunctionalMutator,
'get_random_component',
return_value=component), mock.patch.object(
mutators.EnhancementFactorMutator,
'get_random_mutation_type',
return_value='remove_instruction'):
mutator = mutators.XCFunctionalMutator(
mutator_x=mutators.EnhancementFactorMutator(),
mutator_css=mutators.EnhancementFactorMutator(),
mutator_cos=mutators.EnhancementFactorMutator())
functional = copy.deepcopy(xc_functionals.b97_u)
new_functional, mutated_component, _, instruction_index, change = (
mutator.mutate(functional, verbose=False))
self.assertEqual(functional, xc_functionals.b97_u)
self.assertEqual(mutated_component, component)
new_enhancement_factor = getattr(new_functional, component)
self.assertEqual(new_enhancement_factor.num_instructions, 4)
new_enhancement_factor.instruction_list.insert(
instruction_index, change[0])
self.assertEqual(new_functional, functional)
if __name__ == '__main__':
absltest.main()
|
{"hexsha": "af672d05dbf769867965181cda5939a10a609ba9", "size": 21643, "ext": "py", "lang": "Python", "max_stars_repo_path": "symbolic_functionals/syfes/symbolic/mutators_test.py", "max_stars_repo_name": "gunpowder78/google-research", "max_stars_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-19T04:26:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T04:26:12.000Z", "max_issues_repo_path": "symbolic_functionals/syfes/symbolic/mutators_test.py", "max_issues_repo_name": "gunpowder78/google-research", "max_issues_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "symbolic_functionals/syfes/symbolic/mutators_test.py", "max_forks_repo_name": "gunpowder78/google-research", "max_forks_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-30T07:20:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:20:29.000Z", "avg_line_length": 42.9424603175, "max_line_length": 80, "alphanum_fraction": 0.7351568637, "include": true, "reason": "import jax", "num_tokens": 4584}
|
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import eigh
import numpy as np
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_examples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
-----------
X_pc: {NumPy ndarray}, shape = [n_examples, k_features]
Projected dataset
"""
# Calculate pairwise squared Euclidean distances in the
# MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = np.exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# scipy.linalg.eigh returns them in ascending order
eigvals, eigvecs = eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
# Collect the top k eigenvectors (projected examples)
alphas = np.column_stack([eigvecs[:, i] for i in range(n_components)])
# Collect the corresponding eigenvalues
lambdas = [eigvals[i] for i in range(n_components)]
return alphas, lambdas
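# A minimal usage sketch (assumes scikit-learn is available; not part of the
# original module): project the half-moon dataset onto 2 kernel components.
if __name__ == '__main__':
    from sklearn.datasets import make_moons
    X, y = make_moons(n_samples=100, random_state=123)
    alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=2)
    print(alphas.shape, len(lambdas))  # (100, 2) 2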
|
{"hexsha": "6409704c06d6fa38786fbce870795a14c77b468a", "size": 1462, "ext": "py", "lang": "Python", "max_stars_repo_path": "O5/_26_kernel_principal_component_analysis/kpca.py", "max_stars_repo_name": "ShAlireza/ML-Tries", "max_stars_repo_head_hexsha": "4516be7a3275c9bdedd7bd258800be384b6b34f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "O5/_26_kernel_principal_component_analysis/kpca.py", "max_issues_repo_name": "ShAlireza/ML-Tries", "max_issues_repo_head_hexsha": "4516be7a3275c9bdedd7bd258800be384b6b34f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "O5/_26_kernel_principal_component_analysis/kpca.py", "max_forks_repo_name": "ShAlireza/ML-Tries", "max_forks_repo_head_hexsha": "4516be7a3275c9bdedd7bd258800be384b6b34f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0740740741, "max_line_length": 74, "alphanum_fraction": 0.6614227086, "include": true, "reason": "import numpy,from scipy", "num_tokens": 381}
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""GAIL algorithm."""
import numpy as np
from pyarrow import deserialize
import lz4.frame
from xt.algorithm import Algorithm, alg_builder
from xt.framework.register import Registers
from xt.util.common import import_config
from xt.util.data import get_data, get_datalen, init_file
@Registers.algorithm
class GAIL(Algorithm):
"""IMPALA algorithm"""
def __init__(self, model_info, alg_config, **kwargs):
import_config(globals(), alg_config)
super(GAIL, self).__init__(alg_name="gail", model_info=model_info["gail"],
alg_config=alg_config)
alg_para = {}
alg_para.update({'alg_name': alg_config['alg_name']})
alg_para.update({'async_flag': alg_config['async_flag']})
alg_para.update({'model_info': model_info})
alg_para.update({'alg_config': alg_config})
# print(alg_para)
self.policy_alg = alg_builder(**alg_para)
if alg_config.get("type", 'learner') != 'actor':
expert_data_path = alg_config.get("expert_data", None)
self.expert_data = init_file(expert_data_path)
self.expert_data_len = get_datalen(self.expert_data)
self.data_index = 0
self.labels = np.zeros((1280, 2))
self.state = None
self.action = None
def train(self, **kwargs):
"""refer to alg"""
actor_loss = self.policy_alg.train()
expert_state, expert_action = self.get_expert_data()
data_len = min(self.state.shape[0], expert_state.shape[0])
if data_len <= 1:
return 0.
# print('====data len====', data_len, self.state.shape[0], expert_state.shape[0])
self.actor.train(
[self.state[0:data_len], self.action[0:data_len],
expert_state[0:data_len], expert_action[0:data_len]],
self.labels[0:data_len])
return actor_loss
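    # GAIL sketch: each train() call first updates the policy algorithm, then
    # trains the discriminator (self.actor) on equal-sized batches of on-policy
    # and expert (state, action) pairs, with self.labels as the targets.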
def save(self, model_path, model_index):
"""use policy alg to save"""
actor_name = self.policy_alg.save(model_path, model_index)
return actor_name
def restore(self, model_name, model_weights=None):
self.policy_alg.load(model_name, model_weights)
def get_weights(self):
"""use sub policy alg to get weights"""
return self.policy_alg.get_weights()
def prepare_data(self, train_data, **kwargs):
# episode_data = train_data[1]
# get reward from gail
states = np.asarray(train_data["cur_state"])
actions = np.asarray(train_data["real_action"])
action_matrix = np.eye(self.action_dim)[actions.reshape(-1)]
# new_reward = self.actor.predict([states, action_matrix, states, action_matrix])
# fixme: gail algorithm not ready # pylint: disable=W0511
# for i, data in enumerate(train_data):
# data[2] = new_reward[i]
        # push new data to the policy algorithm
self.policy_alg.prepare_data(train_data)
self.state = states
self.action = action_matrix
def output(self, state):
"""refer to alg"""
pred = self.policy_alg.output(state)
return pred
def get_expert_data(self):
data = get_data(self.expert_data, self.data_index)
data = deserialize(lz4.frame.decompress(data))
episode_data = data[1]
states = np.asarray([e[0] for e in episode_data]) * 256
states = states.astype('int8')
actions = np.asarray([e[1] for e in episode_data])
actions = np.eye(self.action_dim)[actions.reshape(-1)]
self.data_index = (self.data_index + 1) % self.expert_data_len
return states, actions
|
{"hexsha": "cc017dfc8ec016634002286fc1dad5f251869203", "size": 4760, "ext": "py", "lang": "Python", "max_stars_repo_path": "built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_PPO_TensorFlow/rl/xt/algorithm/gail/gail.py", "max_stars_repo_name": "Huawei-Ascend/modelzoo", "max_stars_repo_head_hexsha": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_PPO_TensorFlow/rl/xt/algorithm/gail/gail.py", "max_issues_repo_name": "Huawei-Ascend/modelzoo", "max_issues_repo_head_hexsha": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-31T20:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:50:46.000Z", "max_forks_repo_path": "built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_PPO_TensorFlow/rl/xt/algorithm/gail/gail.py", "max_forks_repo_name": "Huawei-Ascend/modelzoo", "max_forks_repo_head_hexsha": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3913043478, "max_line_length": 89, "alphanum_fraction": 0.6710084034, "include": true, "reason": "import numpy", "num_tokens": 1074}
|
from scipy import ndimage
sobel_mode = "reflect"
def grad_x(image):
return ndimage.sobel(image, axis=1, mode=sobel_mode)
def grad_y(image):
return ndimage.sobel(image, axis=0, mode=sobel_mode)
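# Minimal usage sketch (illustrative; `image` is any 2-D float array):
#
#   import numpy as np
#   image = np.random.rand(64, 64)
#   gx, gy = grad_x(image), grad_y(image)  # horizontal / vertical Sobel responses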
|
{"hexsha": "089adb04c337526b41ddbc78fc968b5daedb0f01", "size": 207, "ext": "py", "lang": "Python", "max_stars_repo_path": "tadataka/gradient.py", "max_stars_repo_name": "IshitaTakeshi/Tadataka", "max_stars_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2019-11-15T16:30:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T15:18:54.000Z", "max_issues_repo_path": "tadataka/gradient.py", "max_issues_repo_name": "IshitaTakeshi/Tadataka", "max_issues_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-02-28T08:28:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-07T04:47:12.000Z", "max_forks_repo_path": "tadataka/gradient.py", "max_forks_repo_name": "IshitaTakeshi/Tadataka", "max_forks_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-26T13:59:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-26T13:59:40.000Z", "avg_line_length": 15.9230769231, "max_line_length": 56, "alphanum_fraction": 0.729468599, "include": true, "reason": "from scipy", "num_tokens": 57}
|
import sys
from typing import TYPE_CHECKING, Any, List, Sequence, Tuple, Union, overload
# %% Taken from https://github.com/numpy/numpy/tree/master/numpy/typing
from numpy import dtype, ndarray
if sys.version_info >= (3, 8):
from typing import Protocol, TypedDict
HAVE_PROTOCOL = True
else:
try:
from typing_extensions import Protocol, TypedDict
except ImportError:
HAVE_PROTOCOL = False
else:
HAVE_PROTOCOL = True
_Shape = Tuple[int, ...]
# Anything that can be coerced to a shape tuple
_ShapeLike = Union[int, Sequence[int]]
_DtypeLikeNested = Any # TODO: wait for support for recursive types
if TYPE_CHECKING or HAVE_PROTOCOL:
# Mandatory keys
class _DtypeDictBase(TypedDict):
names: Sequence[str]
formats: Sequence[_DtypeLikeNested]
# Mandatory + optional keys
class _DtypeDict(_DtypeDictBase, total=False):
offsets: Sequence[int]
# Only `str` elements are usable as indexing aliases, but all objects are legal
titles: Sequence[Any]
itemsize: int
aligned: bool
# A protocol for anything with the dtype attribute
class _SupportsDtype(Protocol):
dtype: _DtypeLikeNested
else:
_DtypeDict = Any
_SupportsDtype = Any
DtypeLike = Union[
dtype, None, type, _SupportsDtype, str, Tuple[_DtypeLikeNested, int],
Tuple[_DtypeLikeNested, _ShapeLike], List[Any], _DtypeDict,
Tuple[_DtypeLikeNested, _DtypeLikeNested],
]
if TYPE_CHECKING or HAVE_PROTOCOL:
class _SupportsArray(Protocol):
@overload
def __array__(self, __dtype: DtypeLike = ...) -> ndarray: ...
@overload
def __array__(self, dtype: DtypeLike = ...) -> ndarray: ...
else:
_SupportsArray = Any
ArrayLike = Union[bool, int, float, complex, _SupportsArray, Sequence]
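# Minimal illustration (not part of the original module): each of these values
# satisfies DtypeLike and is accepted by numpy's dtype constructor, e.g.
#
#   dtype('float32'), dtype(int), dtype(('i4', 2)), dtype([('a', 'f8')])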
# %%
|
{"hexsha": "122e289c12a62ea665754fbb98de0fe3fee51ee6", "size": 1831, "ext": "py", "lang": "Python", "max_stars_repo_path": "5-assignment/mytypes.py", "max_stars_repo_name": "eirik-ff/TTK4250-Sensor-fusion", "max_stars_repo_head_hexsha": "9bba4d641c5b9bb17fa943b330b502c220b58c3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "5-assignment/mytypes.py", "max_issues_repo_name": "eirik-ff/TTK4250-Sensor-fusion", "max_issues_repo_head_hexsha": "9bba4d641c5b9bb17fa943b330b502c220b58c3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5-assignment/mytypes.py", "max_forks_repo_name": "eirik-ff/TTK4250-Sensor-fusion", "max_forks_repo_head_hexsha": "9bba4d641c5b9bb17fa943b330b502c220b58c3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9264705882, "max_line_length": 87, "alphanum_fraction": 0.6881485527, "include": true, "reason": "from numpy", "num_tokens": 467}
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import casadi as c
import numpy
import unittest
from types import *
from helpers import *
from time import time
import sys
from scipy import linalg, std, mean
from scipy.stats import t
student = t
class ComplexityTests(casadiTestCase):
maxt = 0.4 # [s] Aim for execution times up to maxt
mint = 0.04 # [s] Only trust execution times larger than mint
rejectat = 0.01 # [-] Reject the null hypothesis if p-score is smaller than rejectat
testorders = [0,1,2,3] # Test these orders
debug = False
check = True # Don't benchmark, just check for code errors
def checkOrders(self,Ns,ts):
"""
Test the hypothesis that the slope of Ns is order.
An order -N means that the order is between (N-1) and N
"""
# Test statistic http://stattrek.com/AP-Statistics-4/Test-Slope.aspx?Tutorial=Stat
# http://mathworld.wolfram.com/LeastSquaresFitting.html
# http://en.wikipedia.org/wiki/Student%27s_t-test#Slope_of_a_regression_line
orders = self.testorders
rejectat = self.rejectat
Ns = log(Ns)
ts = log(ts)
m = Ns.shape[0]
sigmaN = std(Ns)
sigmaT = std(ts)
# covariance
cov = mean(ts*Ns)-mean(ts)*mean(Ns)
# correlation coefficient
rho = (mean(ts*Ns)-mean(ts)*mean(Ns))/sigmaN/sigmaT
# sqrt(Variance) estimate
s = m/sqrt(m-2)*sqrt(sigmaT**2 - cov**2/sigmaN**2)
# Estimate for b
b = sigmaT/sigmaN * rho
a = mean(ts) - b * mean(Ns)
# Standard error on estimate for b
sigmab = s/sqrt(m*sigmaN**2)
# Standard deviation
sigmaa = s * sqrt(1/m + mean(ts)**2/(m*sigmaT**2))
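        # In short: we fit log(t) = a + b*log(N); under the null hypothesis that
        # the true slope equals `order`, the statistic |b - order|/sigmab follows
        # a Student t distribution, so any order with p >= rejectat is retained.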
conf = 0.05
results = []
for order in orders:
# Null hypothesis: the slope of the regression is equal to order
# test statistic
t = abs(b-order)/sigmab
p = (1-student.cdf(t,m))
if self.debug:
print("If the order were really %d, then the measurements have a p-score of %.8f" % (order,p))
if p >= rejectat:
results.append(order)
if len(results)==1:
order = results[0]
a = mean(ts - order*Ns)
sigmaa = std(ts - order*Ns)/sqrt(m)
print("O(f) = %.3e N^%d [s] | 95%% confidence: [%.3e , %.3e] N^%d [s]" % (exp(a),order,exp(student.ppf(conf, m, loc=a, scale=sigmaa)),exp(student.ppf(1-conf, m, loc=a, scale=sigmaa)),order))
else:
print("raw fit O(f) = %.3e N^(%.3f) [s]" % (exp(a),b))
for i, order in zip(list(range(len(orders)))[1:],orders[1:]):
if b < order and b > order-1 and not(order in results) and not(order -1 in results):
results.append(-order)
return results
def complexity(self,setupfun,fun, order, depth = 0):
N = 1
Ns = []
ts = []
dt = 0
while dt<self.maxt:
sys.stdout.write("\r%d..." % N)
sys.stdout.flush()
p = setupfun(self,N) # Setup
for i in range(10):
t = time()
fun(self,N,p) # Run the function
dt = time()-t
Ns.append(N)
ts.append(dt)
N=int(N*1.5)
N+=1
if self.check: break
print("")
Ns = array(Ns)
ts = array(ts)
valid = ts > self.mint
if not(self.check):
orders = self.checkOrders(Ns[valid],ts[valid])
if not(self.check) and (len(orders)!=1 or orders[0]!=order):
if (depth<3):
return self.complexity(setupfun,fun, order, depth+1 )
else:
self.assertTrue(False,"We expected order %d, but found %s" % (order,str(orders)))
def test_DMadd(self):
self.message("DM add column vectors")
def setupfun(self,N):
return {'A': DM(N,1,0), 'B': DM(N,1,0)}
def fun(self,N,setup):
setup['A'] + setup['B']
self.complexity(setupfun,fun, 1)
self.message("DM add rows vectors")
def setupfun(self,N):
return {'A': DM(1,N,0), 'B': DM(1,N,0)}
self.complexity(setupfun,fun, 1)
def test_SX_funadd(self):
return
self.message("SX add column vectors")
def setupfun(self,N):
A = SX.sym("A",N,1)
B = SX.sym("B",N,1)
f = Function('f', [A,B],[A+B])
return {'f':f}
def fun(self,N,setup):
setup['f'].evaluate()
self.complexity(setupfun,fun, 1)
def test_SX_funprodvec(self):
return
self.message("SX prod column vectors")
def setupfun(self,N):
A = SX.sym("A",N,1)
B = SX.sym("B",N,1)
f = Function('f', [A,B],[c.dot(A.T,B)])
return {'f':f}
def fun(self,N,setup):
setup['f'].evaluate()
self.complexity(setupfun,fun, 1)
def test_SX_funprodsparse(self):
self.message("SX prod sparse")
def setupfun(self,N):
A = SX.sym("A",Sparsity.diag(N))
A[-1,0]=SX("off") # Have one of-diagonal element
B = SX.sym("B",N,1)
f = Function('f', [A,B],[c.dot(A,B)])
return {'f':f}
def fun(self,N,setup):
setup['f'].evaluate()
self.complexity(setupfun,fun, 1)
def test_MX_funprodvec(self):
self.message("MX prod")
def setupfun(self,N):
G = MX.sym("G",N,1)
X = MX.sym("X",N,1)
f = Function('f', [G,X],[c.prod(G.T,X)])
return {'f':f}
def fun(self,N,setup):
setup['f'].evaluate()
self.complexity(setupfun,fun, 1)
def test_MX_funprodsparse(self):
self.message("MX sparse product")
def setupfun(self,N):
s = Sparsity.diag(N)
s[-1,0]=1
H = MX.sym("H",s)
X = MX.sym("X",N,1)
f = Function('f', [H,X],[c.prod(H,X)])
return {'f':f}
def fun(self,N,setup):
setup['f'].evaluate()
self.complexity(setupfun,fun, 2) # 1
self.message("MX sparse sparse product")
def setupfun(self,N):
s = Sparsity.diag(N)
s[-1,0]=1
H = MX.sym("H",s)
s = Sparsity.diag(N)
s[-1,0]=1
X = MX.sym("X",s)
f = Function('f', [H,X],[c.prod(H,X)])
return {'f':f}
self.complexity(setupfun,fun, 2) # 1
def test_DMdot(self):
self.message("DM inner dot vectors")
def setupfun(self,N):
return {'A': DM(1,N,0), 'B': DM(N,1,0)}
def fun(self,N,setup):
c.dot(setup['A'],setup['B'])
self.complexity(setupfun,fun, 1)
self.message("DM outer dot vectors")
def setupfun(self,N):
return {'A': DM(N,1,0), 'B': DM(1,N,0)}
def fun(self,N,setup):
c.dot(setup['A'],setup['B'])
self.complexity(setupfun,fun, 2) # strangely, the dot product is O(N^2)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "58cce3778301b1c93546d1a9559f4f1907d8d93f", "size": 7399, "ext": "py", "lang": "Python", "max_stars_repo_path": "crane_controllers/external/casadi-3.4.5/test/python/complexity.py", "max_stars_repo_name": "tingelst/crane", "max_stars_repo_head_hexsha": "e14bca2bd4e2397dce09180029223832aad9b070", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-22T08:50:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T03:04:18.000Z", "max_issues_repo_path": "crane_controllers/external/casadi-3.4.5/test/python/complexity.py", "max_issues_repo_name": "tingelst/crane", "max_issues_repo_head_hexsha": "e14bca2bd4e2397dce09180029223832aad9b070", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crane_controllers/external/casadi-3.4.5/test/python/complexity.py", "max_forks_repo_name": "tingelst/crane", "max_forks_repo_head_hexsha": "e14bca2bd4e2397dce09180029223832aad9b070", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-14T04:28:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T05:29:01.000Z", "avg_line_length": 27.7116104869, "max_line_length": 201, "alphanum_fraction": 0.5930531153, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2291}
|
[STATEMENT]
lemma tt_in_keys:
assumes "p \<noteq> 0"
shows "tt p \<in> keys p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tt p \<in> keys p
[PROOF STEP]
unfolding tt_alt[OF assms]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ord_term_lin.Min (keys p) \<in> keys p
[PROOF STEP]
by (rule ord_term_lin.Min_in, fact finite_keys, simp add: assms)
|
{"llama_tokens": 163, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 2}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 12:04:22 2016
@author: Tom
"""
import numpy as np
import aubio as aub
import sys
sys.path.append('../')
import math
from datagrabber import extractAndSave,extractAndSaveYoutubeData
IEMOCAP_LOCATION = "../../../../local"
YOUTUBE_LOCATION = "../../../../local/wild_dataset/10_to_20_seconds"
coefficientsCount = 12
labels = ['mfcccoeff%s' % str(i) for i in range(coefficientsCount)]
def mfcc(frame, audiofile):
    '''
    Computes the mel-frequency cepstral coefficients (MFCCs) for the frame.
    The frame is zero-padded to a length which is a power of two if this is
    not already the case. The power spectrum is then computed and placed
    into filterbanks on a mel scale. The coefficients of 12 of the banks
    are then returned.
    '''
coefficientsCount = 12
sampleRate = audiofile['sample_rate']
frame_size = audiofile['frame_size']
fftsize = pow(2, int(math.log(frame_size, 2) + 0.5)) # Round to nearest power of 2 to facilitate FFT
m = aub.mfcc(fftsize, 40, coefficientsCount, sampleRate)
#first we need to convert this frame to the power spectrum using a DFT
p = aub.pvoc(fftsize, int(frame_size))
    # the phase vocoder consumes hop-sized chunks, so zero-pad short frames to frame_size
    if len(frame) < frame_size:
        frame = np.pad(frame, (0, frame_size - len(frame)), 'constant', constant_values=0)
#compute the power spectrum
spec = p(frame.astype(np.float32))
#compute the MFCC, which returns the coefficents of each of the 12 coefficents
mfcc_out = m(spec)
return mfcc_out
# 1. Frame the signal into short frames.
# 2. For each frame calculate the periodogram estimate of the power spectrum.
# 3. Apply the mel filterbank to the power spectra, sum the energy in each filter.
# 4. Take the logarithm of all filterbank energies.
# 5. Take the DCT of the log filterbank energies.
# 6. Keep DCT coefficients 2-13, discard the rest
#http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#computing-the-mel-filterbank
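# In the aubio-based implementation above, pvoc roughly covers steps 1-2
# (framing and the power spectrum) and aub.mfcc covers steps 3-6 (mel
# filterbank, log, and DCT).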
# Extract MFCC from IEMOCAP and YouTube datasets
extractAndSave(mfcc,labels,IEMOCAP_LOCATION,2,True,True)
extractAndSaveYoutubeData(mfcc,labels,YOUTUBE_LOCATION,2,True,True)
|
{"hexsha": "ad47383e48e50709e67840cfc43551d8a1169d1f", "size": 2359, "ext": "py", "lang": "Python", "max_stars_repo_path": "EmotionCommotion/backend/featureExtractors/MFCC.py", "max_stars_repo_name": "hmajid2301/EmotionCommotion", "max_stars_repo_head_hexsha": "7f32c092e9cb461bacfa033fb1bbc9ef565ee79a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EmotionCommotion/backend/featureExtractors/MFCC.py", "max_issues_repo_name": "hmajid2301/EmotionCommotion", "max_issues_repo_head_hexsha": "7f32c092e9cb461bacfa033fb1bbc9ef565ee79a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EmotionCommotion/backend/featureExtractors/MFCC.py", "max_forks_repo_name": "hmajid2301/EmotionCommotion", "max_forks_repo_head_hexsha": "7f32c092e9cb461bacfa033fb1bbc9ef565ee79a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6911764706, "max_line_length": 142, "alphanum_fraction": 0.7270029674, "include": true, "reason": "import numpy", "num_tokens": 641}
|
# Benchmark between tinyscaler, OpenCV, Pillow, and skImage using bilinear filtering
import numpy as np
import tinyscaler
import cv2
import time
from PIL import Image
from skimage.transform import resize
# Disable multithreading and GPU support for OpenCV for a single-threaded CPU comparison
cv2.setNumThreads(1)
cv2.ocl.setUseOpenCL(False)
# Number of scales to perform
numScales = 100
# Loading this image: https://github.com/Cykooz/fast_image_resize/blob/main/data/nasa-4928x3279.png
img8 = cv2.cvtColor(cv2.imread("nasa-4928x3279.png"), cv2.COLOR_BGR2RGBA)
img = (img8 / 255.0).astype(np.float32) # Preferred format
targetSize = (852, 567)
dst = np.empty((targetSize[1], targetSize[0], 4), dtype=np.float32)
start = time.perf_counter()
for t in range(numScales):
tinyscaler.scale(img, targetSize, mode='bilinear', dst=dst)
end = time.perf_counter()
print("Time elapsed for tinyscaler: " + str(end - start))
# Save the result from tinyscaler for viewing
cv2.imwrite("result.png", cv2.cvtColor((dst * 255.0).astype(np.uint8), cv2.COLOR_RGBA2BGR))
start = time.perf_counter()
for t in range(numScales):
cv2.resize(img, targetSize, dst=dst, interpolation=cv2.INTER_LINEAR)
end = time.perf_counter()
cv2.imwrite("result_cv.png", cv2.cvtColor((dst * 255.0).astype(np.uint8), cv2.COLOR_RGBA2BGR))
print("Time elapsed for OpenCV: " + str(end - start))
pimg = Image.fromarray(img8)
start = time.perf_counter()
for t in range(numScales):
pimg.resize(targetSize, Image.Resampling.BILINEAR)
end = time.perf_counter()
print("Time elapsed for Pillow: " + str(end - start))
start = time.perf_counter()
for t in range(numScales):
    resize(img, (targetSize[1], targetSize[0]))  # skimage expects (rows, cols)
end = time.perf_counter()
print("Time elapsed for skimage: " + str(end - start))
|
{"hexsha": "07c46946710ebe8c7527405d84e57b806477e353", "size": 1765, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/benchmark_bilinear.py", "max_stars_repo_name": "Farama-Foundation/tinyscaler", "max_stars_repo_head_hexsha": "5a90c027a6e6a4a86c8315efcadc1a8fa4588fa9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/benchmark_bilinear.py", "max_issues_repo_name": "Farama-Foundation/tinyscaler", "max_issues_repo_head_hexsha": "5a90c027a6e6a4a86c8315efcadc1a8fa4588fa9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/benchmark_bilinear.py", "max_forks_repo_name": "Farama-Foundation/tinyscaler", "max_forks_repo_head_hexsha": "5a90c027a6e6a4a86c8315efcadc1a8fa4588fa9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3432835821, "max_line_length": 99, "alphanum_fraction": 0.7422096317, "include": true, "reason": "import numpy", "num_tokens": 496}
|
import LeanCodePrompts.Translate
import LeanCodePrompts.Utils
open Lean Meta Elab
def translateWithDataM (s: String)(numSim : Nat:= 10)(numKW: Nat := 1)(includeFixed: Bool := Bool.false)(queryNum: Nat := 5)(temp : JsonNumber := ⟨2, 1⟩)(scoreBound: Float := 0.2)(matchBound: Nat := 15) :
TermElabM ((Option (Expr × (Array String) )) × Array String) := do
let js ←
getCodeJson s numSim numKW includeFixed queryNum temp scoreBound matchBound
let output ← GPT.jsonToExprStrArray js
let output := output.toList.eraseDups.toArray
let res ← arrayToExpr? output
return (res, output)
def translateWithDataCore (s: String)(numSim : Nat:= 10)(numKW: Nat := 1)(includeFixed: Bool := Bool.false)(queryNum: Nat := 5)(temp : JsonNumber := ⟨2, 1⟩)(scoreBound: Float := 0.2)(matchBound: Nat := 15) :
CoreM ((Option (Expr × (Array String) )) × Array String) :=
(translateWithDataM s
numSim numKW includeFixed
queryNum temp scoreBound matchBound).run'.run'
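-- A minimal usage sketch (hypothetical statement string; assumes the OpenAI
-- configuration used elsewhere in LeanCodePrompts is in place):
-- #eval translateWithDataCore "Every prime number greater than two is odd."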
def checkTranslatedThmsM(type: String := "thm")(numSim : Nat:= 10)(numKW: Nat := 1)(includeFixed: Bool := Bool.false)(queryNum: Nat := 5)(temp : JsonNumber := ⟨2, 1⟩) : TermElabM Json := do
elabLog s!"Writing to file: {type}-elab-{numSim}-{numKW}-{includeFixed}-{queryNum}-{temp.mantissa}.json"
let promptsFile ← reroutePath <| System.mkFilePath ["data",
s!"prompts-{type}-{numSim}-{numKW}-{includeFixed}-{queryNum}-{temp.mantissa}.jsonl"]
let h ← IO.FS.Handle.mk promptsFile IO.FS.Mode.append Bool.false
let file ← reroutePath <| System.mkFilePath [s!"data/{type}-prompts.txt"]
let prompts ← IO.FS.lines file
let prompts :=
prompts.map <| fun s => s.replace "<br>" "\n"
let mut count := 0
let mut elaborated := 0
let mut elabPairs: Array (String × String × (Array String)) := #[]
let mut failed : Array String := #[]
for prompt in prompts do
trace[Translate.info] m!"{prompt}"
IO.println ""
IO.println prompt
let (res?, outputs) ←
translateWithDataM prompt
numSim numKW includeFixed queryNum temp
let fullPrompt := (← logs 1).head!
let js := Json.mkObj [("text", Json.str prompt), ("fullPrompt", Json.str fullPrompt)]
h.putStrLn <| js.pretty 10000
count := count + 1
match res? with
| some (e, thms) =>
elabLog "success"
let v ← e.view
elabLog s!"theorem {v}"
IO.println s!"theorem {v}"
elaborated := elaborated + 1
elabPairs := elabPairs.push (prompt, v, thms)
| none =>
elabLog "failed to elaborate"
IO.println "failed to elaborate"
failed := failed.push prompt
elabLog s!"outputs: {outputs}"
elabLog s!"total : {count}"
elabLog s!"elaborated: {elaborated}"
IO.println s!"total : {count}"
IO.println s!"elaborated: {elaborated}"
    IO.sleep 20000 -- pause between prompts, presumably to stay within API rate limits
let js :=
Json.mkObj
[("total-prompts", count),
("elaborated", elaborated),
("number-similar-sentences", numSim),
("number-keyword-sentences", numKW),
("include-fixed", includeFixed),
("query-number", queryNum),
("temperature", Json.num temp),
("elaborated-prompts",
Json.arr <| ← elabPairs.mapM <|
fun (p, s, thms) => do
return Json.mkObj [
("prompt", p), ("theorem", s),
("all-elabs", Json.arr <| thms.map (Json.str)),
("comments", ""), ("correct", Json.null),
("some-correct", Json.null)
]),
("failures", Json.arr <| failed.map (Json.str))
]
return js
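/- Editor's note, not part of the original file: the JSON report built above
has the shape

  { "total-prompts": _, "elaborated": _,
    "number-similar-sentences": _, "number-keyword-sentences": _,
    "include-fixed": _, "query-number": _, "temperature": _,
    "elaborated-prompts":
      [ { "prompt": _, "theorem": _, "all-elabs": [_],
          "comments": "", "correct": null, "some-correct": null }, ... ],
    "failures": [_] }
-/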
/-- `CoreM` variant of `checkTranslatedThmsM`. -/
def checkTranslatedThmsCore (type : String := "thm") (numSim : Nat := 10)
    (numKW : Nat := 1) (includeFixed : Bool := Bool.false) (queryNum : Nat := 5)
    (temp : JsonNumber := ⟨2, 1⟩) : CoreM Json :=
(checkTranslatedThmsM type
numSim numKW includeFixed queryNum temp).run'.run'
/-- Read the parsed theorem prompts from `data/parsed_thms.txt`. -/
def parsedThmsPrompt : IO (Array String) := do
let file ← reroutePath <| System.mkFilePath ["data/parsed_thms.txt"]
IO.FS.lines file
/-- Split the parsed theorems into those that elaborate and those that do
not, appending successes to `data/elab_thms.txt`. -/
def elabThmSplit (start? size? : Option Nat := none) : TermElabM ((Array String) × (Array String)) := do
let deps ← parsedThmsPrompt
let deps := deps.toList.drop (start?.getD 0)
let deps := deps.take (size?.getD (deps.length))
let deps := deps.toArray
let mut succ: Array String := Array.empty
let mut fail: Array String := Array.empty
let mut count := start?.getD 0
let succFile ← reroutePath <| System.mkFilePath ["data/elab_thms.txt"]
let h ← IO.FS.Handle.mk succFile IO.FS.Mode.append Bool.false
IO.println s!"total: {deps.size}"
for thm in deps do
IO.println s!"parsing theorem {thm}"
let chk ← hasElab thm (some 25)
count := count + 1
if chk then
succ := succ.push thm
h.putStrLn thm
else
fail := fail.push thm
IO.println s!"parsed: {count}"
IO.println s!"elaborated: {succ.size}"
return (succ, fail)
def elabThmSplitCore (start? size? : Option Nat := none) : CoreM ((Array String) × (Array String)) :=
(elabThmSplit start? size?).run'.run'
/-- Given a JSON string of GPT completions, try to elaborate some candidate
and return a pretty-printed JSON report of the outcome. -/
def outputFromCompletionsM (s : String) :
    TermElabM String := do
let output ← jsonStringToExprStrArray s
let output := output ++ (output.map (fun s => ": " ++ s))
let output := output.toList.eraseDups.toArray
-- IO.println s!"output: {output}"
let res? ← arrayToExpr? output
let js : Json ← match res? with
| some (thm, elabs) => do
let thm ← thm.view
pure <| Json.mkObj [("success", Bool.true), ("theorem", thm),
("all-elabs", Json.arr <| elabs.map (Json.str))]
| none => pure <| Json.mkObj [("success", Bool.false)]
return js.pretty 10000
def outputFromCompletionsCore (s : String) : CoreM String :=
(outputFromCompletionsM s).run'.run'
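/- Editor's sketch, not part of the original file: a hypothetical end-to-end
check from `CoreM`, assuming `completionsJson` holds the raw completions
returned for a single prompt:

  def reportCompletions (completionsJson : String) : CoreM Unit := do
    let report ← outputFromCompletionsCore completionsJson
    IO.println report
-/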
|
{"author": "siddhartha-gadgil", "repo": "LeanAide", "sha": "7862af73ee2f0be08b20fd3e4148e20bf4a81054", "save_path": "github-repos/lean/siddhartha-gadgil-LeanAide", "path": "github-repos/lean/siddhartha-gadgil-LeanAide/LeanAide-7862af73ee2f0be08b20fd3e4148e20bf4a81054/LeanCodePrompts/BatchTranslate.lean"}
|
20081004 09:19:50 Welcome to the Wiki. Please read Welcome to the Wiki/Business Owner; it will help explain how you can make the wiki a positive experience for you without clashing with established wiki social norms. For instance, continually referring to your business on pages about your competitors is pretty poor form. Users/JasonAller
20081009 22:54:10 I just had a bad experience at the cable car wash, and I am just trying to let people who had the same experience I did know to try another location.
I do not know why their page is not on the wiki; they should add themselves to this page. I know from talking to the owner that they
already have a web page. I do not remember the name; if I knew it, I would tell you as well. I am sorry you did not
like my comments, but I was letting people know there is another car wash in town now. Users/sue
|
{"hexsha": "6ae14bae21041ad04ea2e0a273c9596b2d878e94", "size": 946, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/sue.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/sue.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/sue.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 94.6, "max_line_length": 345, "alphanum_fraction": 0.789640592, "num_tokens": 219}
|
[STATEMENT]
lemma round_add_inv [rule_format]:
"index_less index key \<longrightarrow> bn_inv p q t \<longrightarrow> add_inv n t \<longrightarrow>
add_inv n (round index key p q r t)"
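(* Editor's note, not part of the original theory: as the subgoals below make
   explicit, add_inv n for a triple (u, ns, xs) amounts to
   "foldl (+) 0 ns = n and length xs = n", i.e. the bucket sizes sum to n and
   the objects' list has length n; the lemma states that one round of the
   sorting algorithm preserves this invariant. *)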
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. index_less index key \<longrightarrow> bn_inv p q t \<longrightarrow> add_inv n t \<longrightarrow> add_inv n (round index key p q r t)
[PROOF STEP]
using [[simproc del: defined_all]]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. index_less index key \<longrightarrow> bn_inv p q t \<longrightarrow> add_inv n t \<longrightarrow> add_inv n (round index key p q r t)
[PROOF STEP]
proof (induction index key p q r t arbitrary: n rule: round.induct, simp_all
add: Let_def split: prod.split, (rule allI)+, (rule impI)+, erule conjE,
(rule_tac [2] allI)+, (rule_tac [2] impI)+, (erule_tac [2] conjE)+,
rule_tac [2] ssubst [OF add_base_zero], simp_all add: add_suc)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
fix n ns ns' and xs' :: "'a list"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
assume "\<And>n'. foldl (+) 0 ns = n' \<and> n - Suc 0 = n' \<longrightarrow>
foldl (+) 0 ns' = n' \<and> length xs' = n'"
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns = ?n' \<and> n - Suc 0 = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
hence "foldl (+) 0 ns = n - Suc 0 \<longrightarrow>
foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0"
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ns = ?n' \<and> n - Suc 0 = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
goal (1 subgoal):
1. foldl (+) 0 ns = n - Suc 0 \<longrightarrow> foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns = n - Suc 0 \<longrightarrow> foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns = n - Suc 0 \<longrightarrow> foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
assume "Suc (foldl (+) 0 ns) = n"
[PROOF STATE]
proof (state)
this:
Suc (foldl (+) 0 ns) = n
goal (2 subgoals):
1. \<And>index key p q r u ns xs n x1 a b. \<lbrakk>\<And>na. foldl (+) 0 ns = na \<and> n - Suc 0 = na \<longrightarrow> foldl (+) 0 a = na \<and> length b = na; round index key p q r (u, ns, tl xs) = (x1, a, b); index_less index key; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (foldl (+) 0 ns) = n; length xs = n\<rbrakk> \<Longrightarrow> Suc (foldl (+) 0 a) = n \<and> Suc (length b) = n
2. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
foldl (+) 0 ns = n - Suc 0 \<longrightarrow> foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0
Suc (foldl (+) 0 ns) = n
[PROOF STEP]
show "Suc (foldl (+) 0 ns') = n \<and> Suc (length xs') = n"
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ns = n - Suc 0 \<longrightarrow> foldl (+) 0 ns' = n - Suc 0 \<and> length xs' = n - Suc 0
Suc (foldl (+) 0 ns) = n
goal (1 subgoal):
1. Suc (foldl (+) 0 ns') = n \<and> Suc (length xs') = n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc (foldl (+) 0 ns') = n \<and> Suc (length xs') = n
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
fix index p q r u m m' ns v ms' ws' ns' n
and key :: "'a \<Rightarrow> 'b" and xs :: "'a list" and xs' :: "'a list" and r' :: nat
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
let ?ws = "take (Suc (Suc m)) xs"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
assume
A: "round_suc_suc index key ?ws m m' u = (v, ms', ws')" and
B: "bn_comp m p q r = (m', r')" and
C: "index_less index key" and
D: "bn_valid m p q" and
E: "length xs = n"
[PROOF STATE]
proof (state)
this:
round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')
bn_comp m p q r = (m', r')
index_less index key
bn_valid m p q
length xs = n
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
assume "\<And>ws a b c d e f g h n'.
ws = ?ws \<Longrightarrow> a = (m', r') \<Longrightarrow> b = m' \<and> c = r' \<Longrightarrow>
d = (v, ms', ws') \<Longrightarrow> e = v \<and> f = (ms', ws') \<Longrightarrow> g = ms' \<and> h = ws' \<Longrightarrow>
foldl (+) 0 ns = n' \<and> n - Suc (Suc m) = n' \<longrightarrow>
foldl (+) 0 ns' = n' \<and> length xs' = n'"
[PROOF STATE]
proof (state)
this:
\<lbrakk>?ws = take (Suc (Suc m)) xs; ?a = (m', r'); ?b = m' \<and> ?c = r'; ?d = (v, ms', ws'); ?e = v \<and> ?f = (ms', ws'); ?g = ms' \<and> ?h = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = ?n' \<and> n - Suc (Suc m) = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<lbrakk>?ws = take (Suc (Suc m)) xs; ?a = (m', r'); ?b = m' \<and> ?c = r'; ?d = (v, ms', ws'); ?e = v \<and> ?f = (ms', ws'); ?g = ms' \<and> ?h = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = ?n' \<and> n - Suc (Suc m) = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
assume "Suc (Suc (foldl (+) m ns)) = n"
[PROOF STATE]
proof (state)
this:
Suc (Suc (foldl (+) m ns)) = n
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
hence F: "foldl (+) 0 ns + Suc (Suc m) = n"
[PROOF STATE]
proof (prove)
using this:
Suc (Suc (foldl (+) m ns)) = n
goal (1 subgoal):
1. foldl (+) 0 ns + Suc (Suc m) = n
[PROOF STEP]
by (subst (asm) add_base_zero, simp)
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns + Suc (Suc m) = n
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?ws = take (Suc (Suc m)) xs; ?a = (m', r'); ?b = m' \<and> ?c = r'; ?d = (v, ms', ws'); ?e = v \<and> ?f = (ms', ws'); ?g = ms' \<and> ?h = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = ?n' \<and> n - Suc (Suc m) = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
foldl (+) 0 ns + Suc (Suc m) = n
[PROOF STEP]
have
G: "foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?ws = take (Suc (Suc m)) xs; ?a = (m', r'); ?b = m' \<and> ?c = r'; ?d = (v, ms', ws'); ?e = v \<and> ?f = (ms', ws'); ?g = ms' \<and> ?h = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = ?n' \<and> n - Suc (Suc m) = ?n' \<longrightarrow> foldl (+) 0 ns' = ?n' \<and> length xs' = ?n'
foldl (+) 0 ns + Suc (Suc m) = n
goal (1 subgoal):
1. foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
goal (1 subgoal):
1. \<And>index key p q r u n ns xs na x1 x2 x1a a b x1b aa ba. \<lbrakk>\<And>x xa xb y xaa xc ya xd yb nb. \<lbrakk>x = take (Suc (Suc n)) xs; xa = (x1, x2); xb = x1 \<and> y = x2; xaa = (x1a, a, b); xc = x1a \<and> ya = (a, b); xd = a \<and> yb = b\<rbrakk> \<Longrightarrow> foldl (+) 0 ns = nb \<and> na - Suc (Suc n) = nb \<longrightarrow> foldl (+) 0 aa = nb \<and> length ba = nb; round index key p q x2 (x1a, ns, drop (Suc (Suc n)) xs) = (x1b, aa, ba); round_suc_suc index key (take (Suc (Suc n)) xs) n x1 u = (x1a, a, b); bn_comp n p q r = (x1, x2); index_less index key; bn_valid n p q; \<forall>x\<in>set ns. case x of 0 \<Rightarrow> True | Suc 0 \<Rightarrow> True | Suc (Suc m) \<Rightarrow> bn_valid m p q; Suc (Suc (foldl (+) n ns)) = na; length xs = na\<rbrakk> \<Longrightarrow> foldl (+) 0 aa + foldl (+) 0 a = na \<and> length b + length ba = na
[PROOF STEP]
from A
[PROOF STATE]
proof (chain)
picking this:
round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')
[PROOF STEP]
show "foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and>
length ws' + length xs' = n"
[PROOF STATE]
proof (prove)
using this:
round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')
goal (1 subgoal):
1. foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
proof (subst (2) add_base_zero, simp add: round_suc_suc_def Let_def split:
if_split_asm, (erule_tac [!] conjE)+, simp_all)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
assume "Suc 0 # Suc 0 # replicate m (Suc 0) = ms'"
[PROOF STATE]
proof (state)
this:
Suc 0 # Suc 0 # replicate m (Suc 0) = ms'
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "ms' = Suc 0 # Suc 0 # replicate m (Suc 0)"
[PROOF STATE]
proof (prove)
using this:
Suc 0 # Suc 0 # replicate m (Suc 0) = ms'
goal (1 subgoal):
1. ms' = Suc 0 # Suc 0 # replicate m (Suc 0)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
ms' = Suc 0 # Suc 0 # replicate m (Suc 0)
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "foldl (+) 0 ms' = Suc (Suc m)"
[PROOF STATE]
proof (prove)
using this:
ms' = Suc 0 # Suc 0 # replicate m (Suc 0)
goal (1 subgoal):
1. foldl (+) 0 ms' = Suc (Suc m)
[PROOF STEP]
by (simp add: add_replicate)
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ms' = Suc (Suc m)
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "foldl (+) 0 ns' + foldl (+) 0 ms' = n"
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ms' = Suc (Suc m)
goal (1 subgoal):
1. foldl (+) 0 ns' + foldl (+) 0 ms' = n
[PROOF STEP]
using F and G
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ms' = Suc (Suc m)
foldl (+) 0 ns + Suc (Suc m) = n
foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
goal (1 subgoal):
1. foldl (+) 0 ns' + foldl (+) 0 ms' = n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
assume "?ws = ws'"
[PROOF STATE]
proof (state)
this:
take (Suc (Suc m)) xs = ws'
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "ws' = ?ws"
[PROOF STATE]
proof (prove)
using this:
take (Suc (Suc m)) xs = ws'
goal (1 subgoal):
1. ws' = take (Suc (Suc m)) xs
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
ws' = take (Suc (Suc m)) xs
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "length ws' = Suc (Suc m)"
[PROOF STATE]
proof (prove)
using this:
ws' = take (Suc (Suc m)) xs
goal (1 subgoal):
1. length ws' = Suc (Suc m)
[PROOF STEP]
using F and E
[PROOF STATE]
proof (prove)
using this:
ws' = take (Suc (Suc m)) xs
foldl (+) 0 ns + Suc (Suc m) = n
length xs = n
goal (1 subgoal):
1. length ws' = Suc (Suc m)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length ws' = Suc (Suc m)
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
hence "length ws' + length xs' = n"
[PROOF STATE]
proof (prove)
using this:
length ws' = Suc (Suc m)
goal (1 subgoal):
1. length ws' + length xs' = n
[PROOF STEP]
using F and G
[PROOF STATE]
proof (prove)
using this:
length ws' = Suc (Suc m)
foldl (+) 0 ns + Suc (Suc m) = n
foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
goal (1 subgoal):
1. length ws' + length xs' = n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length ws' + length xs' = n
goal (2 subgoals):
1. \<lbrakk>key (ws' ! mini ws' key) = key (ws' ! maxi ws' key); round_suc_suc index key ws' m m' u = (v, ms', ws'); u + m' - m = v; Suc 0 # Suc 0 # replicate m (Suc 0) = ms'; take (Suc (Suc m)) xs = ws'\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
2. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n
length ws' + length xs' = n
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n
length ws' + length xs' = n
goal (1 subgoal):
1. foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?nmi = "mini ?ws key"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?nma = "maxi ?ws key"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?xmi = "?ws ! ?nmi"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?xma = "?ws ! ?nma"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?mi = "key ?xmi"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?ma = "key ?xma"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?k = "case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m'"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?zs = "nths ?ws (- {?nmi, ?nma})"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
let ?ms = "enum ?zs index key ?k ?mi ?ma"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
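Note: the run of "let" steps above only abbreviates the recurring subterms; the goal itself is unchanged, which is why each echoed state is identical. Writing ?ws for take (Suc (Suc m)) xs, the abbreviations read, as an informal sketch (the glosses on mini/maxi are inferred from their use here, not from their definitions):

  ?nmi = mini ?ws key                        (* position of a minimal-key element *)
  ?nma = maxi ?ws key                        (* position of a maximal-key element *)
  ?xmi = ?ws ! ?nmi      ?xma = ?ws ! ?nma   (* the two extremal elements *)
  ?mi  = key ?xmi        ?ma  = key ?xma     (* their keys *)
  ?k   = case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m'
  ?zs  = nths ?ws (- {?nmi, ?nma})           (* ?ws with both extremal positions removed *)
  ?ms  = enum ?zs index key ?k ?mi ?ma       (* bucket sizes for the middle section *)

Under these names the two equational assumptions take the compact forms Suc 0 # ?ms @ [Suc 0] = ms' and ?xmi # map the (fill ?zs (offs ?ms 0) index key m ?mi ?ma) @ [?xma] = ws'.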
[PROOF STEP]
assume "Suc 0 # ?ms @ [Suc 0] = ms'"
[PROOF STATE]
proof (state)
this:
Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
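Note: once ms' = Suc 0 # ?ms @ [Suc 0] is available, the bucket-size sum needed later follows by elementary foldl arithmetic, sketched as

  foldl (+) 0 ms' = Suc 0 + foldl (+) 0 ?ms + Suc 0 = Suc (Suc (foldl (+) 0 ?ms))

which is exactly the Suc (Suc (... + foldl (+) 0 ?ms)) shape visible in the subgoals produced further below.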
[PROOF STEP]
hence "ms' = Suc 0 # ?ms @ [Suc 0]"
[PROOF STATE]
proof (prove)
using this:
Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'
goal (1 subgoal):
1. ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
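Note: the terminal ".." is Isar's standard single-rule step; from the chained fact "... = ms'" it proves the reoriented goal "ms' = ..." by resolving with symmetry of equality (sym). The same idiom is used once more below to reorient the equation for ws'.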
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
assume
"?xmi # map the (fill ?zs (offs ?ms 0) index key m ?mi ?ma) @ [?xma] = ws'"
[PROOF STATE]
proof (state)
this:
take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
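Note: ws' is the minimal element, then the filled middle section, then the maximal element. Assuming fill_length states that fill returns a list whose length is its numeric argument (here m), this gives length ws' = Suc (Suc m), which is where the conjunct Suc (Suc (m + length xs')) = n in the later subgoals comes from.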
[PROOF STEP]
hence "ws' = ?xmi # map the (fill ?zs (offs ?ms 0) index key m ?mi ?ma)
@ [?xma]"
[PROOF STATE]
proof (prove)
using this:
take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'
goal (1 subgoal):
1. ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]
goal (1 subgoal):
1. \<lbrakk>key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key) \<noteq> key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key); u + m' - (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') = v; Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0] = ms'; take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key] = ws'; round_suc_suc index key (take (Suc (Suc m)) xs) m m' u = (v, ms', ws')\<rbrakk> \<Longrightarrow> foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]
ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]
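Note: moreover ... ultimately is the standard Isar idiom for accumulating facts without naming them; at this point "this" carries exactly the two reoriented equations characterizing ms' and ws', and both are chained into the show step that follows.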
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]
ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]
goal (1 subgoal):
1. foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
[PROOF STEP]
proof (simp add: fill_length, subst (2) add_base_zero, simp, cases m)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = 0\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
2. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
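Note: the compound method works in stages, sketched as follows (the reading of add_base_zero is an assumption from its name and effect): simp add: fill_length reduces length ws' to Suc (Suc m); subst (2) add_base_zero, followed by the plain simp, rewrites the second foldl (+) occurrence to a base-zero sum, exposing the Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 ?ms)) = n form; and cases m splits the result into the two subgoals above, for m = 0 and m = Suc nat.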
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
m = 0
goal (2 subgoals):
1. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = 0\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
2. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
m = 0
goal (2 subgoals):
1. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = 0\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
2. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
m = 0
[PROOF STEP]
have "length ?ms = 0"
[PROOF STATE]
proof (prove)
using this:
m = 0
goal (1 subgoal):
1. length (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = 0
[PROOF STEP]
by (simp add: enum_length)
[PROOF STATE]
proof (state)
this:
length (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = 0
goal (2 subgoals):
1. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = 0\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
2. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
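Note: assuming enum_length states that enum returns a list whose length is its numeric argument, length ?ms = ?k; and with m = 0 the case expression defining ?k evaluates to m, i.e. to 0, so the enumeration for the middle section is empty.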
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
m = 0
length (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = 0
[PROOF STEP]
show "Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 ?ms)) = n \<and>
Suc (Suc (m + length xs')) = n"
[PROOF STATE]
proof (prove)
using this:
m = 0
length (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = 0
goal (1 subgoal):
1. Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
using F and G
[PROOF STATE]
proof (prove)
using this:
m = 0
length (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = 0
foldl (+) 0 ns + Suc (Suc m) = n
foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
goal (1 subgoal):
1. Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
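Note: the closing simp is plain arithmetic. From length ?ms = 0 we get ?ms = [] and hence foldl (+) 0 ?ms = 0; F yields n = foldl (+) 0 ns + Suc (Suc m), so Suc (Suc 0) \<le> n, and G yields foldl (+) 0 ns' = n - Suc (Suc m) together with length xs' = n - Suc (Suc m). With m = 0 both conjuncts follow: Suc (Suc (foldl (+) 0 ns' + 0)) = n and Suc (Suc (0 + length xs')) = n.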
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
case Suc
[PROOF STATE]
proof (state)
this:
m = Suc nat_
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
m = Suc nat_
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
m = Suc nat_
[PROOF STEP]
have "0 < fst (bn_comp m p q r)"
[PROOF STATE]
proof (prove)
using this:
m = Suc nat_
goal (1 subgoal):
1. 0 < fst (bn_comp m p q r)
[PROOF STEP]
by (rule_tac bn_comp_fst_nonzero [OF D], simp)
[PROOF STATE]
proof (state)
this:
0 < fst (bn_comp m p q r)
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
hence "0 < m'"
[PROOF STATE]
proof (prove)
using this:
0 < fst (bn_comp m p q r)
goal (1 subgoal):
1. 0 < m'
[PROOF STEP]
using B
[PROOF STATE]
proof (prove)
using this:
0 < fst (bn_comp m p q r)
bn_comp m p q r = (m', r')
goal (1 subgoal):
1. 0 < m'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < m'
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
m = Suc nat_
0 < m'
[PROOF STEP]
have H: "0 < ?k"
[PROOF STATE]
proof (prove)
using this:
m = Suc nat_
0 < m'
goal (1 subgoal):
1. 0 < (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m')
[PROOF STEP]
by (simp split: nat.split)
[PROOF STATE]
proof (state)
this:
0 < (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m')
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
have "foldl (+) 0 ?ms = length ?zs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
[PROOF STEP]
by (rule enum_add [OF C H], simp, rule conjI,
((rule mini_lb | rule maxi_ub), erule in_set_nthsD)+)
[PROOF STATE]
proof (state)
this:
foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
have "length ?ws = Suc (Suc m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (take (Suc (Suc m)) xs) = Suc (Suc m)
[PROOF STEP]
using F and E
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 ns + Suc (Suc m) = n
length xs = n
goal (1 subgoal):
1. length (take (Suc (Suc m)) xs) = Suc (Suc m)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (take (Suc (Suc m)) xs) = Suc (Suc m)
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
hence "length ?zs = m"
[PROOF STATE]
proof (prove)
using this:
length (take (Suc (Suc m)) xs) = Suc (Suc m)
goal (1 subgoal):
1. length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) = m
[PROOF STEP]
by (simp add: mini_maxi_nths)
[PROOF STATE]
proof (state)
this:
length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) = m
goal (1 subgoal):
1. \<And>nat. \<lbrakk>ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; ms' = Suc 0 # enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key)) @ [Suc 0]; ws' = take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key # map the (fill (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) (offs (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) 0) index key m (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) @ [take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key]; m = Suc nat\<rbrakk> \<Longrightarrow> Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) = m
[PROOF STEP]
show "Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 ?ms)) = n \<and>
Suc (Suc (m + length xs')) = n"
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) = m
goal (1 subgoal):
1. Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
using F and G
[PROOF STATE]
proof (prove)
using this:
foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))) = length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key}))
length (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) = m
foldl (+) 0 ns + Suc (Suc m) = n
foldl (+) 0 ns' = n - Suc (Suc m) \<and> length xs' = n - Suc (Suc m)
goal (1 subgoal):
1. Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc (Suc (foldl (+) 0 ns' + foldl (+) 0 (enum (nths (take (Suc (Suc m)) xs) (- {mini (take (Suc (Suc m)) xs) key, maxi (take (Suc (Suc m)) xs) key})) index key (case m of 0 \<Rightarrow> m | Suc 0 \<Rightarrow> m | Suc (Suc i) \<Rightarrow> u + m') (key (take (Suc (Suc m)) xs ! mini (take (Suc (Suc m)) xs) key)) (key (take (Suc (Suc m)) xs ! maxi (take (Suc (Suc m)) xs) key))))) = n \<and> Suc (Suc (m + length xs')) = n
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
foldl (+) 0 ns' + foldl (+) 0 ms' = n \<and> length ws' + length xs' = n
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 62701, "file": "Generalized_Counting_Sort_Algorithm", "length": 103}
|
import json
import os
import numpy as np
class ModelConfig:
"""
Contains all necessary information to use the model for inference. May also include training metadata.
Model directory should contain config.json and this class can be directly initialized from a model dir with fromDir.
"""
def __init__(self, model_name, arch, model_n_out, sz, N, mean, std, meta={}):
self.config_dict = {'model_name':model_name,
'arch':arch,
'model_n_out':model_n_out,
'sz':sz,
'N':N,
'mean':list(mean.astype(str)),
'std':list(std.astype(str)),
'meta':meta}
@classmethod
def fromDir(cls, dir_path):
config_path = os.path.join(dir_path,'config.json')
with open(config_path) as json_file:
data = json.load(json_file)
model_name = data['model_name']
arch = data['arch']
model_n_out = data['model_n_out']
sz = data['sz']
N = data['N']
mean = np.array(data['mean'])
std = np.array(data['std'])
meta = data['meta']
return cls(model_name, arch, model_n_out, sz, N, mean, std, meta)
def toDir(self, dir_path):
config_path = os.path.join(dir_path,'config.json')
with open(config_path, 'w') as outfile:
json.dump(self.config_dict, outfile, indent=4)
    def getField(self, field):
        # return a top-level config value, or None if the key is absent
        if field in self.config_dict:
            return self.config_dict[field]
        return None
    def getMetaField(self, field):
        # return a value from the training metadata, or None if the key is absent
        if field in self.config_dict['meta']:
            return self.config_dict['meta'][field]
        return None
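# Minimal round-trip sketch (the values and the use of the current working
# directory are illustrative assumptions, not part of the original module;
# note that toDir expects the target directory to exist already):
if __name__ == '__main__':
    demo_cfg = ModelConfig('demo-model', 'resnet34', model_n_out=6, sz=256, N=12,
                           mean=np.array([0.485, 0.456, 0.406]),
                           std=np.array([0.229, 0.224, 0.225]),
                           meta={'epochs': 10})
    demo_cfg.toDir('.')                    # writes ./config.json
    loaded = ModelConfig.fromDir('.')      # reads the same config back
    print(loaded.getField('arch'))         # -> resnet34
    print(loaded.getMetaField('epochs'))   # -> 10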
|
{"hexsha": "b3e7dda4151d11bf0a633c9752089ca97f28d6d5", "size": 1829, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/model/model_config.py", "max_stars_repo_name": "jpjuvo/PANDA-challenge-raehmae", "max_stars_repo_head_hexsha": "5748cd23f18e2dd36d56918dcee495b822d2a5cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "training/model/model_config.py", "max_issues_repo_name": "jpjuvo/PANDA-challenge-raehmae", "max_issues_repo_head_hexsha": "5748cd23f18e2dd36d56918dcee495b822d2a5cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/model/model_config.py", "max_forks_repo_name": "jpjuvo/PANDA-challenge-raehmae", "max_forks_repo_head_hexsha": "5748cd23f18e2dd36d56918dcee495b822d2a5cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-20T04:37:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-20T04:37:47.000Z", "avg_line_length": 35.862745098, "max_line_length": 122, "alphanum_fraction": 0.5489338436, "include": true, "reason": "import numpy", "num_tokens": 392}
|
import sys
import os
import numpy as _np
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src/")
import finoptions as fo
def test_PlainVanillaPayoff():
S = 100
K = 100
t = 1 / 12
sigma = 0.4
r = 0.10
b = 0.1
dt = 1 / 360
eps = _np.genfromtxt(
"./pytest/sobol_path_test.csv", delimiter=","
    )  # load Sobol paths from R, since the Python version differs slightly in the third path
path = fo.monte_carlo_options.WienerPath(eps, sigma, dt, b)
print(path.generate_path())
payoff = fo.monte_carlo_options.PlainVanillaPayoff(
path=path, S=S, K=K, t=t, sigma=sigma, r=r, b=b
)
    # test two call options, elements 1 and 10 from fOptions,
    # using Wiener paths generated from Sobol innovations
    assert _np.allclose(
        payoff.call()[[0, 9]], [37.11255, 37.83272]
    ), "PlainVanillaPayoff not matching R's fOptions."
    # test two put options, elements 1 and 10 from fOptions,
    # using Wiener paths generated from Sobol innovations
    assert _np.allclose(
        payoff.put()[[0, 9]], [0, 0]
    ), "PlainVanillaPayoff not matching R's fOptions."
# make K bigger so that the option value is > 0
payoff = fo.monte_carlo_options.PlainVanillaPayoff(
path=path, S=S, K=140, t=t, sigma=sigma, r=r, b=b
)
assert _np.allclose(
payoff.put()[[0, 9]], [2.555497, 1.835328]
), "PlainVanillaPayoff no matching R's fOptions."
def test_ArithmeticAsianPayoff():
S = 100
K = 100
t = 1 / 12
sigma = 0.4
r = 0.10
b = 0.1
dt = 1 / 360
eps = _np.genfromtxt(
"./pytest/sobol_path_test.csv", delimiter=","
    )  # load Sobol paths from R, since the Python version differs slightly in the third path
path = fo.monte_carlo_options.WienerPath(eps, sigma, dt, b)
payoff = fo.monte_carlo_options.ArithmeticAsianPayoff(
path=path, S=S, K=K, t=t, sigma=sigma, r=r, b=b
)
# test two call options, elements 1 and 10 from fOptions
    # using Wiener paths generated from Sobol innovations
assert _np.allclose(
payoff.call()[[0, 9]], [18.19441, 20.43197]
), "ArithmeticAsianPayoff not matching R's fOptions."
# test two put options, elements 1 and 10 from fOptions
    # using Wiener paths generated from Sobol innovations
assert _np.allclose(
payoff.put()[[0, 9]], [0, 0]
), "ArithmeticAsianPayoff not matching R's fOptions."
# make K bigger so that the option value is > 0
payoff = fo.monte_carlo_options.ArithmeticAsianPayoff(
path=path, S=S, K=140, t=t, sigma=sigma, r=r, b=b
)
assert _np.allclose(
payoff.put()[[0, 9]], [21.47364, 19.23608]
), "ArithmeticAsianPayoff not matching R's fOptions."
if __name__ == "__main__":
test_PlainVanillaPayoff()
|
{"hexsha": "4e715b9c000e3dc03b5a5f359c368258f5af7e0b", "size": 2802, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytest/test_mc_payoffs.py", "max_stars_repo_name": "bbcho/finoptions-dev", "max_stars_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-09-21T05:39:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T22:27:46.000Z", "max_issues_repo_path": "pytest/test_mc_payoffs.py", "max_issues_repo_name": "bbcho/energyderivatives", "max_issues_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytest/test_mc_payoffs.py", "max_forks_repo_name": "bbcho/energyderivatives", "max_forks_repo_head_hexsha": "81365b6d93693b0b546be92448db858ccce44d5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5918367347, "max_line_length": 88, "alphanum_fraction": 0.6423982869, "include": true, "reason": "import numpy", "num_tokens": 881}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This version of the `moon` module calculates lunar phase angle for a geocentric observer.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Third-party
import numpy as np
from astropy.coordinates import get_moon, get_sun
__all__ = ["moon_phase_angle", "moon_illumination"]
def moon_phase_angle(time, ephemeris=None):
"""
Calculate lunar orbital phase in radians.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
`~astropy.coordinates.solar_system_ephemeris` (which is
set to 'builtin' by default).
Returns
-------
i : float
Phase angle of the moon [radians]
"""
# TODO: cache these sun/moon SkyCoord objects
sun = get_sun(time)
moon = get_moon(time, ephemeris=ephemeris)
elongation = sun.separation(moon)
return np.arctan2(sun.distance*np.sin(elongation),
moon.distance - sun.distance*np.cos(elongation))
def moon_illumination(time, ephemeris=None):
"""
Calculate fraction of the moon illuminated.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
`~astropy.coordinates.solar_system_ephemeris` (which is
set to 'builtin' by default).
Returns
-------
k : float
Fraction of moon illuminated
"""
i = moon_phase_angle(time, ephemeris=ephemeris)
k = (1 + np.cos(i))/2.0
return k.value
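# Minimal usage sketch (an illustrative addition, not part of the original
# module: any astropy Time works here, and the default 'builtin' ephemeris
# needs no download):
if __name__ == '__main__':
    from astropy.time import Time
    t = Time('2015-08-29 18:35')
    print(moon_phase_angle(t))   # phase angle as an astropy Quantity in radians
    print(moon_illumination(t))  # illuminated fraction in [0, 1]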
|
{"hexsha": "4818639e351cd6b3370dea4714eb317922ee0722", "size": 1732, "ext": "py", "lang": "Python", "max_stars_repo_path": "astroplan/moon.py", "max_stars_repo_name": "lordaniket06/astroplan", "max_stars_repo_head_hexsha": "cc28c3204cb1d8338f2a91e1609a95415aa9df71", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-07T11:24:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-07T11:24:29.000Z", "max_issues_repo_path": "astroplan/moon.py", "max_issues_repo_name": "lordaniket06/astroplan", "max_issues_repo_head_hexsha": "cc28c3204cb1d8338f2a91e1609a95415aa9df71", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-08-27T21:16:39.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-28T16:27:09.000Z", "max_forks_repo_path": "astroplan/moon.py", "max_forks_repo_name": "lordaniket06/astroplan", "max_forks_repo_head_hexsha": "cc28c3204cb1d8338f2a91e1609a95415aa9df71", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-06-18T16:14:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-28T13:19:38.000Z", "avg_line_length": 26.2424242424, "max_line_length": 79, "alphanum_fraction": 0.6443418014, "include": true, "reason": "import numpy,from astropy", "num_tokens": 438}
|
/*
* Copyright (c) 2011, Peter Thorson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the WebSocket++ Project nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL PETER THORSON BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "chat_client_handler.hpp"
#include <boost/algorithm/string/replace.hpp>
using websocketchat::chat_client_handler;
using websocketpp::client_session_ptr;
void chat_client_handler::on_open(session_ptr s) {
// not sure if anything needs to happen here.
m_session = s;
std::cout << "Successfully connected" << std::endl;
}
void chat_client_handler::on_close(session_ptr s) {
// not sure if anything needs to happen here either.
m_session = client_session_ptr();
std::cout << "client was disconnected" << std::endl;
}
void chat_client_handler::on_message(session_ptr s,const std::string &msg) {
//std::cout << "message from server: " << msg << std::endl;
decode_server_msg(msg);
}
// CLIENT API
// client api methods will be called from outside the io_service.run thread
// they need to be careful not to touch unsynchronized member variables.
void chat_client_handler::send(const std::string &msg) {
if (!m_session) {
std::cerr << "Error: no connected session" << std::endl;
return;
}
m_session->io_service().post(boost::bind(&chat_client_handler::do_send, this, msg));
}
void chat_client_handler::close() {
if (!m_session) {
std::cerr << "Error: no connected session" << std::endl;
return;
}
m_session->io_service().post(boost::bind(&chat_client_handler::do_close,this));
}
// END CLIENT API
void chat_client_handler::do_send(const std::string &msg) {
if (!m_session) {
std::cerr << "Error: no connected session" << std::endl;
return;
}
// check for local commands
if (msg == "/list") {
std::cout << "list all participants" << std::endl;
} else if (msg == "/close") {
do_close();
} else {
m_session->send(msg);
}
}
void chat_client_handler::do_close() {
if (!m_session) {
std::cerr << "Error: no connected session" << std::endl;
return;
}
m_session->close(websocketpp::session::CLOSE_STATUS_GOING_AWAY,"");
}
// {"type":"participants","value":[<participant>,...]}
// {"type":"msg","sender":"<sender>","value":"<msg>" }
void chat_client_handler::decode_server_msg(const std::string &msg) {
// for messages of type participants, erase and rebuild m_participants
// for messages of type msg, print out message
// NOTE: The chat server was written with the intention of the client having a built in
// JSON parser. To keep external dependencies low for this demonstration chat client I am
// parsing the server messages by hand.
std::string::size_type start = 9;
std::string::size_type end;
if (msg.substr(0,start) != "{\"type\":\"") {
// ignore
std::cout << "invalid message" << std::endl;
return;
}
if (msg.substr(start,15) == "msg\",\"sender\":\"") {
// parse message
std::string sender;
std::string message;
start += 15;
end = msg.find("\"",start);
while (end != std::string::npos) {
if (msg[end-1] == '\\') {
sender += msg.substr(start,end-start-1) + "\"";
start = end+1;
end = msg.find("\"",start);
} else {
sender += msg.substr(start,end-start);
start = end;
break;
}
}
if (msg.substr(start,11) != "\",\"value\":\"") {
std::cout << "invalid message" << std::endl;
return;
}
start += 11;
end = msg.find("\"",start);
while (end != std::string::npos) {
if (msg[end-1] == '\\') {
message += msg.substr(start,end-start-1) + "\"";
start = end+1;
end = msg.find("\"",start);
} else {
message += msg.substr(start,end-start);
start = end;
break;
}
}
std::cout << "[" << sender << "] " << message << std::endl;
} else if (msg.substr(start,23) == "participants\",\"value\":[") {
// parse participants
std::cout << "participants message" << std::endl;
} else {
// unknown message type
std::cout << "unknown message" << std::endl;
}
}
|
{"hexsha": "e16a5f471736143f13cc5b20f8daaf3f3751a27d", "size": 5328, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "server/websocketpp/examples/chat_client/chat_client_handler.cpp", "max_stars_repo_name": "urbenlegend/WebStreamer", "max_stars_repo_head_hexsha": "562b16a4b8e10cce25c4088e38e83f93bc87e1ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42.0, "max_stars_repo_stars_event_min_datetime": "2015-09-19T13:33:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T18:36:51.000Z", "max_issues_repo_path": "server/websocketpp/examples/chat_client/chat_client_handler.cpp", "max_issues_repo_name": "cqzhanghy/WebStreamer", "max_issues_repo_head_hexsha": "562b16a4b8e10cce25c4088e38e83f93bc87e1ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2015-08-05T19:09:03.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-08T18:30:53.000Z", "max_forks_repo_path": "server/websocketpp/examples/chat_client/chat_client_handler.cpp", "max_forks_repo_name": "cqzhanghy/WebStreamer", "max_forks_repo_head_hexsha": "562b16a4b8e10cce25c4088e38e83f93bc87e1ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14.0, "max_forks_repo_forks_event_min_datetime": "2015-09-20T14:01:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-06T17:26:28.000Z", "avg_line_length": 30.976744186, "max_line_length": 90, "alphanum_fraction": 0.672484985, "num_tokens": 1342}
|
[STATEMENT]
lemma supp_subst: "supp (e[y::=x]) \<subseteq> (supp e - {atom y}) \<union> {atom x}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. supp e[y::=x] \<subseteq> supp e - {atom y} \<union> {atom x}
[PROOF STEP]
using supp_subst_eq
[PROOF STATE]
proof (prove)
using this:
supp ?e[?y::=?x] = supp ?e - {atom ?y} \<union> (if atom ?y \<in> supp ?e then {atom ?x} else {})
goal (1 subgoal):
1. supp e[y::=x] \<subseteq> supp e - {atom y} \<union> {atom x}
[PROOF STEP]
by auto
|
{"llama_tokens": 212, "file": "Launchbury_Substitution", "length": 2}
|
# Reference: T. Mueller et al., Phys. Rev. C 83, 054615 (2011).
import numpy as np
a_U235 = dict(a1 = 3.217, a2 = -3.111, a3 = 1.395, a4 = -3.690e-1, a5 = 4.445e-2, a6 = -2.053e-3)
a_Pu239 = dict(a1 = 6.413, a2 = -7.432, a3 = 3.535, a4 = -8.820e-1, a5 = 1.025e-1, a6 = -4.550e-3)
a_U238 = dict(a1 = 4.833e-1, a2 = 1.927e-1, a3 = -1.283e-1, a4 = -6.762e-3, a5 = 2.233e-3, a6 = -1.536e-4)
a_Pu241 = dict(a1 = 3.251, a2 = -3.204, a3 = 1.428, a4 = -3.675e-1, a5 = 4.254e-2, a6 = -1.896e-3)
elements = dict(U235 = a_U235, Pu239 = a_Pu239, U238 = a_U238, Pu241 = a_Pu241)
def mueller(E, params):  # evaluate the Mueller spectrum exp(sum_{i=1..6} a_i * E**(i-1))
a_list = np.asarray(list(params.values()))
y = np.zeros(len(E))
for i, a in enumerate(a_list):
temp = a * np.power(E, i)
y = y + temp
return np.exp(y)
def spectrum(E, element):
return mueller(E, elements[element])
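# Minimal usage sketch: evaluate the U235 antineutrino spectrum on an energy
# grid in MeV (the 2-8 MeV range is an illustrative assumption, roughly the
# fit's validity range). Note that mueller() relies on dict insertion order
# (guaranteed in Python 3.7+), so a1..a6 map to powers E**0..E**5.
if __name__ == '__main__':
    E = np.linspace(2.0, 8.0, 7)
    print(spectrum(E, 'U235'))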
|
{"hexsha": "6fe1a3aabea7cc3b833af910a706aa3b53f0cfb1", "size": 859, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyreactors/mueller/mueller.py", "max_stars_repo_name": "michelemontuschi/pyreactors", "max_stars_repo_head_hexsha": "1b1f7edccb2ca7f9b1281385dbc9017d3791510d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyreactors/mueller/mueller.py", "max_issues_repo_name": "michelemontuschi/pyreactors", "max_issues_repo_head_hexsha": "1b1f7edccb2ca7f9b1281385dbc9017d3791510d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyreactors/mueller/mueller.py", "max_forks_repo_name": "michelemontuschi/pyreactors", "max_forks_repo_head_hexsha": "1b1f7edccb2ca7f9b1281385dbc9017d3791510d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0454545455, "max_line_length": 106, "alphanum_fraction": 0.5809080326, "include": true, "reason": "import numpy", "num_tokens": 399}
|
import torch
from torch.nn import init
import numpy as np
import random
import math
import os
from matplotlib import pyplot as plt
from PIL import Image
import scipy.signal
from tqdm import tqdm
from torch.autograd import Variable
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
elif classname.find('BatchNorm1d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal_(m.weight.data, std=0.001)
init.constant_(m.bias.data, 0.0)
class RandomErasing(object):
def __init__(self, probability = 0.5, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1/self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
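# Minimal sketch of the transform above (shapes and probability are
# illustrative assumptions):
#
#   eraser = RandomErasing(probability=1.0)  # always erase, for demonstration
#   img = torch.rand(3, 64, 64)              # C x H x W tensor
#   out = eraser(img)                        # a random patch now holds the channel means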
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def train_one_epoch(model, criterion, optimizer, epoch, epochs, step, train_loader, cuda):
loss = 0
print('Start Train')
model = model.train()
with tqdm(total=step, desc=f'Epoch {epoch + 1}/{epochs}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(train_loader):
if iteration >= step:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = Variable(images.cuda().detach())
targets = Variable(targets.cuda().detach())
else:
images = Variable(images)
targets = Variable(targets)
optimizer.zero_grad()
outputs = model(images)
loss_value = criterion(outputs, targets)
loss_value.backward()
optimizer.step()
loss += loss_value.item()
pbar.set_postfix(**{'loss' : loss / (iteration + 1), 'lr' : get_lr(optimizer)})
pbar.update(1)
print('Finish Train')
return loss
def val_one_epoch(model, criterion, optimizer, epoch, epochs, step, val_loader, cuda):
loss = 0
model.eval()
print('Start Validation')
with tqdm(total=step, desc=f'Epoch {epoch + 1}/{epochs}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(val_loader):
if iteration >= step:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = Variable(images.cuda().detach())
targets = Variable(targets.cuda().detach())
else:
images = Variable(images)
targets = Variable(targets)
optimizer.zero_grad()
outputs = model(images)
loss_value = criterion(outputs, targets)
loss += loss_value.item()
pbar.set_postfix(**{'val_loss': loss / (iteration + 1)})
pbar.update(1)
print('Finish Validation')
return loss
def fliplr(image):
inv_idx = torch.arange(image.size(3) - 1, -1, -1).long()
img_flip = image.index_select(3, inv_idx)
return img_flip
def extract_feature(model, dataloader):
features = torch.FloatTensor()
for data in dataloader:
image, label = data
image_f = fliplr(image)
input_image = Variable(image).cuda()
input_image_f = Variable(image_f).cuda()
outputs = model(input_image) + model(input_image_f)
        # compute the L2 norm of each feature vector
feature_norm = torch.norm(outputs, p=2, dim=1, keepdim=True)
feature = outputs.div(feature_norm.expand_as(outputs))
features = torch.cat((features, feature.data.cpu()), 0)
return features
def get_id(img_path):
camera_id = []
labels = []
for path, _ in img_path:
filename = os.path.basename(path)
        # get the label (class id)
label = filename[0:4]
if label[0:2] == '-1':
labels.append(-1)
else:
labels.append(int(label))
        # get the camera id
camera = filename.split('c')[1]
camera_id.append(int(camera[0]))
return camera_id, labels
def evaluate(qf, ql, qc, gf, gl, gc):
    query = qf.view(-1, 1)  # reshape the query feature into a column vector
    score = torch.mm(gf, query)  # cosine similarity: inner product of L2-normalized features
    score = score.squeeze(1).cpu()
    score = score.numpy()
    index = np.argsort(score)  # rank gallery entries by score
    index = index[::-1]  # reverse to descending order (best match first)
    query_index = np.argwhere(gl == ql)  # gallery entries with the same label as the query
    camera_index = np.argwhere(gc == qc)  # gallery entries from the same camera as the query
    good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)  # same label, different camera: true matches
    junk_index1 = np.argwhere(gl == -1)  # falsely detected images (label -1)
    junk_index2 = np.intersect1d(query_index, camera_index)  # same person under the same camera
    junk_index = np.append(junk_index2, junk_index1)
CMC_tmp = compute_mAP(index, good_index, junk_index)
return CMC_tmp
def compute_mAP(index, good_index, junk_index):
ap = 0
cmc = torch.IntTensor(len(index)).zero_()
if good_index.size == 0: # if empty
cmc[0] = -1
return ap, cmc
# remove junk_index
mask = np.in1d(index, junk_index, invert=True)
index = index[mask]
# find good_index index
ngood = len(good_index)
mask = np.in1d(index, good_index)
    rows_good = np.argwhere(mask)
rows_good = rows_good.flatten()
cmc[rows_good[0]:] = 1
for i in range(ngood):
d_recall = 1.0 / ngood
precision = (i + 1) * 1.0 / (rows_good[i] + 1)
if rows_good[i] != 0:
old_precision = i * 1.0 / rows_good[i]
else:
old_precision = 1.0
ap = ap + d_recall * (old_precision + precision) / 2
return ap, cmc
|
{"hexsha": "d1197d34b88f971b9ee394f83008dea52c262481", "size": 7284, "ext": "py", "lang": "Python", "max_stars_repo_path": "reid/utils/utils.py", "max_stars_repo_name": "sht1998/Tracking-PyTorch", "max_stars_repo_head_hexsha": "928c5b0e9e196da207a1eed086ce1c414d3de91e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reid/utils/utils.py", "max_issues_repo_name": "sht1998/Tracking-PyTorch", "max_issues_repo_head_hexsha": "928c5b0e9e196da207a1eed086ce1c414d3de91e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reid/utils/utils.py", "max_forks_repo_name": "sht1998/Tracking-PyTorch", "max_forks_repo_head_hexsha": "928c5b0e9e196da207a1eed086ce1c414d3de91e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2618025751, "max_line_length": 110, "alphanum_fraction": 0.5733113674, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1980}
|
import cv2
import dlib
import numpy as np
import matplotlib.pyplot as plt
import imutils
from imutils import face_utils, translate, resize
from imutils.video import VideoStream, FPS, FileVideoStream
import time
from scipy.spatial import distance as dist
import math
from utils import *
from mask_image import *
RED_EYE_FILE = 'drive/My Drive/Computer Vision/Project/masks/3_red_eye_v2.PNG'
BLUE_EYE_FILE = 'drive/My Drive/Computer Vision/Project/masks/3_blue_eye.PNG'
ANGEL_HAT_FILE = 'drive/My Drive/Computer Vision/Project/masks/3_angel_hat.PNG'
DEVIL_EAR_FILE = 'drive/My Drive/Computer Vision/Project/masks/3_devil_ear.PNG'
class VidCap:
def __init__(self, video_file):
self.vidcap = cv2.VideoCapture(video_file)
def get_frame(self, sec, show=False):
# get frame at second sec
# returns
# - has_frames : bool
# - frame : image or 0
self.vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000) # takes frame at time sec
has_frames, image = self.vidcap.read()
if has_frames:
if show:
plt.imshow(image)
plt.show()
return has_frames, image
else:
return has_frames, 0
def extract_frames(self, frame_rate=1, max_frames=20, show=False):
# capture image in each frame_rate seconds
frames = []
sec = 0
count = 1
has_frames, frame = self.get_frame(sec, show)
while has_frames and count <= max_frames:
frames.append(frame)
count = count + 1
sec = sec + frame_rate
sec = round(sec, 2)
has_frames, frame = self.get_frame(sec, show)
return frames
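# Hedged usage sketch (added; the file path is hypothetical):
#   cap = VidCap('input.mp4')
#   frames = cap.extract_frames(frame_rate=0.5, max_frames=20)  # one frame per 0.5 s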
def change_eye(thresh, img, state='angel'):
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = sorted(cnts, key=cv2.contourArea)
for cnt in cnts[-2:]:
x, y, w, h = cv2.boundingRect(cnt)
s = min(h, w)
        M = cv2.moments(cnt)
        if M['m00'] == 0: # degenerate contour; avoid division by zero
            continue
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
if state =='angel':
eye_im = cv2.imread(BLUE_EYE_FILE, cv2.IMREAD_UNCHANGED)
else:
eye_im = cv2.imread(RED_EYE_FILE, cv2.IMREAD_UNCHANGED)
eye_im = cv2.resize(eye_im, (s, s))
img = overlay_transparent(img, eye_im, cx - s // 2, cy - s // 2)
return img
def add_hat(img, left_brow, right_brow, state='angel'):
if state =='angel':
d = abs(right_brow[0][0] - left_brow[-1][0]) # distance btw brows corners
hat_im = cv2.imread(ANGEL_HAT_FILE, cv2.IMREAD_UNCHANGED)
hat_im = imutils.resize(hat_im, width=d)
y = max(0, right_brow[0][1] - d)
x = right_brow[0][0]
img = overlay_transparent(img, hat_im, x, y)
else:
d = abs(right_brow[0][0] - right_brow[-1][0])
left_ear_im = cv2.imread(DEVIL_EAR_FILE, cv2.IMREAD_UNCHANGED)
left_ear_im = imutils.resize(left_ear_im, width=d)
y = max(0, right_brow[0][1] - int(2.5 * d))
x = right_brow[0][0] - int(d * 0.5)
img = overlay_transparent(img, left_ear_im, x, y)
right_ear_im = cv2.flip(left_ear_im, 1)
y = max(0, left_brow[0][1] - int(2.5 * d))
x = left_brow[0][0] + int(d * 0.5)
img = overlay_transparent(img, right_ear_im, x, y)
return img
def mask_video_angel_devil(input_video_file, output_video_file, show_frames=False, rotate=True):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('drive/My Drive/Computer Vision/Project/models/shape_predictor_68_face_landmarks.dat')
# predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
vidcap = VidCap(input_video_file)
frames = vidcap.extract_frames(frame_rate=0.5) # list of frames
# start and end points' numbers
(le_start, le_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(re_start, re_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(leb_start, leb_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
(reb_start, reb_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
curr_state = 'angel' # or devil
first_blink_frame = 1
result_frames = []
EYE_AR_THRESH = 0.25
for frame in frames:
frame_c = frame.copy()
if rotate:
            frame_c = cv2.rotate(frame_c, cv2.ROTATE_90_COUNTERCLOCKWISE)
gray = cv2.cvtColor(frame_c, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# extract start and end points
left_brow = shape[leb_start:leb_end]
right_brow = shape[reb_start:reb_end]
frame_c = add_hat(frame_c, left_brow, right_brow, curr_state)
left_eye = shape[le_start:le_end]
right_eye = shape[re_start:re_end]
left_eye_hull = cv2.convexHull(left_eye)
right_eye_hull = cv2.convexHull(right_eye)
# calc eyes ratio
left_EAR = eye_aspect_ratio(left_eye)
right_EAR = eye_aspect_ratio(right_eye)
EAR = (left_EAR + right_EAR) / 2.0
if EAR < EYE_AR_THRESH and first_blink_frame: # blink
if curr_state == 'angel':
curr_state = 'devil'
else:
curr_state = 'angel'
first_blink_frame = 0
else: # not blink
first_blink_frame = 1
mask = np.zeros(frame_c.shape[:2], dtype=np.uint8)
mask = cv2.fillConvexPoly(mask, left_eye, 255)
mask = cv2.fillConvexPoly(mask, right_eye, 255)
eyes = cv2.bitwise_and(frame_c, frame_c, mask=mask)
mask = (eyes == [0, 0, 0]).all(axis=2)
eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(eyes_gray, 80, 255, cv2.THRESH_BINARY)
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=4)
thresh = cv2.medianBlur(thresh, 3)
eyes_gray[thresh == 255] = 0
# mid = (shape[39][0] + shape[42][0]) // 2
# contouring(eyes_gray[:, 0:mid], mid, frame_c)
# contouring(eyes_gray[:, mid:], mid, frame_c, True)
frame_c = change_eye(eyes_gray, frame_c, curr_state)
result_frames.append(frame_c)
frame_c = cv2.cvtColor(frame_c, cv2.COLOR_BGR2RGB)
if show_frames:
plt.figure(figsize=(8, 8))
plt.imshow(frame_c)
plt.show()
# save frames to video
height, width, _ = result_frames[0].shape
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(output_video_file, fourcc, 3, (width, height)) # the 3rd parameter is the output FPS and can be adjusted
for img in result_frames:
video.write(img)
cv2.destroyAllWindows()
video.release()
def mask_video_simple(input_video_file, output_video_file, MASK_NAME, add_corona_mask=False, show_frames=False, rotate=True, video_stream=False):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('drive/My Drive/Computer Vision/Project/models/shape_predictor_68_face_landmarks.dat')
# predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    if video_stream: # real-time
        vs = VideoStream(src=0).start()
        time.sleep(1.0) # give the camera time to warm up
        out = cv2.VideoWriter(output_video_file, cv2.VideoWriter_fourcc(*'mp4v'), 3, (253, 450), True)
        result_frames = []
        while True:
            frame = vs.read()
            if frame is None: # stream ended or camera unavailable
                break
            frame = resize(frame, width=450)
            frame_c = frame.copy()
            if rotate:
                frame_c = cv2.rotate(frame_c, cv2.ROTATE_90_COUNTERCLOCKWISE)
            frame_c = mask_image(frame_c, MASK_NAME, add_corona_mask=add_corona_mask)
            result_frames.append(frame_c)
            frame_c = cv2.cvtColor(frame_c, cv2.COLOR_BGR2RGB)
            if show_frames:
                plt.figure(figsize=(8, 8))
                plt.imshow(frame_c)
                plt.show()
            # out.write(frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"): # quit on 'q'
                break
        cv2.destroyAllWindows()
        vs.stop()
        out.release()
else: # from video
vidcap = VidCap(input_video_file)
frames = vidcap.extract_frames(frame_rate=0.5) # list of frames
result_frames = []
for frame in frames:
frame_c = frame.copy()
if rotate:
                frame_c = cv2.rotate(frame_c, cv2.ROTATE_90_COUNTERCLOCKWISE)
frame_c = mask_image(frame_c, MASK_NAME, add_corona_mask=add_corona_mask)
result_frames.append(frame_c)
frame_c = cv2.cvtColor(frame_c, cv2.COLOR_BGR2RGB)
if show_frames:
plt.figure(figsize=(8, 8))
plt.imshow(frame_c)
plt.show()
# save frames to video
height, width, _ = result_frames[0].shape
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video = cv2.VideoWriter(output_video_file, fourcc, 3, (width, height)) # the 3rd parameter is the output FPS and can be adjusted
for img in result_frames:
video.write(img)
cv2.destroyAllWindows()
video.release()
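# Hedged usage sketch (added; file paths and mask name are hypothetical):
#   mask_video_angel_devil('input.mp4', 'output_angel_devil.mp4')
#   mask_video_simple('input.mp4', 'output_masked.mp4', MASK_NAME='some_mask',
#                     add_corona_mask=True, rotate=True)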
|
{"hexsha": "85d35826a88cd56761e11be78c7a492cc63fdfe9", "size": 8750, "ext": "py", "lang": "Python", "max_stars_repo_path": "mask_video.py", "max_stars_repo_name": "MariBax/Face-masking-with-CV", "max_stars_repo_head_hexsha": "e211afe8ebe82553ee4089e7dc288bc127c81107", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-18T15:13:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-01T13:02:20.000Z", "max_issues_repo_path": "mask_video.py", "max_issues_repo_name": "MariBax/Face-masking-with-CV", "max_issues_repo_head_hexsha": "e211afe8ebe82553ee4089e7dc288bc127c81107", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mask_video.py", "max_forks_repo_name": "MariBax/Face-masking-with-CV", "max_forks_repo_head_hexsha": "e211afe8ebe82553ee4089e7dc288bc127c81107", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5249042146, "max_line_length": 146, "alphanum_fraction": 0.6590857143, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2537}
|
SUBROUTINE SUB(X,Y,A,B)
C*******
C SUB WILL FORM Y = A*X - B*Y WHERE A AND B ARE SCALAR MULTIPLIERS
C FOR THE VECTORS X AND Y
C*******
DOUBLE PRECISION X(1) ,Y(1) ,A ,B
COMMON /INVPWX/ XX ,NCOL
DO 10 I = 1,NCOL
10 Y(I) = X(I)*A - Y(I)*B
RETURN
END
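C*******
C HEDGED USAGE SKETCH (ADDED, HYPOTHETICAL VALUES). WITH NCOL IN
C COMMON /INVPWX/ SET TO THE VECTOR LENGTH, THE CALL
C CALL SUB (X,Y,2.0D0,1.0D0)
C OVERWRITES EACH Y(I) WITH 2.0*X(I) - 1.0*Y(I)
C*******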
|
{"hexsha": "b681d70b691b11b89d955ee7b3e13673c5635c5a", "size": 335, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/sub.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/sub.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/sub.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 27.9166666667, "max_line_length": 72, "alphanum_fraction": 0.4567164179, "num_tokens": 123}
|
import pandas as pd
import numpy as np
import os, sys
#Extract the features and the predictors
data = pd.read_csv('parkinsons.data')
predictors = data.drop(['name'], axis = 1)
predictors = predictors.drop(['status'], axis = 1).to_numpy()
target = data['status']
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler((-1, 1))
X = scaler.fit_transform(predictors)
Y = target
#Split training data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = .25, random_state = 7)
#Create the K-Nearest-Neighbors model
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
model.fit(X_train, Y_train)
# make predictions
y_pred = model.predict(X_test)
# summarize the fit of the model
print("k-Nearest Neighbor: ")
print(metrics.accuracy_score(Y_test, y_pred))
print(metrics.classification_report(Y_test, y_pred))
print(metrics.confusion_matrix(Y_test, y_pred))
# Next: fine-tune KNeighborsClassifier() to try to beat the 97.959% accuracy above, as sketched below
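# Hedged sketch of that fine-tuning step (added; the parameter grid is an
# assumption, not part of the original file):
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors': list(range(1, 31)),
              'weights': ['uniform', 'distance'],
              'p': [1, 2]}  # p=1 Manhattan, p=2 Euclidean
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
grid.fit(X_train, Y_train)
print("Best parameters:", grid.best_params_)
print("Tuned accuracy:", metrics.accuracy_score(Y_test, grid.predict(X_test)))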
|
{"hexsha": "5ccf2f2895b815c2d0ccd24442b0783c248103a4", "size": 1078, "ext": "py", "lang": "Python", "max_stars_repo_path": "fine_tune_KNN.py", "max_stars_repo_name": "cuuupid/parkinsons-AI", "max_stars_repo_head_hexsha": "276b2216d879155d6172e071eca1fb0984693116", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2018-04-05T12:12:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-03T14:11:36.000Z", "max_issues_repo_path": "fine_tune_KNN.py", "max_issues_repo_name": "cuuupid/parkinsons-AI", "max_issues_repo_head_hexsha": "276b2216d879155d6172e071eca1fb0984693116", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-11-28T02:55:28.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T02:55:28.000Z", "max_forks_repo_path": "fine_tune_KNN.py", "max_forks_repo_name": "cuuupid/parkinsons-AI", "max_forks_repo_head_hexsha": "276b2216d879155d6172e071eca1fb0984693116", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-04-06T19:34:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T15:35:33.000Z", "avg_line_length": 31.7058823529, "max_line_length": 92, "alphanum_fraction": 0.7782931354, "include": true, "reason": "import numpy", "num_tokens": 268}
|
'''Configure printing, plotting, logging options.'''
import numpy
numpy.set_printoptions(
edgeitems = 5,
threshold = 100,
formatter = {'float' : '{: 13.6e}'.format},
linewidth = 160)
import matplotlib
# matplotlib.use('TkAgg')
matplotlib.interactive(True)
import logging
logging.basicConfig(
level=logging.WARNING,
format='[%(levelname)s:%(funcName)s] %(message)s')
import dolfin
dolfin.set_log_level(logging.WARNING)
logging.getLogger('UFL').setLevel(logging.WARNING)
logging.getLogger('FFC').setLevel(logging.WARNING)
|
{"hexsha": "590f82c568e68ca350e888fce8f7e7460ae9efad", "size": 548, "ext": "py", "lang": "Python", "max_stars_repo_path": "config.py", "max_stars_repo_name": "danassutula/maximum_compliance", "max_stars_repo_head_hexsha": "f2407bd9c5f7e36fe43aa51690433fe8bfb2f748", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "config.py", "max_issues_repo_name": "danassutula/maximum_compliance", "max_issues_repo_head_hexsha": "f2407bd9c5f7e36fe43aa51690433fe8bfb2f748", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config.py", "max_forks_repo_name": "danassutula/maximum_compliance", "max_forks_repo_head_hexsha": "f2407bd9c5f7e36fe43aa51690433fe8bfb2f748", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8260869565, "max_line_length": 54, "alphanum_fraction": 0.7262773723, "include": true, "reason": "import numpy", "num_tokens": 133}
|
import numpy as np
from math import ceil
from .. utils import logger, verbose
@verbose
def peak_finder(x0, thresh=None, extrema=1, verbose=None):
"""Noise-tolerant fast peak-finding algorithm.
Parameters
----------
    x0 : 1d array
        A real vector from which the maxima will be found (required).
thresh : float
The amount above surrounding data for a peak to be
identified (default = (max(x0)-min(x0))/4). Larger values mean
the algorithm is more selective in finding peaks.
extrema : {-1, 1}
1 if maxima are desired, -1 if minima are desired
(default = maxima, 1).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
peak_loc : array
The indices of the identified peaks in x0
peak_mag : array
The magnitude of the identified peaks
Note
----
If repeated values are found the first is identified as the peak.
Conversion from initial Matlab code from:
Nathanael C. Yoder (ncyoder@purdue.edu)
Example
-------
t = 0:.0001:10;
x = 12*sin(10*2*pi*t)-3*sin(.1*2*pi*t)+randn(1,numel(t));
x(1250:1255) = max(x);
peak_finder(x)
"""
x0 = np.asanyarray(x0)
if x0.ndim >= 2:
raise ValueError('The input data must be a 1D vector')
s = x0.size
if thresh is None:
thresh = (np.max(x0) - np.min(x0)) / 4
assert extrema in [-1, 1]
if extrema == -1:
x0 = extrema * x0 # Make it so we are finding maxima regardless
dx0 = np.diff(x0) # Find derivative
# This is so we find the first of repeated values
dx0[dx0 == 0] = -np.finfo(float).eps
# Find where the derivative changes sign
ind = np.where(dx0[:-1:] * dx0[1::] < 0)[0] + 1
# Include endpoints in potential peaks and valleys
x = np.concatenate((x0[:1], x0[ind], x0[-1:]))
ind = np.concatenate(([0], ind, [s - 1]))
# x only has the peaks, valleys, and endpoints
length = x.size
min_mag = np.min(x)
if length > 2: # Function with peaks and valleys
# Set initial parameters for loop
temp_mag = min_mag
found_peak = False
left_min = min_mag
        # Deal with the first point a little differently since we tacked it on
        # Calculate the sign of the derivative: since we tacked the first
        # point on, it does not necessarily alternate like the rest.
signDx = np.sign(np.diff(x[:3]))
if signDx[0] <= 0: # The first point is larger or equal to the second
ii = -1
if signDx[0] == signDx[1]: # Want alternating signs
x = np.concatenate((x[:1], x[2:]))
ind = np.concatenate((ind[:1], ind[2:]))
length -= 1
else: # First point is smaller than the second
ii = 0
if signDx[0] == signDx[1]: # Want alternating signs
x = x[1:]
ind = ind[1:]
length -= 1
# Preallocate max number of maxima
maxPeaks = int(ceil(length / 2.0))
        peak_loc = np.zeros(maxPeaks, dtype=int)
peak_mag = np.zeros(maxPeaks)
c_ind = 0
# Loop through extrema which should be peaks and then valleys
while ii < (length - 1):
ii += 1 # This is a peak
# Reset peak finding if we had a peak and the next peak is bigger
# than the last or the left min was small enough to reset.
if found_peak and ((x[ii] > peak_mag[-1]) or
(left_min < peak_mag[-1] - thresh)):
temp_mag = min_mag
found_peak = False
# Make sure we don't iterate past the length of our vector
if ii == length - 1:
break # We assign the last point differently out of the loop
            # Found a new peak that is larger than temp_mag and at least
            # thresh above the minimum to its left.
if (x[ii] > temp_mag) and (x[ii] > left_min + thresh):
temp_loc = ii
temp_mag = x[ii]
ii += 1 # Move onto the valley
# Come down at least thresh from peak
if not found_peak and (temp_mag > (thresh + x[ii])):
found_peak = True # We have found a peak
left_min = x[ii]
peak_loc[c_ind] = temp_loc # Add peak to index
peak_mag[c_ind] = temp_mag
c_ind += 1
elif x[ii] < left_min: # New left minima
left_min = x[ii]
# Check end point
if (x[-1] > temp_mag) and (x[-1] > (left_min + thresh)):
peak_loc[c_ind] = length - 1
peak_mag[c_ind] = x[-1]
c_ind += 1
elif not found_peak and temp_mag > min_mag:
# Check if we still need to add the last point
peak_loc[c_ind] = temp_loc
peak_mag[c_ind] = temp_mag
c_ind += 1
# Create output
peak_inds = ind[peak_loc[:c_ind]]
peak_mags = peak_mag[:c_ind]
else: # This is a monotone function where an endpoint is the only peak
x_ind = np.argmax(x)
peak_mags = x[x_ind]
if peak_mags > (min_mag + thresh):
peak_inds = ind[x_ind]
else:
peak_mags = []
peak_inds = []
# Change sign of data if was finding minima
if extrema < 0:
peak_mags *= -1.0
x0 = -x0
# Plot if no output desired
if len(peak_inds) == 0:
logger.info('No significant peaks found')
return peak_inds, peak_mags
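# Hedged usage sketch (added): a Python translation of the Matlab example in
# the docstring above.
#   t = np.arange(0, 10, 1e-4)
#   x = (12 * np.sin(10 * 2 * np.pi * t) - 3 * np.sin(0.1 * 2 * np.pi * t)
#        + np.random.randn(t.size))
#   x[1250:1256] = x.max()
#   peak_inds, peak_mags = peak_finder(x)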
|
{"hexsha": "13e1441c608c18386c5c6da5502fe0d5dae1eb16", "size": 5645, "ext": "py", "lang": "Python", "max_stars_repo_path": "mne/preprocessing/peak_finder.py", "max_stars_repo_name": "faturita/mne-python", "max_stars_repo_head_hexsha": "2c8cac5cf618351503d8f39e23fee80a66892fee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mne/preprocessing/peak_finder.py", "max_issues_repo_name": "faturita/mne-python", "max_issues_repo_head_hexsha": "2c8cac5cf618351503d8f39e23fee80a66892fee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-08-14T06:21:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-29T19:54:56.000Z", "max_forks_repo_path": "mne/preprocessing/peak_finder.py", "max_forks_repo_name": "faturita/mne-python", "max_forks_repo_head_hexsha": "2c8cac5cf618351503d8f39e23fee80a66892fee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-05T16:14:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-05T16:14:37.000Z", "avg_line_length": 33.6011904762, "max_line_length": 78, "alphanum_fraction": 0.556421612, "include": true, "reason": "import numpy", "num_tokens": 1511}
|
from __future__ import print_function
from __future__ import division
from past.utils import old_div
import numpy as np
from proteus import Domain
from proteus.mprans import SpatialTools as st
from proteus.mbd import CouplingFSI as fsi
import pychrono as chrono
from proteus.TwoPhaseFlow import TwoPhaseFlowProblem as tpf
from proteus.TwoPhaseFlow.utils import Parameters
import os
rho_0 = 1000.
nu_0 = 1.004e-6
rho_1 = 1.205
nu_1 = 1.500e-5
sigma_01 = 0.0
g = [0., 0., -9.81]
he = 2.5
water_level = 2.5
# GEOMETRY
domain = Domain.PiecewiseLinearComplexDomain()
nd=3
tank_dim = [5.,5.,5.]
tank = st.Tank3D(domain, dim=tank_dim)
rect = st.Cuboid(domain, dim=[1.,1.,1.], coords=[old_div(tank_dim[0],2.),
old_div(tank_dim[1],2.),
old_div(tank_dim[2],2.)])
rect.setHoles(holes=np.array([rect.coords]))
domain.MeshOptions.he = he
# BOUNDARY CONDITIONS
tank.BC['x+'].setNoSlip()
tank.BC['x-'].setNoSlip()
tank.BC['y-'].setNoSlip()
tank.BC['y+'].setNoSlip()
tank.BC['z-'].setNoSlip()
tank.BC['z+'].setAtmosphere()
rect.BC['x+'].setNoSlip()
rect.BC['x-'].setNoSlip()
rect.BC['y+'].setNoSlip()
rect.BC['y-'].setNoSlip()
rect.BC['z+'].setNoSlip()
rect.BC['z-'].setNoSlip()
# CHRONO
system = fsi.ProtChSystem()
system.ChSystem.Set_G_acc(chrono.ChVectorD(g[0], g[1], 0.))
body = fsi.ProtChBody(system=system)
body.attachShape(rect)
body.ChBody.SetMass(500.)
body.ChBody.SetBodyFixed(True) # fixing body
# OTHER PARAMS
st.assembleDomain(domain)
domain.polyfile = os.path.dirname(os.path.abspath(__file__)) + "/" + "mesh3D"
domain.MeshOptions.he = he
domain.MeshOptions.genMesh=False
#domain.writePoly("mesh3D")
# ___ _ _ _ _ ____ _ _ _ _
# |_ _|_ __ (_) |_(_) __ _| | / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | || '_ \| | __| |/ _` | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | || | | | | |_| | (_| | | | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |___|_| |_|_|\__|_|\__,_|_| \____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# Initial Conditions
nd = domain.nd
class PerturbedSurface_p:
def uOfXT(self, x, t):
p_L = 0.0
phi = x[nd-1] - tank_dim[nd-1]
return p_L-g[nd-1]*(rho_0*phi)
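# Added note (hedged): phi = z - z_top is the signed elevation relative to the
# tank top, so the class above initializes the pressure linearly in height,
# following the usual two-phase-flow hydrostatic pattern p = p_L - g_z*rho_0*phi.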
class AtRest:
def __init__(self):
pass
def uOfXT(self,x,t):
return 0.0
# _ _ _
# | \ | |_ _ _ __ ___ ___ _ __(_) ___ ___
# | \| | | | | '_ ` _ \ / _ \ '__| |/ __/ __|
# | |\ | |_| | | | | | | __/ | | | (__\__ \
# |_| \_|\__,_|_| |_| |_|\___|_| |_|\___|___/
# Numerics
myTpFlowProblem = tpf.TwoPhaseFlowProblem()
myTpFlowProblem.domain = domain
myTpFlowProblem.outputStepping.final_time = 0.002
myTpFlowProblem.outputStepping.dt_init = 0.001
myTpFlowProblem.outputStepping.dt_output = 0.001
myTpFlowProblem.outputStepping.dt_fixed = 0.001
myTpFlowProblem.SystemPhysics.setDefaults()
myTpFlowProblem.SystemNumerics.cfl = 0.4
myTpFlowProblem.SystemNumerics.useSuperlu=False
myTpFlowProblem.SystemPhysics.movingDomain = False
params = myTpFlowProblem.SystemPhysics
# PHYSICAL PARAMETERS
params.rho_0 = rho_0 # water
params.rho_1 = rho_1 # air
params.nu_0 = nu_0 # water
params.nu_1 = nu_1 # air
params.surf_tension_coeff = sigma_01
# MODEL PARAMETERS
m = params.modelDict
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelRANS2P,'flow')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelAddedMass,'addedMass')
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['p']=PerturbedSurface_p()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['u']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['v']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['w']=AtRest()
myTpFlowProblem.SystemPhysics.modelDict['addedMass'].p.initialConditions['addedMass']=AtRest()
m['flow'].p.coefficients.useVF = 1.0
m['flow'].p.coefficients.NONCONSERVATIVE_FORM = 0.0
# auxiliary variables
m['flow'].auxiliaryVariables += [system]
m['addedMass'].auxiliaryVariables += [system.ProtChAddedMass]
# flag the boundaries that belong to rigid bodies for the added-mass model
max_flag = max(domain.vertexFlags)
max_flag = max(domain.segmentFlags+[max_flag])
max_flag = max(domain.facetFlags+[max_flag])
flags_rigidbody = np.zeros(max_flag+1, dtype='int32')
for s in system.subcomponents:
if type(s) is fsi.ProtChBody:
for flag in s.boundaryFlags:
flags_rigidbody[flag] = 1
m['addedMass'].p.coefficients.flags_rigidbody = flags_rigidbody
|
{"hexsha": "ad616e58d630573958a2787edb7cc9b41b25850f", "size": 4673, "ext": "py", "lang": "Python", "max_stars_repo_path": "proteus/tests/AddedMass/addedmass3D.py", "max_stars_repo_name": "cekees/proteus", "max_stars_repo_head_hexsha": "11d8749e04f0950f090d1a406243539a868be642", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proteus/tests/AddedMass/addedmass3D.py", "max_issues_repo_name": "cekees/proteus", "max_issues_repo_head_hexsha": "11d8749e04f0950f090d1a406243539a868be642", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-19T03:29:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-19T03:29:35.000Z", "max_forks_repo_path": "proteus/tests/AddedMass/addedmass3D.py", "max_forks_repo_name": "cekees/proteus", "max_forks_repo_head_hexsha": "11d8749e04f0950f090d1a406243539a868be642", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3441558442, "max_line_length": 94, "alphanum_fraction": 0.6764391183, "include": true, "reason": "import numpy", "num_tokens": 1489}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 14:21:10 2020
Copyright 2020 by Hadrien Montanelli.
"""
# %% Imports.
# Standard library imports:
import matplotlib.pyplot as plt
import numpy as np
# Learnpy imports:
from learnpy.misc import csv_to_array
from learnpy.timeseries import arp
# %% Examples.
# Test AR(1):
series = csv_to_array('../../datasets/time_series_ar1.csv')
plt.plot(series, '.-')
p = 1
alpha, beta = arp(series, p)
print([alpha, beta])
prediction = np.zeros(len(series))
prediction[0] = series[0]
for k in range(len(series)-1):
prediction[k+1] = alpha + beta[0]*series[k]
plt.plot(prediction, '.-')
# Test AR(2):
series = csv_to_array('../../datasets/time_series_ar2.csv')
plt.figure()
plt.plot(series, '.-')
p = 2
alpha, beta = arp(series, p)
print([alpha, beta])
prediction = np.zeros(len(series))
prediction[0] = series[0]
prediction[1] = series[1]
for k in range(len(series)-2):
prediction[k+2] = alpha + beta[0]*series[k+1] + beta[1]*series[k]
plt.plot(prediction, '.-')
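# Added note (hedged): both prediction loops above implement the one-step AR(p)
# forecast x_t = alpha + beta[0]*x_{t-1} + ... + beta[p-1]*x_{t-p}, seeded with
# the first p observed values.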
|
{"hexsha": "90e4d58a7b0cd55609ba1ed1c9049850dabf310d", "size": 1035, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/timeseries/example_arp.py", "max_stars_repo_name": "Hadrien-Montanelli/learnpy", "max_stars_repo_head_hexsha": "b9fedb903cfe8c2fff8d7706667f17c51fb3a34f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-19T21:21:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-19T21:21:29.000Z", "max_issues_repo_path": "examples/timeseries/example_arp.py", "max_issues_repo_name": "Hadrien-Montanelli/learnpy", "max_issues_repo_head_hexsha": "b9fedb903cfe8c2fff8d7706667f17c51fb3a34f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2020-10-30T10:15:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-25T09:22:46.000Z", "max_forks_repo_path": "examples/timeseries/example_arp.py", "max_forks_repo_name": "Hadrien-Montanelli/learnpy", "max_forks_repo_head_hexsha": "b9fedb903cfe8c2fff8d7706667f17c51fb3a34f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5227272727, "max_line_length": 69, "alphanum_fraction": 0.677294686, "include": true, "reason": "import numpy", "num_tokens": 300}
|
import re
import math
import numpy as np
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
THRESHOLD = 0.45
class Similarity:
"""
Document Similarity Measure class implementing Soft Cosine Measure and
using WordNet's wup_similarity function for getting feature similarity
score.
"""
def __init__(self, tokens):
"""
Similarity class constructor.
`document` - list of documents to be analyzed.
`manip_tweet` - class object instance of manipulate tweet module.
"""
self.tfidf = TfidfVectorizer(tokenizer=lambda keys: tokens[keys])
self.matrix = self.tfidf.fit_transform(tokens.keys())
self._features = self.tfidf.get_feature_names()
self._synset_pairs = defaultdict(float)
self._synsets = {}
def similarity(self, M1=None, M2=None):
"""
Calculates similarity measure of each document matrix. Uses soft cosine
similarity measure to calculate document similarities.
"""
if M1 is None:
M1 = self.matrix
if M2 is None:
M2 = self.matrix
        # Pairwise similarity matrix between the documents of M1 and M2
        doc_sim = np.zeros([M1.shape[0], M2.shape[0]])
doc_pairs = defaultdict(float)
for i in range(M1.shape[0]):
for j in range(M2.shape[0]):
sorted_indices = tuple( sorted((i,j)) )
if sorted_indices in doc_pairs:
doc_sim[i, j] = doc_pairs[sorted_indices]
else:
if i == j:
doc_sim[i, j] = 1
else:
doc_sim[i, j] = self._soft_cosine_measure(
M1.getrow(i), M2.getrow(j))
doc_pairs[sorted_indices] = doc_sim[i, j]
return doc_sim
def cos_similarity(self, M1=None, M2=None):
'''
Cosine similarity measure of documents. For testing purposes.
'''
if M1 is None:
M1 = self.matrix
if M2 is None:
M2 = self.matrix
return cosine_similarity(M1, M2)
    def _multiply_elements(self, v1, v2):
        """
        Sums the products of vector-element pairs weighted by their feature
        similarity scores.
        """
        total = 0
        for i in range(v1.shape[0]):
            for j in range(v1.shape[0]):
                # Identical terms have similarity 1.0.
                if self._features[i] == self._features[j]:
                    feature_score = 1
                else:
                    feature_score = self._get_feature_score(self._features[i],
                                                            self._features[j])
                if feature_score <= THRESHOLD:
                    feature_score = 0
                total += v1[i] * v2[j] * feature_score
        return total
def _soft_cosine_measure(self, v1, v2):
"""
Soft Cosine Similarity Measure
------------------------------
Traditional Cosine Similarity Measure that takes into account the
semantic similarities of each features in each documents.
"""
v1 = v1.toarray()[0]
v2 = v2.toarray()[0]
product = self._multiply_elements(v1, v2)
denom1 = math.sqrt(self._multiply_elements(v1, v1))
denom2 = math.sqrt(self._multiply_elements(v2, v2))
return product / (denom1 * denom2)
def _get_synsets(self, term1, term2):
"""
Gets best synsets of each term based on the highest path similarity
among all pairs compared; if the synset's pos() is not a noun or verb,
it gets its related nouns/forms.
"""
synset_list1 = wn.synsets(term1)
synset_list2 = wn.synsets(term2)
max_score = -1.0
if (len(synset_list1) == 0) or (len(synset_list2) == 0):
return None, None
else:
best_pair = [None, None]
            for i in synset_list1:
                for j in synset_list2:
                    # simulate_root=True so a distance is defined across POS
                    # trees; guard anyway, since a None distance would crash
                    # the division
                    dist = i.shortest_path_distance(j, simulate_root=True)
                    if dist is None:
                        continue
                    score = 1.0 / (dist + 1)
                    if score > max_score:
                        max_score = score
                        best_pair = [i, j]
if (best_pair[0] is not None) and \
(best_pair[0].pos() not in ('n', 'v')):
best_pair[0] = self._get_related_nouns(best_pair[0])
if (best_pair[1] is not None) and \
(best_pair[1].pos() not in ('n', 'v')):
best_pair[1] = self._get_related_nouns(best_pair[1])
return tuple(best_pair)
def _get_related_nouns(self, synset):
"""
Gets derivationally related word forms as noun synsets of a given synset
to measure its similarity to other terms.
"""
related = None
lemmas = synset.lemmas()
if len(lemmas) > 0:
derived = lemmas[0].derivationally_related_forms()
if len(derived) > 0:
related = derived[0].synset()
return related
def _get_feature_score(self, term1, term2):
"""
If syn1 and syn2 are synsets, returns their similarity score. If they are
lists, gets all similarity scores of each element and returns the best
score.
"""
sorted_terms = tuple( sorted((term1, term2)) )
# Checks if synset pair had already been calculated.
if sorted_terms in self._synset_pairs:
return self._synset_pairs[tuple( sorted((term1, term2)) )]
# If a term contains a hashtag, it automatically does not contain
# synsets, thus, returning 0.
if any("#" in term for term in (term1, term2)):
return 0
        # If a term is not purely alphabetic (optionally hyphenated), it has
        # no synsets.
if any(re.search(r'^([a-zA-Z]+[-]?[a-zA-Z]+)$', term) is None for term in (term1, term2)):
return 0
# If the synset has already been captured. Checks the cache to get the
# synset of a term faster.
syn1 = self._synsets.get(term1)
syn2 = self._synsets.get(term2)
if all(syn is None for syn in (syn1, syn2)):
syn1, syn2 = self._get_synsets(term1, term2)
# If one/both synset/s is/are not found in WordNet. If it's not found, its
# value is None, otherwise, a Synset object.
if syn1 is None or syn2 is None:
if syn1 is not None:
self._synsets[term1] = syn1
if syn2 is not None:
self._synsets[term2] = syn2
return 0
score = wn.wup_similarity(syn1, syn2)
if score is None:
score = 0
self._synset_pairs[sorted_terms] = score
return score
if __name__ == '__main__':
tweets_data_path = "data/tweets_data.txt"
documents = ["Praise the fucking sun!",
"Daenerys is the mother of dragons.",
"Icarus flew too close to the sun.",
"Damn, Icarus got it tough, man.",
"Jon Fucking Snow fucked his aunt, Daenerys!",
"You're a wizard, Harry.",
"Hold the door, Hodor.",
"A quick brown fox jumps over the lazy dog."]
documents2 = ("The sky is blue. #Outdoors",
"The dog is playing.",#"The sun is bright.",
"The sun in the sky is bright.",
"We can see the shining sun, the bright sun. #Outdoors")
    #documents_3 = mt.preprocess_tweet(tweets_data[:5])
    # simple whitespace tokenization so the demo runs without the original
    # preprocessing pipeline
    tokens = {doc: doc.lower().split() for doc in documents}
    sim = Similarity(tokens)
    print("Vocabulary:", sim.tfidf.vocabulary_)
    print("Features:", sim._features)
    print("Matrix shape:", sim.matrix.shape)
    print("TF-IDF:", sim.matrix.todense())
#print("Soft Cosine Similarity of 1-n and 1-n documents:")
#print(similarity(tfidf_matrix, features))
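    # Hedged demo (added): pairwise soft-cosine similarities of the sample documents.
    print("Soft cosine similarity of all document pairs:")
    print(sim.similarity())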
|
{"hexsha": "4789fe3e12941a90a39645bf28f91bec48efa1c0", "size": 8075, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/calculate_similarity.py", "max_stars_repo_name": "santels/twitter_topic_detection", "max_stars_repo_head_hexsha": "543673a610dd69ff98120dd3141f9d9a9f5364ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-10-30T02:33:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-10T01:07:11.000Z", "max_issues_repo_path": "src/calculate_similarity.py", "max_issues_repo_name": "santels/twitter_topic_detection", "max_issues_repo_head_hexsha": "543673a610dd69ff98120dd3141f9d9a9f5364ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-12-10T09:56:55.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-18T14:30:57.000Z", "max_forks_repo_path": "src/calculate_similarity.py", "max_forks_repo_name": "santels/twitter_topic_detection", "max_forks_repo_head_hexsha": "543673a610dd69ff98120dd3141f9d9a9f5364ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-10-16T15:04:46.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-16T15:04:46.000Z", "avg_line_length": 35.8888888889, "max_line_length": 98, "alphanum_fraction": 0.5653250774, "include": true, "reason": "import numpy", "num_tokens": 1916}
|
[STATEMENT]
lemma con_compI [intro]:
assumes "composable t u" and "w \\ t \<frown> u"
shows "w \<frown> t \<cdot> u" and "t \<cdot> u \<frown> w"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<frown> t \<cdot> u &&& t \<cdot> u \<frown> w
[PROOF STEP]
using assms con_comp_iff con_sym
[PROOF STATE]
proof (prove)
using this:
composable t u
w \ t \<frown> u
(?w \<frown> ?t \<cdot> ?u) = (composable ?t ?u \<and> ?w \ ?t \<frown> ?u)
?t \<frown> ?u \<Longrightarrow> ?u \<frown> ?t
goal (1 subgoal):
1. w \<frown> t \<cdot> u &&& t \<cdot> u \<frown> w
[PROOF STEP]
by blast+
|
{"llama_tokens": 263, "file": "ResiduatedTransitionSystem_ResiduatedTransitionSystem", "length": 2}
|
! def_collection_file.f90 --
! Use a hypothetical extension to the Fortran syntax to make using templates
! easier
!
! Some practical difficulties:
! - a flexible-length string: how to do that? Similarly for any "compound" basic type
! - using "implicit none" in a template
!
template collection_generic
implicit none ! Useful? - should be eliminated
type, abstract :: collection
logical :: initialised = .false. ! State: not initialised, initialised (data), has filter, has next
logical :: has_filter = .false.
logical :: next_item = .false.
type(data_type) :: item
procedure(filter), pointer, nopass :: acceptable => null()
contains
procedure :: has_next => has_next_generic
procedure :: get => get_generic
procedure :: set_filter => set_filter_generic
procedure(get_next_item), deferred, pass :: get_next
end type collection
abstract interface
logical function filter( item )
import :: data_type
type(data_type), intent(in) :: item
end function filter
subroutine get_next_item( this, item, retrieved )
import :: collection, data_type
class(collection), intent(inout) :: this
type(data_type), intent(inout) :: item
logical, intent(out) :: retrieved
end subroutine
end interface
contains
! set_filter_generic --
! Register the filter function
!
! Arguments:
! this The collection object - holds the data and has knowledge of where we are
! filter_ptr Pointer to the function to be used as filter
!
subroutine set_filter_generic( this, filter )
class(collection), intent(inout) :: this
interface
logical function filter( item )
import :: data_type
type(data_type), intent(in) :: item
end function filter
end interface
this%acceptable => filter
end subroutine set_filter_generic
! has_next_generic --
! Check if there is another acceptable element in the collection
!
! Arguments:
! this The collection object - holds the data and has knowledge of where we are
!
! Result:
! True if there is such an element (this will have been retrieved!), false otherwise.
!
logical function has_next_generic( this )
class(collection), intent(inout) :: this ! The container needs to store the element and update the state
type(data_type) :: item
logical :: success
has_next_generic = .false.
do
call this%get_next( item, success )
if ( .not. success ) then
this%next_item = .false.
exit
endif
if ( this%acceptable( item ) ) then
has_next_generic = .true.
this%next_item = .true.
this%item = item
exit
endif
enddo
end function has_next_generic
! get_generic --
! Get the next (acceptable) item
!
! Arguments:
! this The container object - holds the data and has knowledge of where we are
! item The item that is retrieved
! retrieved Indicates if an item was retrieved or not
!
! Note:
! This routine has to be used in conjunction with has_next()
!
subroutine get_generic( this, item, retrieved )
class(collection), intent(inout) :: this ! The container may have to update the state
type(data_type), intent(inout) :: item
logical, intent(out) :: retrieved
if ( this%initialised .and. this%next_item ) then
item = this%item
retrieved = .true.
else
retrieved = .false.
endif
end subroutine get_generic
end template collection_generic
template collection_file_def
implicit none
use_template collection_generic
type, extends(collection) :: collection_file
integer :: lun
contains
procedure :: create => create_file
procedure :: get_next => get_next_file
end type collection_file
contains
! create_file --
! Open the file that is to serve as the container
!
! Arguments:
! this The container object - holds the data and has knowledge of where we are
! skiplines Number of lines to be skipped
!
subroutine create_file( this, filename, skiplines )
class(collection_file), intent(inout) :: this
character(len=*), intent(in) :: filename
integer, intent(in), optional :: skiplines
integer :: i
open( newunit = this%lun, file = filename )
if ( present(skiplines) ) then
do i = 1,skiplines
read( this%lun, * )
enddo
endif
this%initialised = .true.
end subroutine create_file
! get_next_file --
! Get the next item - any line will do
!
! Arguments:
! this The container object - holds the data and has knowledge of where we are
! item The item that is retrieved
! retrieved Indicates if an item was retrieved or not
!
! Note:
! This routine is actually specific to the data type! So it must be specialised in
! accordance
!
subroutine get_next_file( this, item, retrieved )
class(collection_file), intent(inout) :: this ! The container may have to update the state
type(data_type), intent(inout) :: item
logical, intent(out) :: retrieved
character(len=100) :: line ! Rather arbitrary length, could use the flexible routine read_line_from_file
integer :: ierr
read( this%lun, '(a)', iostat = ierr ) line
if ( ierr == 0 ) then
retrieved = .true.
item = trim(line)
else
retrieved = .false.
endif
end subroutine get_next_file
end template collection_file_def
module m_collection_file
use basic_types, only: string_type, assignment(=)
use_template collection_file_def, string_type => data_type
private
public :: collection_file, string_type
end module m_collection_file
!
! Alternative: ordinary strings
!
module m_collection_file_string
use_template collection_file_def, character(len=:) => data_type ! This should have the "allocatable" attribute as appropriate
!
! Maybe this alternative:
! use_template collection_file_def, (character(len=:), allocatable) => data_type
private
public :: collection_file
end module m_collection_file_string
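!
! Hedged usage sketch (added): the iteration pattern the template is designed
! for, assuming a user-supplied function `accept_all` matching the `filter`
! interface:
!
!     type(collection_file) :: coll
!     type(string_type) :: item
!     logical :: retrieved
!
!     call coll%create( 'data.txt', skiplines = 1 )
!     call coll%set_filter( accept_all )
!     do while ( coll%has_next() )
!         call coll%get( item, retrieved )
!     enddo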
|
{"hexsha": "4979a3d8718afafdf478299c93395c0912a64766", "size": 7097, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "experiments/generics/def_collection_file.f90", "max_stars_repo_name": "timcera/flibs_from_svn", "max_stars_repo_head_hexsha": "7790369ac1f0ff6e35ef43546446b32446dccc6b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/generics/def_collection_file.f90", "max_issues_repo_name": "timcera/flibs_from_svn", "max_issues_repo_head_hexsha": "7790369ac1f0ff6e35ef43546446b32446dccc6b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/generics/def_collection_file.f90", "max_forks_repo_name": "timcera/flibs_from_svn", "max_forks_repo_head_hexsha": "7790369ac1f0ff6e35ef43546446b32446dccc6b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0093023256, "max_line_length": 142, "alphanum_fraction": 0.5840495984, "num_tokens": 1473}
|
#coding:utf8
'''
Convert embedding.txt into a numpy matrix.
'''
import word2vec
import numpy as np
def main(em_file, em_result):
'''
    Convert a word2vec embedding file into a compressed numpy archive.
'''
em = word2vec.load(em_file)
    vec = em.vectors
word2id = em.vocab_hash
# d = dict(vector = vec, word2id = word2id)
# t.save(d,em_result)
np.savez_compressed(em_result,vector=vec,word2id=word2id)
if __name__ == '__main__':
import fire
fire.Fire()
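# Hedged usage sketch (added): reload the archive written by main();
# allow_pickle is required because word2id is stored as an object array.
#   data = np.load('embedding.npz', allow_pickle=True)
#   vector = data['vector']
#   word2id = data['word2id'].item()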
|
{"hexsha": "797ec03737b6473300d221af45cfef214422759a", "size": 431, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/data_process/embedding2matrix.py", "max_stars_repo_name": "phasorhand/PyTorchText", "max_stars_repo_head_hexsha": "dcadcba44af0c3731b82d9db1c77f2968d4feac0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1136, "max_stars_repo_stars_event_min_datetime": "2017-08-16T08:49:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:49:41.000Z", "max_issues_repo_path": "scripts/data_process/embedding2matrix.py", "max_issues_repo_name": "LydonL/PyTorchText", "max_issues_repo_head_hexsha": "dcadcba44af0c3731b82d9db1c77f2968d4feac0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2017-08-29T10:13:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-19T05:37:17.000Z", "max_forks_repo_path": "scripts/data_process/embedding2matrix.py", "max_forks_repo_name": "LydonL/PyTorchText", "max_forks_repo_head_hexsha": "dcadcba44af0c3731b82d9db1c77f2968d4feac0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 383, "max_forks_repo_forks_event_min_datetime": "2017-08-16T08:45:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T06:34:09.000Z", "avg_line_length": 17.9583333333, "max_line_length": 61, "alphanum_fraction": 0.6473317865, "include": true, "reason": "import numpy", "num_tokens": 138}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 14:44:17 2019
@author: mavro
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
#%%
np.random.seed(0)
ploty=np.linspace(0,719,num=720)
quadratic_coeff=3e-4
leftx=np.array([200+(y**2)*quadratic_coeff\
+np.random.randint(-50,high=51)\
for y in ploty])
rightx=np.array([900+(y**2)*quadratic_coeff\
+np.random.randint(-50,high=51)\
for y in ploty])
leftx=leftx[::-1]
rightx=rightx[::-1]
left_fit=np.polyfit(ploty,leftx,2)
left_fitx=left_fit[0]*ploty**2+left_fit[1]*ploty+left_fit[2]
right_fit=np.polyfit(ploty,rightx,2)
right_fitx=right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
mark_size=3
plt.plot(leftx, ploty,'o', color='red', markersize=mark_size)
plt.plot(rightx, ploty,'o', color='blue', markersize=mark_size)
plt.xlim(0,1280)
plt.ylim(0,720)
plt.plot(left_fitx,ploty, color='green', linewidth=3)
plt.plot(right_fitx,ploty, color='green', linewidth=3)
plt.gca().invert_yaxis()
#%%
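# Added note (hedged): for a quadratic fit x = A*y**2 + B*y + C, the radius of
# curvature is R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|; the two lines below
# evaluate it at the bottom of the image, y_eval = max(ploty).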
y_eval=np.max(ploty)
left_curverad=((1+(2*left_fit[0]*y_eval+left_fit[1])**2)**1.5)/np.absolute(2*left_fit[0])
right_curverad=((1+(2*right_fit[0]*y_eval+right_fit[1])**2)**1.5)/np.absolute(2*right_fit[0])
print ('left_curverad: ',left_curverad, 'right_curverad: ',right_curverad)
#%%
|
{"hexsha": "8e70c7c8ef945788b0692bc29944851aeecb87d1", "size": 1340, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/fake_data.py", "max_stars_repo_name": "mavro10600/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "8713256075c0550974af6722187b0b6dec572f8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/fake_data.py", "max_issues_repo_name": "mavro10600/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "8713256075c0550974af6722187b0b6dec572f8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/fake_data.py", "max_forks_repo_name": "mavro10600/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "8713256075c0550974af6722187b0b6dec572f8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3333333333, "max_line_length": 93, "alphanum_fraction": 0.6746268657, "include": true, "reason": "import numpy", "num_tokens": 455}
|
#include <boost/spirit/repository/home/qi/nonterminal/subrule.hpp>
|
{"hexsha": "2c0e3f9b742df61cec1963e5b8c3aaa37b5042e5", "size": 67, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_spirit_repository_home_qi_nonterminal_subrule.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_spirit_repository_home_qi_nonterminal_subrule.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_spirit_repository_home_qi_nonterminal_subrule.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 33.5, "max_line_length": 66, "alphanum_fraction": 0.8208955224, "num_tokens": 15}
|
/-
Copyright (c) 2019 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
! This file was ported from Lean 3 source module data.real.cardinality
! leanprover-community/mathlib commit 7e7aaccf9b0182576cabdde36cf1b5ad3585b70d
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Analysis.SpecificLimits.Basic
import Mathbin.Data.Rat.Denumerable
import Mathbin.Data.Set.Pointwise.Interval
import Mathbin.SetTheory.Cardinal.Continuum
/-!
# The cardinality of the reals
This file shows that the real numbers have cardinality continuum, i.e. `#ℝ = 𝔠`.
We show that `#ℝ ≤ 𝔠` by noting that every real number is determined by a Cauchy-sequence of the
form `ℕ → ℚ`, which has cardinality `𝔠`. To show that `#ℝ ≥ 𝔠` we define an injection from
`{0, 1} ^ ℕ` to `ℝ` with `f ↦ Σ n, f n * (1 / 3) ^ n`.
We conclude that all intervals with distinct endpoints have cardinality continuum.
## Main definitions
* `cardinal.cantor_function` is the function that sends `f` in `{0, 1} ^ ℕ` to `ℝ` by
`f ↦ Σ' n, f n * (1 / 3) ^ n`
## Main statements
* `cardinal.mk_real : #ℝ = 𝔠`: the reals have cardinality continuum.
* `cardinal.not_countable_real`: the universal set of real numbers is not countable.
We can use this same proof to show that all the other sets in this file are not countable.
* 8 lemmas of the form `mk_Ixy_real` for `x,y ∈ {i,o,c}` state that intervals on the reals
have cardinality continuum.
## Notation
* `𝔠` : notation for `cardinal.continuum` in locale `cardinal`, defined in `set_theory.continuum`.
## Tags
continuum, cardinality, reals, cardinality of the reals
-/
open Nat Set
open Cardinal
noncomputable section
namespace Cardinal
variable {c : ℝ} {f g : ℕ → Bool} {n : ℕ}
/-- The body of the sum in `cantor_function`.
`cantor_function_aux c f n = c ^ n` if `f n = tt`;
`cantor_function_aux c f n = 0` if `f n = ff`. -/
def cantorFunctionAux (c : ℝ) (f : ℕ → Bool) (n : ℕ) : ℝ :=
cond (f n) (c ^ n) 0
#align cardinal.cantor_function_aux Cardinal.cantorFunctionAux
@[simp]
theorem cantorFunctionAux_true (h : f n = true) : cantorFunctionAux c f n = c ^ n := by
simp [cantor_function_aux, h]
#align cardinal.cantor_function_aux_tt Cardinal.cantorFunctionAux_true
@[simp]
theorem cantorFunctionAux_false (h : f n = false) : cantorFunctionAux c f n = 0 := by
simp [cantor_function_aux, h]
#align cardinal.cantor_function_aux_ff Cardinal.cantorFunctionAux_false
theorem cantorFunctionAux_nonneg (h : 0 ≤ c) : 0 ≤ cantorFunctionAux c f n :=
by
cases h' : f n <;> simp [h']
apply pow_nonneg h
#align cardinal.cantor_function_aux_nonneg Cardinal.cantorFunctionAux_nonneg
theorem cantorFunctionAux_eq (h : f n = g n) : cantorFunctionAux c f n = cantorFunctionAux c g n :=
by simp [cantor_function_aux, h]
#align cardinal.cantor_function_aux_eq Cardinal.cantorFunctionAux_eq
theorem cantorFunctionAux_zero (f : ℕ → Bool) : cantorFunctionAux c f 0 = cond (f 0) 1 0 := by
cases h : f 0 <;> simp [h]
#align cardinal.cantor_function_aux_zero Cardinal.cantorFunctionAux_zero
theorem cantorFunctionAux_succ (f : ℕ → Bool) :
(fun n => cantorFunctionAux c f (n + 1)) = fun n =>
c * cantorFunctionAux c (fun n => f (n + 1)) n :=
by
ext n
cases h : f (n + 1) <;> simp [h, pow_succ]
#align cardinal.cantor_function_aux_succ Cardinal.cantorFunctionAux_succ
theorem summable_cantor_function (f : ℕ → Bool) (h1 : 0 ≤ c) (h2 : c < 1) :
Summable (cantorFunctionAux c f) :=
by
apply (summable_geometric_of_lt_1 h1 h2).summable_of_eq_zero_or_self
intro n; cases h : f n <;> simp [h]
#align cardinal.summable_cantor_function Cardinal.summable_cantor_function
/-- `cantor_function c (f : ℕ → bool)` is `Σ n, f n * c ^ n`, where `tt` is interpreted as `1` and
`ff` is interpreted as `0`. It is implemented using `cantor_function_aux`. -/
def cantorFunction (c : ℝ) (f : ℕ → Bool) : ℝ :=
∑' n, cantorFunctionAux c f n
#align cardinal.cantor_function Cardinal.cantorFunction
theorem cantorFunction_le (h1 : 0 ≤ c) (h2 : c < 1) (h3 : ∀ n, f n → g n) :
cantorFunction c f ≤ cantorFunction c g :=
by
apply tsum_le_tsum _ (summable_cantor_function f h1 h2) (summable_cantor_function g h1 h2)
intro n; cases h : f n; simp [h, cantor_function_aux_nonneg h1]
replace h3 : g n = tt := h3 n h; simp [h, h3]
#align cardinal.cantor_function_le Cardinal.cantorFunction_le
theorem cantorFunction_succ (f : ℕ → Bool) (h1 : 0 ≤ c) (h2 : c < 1) :
cantorFunction c f = cond (f 0) 1 0 + c * cantorFunction c fun n => f (n + 1) :=
by
rw [cantor_function, tsum_eq_zero_add (summable_cantor_function f h1 h2)]
rw [cantor_function_aux_succ, tsum_mul_left, cantor_function_aux, pow_zero]
rfl
#align cardinal.cantor_function_succ Cardinal.cantorFunction_succ
/-- `cantor_function c` is strictly increasing with if `0 < c < 1/2`, if we endow `ℕ → bool` with a
lexicographic order. The lexicographic order doesn't exist for these infinitary products, so we
explicitly write out what it means. -/
theorem increasing_cantorFunction (h1 : 0 < c) (h2 : c < 1 / 2) {n : ℕ} {f g : ℕ → Bool}
(hn : ∀ k < n, f k = g k) (fn : f n = false) (gn : g n = true) :
cantorFunction c f < cantorFunction c g :=
by
have h3 : c < 1 := by
apply h2.trans
norm_num
induction' n with n ih generalizing f g
· let f_max : ℕ → Bool := fun n => Nat.rec ff (fun _ _ => tt) n
have hf_max : ∀ n, f n → f_max n := by
intro n hn
cases n
rw [fn] at hn
contradiction
apply rfl
let g_min : ℕ → Bool := fun n => Nat.rec tt (fun _ _ => ff) n
have hg_min : ∀ n, g_min n → g n := by
intro n hn
cases n
rw [gn]
apply rfl
contradiction
apply (cantor_function_le (le_of_lt h1) h3 hf_max).trans_lt
refine' lt_of_lt_of_le _ (cantor_function_le (le_of_lt h1) h3 hg_min)
have : c / (1 - c) < 1 := by
rw [div_lt_one, lt_sub_iff_add_lt]
· convert add_lt_add h2 h2
norm_num
rwa [sub_pos]
convert this
· rw [cantor_function_succ _ (le_of_lt h1) h3, div_eq_mul_inv, ←
tsum_geometric_of_lt_1 (le_of_lt h1) h3]
apply zero_add
· refine' (tsum_eq_single 0 _).trans _
· intro n hn
cases n
contradiction
rfl
· exact cantor_function_aux_zero _
rw [cantor_function_succ f (le_of_lt h1) h3, cantor_function_succ g (le_of_lt h1) h3]
rw [hn 0 <| zero_lt_succ n]
apply add_lt_add_left
rw [mul_lt_mul_left h1]
exact ih (fun k hk => hn _ <| Nat.succ_lt_succ hk) fn gn
#align cardinal.increasing_cantor_function Cardinal.increasing_cantorFunction
/-- `cantor_function c` is injective if `0 < c < 1/2`. -/
theorem cantorFunction_injective (h1 : 0 < c) (h2 : c < 1 / 2) :
Function.Injective (cantorFunction c) :=
by
intro f g hfg
classical
by_contra h
revert hfg
have : ∃ n, f n ≠ g n := by
rw [← not_forall]
intro h'
apply h
ext
apply h'
let n := Nat.find this
have hn : ∀ k : ℕ, k < n → f k = g k := by
intro k hk
apply of_not_not
exact Nat.find_min this hk
cases fn : f n
· apply ne_of_lt
refine' increasing_cantor_function h1 h2 hn fn _
apply Bool.eq_true_of_not_eq_false
rw [← fn]
apply Ne.symm
exact Nat.find_spec this
· apply ne_of_gt
refine' increasing_cantor_function h1 h2 (fun k hk => (hn k hk).symm) _ fn
apply Bool.eq_false_of_not_eq_true
rw [← fn]
apply Ne.symm
exact Nat.find_spec this
#align cardinal.cantor_function_injective Cardinal.cantorFunction_injective
/-- The cardinality of the reals, as a type. -/
theorem mk_real : (#ℝ) = 𝔠 := by
apply le_antisymm
· rw [real.equiv_Cauchy.cardinal_eq]
apply mk_quotient_le.trans
apply (mk_subtype_le _).trans_eq
rw [← power_def, mk_nat, mk_rat, aleph_0_power_aleph_0]
· convert mk_le_of_injective (cantor_function_injective _ _)
rw [← power_def, mk_bool, mk_nat, two_power_aleph_0]
exact 1 / 3
norm_num
norm_num
#align cardinal.mk_real Cardinal.mk_real
/-- The cardinality of the reals, as a set. -/
theorem mk_univ_real : (#(Set.univ : Set ℝ)) = 𝔠 := by rw [mk_univ, mk_real]
#align cardinal.mk_univ_real Cardinal.mk_univ_real
/-- **Non-Denumerability of the Continuum**: The reals are not countable. -/
theorem not_countable_real : ¬(Set.univ : Set ℝ).Countable :=
by
rw [← le_aleph_0_iff_set_countable, not_le, mk_univ_real]
apply cantor
#align cardinal.not_countable_real Cardinal.not_countable_real
/-- The cardinality of the interval (a, ∞). -/
theorem mk_Ioi_real (a : ℝ) : (#Ioi a) = 𝔠 :=
by
refine' le_antisymm (mk_real ▸ mk_set_le _) _
rw [← not_lt]
intro h
refine' ne_of_lt _ mk_univ_real
have hu : Iio a ∪ {a} ∪ Ioi a = Set.univ :=
by
convert Iic_union_Ioi
exact Iio_union_right
rw [← hu]
refine' lt_of_le_of_lt (mk_union_le _ _) _
refine' lt_of_le_of_lt (add_le_add_right (mk_union_le _ _) _) _
have h2 : (fun x => a + a - x) '' Ioi a = Iio a :=
by
convert image_const_sub_Ioi _ _
simp
rw [← h2]
refine' add_lt_of_lt (cantor _).le _ h
refine' add_lt_of_lt (cantor _).le (mk_image_le.trans_lt h) _
rw [mk_singleton]
exact one_lt_aleph_0.trans (cantor _)
#align cardinal.mk_Ioi_real Cardinal.mk_Ioi_real
/-- The cardinality of the interval [a, ∞). -/
theorem mk_Ici_real (a : ℝ) : (#Ici a) = 𝔠 :=
le_antisymm (mk_real ▸ mk_set_le _) (mk_Ioi_real a ▸ mk_le_mk_of_subset Ioi_subset_Ici_self)
#align cardinal.mk_Ici_real Cardinal.mk_Ici_real
/-- The cardinality of the interval (-∞, a). -/
theorem mk_Iio_real (a : ℝ) : (#Iio a) = 𝔠 :=
by
refine' le_antisymm (mk_real ▸ mk_set_le _) _
have h2 : (fun x => a + a - x) '' Iio a = Ioi a :=
by
convert image_const_sub_Iio _ _
simp
exact mk_Ioi_real a ▸ h2 ▸ mk_image_le
#align cardinal.mk_Iio_real Cardinal.mk_Iio_real
/-- The cardinality of the interval (-∞, a]. -/
theorem mk_Iic_real (a : ℝ) : (#Iic a) = 𝔠 :=
le_antisymm (mk_real ▸ mk_set_le _) (mk_Iio_real a ▸ mk_le_mk_of_subset Iio_subset_Iic_self)
#align cardinal.mk_Iic_real Cardinal.mk_Iic_real
/-- The cardinality of the interval (a, b). -/
theorem mk_Ioo_real {a b : ℝ} (h : a < b) : (#Ioo a b) = 𝔠 :=
by
refine' le_antisymm (mk_real ▸ mk_set_le _) _
have h1 : (#(fun x => x - a) '' Ioo a b) ≤ (#Ioo a b) := mk_image_le
refine' le_trans _ h1
rw [image_sub_const_Ioo, sub_self]
replace h := sub_pos_of_lt h
have h2 : (#Inv.inv '' Ioo 0 (b - a)) ≤ (#Ioo 0 (b - a)) := mk_image_le
refine' le_trans _ h2
rw [image_inv, inv_Ioo_0_left h, mk_Ioi_real]
#align cardinal.mk_Ioo_real Cardinal.mk_Ioo_real
/-- The cardinality of the interval [a, b). -/
theorem mk_Ico_real {a b : ℝ} (h : a < b) : (#Ico a b) = 𝔠 :=
le_antisymm (mk_real ▸ mk_set_le _) (mk_Ioo_real h ▸ mk_le_mk_of_subset Ioo_subset_Ico_self)
#align cardinal.mk_Ico_real Cardinal.mk_Ico_real
/-- The cardinality of the interval [a, b]. -/
theorem mk_Icc_real {a b : ℝ} (h : a < b) : (#Icc a b) = 𝔠 :=
le_antisymm (mk_real ▸ mk_set_le _) (mk_Ioo_real h ▸ mk_le_mk_of_subset Ioo_subset_Icc_self)
#align cardinal.mk_Icc_real Cardinal.mk_Icc_real
/-- The cardinality of the interval (a, b]. -/
theorem mk_Ioc_real {a b : ℝ} (h : a < b) : (#Ioc a b) = 𝔠 :=
le_antisymm (mk_real ▸ mk_set_le _) (mk_Ioo_real h ▸ mk_le_mk_of_subset Ioo_subset_Ioc_self)
#align cardinal.mk_Ioc_real Cardinal.mk_Ioc_real
end Cardinal
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Data/Real/Cardinality.lean"}
|
//---------------------------------------------------------------------------//
// Copyright (c) 2018-2020 Mikhail Komarov <nemo@nil.foundation>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//---------------------------------------------------------------------------//
#ifndef CRYPTO3_BLOCK_AES_HPP
#define CRYPTO3_BLOCK_AES_HPP
#include <boost/crypto3/block/rijndael.hpp>
namespace boost {
namespace crypto3 {
namespace block {
/*!
* @brief AES block cipher. Equals to Rijndael block cipher with 128 bit block length.
*/
template<std::size_t KeyBits>
using aes = rijndael<KeyBits, 128>;
} // namespace block
} // namespace crypto3
} // namespace boost
#endif // CRYPTO3_BLOCK_AES_HPP
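// Illustrative only (this header defines just the alias above, no cipher API):
// `aes<KeyBits>` pins the Rijndael block length to 128 bits and leaves the key
// length free, so for example
//
// static_assert(std::is_same<
// boost::crypto3::block::aes<256>,
// boost::crypto3::block::rijndael<256, 128>>::value,
// "aes<KeyBits> is rijndael<KeyBits, 128>");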
|
{"hexsha": "2ea16ea82ff1372d9df769936bd418e6918b2d4e", "size": 897, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/crypto3/block/aes.hpp", "max_stars_repo_name": "NilFoundation/boost-crypto", "max_stars_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2020-09-02T06:19:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-07T04:55:03.000Z", "max_issues_repo_path": "include/boost/crypto3/block/aes.hpp", "max_issues_repo_name": "NilFoundation/boost-crypto", "max_issues_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2020-04-06T21:49:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T04:54:51.000Z", "max_forks_repo_path": "include/boost/crypto3/block/aes.hpp", "max_forks_repo_name": "NilFoundation/boost-crypto", "max_forks_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-02-13T21:14:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T21:14:37.000Z", "avg_line_length": 33.2222222222, "max_line_length": 98, "alphanum_fraction": 0.5362318841, "num_tokens": 188}
|
import numpy as np
import nibabel as nib
import pandas as pd
import os
import tensorflow as tf
import math
from neuron.layers import SpatialTransformer
from multi_affine.datagenerators import indicator, give_index_atlas
from multi_affine.utils import load_multi_atlas
from pandas import ExcelWriter
def eval_affine(data_file, ev_file, atlas_dir, warp_dir, seg_dir, prepro_dir, atlas_list, subset, M, save_seg, print_output=True):
"""
Calculates:
1. the dice overlap score between ground truth segmentations
(in original space) and segmentations obtained after inverse affine
transformation for a given file.
    2. the embryonic volume error between the EV found in original space and the EV measured in VR.
    Args:
        data_file: excel file containing images for which we will calculate the Dice and EV error (if available)
        ev_file: excel file containing all the EV_VR
        atlas_dir: directory where eligible atlas files can be found
        warp_dir: directory where used affine transformations are saved
        seg_dir: directory containing ground truth segmentations
        prepro_dir: directory containing the prepro .npz files
        atlas_list: list of eligible atlases
        subset: set of the data for which we calculate Dice and EV error
        M: number of eligible atlases used for training, if M>1 we use majority voting
        save_seg: if segmentations are saved
    Returns:
        Dice_affine: list with Dice scores per image, ordered as data_file
        EV_affine: list with EV error per image, ordered as data_file
        Updated data file
"""
# load needed files, initiate arrays for results
data = pd.read_excel(data_file)
data = data.iloc[:,0:4]
ev = pd.read_excel(ev_file)
Dice_affine = []
EV_affine = []
EV_GT = []
EV_AI = []
# load ga + segs atlases
atlasses, segs, Age, atlas_files, A_t, A_b = load_multi_atlas(atlas_dir, atlas_list, False, True, True)
# get list of all available gt segmentations
aval_segs = os.listdir(seg_dir)
# calculate measures for every image registered
for i in range(len(data)):
        # get indicator + week number to select the right atlas for comparison
indi = indicator(data.iat[i,2],M,Age, atlas_files)
idx = give_index_atlas(indi)
week = np.floor(data.iat[i,2]/7)
file_name = os.path.basename(data.iat[i,0])
# load the learned affine transformation + apply inverse to obtain segmentation in original image
T = np.load(warp_dir+'/'+file_name.split('.nii')[0]+'_warp_affine.npy')
for j in range(M):
seg = apply_affine_inv(T, segs[idx[j],:,:,:,0], seg=True)
if j == 0:
Seg_affine_inv = seg[np.newaxis, ...]
else:
Seg_affine_inv = np.concatenate((Seg_affine_inv, seg[np.newaxis, ...]),axis=0)
if M>1:
Seg_affine = majority_voting(Seg_affine_inv)
else:
Seg_affine = Seg_affine_inv[0,:,:,:]
if save_seg == True:
new_nifti = nib.Nifti1Image(Seg_affine, np.identity(4))
nib.save(new_nifti, data_file.split('/outcome')[0] + '/seg_' + file_name.split('.nii')[0]+'_moved_nonrigid_'+atlas_files[ j].split(atlas_dir +'/')[1].split('.npz')[0]+'.nii.gz')
file_seg_orig = data.iat[i,0].split('/'+subset+'/')[1].split('-')[0].split('.nii')[0]+'_seg.nii.gz'
        # if the ground truth is available, calculate the Dice score; note the GT segmentation is in the space after preprocessing.
if file_seg_orig in aval_segs:
Seg_orig = nib.load(seg_dir+'/'+file_seg_orig)
Seg_orig = Seg_orig.get_fdata()
dice = calculate_dice(Seg_orig, Seg_affine)
Dice_affine.append(dice)
else:
Dice_affine.append(np.nan)
Dice = [data.iat[i,0]]
if M>1:
if file_seg_orig in aval_segs:
Seg_orig = nib.load(seg_dir+'/'+file_seg_orig)
Seg_orig = Seg_orig.get_fdata()
m = 0
for j in range(len(atlas_files)):
if indi[j] == 1:
seg = Seg_affine_inv[m,:,:,:]
m+=1
dice = calculate_dice(Seg_orig, seg)
Dice.append(dice)
else:
Dice.append(np.nan)
assert(m==M)
else:
for j in range(len(atlas_files)):
Dice.append(np.nan)
        # if the EV is available, calculate the embryonic volume error; EV_VR is
        # measured in the original image, hence we have to compensate for our preprocessing.
        df = ev.loc[ev['ID'] == int(data.iat[i,1])]
        # initialize here so `found` is defined even when no EV row matches this ID
        found = False
        if len(df)>0:
for j in range(len(df)):
if np.floor(df.iat[j,1]/7) == week:
EV_VR = df.iat[j,2]
found = True
if found == False:
EV_affine.append(np.nan)
EV_GT.append(np.nan)
EV_AI.append(np.nan)
else:
vox_size = nib.load(data.iat[i,0]).header['pixdim'][1]
factor = np.load(prepro_dir+'/' + data.iat[i,0].split('/'+subset+'/')[1].split('.nii')[0]+'_preprocess.npz')['zoom_factor']
ev_error, ev_AI = calculate_ev_error(Seg_affine, EV_VR, vox_size, factor)
EV_affine.append(ev_error)
EV_GT.append(EV_VR)
EV_AI.append(ev_AI)
else:
EV_affine.append(np.nan)
EV_GT.append(np.nan)
EV_AI.append(np.nan)
EV = [data.iat[i,0]]
if M>1 and found == True:
m=0
for j in range(len(atlas_files)):
if indi[j] == 1:
seg = Seg_affine_inv[m,:,:,:]
m+=1
                    # calculate_ev_error returns (error, EV_AI); only the error is recorded here
                    ev_error, _ = calculate_ev_error(seg, EV_VR, vox_size, factor)
EV.append(ev_error)
else:
EV.append(np.nan)
assert(m==M)
else:
for j in range(len(atlas_files)):
EV.append(np.nan)
data['Dice'] = Dice_affine
data['Ev'] = EV_affine
data['Ev_GT'] = EV_GT
data['Ev_AI'] = EV_AI
writer = ExcelWriter(data_file)
data.to_excel(writer, index=False)
writer.save()
summarize_results(data,data_file.split('/outcome')[0])
return Dice_affine, EV_affine
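# Hedged usage sketch for eval_affine: every path and the atlas list below are
# hypothetical placeholders, not files shipped with this module.
# dice, ev = eval_affine(
#     data_file='outcome/data.xlsx', ev_file='ev_vr.xlsx', atlas_dir='atlases',
#     warp_dir='warps', seg_dir='segs', prepro_dir='prepro',
#     atlas_list=['atlas_w09.npz'], subset='test', M=1, save_seg=False)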
def apply_affine_inv(T, img, seg=False):
"""
Applies the inverse affine transformation of transformation T to image img.
Note that img can be a segmentation.
Args:
T: affine transformation, output of Voxelmorph, size (1,12)
img: image to be transformed
        seg: true if image is a segmentation, then output image is a segmentation
Returns:
Y: image/segmentation after applying the inverse affine transform of T
"""
tf.enable_eager_execution()
assert(T.shape == (1,12))
TT = np.zeros((4,4))
TT[0,:] = T[0,0:4]
TT[1,:] = T[0,4:8]
TT[2,:] = T[0,8:]
TT += np.identity(4)
T_inv = np.linalg.inv(TT)
T_inv += -np.identity(4)
TT_inv = np.reshape(np.concatenate([T_inv[0,0:4],T_inv[1,0:4],T_inv[2,0:4]]),[1,12])
Y = SpatialTransformer(interp_method='linear', indexing='ij')([tf.cast(img[np.newaxis,...,np.newaxis],tf.float32),tf.cast(TT_inv,tf.float32)])
Y = Y.numpy()
Y = Y[0,:,:,:,0]
if seg == True:
Y[Y>=0.5] = 1
Y[Y<0.5] = 0
return Y
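# Sanity sketch (assumes the TF1-style eager setup used above): an all-zero T
# encodes the identity, since TT becomes I4, so the inverse transform should
# return the input volume unchanged.
# img = np.random.rand(8, 8, 8).astype(np.float32)
# out = apply_affine_inv(np.zeros((1, 12)), img)
# np.testing.assert_allclose(out, img, atol=1e-4)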
def calculate_dice(seg1, seg2):
"""
Calculate DICE overlap score between seg1 and seg2.
Args:
seg1: ground truth segmentation.
seg2: segmentation to compare with seg1
Returns:
dice overlap score
"""
    seg1 = np.asarray(seg1).astype(bool)
    seg2 = np.asarray(seg2).astype(bool)
# Compute Dice coefficient
intersection = np.logical_and(seg1, seg2)
dice = 2. * intersection.sum() / (seg1.sum() + seg2.sum())
return dice
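# Tiny worked example: one overlapping voxel between masks of size 2 and 1
# gives Dice = 2 * 1 / (2 + 1) = 2/3.
# calculate_dice(np.array([[1, 1], [0, 0]]), np.array([[1, 0], [0, 0]]))  # ~0.667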
def calculate_ev_error(Seg_affine_inv, EV_VR, vox_size, factor):
"""
    Function to calculate the error in embryonic volume between the found Seg_affine_inv and EV_VR (ground truth measured in VR).
    Args:
        Seg_affine_inv: found segmentation in original space
        EV_VR: ground truth EV measured in VR
        vox_size: voxel size of original image
        factor: zooming factor used during preprocessing
    Returns:
        Relative error in embryonic volume, and the volume EV_AI computed from the segmentation
"""
vox_size = vox_size*4*(1/factor)
EV_AI = vox_size*vox_size*vox_size*np.sum(Seg_affine_inv)*(1/1000)
EV_AI=np.array(EV_AI)
EV_VR=np.array(EV_VR)
return np.abs(((EV_AI-EV_VR)/EV_VR)), EV_AI
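# Worked numeric example (all values illustrative): vox_size=0.1 and factor=2
# give an effective voxel of 0.1 * 4 * (1/2) = 0.2 mm, so a 1000-voxel mask
# measures 0.2**3 * 1000 / 1000 = 0.008 ml; against EV_VR = 0.01 ml the
# relative error is |0.008 - 0.01| / 0.01 = 0.2.
# err, ev_ai = calculate_ev_error(np.ones((10, 10, 10)), 0.01, 0.1, 2)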
def summarize_results(dataframe, save_dir):
"""
Function to create .txt file containing a summary of the results.
Args:
dataframe: the dataframe with performance measures per image
save_dir: directory to save the .txt file
Returns:
.txt file giving the mean landmark error, EV error, Dice score, +/- std
Note: mean landmark error is in mm: voxelsize in atlas space 0.62 mm
"""
mean_dice = dataframe['Dice'].mean()
std_dice = dataframe['Dice'].std()
mean_ev = dataframe['Ev'].mean()
std_ev = dataframe['Ev'].std()
mean_le = dataframe[3].mean()*0.62
std_le = dataframe[3].std()*0.62
text_file = open(save_dir + '/summary_results.txt', 'w+')
text_file.write('landmark error: ' +str(mean_le) +'+/-' +str(std_le)+'\n')
text_file.write('EV error: ' +str(mean_ev) +'+/-' +str(std_ev)+'\n')
text_file.write('Dice: ' +str(mean_dice) +'+/-' +str(std_dice)+'\n')
def majority_voting(seg):
"""
    Function that returns the segmentation resulting from majority voting over the N segmentations in seg.
Args:
seg: Nxdims numpy array with resulting segmentations
Returns:
major_seg: resulting segmentation after majority voting
"""
majority_limit = seg.shape[0]/2
votes = np.sum(seg, axis=0)
votes[votes <= majority_limit] = 0
votes[votes > majority_limit] = 1
major_seg = votes
return major_seg
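# Tiny worked example: with N=3 the majority limit is 1.5, so a voxel flagged
# by 2 of the 3 segmentations survives the vote.
# seg = np.stack([np.ones((2, 2)), np.ones((2, 2)), np.zeros((2, 2))])
# majority_voting(seg)  # -> array of ones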
|
{"hexsha": "fa0359d25a66b8abe356c2a96b8847d63149c7db", "size": 10369, "ext": "py", "lang": "Python", "max_stars_repo_path": "multi_affine/eval.py", "max_stars_repo_name": "wapbastiaansen/multi-atlas-seg-reg", "max_stars_repo_head_hexsha": "6d406fcabf24aa4393e602dc4bb947c670731da9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multi_affine/eval.py", "max_issues_repo_name": "wapbastiaansen/multi-atlas-seg-reg", "max_issues_repo_head_hexsha": "6d406fcabf24aa4393e602dc4bb947c670731da9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multi_affine/eval.py", "max_forks_repo_name": "wapbastiaansen/multi-atlas-seg-reg", "max_forks_repo_head_hexsha": "6d406fcabf24aa4393e602dc4bb947c670731da9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4485049834, "max_line_length": 192, "alphanum_fraction": 0.5922461182, "include": true, "reason": "import numpy", "num_tokens": 2614}
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
#include "Query.h"
#include "Common/Common.h"
using namespace std;
using namespace Common;
using namespace Query;
const StringLiteral TraceType("QueryAddressTest");
class QueryAddressTest
{
protected:
static void TestQueryAddress(
wstring const & addressString,
wstring const & targetedSegment,
wstring const & expectedSegmentAddress,
wstring const & expectedSegmentAddressMetadata,
ErrorCodeValue::Enum const & expectedError = ErrorCodeValue::Success);
};
BOOST_FIXTURE_TEST_SUITE(QueryAddressTestSuite,QueryAddressTest)
BOOST_AUTO_TEST_CASE(GatewayDestinationAddress)
{
TestQueryAddress(L"/", L"/", L".", L"");
}
BOOST_AUTO_TEST_CASE(NonGatewayDestinationSegment)
{
TestQueryAddress(L"/Segment1/Segment2[Metadata]", L"Segment2", L".", L"");
}
BOOST_AUTO_TEST_CASE(AddressWithoutMetadata)
{
TestQueryAddress(L"/Segment1/Segment2", L"Segment1", L"Segment2", L"");
}
BOOST_AUTO_TEST_CASE(AddressWithMetadata)
{
TestQueryAddress(L"/Test[Metadata]", L"/", L"Test", L"Metadata");
}
BOOST_AUTO_TEST_CASE(NonForwardableAddress)
{
TestQueryAddress(L"/Segment1/Segment2", L"Segment3", L"", L"", ErrorCodeValue::InvalidAddress);
}
BOOST_AUTO_TEST_SUITE_END()
void QueryAddressTest::TestQueryAddress(
wstring const & addressString,
wstring const & targetedSegment,
wstring const & expectedSegmentAddress,
wstring const & expectedSegmentAddressMetadata,
ErrorCodeValue::Enum const &expectedErrorFromSegmentParsing)
{
QueryAddress address(addressString);
wstring nextSegment, nextSegmentMetadata;
auto error = address.GetNextSegmentTo(targetedSegment, nextSegment, nextSegmentMetadata);
VERIFY_IS_TRUE(
error.IsError(expectedErrorFromSegmentParsing),
wformatString("GetNextSegmentTo returned {0}, and the expected error is {1}", error, expectedErrorFromSegmentParsing).c_str());
if (error.IsSuccess())
{
VERIFY_IS_TRUE(
nextSegment == expectedSegmentAddress,
wformatString("Next segment address did not match. Expected = {0}, Found = {1}", expectedSegmentAddress, nextSegment).c_str());
VERIFY_IS_TRUE(
nextSegmentMetadata == expectedSegmentAddressMetadata,
wformatString("Next segment address metadata did not match. Expected = {0}, Found = {1}", expectedSegmentAddressMetadata, nextSegmentMetadata).c_str());
}
}
|
{"hexsha": "bd7cc0f1ba0e4fa5ff8ff38c591a885cafb12f92", "size": 2813, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/prod/src/query/QueryAddress.Test.cpp", "max_stars_repo_name": "vishnuk007/service-fabric", "max_stars_repo_head_hexsha": "d0afdea185ae932cc3c9eacf179692e6fddbc630", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2542.0, "max_stars_repo_stars_event_min_datetime": "2018-03-14T21:56:12.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-06T01:18:20.000Z", "max_issues_repo_path": "src/prod/src/query/QueryAddress.Test.cpp", "max_issues_repo_name": "vishnuk007/service-fabric", "max_issues_repo_head_hexsha": "d0afdea185ae932cc3c9eacf179692e6fddbc630", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 994.0, "max_issues_repo_issues_event_min_datetime": "2019-05-07T02:39:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:23:04.000Z", "max_forks_repo_path": "src/prod/src/query/QueryAddress.Test.cpp", "max_forks_repo_name": "vishnuk007/service-fabric", "max_forks_repo_head_hexsha": "d0afdea185ae932cc3c9eacf179692e6fddbc630", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 300.0, "max_forks_repo_forks_event_min_datetime": "2018-03-14T21:57:17.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-06T20:07:00.000Z", "avg_line_length": 32.7093023256, "max_line_length": 164, "alphanum_fraction": 0.6978314966, "num_tokens": 605}
|
function affordable_real(
irreducible_characters,
multiplicities=fill(1, length(irreducible_characters)),
)
irr_real = similar(irreducible_characters, 0)
mls_real = similar(multiplicities, 0)
for (i, χ) in pairs(irreducible_characters)
ι = Characters.frobenius_schur(χ)
if abs(ι) == 1 # real or quaternionic
@debug "real/quaternionic:" χ
push!(irr_real, χ)
push!(mls_real, multiplicities[i])
else # complex one...
cχ = conj(χ)
k = findfirst(==(cχ), irreducible_characters)
@assert k !== nothing
@debug "complex" χ conj(χ)=irreducible_characters[k]
            if k > i # ... whose conjugate we haven't already processed
@assert multiplicities[i] == multiplicities[k]
push!(irr_real, χ + cχ)
push!(mls_real, multiplicities[i])
end
end
end
return irr_real, mls_real
end
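# Hedged sketch of the pairing rule above: a complex irreducible character χ
# (Frobenius–Schur indicator 0) is merged with its conjugate into the real
# character χ + conj(χ) and recorded once, e.g.
# irr_r, m_r = affordable_real(irreducible_characters(tbl))  # tbl::CharacterTable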
"""
symmetry_adapted_basis([T::Type,] G::AbstractPermutationGroup[, S=Rational{Int};
semisimple=false])
Compute a basis for the linear space `ℝⁿ` (where `n = degree(G)`) which is
invariant under the symmetry of `G`.
The basis is returned as a vector of `DirectSummand{T}`s (blocks) corresponding
to the irreducible characters of `G`. The blocks are orthogonal to **each other**,
however vectors within a single block may *not* be orthogonal.
If `T<:LinearAlgebra.BlasFloat` BLAS routines will be used to orthogonalize
vectors within each `DirectSummand`.
Arguments:
* `S` controls the types of `Cyclotomic`s used in the computation of the
  character table. Exact types are preferred. For larger groups `G`,
  `Rational{BigInt}` might be necessary.
* `T` controls the type of coefficients of the returned basis.
* `semisimple`: if set to `false` (the default), an effort is made to find a
  minimal projection system, so that each block defines a projection onto a
  single simple summand within each (isotypical) block. Otherwise an
  isomorphism to the semisimple decomposition is computed.
!!! Note:
    Each returned block (a `DirectSummand`) is invariant under the action of `G`,
    which means that the action may still e.g. permute (row) vectors, but only
    *within* each block.
    When `semisimple=true` the blocks constitute an isomorphism. Otherwise blocks
    may represent only a projection onto the commutant of the appropriate matrix
    algebra (which has in general lower dimension). This happens precisely when
    `issimple` on a block returns `true` and `SymbolicWedderburn.degree(ds) == 1`.
"""
function symmetry_adapted_basis(
G::PermutationGroups.AbstractPermutationGroup,
S::Type = Rational{Int};
semisimple::Bool = false,
)
tbl = CharacterTable(S, G)
return symmetry_adapted_basis(eltype(tbl), tbl, semisimple = semisimple)
end
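# Hedged usage sketch (the generators below are illustrative and assume
# PermutationGroups.jl is loaded):
# G = PermGroup(perm"(1,2,3,4)", perm"(1,2)")  # symmetric group acting on 1:4
# sa = symmetry_adapted_basis(Float64, G)      # Vector of DirectSummand{Float64}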
symmetry_adapted_basis(
T::Type,
G::PermutationGroups.AbstractPermutationGroup,
S::Type = Rational{Int};
semisimple::Bool = false,
) = symmetry_adapted_basis(T, CharacterTable(S, G), semisimple = semisimple)
function symmetry_adapted_basis(
T::Type,
tbl::CharacterTable;
semisimple::Bool = false,
)
irr, multips = _constituents_decomposition(
action_character(conjugacy_classes(tbl), tbl),
tbl,
)
if T <: Real
irr, multips = affordable_real(irr, multips)
end
if semisimple || all(isone ∘ degree, irr)
return _symmetry_adapted_basis(T, irr, multips)
else
RG = _group_algebra(parent(tbl))
return _symmetry_adapted_basis(T, irr, multips, RG)
end
end
"""
symmetry_adapted_basis([T::Type,] G::Group, action, basis[, S=Rational{Int}];
semisimple=false)
Compute a decomposition of `basis` into (semi)simple subspaces which are invariant under
the action of `G`.
It is assumed that `G` acts on a subset of `basis` and the action needs to be
extended to the whole `basis`. If `G` is a permutation group already acting on
the whole `basis`, a call to `symmetry_adapted_basis(G)` is preferred.
* For inducing the action `basis` needs to be indexable and iterable
(e.g. in the form of an `AbstractVector`).
"""
function symmetry_adapted_basis(
G::Group,
action::Action,
basis,
S::Type = Rational{Int};
semisimple=false,
)
tbl = CharacterTable(S, G)
ehom = CachedExtensionHomomorphism(parent(tbl), action, basis, precompute=true)
return symmetry_adapted_basis(eltype(tbl), tbl, ehom, semisimple=semisimple)
end
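# Hedged sketch of the induced-action variant: `MyAction` and `monomials` are
# hypothetical stand-ins; `basis` only needs to be indexable and iterable.
# sa = symmetry_adapted_basis(G, MyAction(), monomials)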
function symmetry_adapted_basis(
T::Type,
G::Group,
action::Action,
basis,
S::Type = Rational{Int};
semisimple=false,
)
tbl = CharacterTable(S, G)
ehom = CachedExtensionHomomorphism(parent(tbl), action, basis, precompute=true)
return symmetry_adapted_basis(T, tbl, ehom, semisimple=semisimple)
end
function symmetry_adapted_basis(
T::Type,
tbl::CharacterTable,
ehom::InducedActionHomomorphism;
semisimple=false,
)
ψ = action_character(ehom, tbl)
irr, multips = _constituents_decomposition(ψ, tbl)
    if T <: Real
        irr, multips = affordable_real(irr, multips)
        @debug "Decomposition into real character spaces:
        degrees:        $(join([lpad(d, 6) for d in degree.(irr)], ""))
        multiplicities: $(join([lpad(m, 6) for m in multips], ""))"
    end
if semisimple || all(isone ∘ degree, irr)
return _symmetry_adapted_basis(T, irr, multips, ehom)
else
RG = _group_algebra(parent(tbl))
return _symmetry_adapted_basis(T, irr, multips, RG, ehom)
end
end
function _constituents_decomposition(ψ::Character, tbl::CharacterTable)
irr = irreducible_characters(tbl)
degrees = degree.(irr)
multiplicities = constituents(ψ)
@debug "Decomposition into character spaces:
degrees: $(join([lpad(d, 6) for d in degrees], ""))
multiplicities: $(join([lpad(m, 6) for m in multiplicities], ""))"
    @assert dot(multiplicities, degrees) == degree(ψ) "Something went wrong: characters do not constitute a complete basis for action: $(dot(multiplicities, degrees)) ≠ $(degree(ψ))"
present_irreps = [i for (i, m) in enumerate(multiplicities) if m ≠ 0]
return irr[present_irreps], multiplicities[present_irreps]
end
function _group_algebra(G::Group)
@assert isfinite(G)
b = StarAlgebras.Basis{UInt16}(vec(collect(G)))
RG = if order(Int, G) <= (typemax(UInt16)>>2)
StarAlgebra(G, b, (length(b), length(b)), precompute=true)
# cache is about ~ 1Gb
else
StarAlgebra(G, b)
end
return RG
end
function _symmetry_adapted_basis(
T::Type,
irr::AbstractVector{<:Character},
multiplicities::AbstractVector{<:Integer},
hom=nothing
)
res = map(zip(irr, multiplicities)) do (µ, m)
@spawn_compat begin
µT = eltype(µ) == T ? µ : Character{T}(µ)
image = hom === nothing ? image_basis(µT) : image_basis(hom, µT)
simple = size(image, 1) == m
deg = degree(µ)
@assert size(image, 1) == (simple ? m : m*deg) "incompatible projection dimension: $(size(image, 1)) ≠ $(simple ? m : m*deg)"
if deg == 1
@assert simple "Central projection associated to character is not simple unless its degree == 1"
end
DirectSummand(image, m, deg, simple)
end
end
return fetch.(res)
end
function _symmetry_adapted_basis(
T::Type,
irr::AbstractVector{<:Character},
multiplicities::AbstractVector{<:Integer},
RG::StarAlgebra{<:Group},
hom=nothing,
)
mps, simples = minimal_projection_system(irr, RG)
degrees = degree.(irr)
res = map(zip(mps, multiplicities, degrees, simples)) do (µ, m, deg, simple)
@spawn_compat begin
µT = eltype(µ) == T ? µ : AlgebraElement{T}(µ)
image = hom === nothing ? image_basis(µT) : image_basis(hom, µT)
@assert size(image, 1) == (simple ? m : m*deg) "incompatible projection dimension: $(size(image, 1)) ≠ $(simple ? m : m*deg)"
DirectSummand(image, m, deg, simple)
end
end
direct_summands = fetch.(res)
for (χ, ds) in zip(irr, direct_summands)
if issimple(ds) && (d = size(ds, 1)) != (e = multiplicity(ds)*sum(constituents(χ).>0))
            error("The dimension of the projection doesn't match the simple summand multiplicity: $d ≠ $e")
end
end
return direct_summands
end
|
{"hexsha": "8dc42f4ad09af5d632f249dfc0306cdc30b19acf", "size": 8420, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sa_basis.jl", "max_stars_repo_name": "kalmarek/SymbolicWedderburn.jl", "max_stars_repo_head_hexsha": "9e23b8f8ccbee6a55e7afe952961f80cc1fbc124", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-02T15:46:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T17:01:22.000Z", "max_issues_repo_path": "src/sa_basis.jl", "max_issues_repo_name": "kalmarek/SymbolicWedderburn.jl", "max_issues_repo_head_hexsha": "9e23b8f8ccbee6a55e7afe952961f80cc1fbc124", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2020-07-24T15:31:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T22:37:07.000Z", "max_forks_repo_path": "src/sa_basis.jl", "max_forks_repo_name": "kalmarek/SymbolicWedderburn.jl", "max_forks_repo_head_hexsha": "9e23b8f8ccbee6a55e7afe952961f80cc1fbc124", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-28T09:18:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T13:08:31.000Z", "avg_line_length": 35.230125523, "max_line_length": 137, "alphanum_fraction": 0.6660332542, "num_tokens": 2300}
|
function SynchronizedRandomGenerator(arg0::RandomGenerator)
return SynchronizedRandomGenerator((RandomGenerator,), arg0)
end
function next_boolean(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextBoolean", jboolean, ())
end
function next_bytes(obj::SynchronizedRandomGenerator, arg0::Vector{jbyte})
return jcall(obj, "nextBytes", void, (Vector{jbyte},), arg0)
end
function next_bytes(obj::SynchronizedRandomGenerator, arg0::Vector{jbyte}, arg1::jint, arg2::jint)
return jcall(obj, "nextBytes", void, (Vector{jbyte}, jint, jint), arg0, arg1, arg2)
end
function next_double(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextDouble", jdouble, ())
end
function next_float(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextFloat", jfloat, ())
end
function next_gaussian(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextGaussian", jdouble, ())
end
function next_int(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextInt", jint, ())
end
function next_int(obj::SynchronizedRandomGenerator, arg0::jint)
return jcall(obj, "nextInt", jint, (jint,), arg0)
end
function next_long(obj::SynchronizedRandomGenerator)
return jcall(obj, "nextLong", jlong, ())
end
function next_long(obj::SynchronizedRandomGenerator, arg0::jlong)
return jcall(obj, "nextLong", jlong, (jlong,), arg0)
end
function set_seed(obj::SynchronizedRandomGenerator, arg0::Vector{jint})
return jcall(obj, "setSeed", void, (Vector{jint},), arg0)
end
function set_seed(obj::SynchronizedRandomGenerator, arg0::jint)
return jcall(obj, "setSeed", void, (jint,), arg0)
end
function set_seed(obj::SynchronizedRandomGenerator, arg0::jlong)
return jcall(obj, "setSeed", void, (jlong,), arg0)
end
|
{"hexsha": "cae7c839aab162a2def1936ed03dab2245f129af", "size": 1748, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/HipparchusWrapper/RandomWrapper/synchronized_random_generator.jl", "max_stars_repo_name": "JuliaAstrodynamics/Orekit.jl", "max_stars_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-07T12:26:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T16:02:35.000Z", "max_issues_repo_path": "gen/HipparchusWrapper/RandomWrapper/synchronized_random_generator.jl", "max_issues_repo_name": "JuliaSpace/Orekit.jl", "max_issues_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-05T10:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T05:17:19.000Z", "max_forks_repo_path": "gen/HipparchusWrapper/RandomWrapper/synchronized_random_generator.jl", "max_forks_repo_name": "JuliaSpace/Orekit.jl", "max_forks_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6666666667, "max_line_length": 98, "alphanum_fraction": 0.7511441648, "num_tokens": 469}
|
"""
Test suite to verify the output integrity of VORONOI
TODO: check LaGriT output
"""
import subprocess
import unittest
import numpy as np
import os
import filecmp
import sys
import argparse
import h5py
# Test diagnostics on/off and all flag permutations
params = {
"voronoi_exe": "../../src/voronoi",
"use_mpi": True,
"mpi_procs": 4,
"verbose": False,
"2D": {
"mesh": "mesh_2D.inp",
"stor": "gold_2D.stor",
"pflotran": "gold_2D.uge",
"tough": "gold_2D_MESH",
"hdf5": "gold_2D.h5",
},
"3D": {
"mesh": "mesh_3D.inp",
"stor": "gold_3D.stor",
"pflotran": "gold_3D.uge",
"tough": "gold_3D_MESH",
"hdf5": "gold_3D.h5",
},
}
def getMatrixFromSTORFile(infile):
"""
This function can be grossly improved.
Not efficient at *all*.
"""
def getSTORHeader(f, skip_two):
if skip_two:
f.readline() # fehmstor ascir8i4 LaGriT Sparse Matrix Voronoi Coefficients
f.readline() # Mon Oct 23 17:19:23 2017
header = f.readline().strip().split()
NUM_WRITTEN_COEFS = int(header[0])
NEQ = int(header[1])
NCOEF_NEQ_1 = int(header[2])
NUM_AREA_COEF = int(header[3])
try:
NCON_MAX = int(header[4])
except:
NCON_MAX = None
NCOEF = NCOEF_NEQ_1 - NEQ - 1
return NUM_WRITTEN_COEFS, NEQ, NCOEF, NUM_AREA_COEF, NCON_MAX
f = open(infile)
NUM_WRITTEN_COEFS, NEQ, NCOEF, NUM_AREA_COEF, NCON_MAX = getSTORHeader(
f, True
)
matrix = np.zeros((NEQ, NEQ), dtype=np.double)
vols = []
row_count = []
values = ""
row_entries = []
pointers = []
zeros = []
ptrs_diag = []
for line in f:
values = values + line
f.close()
values = values.strip().split()
# ------------------------------------------------#
# There are seven sections of a STOR file after the header
# Record all of them, and leave the remainder as area coeffs.
for i in range(0, NEQ):
vols.append(float(values.pop(0)))
for i in range(0, NEQ + 1):
row_count.append(int(values.pop(0)))
for i in range(0, NCOEF):
row_entries.append(int(values.pop(0)))
for i in range(0, NCOEF):
pointers.append(int(values.pop(0)))
for i in range(0, NEQ + 1):
zeros.append(int(values.pop(0)))
for i in range(0, NEQ):
ptrs_diag.append(int(values.pop(0)) - 5)
# ------------------------------------------------#
tmp = row_count[:]
for i in range(1, len(row_count)):
row_count[i] = row_count[i] - tmp[i - 1]
row_count.pop(0)
# Fill the matrix with area coeffs.
a = 0
bb = 0
for i in range(0, len(row_count)):
b = row_count[i]
aa = row_entries[a : b + a]
a = a + row_count[i]
for j in range(0, len(aa)):
matrix[i][aa[j] - 1] = float(values[pointers[j + bb] - 1])
bb = bb + len(aa)
# Finally, fill the diagonal with volume coeffs.
for i in range(0, len(vols)):
matrix[i][i] = vols[i]
return matrix
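# Hedged self-check sketch: the gold STOR files referenced in `params` ship with
# this test directory, and the coefficient matrix is expected to be numerically
# symmetric, which allows a quick sanity pass.
# A = getMatrixFromSTORFile(params["2D"]["stor"])
# print(A.shape, np.allclose(A, A.T, atol=1e-6))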
def runVoronoi(
params,
otype="fehm",
output_path="TMP_OUT",
dimension=2,
cv="voronoi",
extra_flags="",
useLaGriT=False,
):
prefix = (
("mpirun -np %s " % params["mpi_procs"]) if params["use_mpi"] else ""
)
mesh_in = params["2D"]["mesh"] if dimension == 2 else params["3D"]["mesh"]
intype = "-avs"
if useLaGriT:
intype = "-lg"
mesh_in = "infile.lgi"
cmd = "%s%s %s %s -type %s -o %s -cv %s %s" % (
prefix,
params["voronoi_exe"],
intype,
mesh_in,
otype,
output_path,
cv,
extra_flags,
)
out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
if params["verbose"]:
print("\n%s\n" % cmd)
print(out)
try:
out = out.decode("ascii")
except AttributeError:
pass
assert "error" not in out.lower(), (
"VORONOI threw an exception: \n%s\n" % out
)
def areMatricesEqual(A1, A2, epsilon=1e-04):
    if np.shape(A1) != np.shape(A2):
        return False
    return np.allclose(A1, A2, atol=epsilon, rtol=1e-2)
def compareHDF5(file1, file2):
    logical = True
    with h5py.File(file1, "r") as f1:
        with h5py.File(file2, "r") as f2:
            if f1.keys() != f2.keys():
                return False
            for key in f1.keys():
                # Dataset.value is gone in h5py >= 3; index with [()] instead
                data1 = f1[key][()]
                data2 = f2[key][()]
                logical = logical and areMatricesEqual(data1, data2)
    return logical
def comparePFLOTRAN(file1, file2):
"""
Perform a simple comparison between CELL and CONNECTIONS blocks
in two UGE files.
Note that this function is not stable when mesh ordering is changed -
i.e., CELL 1 in file1 is represented as CELL 2 in file2.
TODO:
1. sort cells along x. this ensures that remapped arrays are equal.
2. remap new ordering to conns block.
"""
with open(file1) as f:
uge = f.read().split("\n")
cell_count = int(uge[0].split()[1])
cells1 = np.array(
[b.split() for b in uge[1 : cell_count + 1]], dtype=np.double
)
conn_count = int(uge[cell_count + 1].split()[1])
conns1 = np.array(
[
b.split()
for b in uge[cell_count + 2 : (cell_count + 2) + conn_count]
],
dtype=np.double,
)
with open(file2) as f:
uge = f.read().split("\n")
cell_count = int(uge[0].split()[1])
cells2 = np.array(
[b.split() for b in uge[1 : cell_count + 1]], dtype=np.double
)
conn_count = int(uge[cell_count + 1].split()[1])
conns2 = np.array(
[
b.split()
for b in uge[cell_count + 2 : (cell_count + 2) + conn_count]
],
dtype=np.double,
)
logical = areMatricesEqual(cells1, cells2) and areMatricesEqual(
conns1, conns2
)
return logical
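# Schematic of the UGE layout the parser above assumes (numbers and column
# meanings are illustrative):
# CELLS 2
# 1 0.0 0.0 0.0 1.0
# 2 1.0 0.0 0.0 1.0
# CONNECTIONS 1
# 1 2 0.5 0.0 0.0 1.0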
def compareFiles(file1, file2):
return filecmp.cmp(file1, file2)
class TestFlags(unittest.TestCase):
def test_diagnostics_2d(self):
gold = getMatrixFromSTORFile(params["2D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=2,
extra_flags="-d",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
def test_diagnostics_3d(self):
gold = getMatrixFromSTORFile(params["3D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=3,
extra_flags="-d",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
class TestFEHM(unittest.TestCase):
def test_no_settings(self):
gold = getMatrixFromSTORFile(params["2D"]["stor"])
runVoronoi(
params, otype="fehm", output_path="bronze.stor", dimension=2
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
gold = getMatrixFromSTORFile(params["3D"]["stor"])
runVoronoi(
params, otype="fehm", output_path="bronze.stor", dimension=3
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
def test_is_compressed(self):
gold = getMatrixFromSTORFile(params["2D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=2,
extra_flags="-compress",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
gold = getMatrixFromSTORFile(params["3D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=3,
extra_flags="-compress",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
def test_is_dedudded(self):
gold = getMatrixFromSTORFile(params["2D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=2,
extra_flags="-dedud",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
gold = getMatrixFromSTORFile(params["3D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=3,
extra_flags="-dedud",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
"""
def test_is_compressed_and_dedudded(self):
gold = getMatrixFromSTORFile(params['2D']['stor'])
runVoronoi(params,otype='fehm',output_path='bronze.stor',dimension=2,extra_flags='-compress -dedud')
bronze = getMatrixFromSTORFile('bronze.stor')
os.remove('bronze.stor')
self.assertTrue(areMatricesEqual(gold,bronze))
gold = getMatrixFromSTORFile(params['3D']['stor'])
runVoronoi(params,otype='fehm',output_path='bronze.stor',dimension=3,extra_flags='-compress -dedud')
bronze = getMatrixFromSTORFile('bronze.stor')
os.remove('bronze.stor')
self.assertTrue(areMatricesEqual(gold,bronze))
"""
class TestLaGriT(unittest.TestCase):
def writeLaGriTinfile(self, mesh):
lgi = "read / avs / %s / mo1\nfinish\n\n" % mesh
with open("infile.lgi", "w") as f:
f.write(lgi)
def test_FEHM(self):
self.writeLaGriTinfile(params["2D"]["mesh"])
gold = getMatrixFromSTORFile(params["2D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=2,
extra_flags="-compress",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
self.writeLaGriTinfile(params["3D"]["mesh"])
gold = getMatrixFromSTORFile(params["3D"]["stor"])
runVoronoi(
params,
otype="fehm",
output_path="bronze.stor",
dimension=3,
extra_flags="-compress",
)
bronze = getMatrixFromSTORFile("bronze.stor")
os.remove("bronze.stor")
self.assertTrue(areMatricesEqual(gold, bronze))
def test_HDF5(self):
self.writeLaGriTinfile(params["3D"]["mesh"])
runVoronoi(params, otype="hdf5", output_path="bronze.h5", dimension=3)
result = compareHDF5("bronze.h5", params["3D"]["hdf5"])
os.remove("bronze.h5")
self.assertTrue(result)
class TestPFLOTRAN(unittest.TestCase):
def test_2D(self):
runVoronoi(
params, otype="pflotran", output_path="_temp_2D.uge", dimension=2
)
result = comparePFLOTRAN("_temp_2D.uge", params["2D"]["pflotran"])
os.remove("_temp_2D.uge")
self.assertTrue(result)
def test_3D(self):
# runVoronoi(params,otype='pflotran',output_path='_temp_3D.uge',dimension=3)
# result = comparePFLOTRAN("_temp_3D.uge",params["3D"]["pflotran"])
# os.remove("_temp_3D.uge")
self.assertTrue(True)
class TestTOUGH(unittest.TestCase):
def test_2D(self):
runVoronoi(
params, otype="tough2", output_path="_TMP_MESH2D", dimension=2
)
result = compareFiles("_TMP_MESH2D", params["2D"]["tough"])
os.remove("_TMP_MESH2D")
self.assertTrue(result)
def test_3D(self):
runVoronoi(
params, otype="tough2", output_path="_TMP_MESH3D", dimension=3
)
result = compareFiles("_TMP_MESH3D", params["3D"]["tough"])
os.remove("_TMP_MESH3D")
self.assertTrue(result)
class TestHDF5(unittest.TestCase):
def test_2D(self):
runVoronoi(params, otype="hdf5", output_path="bronze.h5", dimension=2)
result = compareHDF5("bronze.h5", params["2D"]["hdf5"])
os.remove("bronze.h5")
self.assertTrue(result)
def test_3D(self):
runVoronoi(params, otype="hdf5", output_path="bronze.h5", dimension=3)
result = compareHDF5("bronze.h5", params["3D"]["hdf5"])
os.remove("bronze.h5")
self.assertTrue(result)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--noparallel", action="store_true")
# parser.add_argument('-np','--numprocs', action='store_true')
args = parser.parse_args()
if args.noparallel:
params["use_mpi"] = False
suite = unittest.TestLoader().loadTestsFromTestCase(TestFEHM)
a = unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLaGriT)
a = unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestFlags)
a = unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestPFLOTRAN)
a = unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestTOUGH)
a = unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(TestHDF5)
a = unittest.TextTestRunner(verbosity=2).run(suite)
|
{"hexsha": "f28a5ed96af2d9cea68c1f756a5ff4150ca2c42a", "size": 13974, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/sanity_check/run_tests.py", "max_stars_repo_name": "daniellivingston/voronoi", "max_stars_repo_head_hexsha": "1c109b38b2fcbfac601d635d105674533c2a4204", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-01T17:13:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T04:50:31.000Z", "max_issues_repo_path": "test/sanity_check/run_tests.py", "max_issues_repo_name": "daniellivingston/voronoi", "max_issues_repo_head_hexsha": "1c109b38b2fcbfac601d635d105674533c2a4204", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-12-10T18:08:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T17:18:10.000Z", "max_forks_repo_path": "test/sanity_check/run_tests.py", "max_forks_repo_name": "daniellivingston/voronoi", "max_forks_repo_head_hexsha": "1c109b38b2fcbfac601d635d105674533c2a4204", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-19T07:08:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T23:02:40.000Z", "avg_line_length": 28.5183673469, "max_line_length": 108, "alphanum_fraction": 0.5835837985, "include": true, "reason": "import numpy", "num_tokens": 3861}
|
import requests
from src.constants import ONCOKB_API_KEY, MAF_COLUMNS,THERAPEUTIC_COLUMNS,DEVELOPMENT_MODE
import pandas as pd
import types
import numpy as np
def ontology_classes(onto):
classes = []
for i in list(onto.classes()):
classes.append((str(i)[5:]))
for i in list(onto.individuals()):
classes.append((str(i)[5:]))
return classes
def therapy_normalize(diagnosis):
return diagnosis.replace(" ","_").replace(",","").replace("+","_").replace("__","_").replace("__","_")
## add oncokb curated genes
def all_curated_genes(onto):
HEADER = {
'Authorization' : f'Bearer {ONCOKB_API_KEY}',
'accept': 'application/json'
}
response = requests.get('https://www.oncokb.org/api/v1/utils/allCuratedGenes',headers = HEADER)
# generate gene subclasses
for i in response.json():
gene_subclass = types.new_class(i['hugoSymbol'],(onto['Gene'],))
        gene_subclass.comment = i['background']
gene_subclass.grch38RefSeq = i['grch38RefSeq']
gene_subclass.highestResistanceLevel = i['highestResistanceLevel']
gene_subclass.hugoSymbol = i['hugoSymbol']
# gene_subclass.entrezGeneId = i['entrezGeneId']
gene_subclass.grch38Isoform = i['grch38Isoform']
gene_subclass.highestSensitiveLevel = i['highestSensitiveLevel']
gene_subclass.oncogene = i['oncogene']
gene_subclass.hasVariant = []
return onto
## add therapies
def therapies(onto):
tabular = pd.read_csv('src/oncokb_biomarker_drug_associations.tsv',sep="\t")
therapies = list(tabular['Drugs (for therapeutic implications only)'].dropna().unique())
for i in therapies:
therapy_regimen = therapy_normalize(i)
therapy_subclass = types.new_class(therapy_regimen,(onto['TherapyRegimen'],))
return onto
## Add oncotree cancer types
def oncotree():
HEADER = {
'accept': 'application/json'
}
response = requests.get('http://oncotree.mskcc.org/api/tumorTypes/tree',headers = HEADER)
return (response.json())
def add_oncotree(onto):
tree = oncotree()
node = tree['TISSUE']
stack = [node]
while len(stack) > 0:
node = stack.pop()
for key, child in node['children'].items():
stack.append(child)
parent = node['parent']
if parent == "TISSUE":
parent = "Disease"
if node['code'] != "TISSUE":
if node['code'] not in ontology_classes(onto):
NewClass = types.new_class(node['code'], (onto[parent],))
return onto
def clean_mutation_effect(mutation_effect):
mapping = {
"Unknown": None,
"Likely Loss-of-function":"LossOfFunction",
"Gain-of-function":"GainOfFunction",
"Loss-of-function":"LossOfFunction",
"Likely Gain-of-function":"GainOfFunction",
"Likely Neutral":None,
"Inconclusive":None,
"Likely Switch-of-function":None,
"Switch-of-function":None,
None: None
}
return mapping[mutation_effect]
def clean_variant_classification(variant_classification):
mapping = {
"Missense_Mutation":"Missense",
"Nonsense_Mutation":"Nonsense",
"Frame_Shift_Del":"Frameshift",
"Splice_Site":"Splice",
"Frame_Shift_Ins":"Frameshift",
"In_Frame_Del":"INDEL",
"In_Frame_Ins":"INDEL",
"Translation_Start_Site":"TranslationStartSite",
"Nonstop_Mutation":"Nonsense"
}
return mapping[variant_classification]
def clean_variant(variant):
return variant.replace("p.","").replace("*","")
def add_levels(onto, row, biomarker):
level_mapping = {
"Level_1":["LEVEL_1"],
"Level_2":["LEVEL_2"],
"Level_3":["LEVEL_3A","LEVEL_3B"],
"Level_4":["LEVEL_4"],
"Level_R1":["LEVEL_R1"],
"Level_R2":["LEVEL_R2"],
}
for key, value in level_mapping.items():
level = ','.join(row[value].dropna())
if level != '':
for i in level.split(","):
therapy_name = therapy_normalize(i)
if therapy_name in ontology_classes(onto):
therapy_regimen = onto[therapy_name]
else:
therapy_regimen = types.new_class(therapy_name,(onto['TherapyRegimen'],))
onto, therapy_regimen, biomarker = add_evidence(onto, therapy_regimen, biomarker, key)
return onto,biomarker
def add_evidence(onto, therapy, biomarker, key):
if key == "Level_1":
therapy.hasEvidenceLevel1.append(biomarker)
biomarker.hasEvidenceLevel1.append(therapy)
if key == "Level_2":
therapy.hasEvidenceLevel2.append(biomarker)
biomarker.hasEvidenceLevel2.append(therapy)
if key == "Level_3":
therapy.hasEvidenceLevel3.append(biomarker)
biomarker.hasEvidenceLevel3.append(therapy)
if key == "Level_4":
therapy.hasEvidenceLevel4.append(biomarker)
biomarker.hasEvidenceLevel4.append(therapy)
if key == "Level_R1":
therapy.hasEvidenceLevelR1.append(biomarker)
biomarker.hasEvidenceLevelR1.append(therapy)
if key == "Level_R2":
therapy.hasEvidenceLevelR2.append(biomarker)
biomarker.hasEvidenceLevelR2.append(therapy)
return onto, therapy, biomarker
## variants and biomarker relationships
def parse_maf(onto, variants_path):
# generate variant subclasses and is_biomarker_for object properties
variants = pd.read_csv(variants_path, sep ="\t",low_memory=False)
variants = variants[MAF_COLUMNS]
variants = variants.loc[variants['HIGHEST_LEVEL'].notna()]
if DEVELOPMENT_MODE:
variants = variants.sample(100)
for index, row in variants.iterrows():
mutation_effect = clean_mutation_effect(row['MUTATION_EFFECT'])
variant_classification = clean_variant_classification(row['Variant_Classification'])
variant_name = clean_variant(row['HGVSp_Short'])
cancer_type = onto[row['Cohort']]
biomarker_name = f"{row['Hugo_Symbol']}_{variant_name}_{row['Cohort']}"
biomarker = types.new_class(biomarker_name, (onto['Biomarker'],))
gene = onto[row['Hugo_Symbol']]
biomarker.hasGene.append(gene)
gene.hasBiomarker.append(biomarker)
biomarker.hasDisease.append(cancer_type)
biomarker.evidenceSource.append("oncokb")
if variant_classification is not None:
variant = types.new_class(variant_name,(onto[variant_classification],))
biomarker.hasVariant = [variant]
variant.hasBiomarker.append(biomarker)
variant.hasGene = [gene]
gene.hasVariant.append(variant)
onto, biomarker = add_levels(onto, row,biomarker)
return onto
def add_fusions(onto,fusion_path):
fusions = pd.read_csv(fusion_path,sep="\t",low_memory=False)
fusions = fusions.loc[fusions['HIGHEST_LEVEL'].notna()]
for index, row in fusions.iterrows():
gene_1 = row['Fusion'].split("-")[0]
gene_2 = row['Fusion'].split("-")[1]
biomarker_name = f"fusion_{gene_1}_{gene_2}"
fusion_name = f"{gene_1}_{gene_2}"
if fusion_name not in ontology_classes(onto):
fusion = types.new_class(fusion_name, (onto['GeneFusion'],))
biomarker = types.new_class(biomarker_name, (onto['Biomarker'],))
else:
fusion = onto[fusion_name]
biomarker = onto[biomarker_name]
if gene_1 in ontology_classes(onto):
fusion.hasGene = [onto[gene_1]]
if gene_2 in ontology_classes(onto):
fusion.hasGene.append(onto[gene_2])
biomarker.hasVariant = [fusion]
fusion.hasBiomarker.append(biomarker)
onto, biomarker = add_levels(onto, row, biomarker)
biomarker.evidenceSource.append("oncokb")
return onto
def add_cnas(onto, cna_path):
cnas = pd.read_csv(cna_path, sep = "\t",low_memory = False).drop_duplicates(subset=['CANCER_TYPE','HUGO_SYMBOL','ALTERATION'])
cnas = cnas.loc[cnas['HIGHEST_LEVEL'].notna()]
for index, row in cnas.iterrows():
gene_name = row['HUGO_SYMBOL']
alteration = row['ALTERATION']
cna_name = f"{gene_name}_{alteration}"
cancer_type = row['CANCER_TYPE']
biomarker_name = f"{gene_name}_{alteration}_{cancer_type}"
disease = onto[cancer_type]
if gene_name not in ontology_classes(onto):
            gene = types.new_class(gene_name, (onto['Gene'],))
else:
gene = onto[gene_name]
cna = types.new_class(cna_name, (onto[alteration],))
biomarker = types.new_class(biomarker_name,(onto['Biomarker'],))
cna.hasGene.append(gene)
cna.hasBiomarker.append(biomarker)
biomarker.hasVariant.append(cna)
biomarker.hasDisease.append(disease)
gene.hasVariant.append(cna)
biomarker.evidenceSource.append("oncokb")
onto, biomarker = add_levels(onto, row, biomarker)
return onto
def map_civic_evidence(clin_sig, evidence_level):
mapping = {
"A":{"Sensitivity/Response":"Level_2", "Resistance":"Level_R1", "Adverse Response":"Level_R1", "Reduced Sensitivity":"Level_R1",},
"B":{"Sensitivity/Response":"Level_3", "Resistance":"Level_R1", "Adverse Response":"Level_R1", "Reduced Sensitivity":"Level_R1",},
"C":{"Sensitivity/Response":"Level_3", "Resistance":"Level_R2", "Adverse Response":"Level_R2", "Reduced Sensitivity":"Level_R2",},
"D":{"Sensitivity/Response":"Level_3", "Resistance":"Level_R2", "Adverse Response":"Level_R2", "Reduced Sensitivity":"Level_R2",},
"E":{"Sensitivity/Response":"Level_4", "Resistance":"Level_R2", "Adverse Response":"Level_R2", "Reduced Sensitivity":"Level_R2",},
}
return mapping[evidence_level][clin_sig]
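# Worked example of the mapping above: a CIViC evidence-level "A" entry with
# clinical significance "Sensitivity/Response" lands in "Level_2".
# map_civic_evidence("Sensitivity/Response", "A")  # -> "Level_2"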
def add_civic_variants(onto, civic_path):
civic_evidence = pd.read_csv(civic_path)
# subset to cleanly formatted civic variants
civic_evidence= civic_evidence.loc[civic_evidence.variant.str.contains("^[A-Z][0-9]*[A-Z]$",na=False)]
civic_evidence = civic_evidence[['Gene','variant','TherapyRegimen','oncotree','ClinicalSignificance','EvidenceLevel']].dropna().drop_duplicates()
for index, row in civic_evidence.iterrows():
therapy_name = therapy_normalize(row['TherapyRegimen'])
gene_name = row['Gene']
mutation_name = clean_variant(row['variant'])
disease_name = row['oncotree']
evidence_level = map_civic_evidence(row['ClinicalSignificance'], row['EvidenceLevel'])
biomarker_name = f"{gene_name}_{mutation_name}_{disease_name}"
if disease_name in ontology_classes(onto):
disease = onto[disease_name]
else:
disease = types.new_class(disease_name, (onto['Disease'],))
if gene_name in ontology_classes(onto):
gene = onto[gene_name]
else:
gene = types.new_class(gene_name, (onto['Gene'],))
if therapy_name in ontology_classes(onto):
therapy_regimen = onto[therapy_name]
else:
therapy_regimen = types.new_class(therapy_name, (onto['TherapyRegimen'],))
if mutation_name in ontology_classes(onto):
mutation = onto[mutation_name]
else:
mutation = types.new_class(mutation_name, (onto['Missense'],))
if biomarker_name in ontology_classes(onto):
biomarker = onto[biomarker_name]
else:
biomarker = types.new_class(biomarker_name, (onto['Biomarker'],))
biomarker.evidenceSource.append("civic")
biomarker.hasGene.append(gene)
gene.hasBiomarker.append(biomarker)
biomarker.hasDisease.append(disease)
biomarker.hasVariant.append(mutation)
mutation.hasBiomarker.append(biomarker)
onto, therapy_regimen, biomarker = add_evidence(onto, therapy_regimen, biomarker, evidence_level)
return onto
def add_civic_cnas(onto, civic_path):
civic_evidence = pd.read_csv(civic_path)
# subset to cleanly formatted civic variants
civic_cnas= civic_evidence.loc[civic_evidence.variant.str.contains("amplification|deletion",na=False, case=False)]
civic_cnas = civic_cnas[['Gene','variant','TherapyRegimen','oncotree','ClinicalSignificance','EvidenceLevel']].dropna().drop_duplicates()
for index, row in civic_cnas.iterrows():
therapy_name = therapy_normalize(row['TherapyRegimen'])
gene_name = row['Gene']
alteration = "Amplification" if "amplification" in row['variant'] else "Deletion"
cna_name = f"{gene_name}_{alteration}"
disease_name = row['oncotree']
evidence_level = map_civic_evidence(row['ClinicalSignificance'], row['EvidenceLevel'])
biomarker_name = f"{gene_name}_{alteration}_{disease_name}"
if disease_name in ontology_classes(onto):
disease = onto[disease_name]
else:
disease = types.new_class(disease_name, (onto['Disease'],))
if gene_name in ontology_classes(onto):
gene = onto[gene_name]
else:
gene = types.new_class(gene_name, (onto['Gene'],))
if therapy_name in ontology_classes(onto):
therapy_regimen = onto[therapy_name]
else:
therapy_regimen = types.new_class(therapy_name, (onto['TherapyRegimen'],))
if cna_name in ontology_classes(onto):
            cna = onto[cna_name]
else:
cna = types.new_class(cna_name, (onto[alteration],))
if biomarker_name in ontology_classes(onto):
biomarker = onto[biomarker_name]
else:
biomarker = types.new_class(biomarker_name, (onto['Biomarker'],))
biomarker.hasVariant.append(cna)
biomarker.evidenceSource.append("civic")
gene.hasBiomarker.append(biomarker)
biomarker.hasDisease.append(disease)
biomarker.hasVariant.append(cna)
cna.hasBiomarker.append(biomarker)
onto, therapy_regimen, biomarker = add_evidence(onto, therapy_regimen, biomarker, evidence_level)
return onto
|
{"hexsha": "b43a887c76b8c47e0f0cd97c7b08c95877e627d6", "size": 12532, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/generate_data.py", "max_stars_repo_name": "jmichuda/bmi-214-final-project", "max_stars_repo_head_hexsha": "84e270c388f1f43939d3b0de51b370c7ce31277b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/generate_data.py", "max_issues_repo_name": "jmichuda/bmi-214-final-project", "max_issues_repo_head_hexsha": "84e270c388f1f43939d3b0de51b370c7ce31277b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/generate_data.py", "max_forks_repo_name": "jmichuda/bmi-214-final-project", "max_forks_repo_head_hexsha": "84e270c388f1f43939d3b0de51b370c7ce31277b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-26T21:58:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T21:58:37.000Z", "avg_line_length": 35.3014084507, "max_line_length": 146, "alphanum_fraction": 0.739067986, "include": true, "reason": "import numpy", "num_tokens": 3569}
|
import sys
import types
import itertools
import numpy as np
import sympy as sp
from line_profiler import LineProfiler
profile = LineProfiler()
# _c_...: class attr must be set at class def
# _i_...: must be set at init
# _a_...: will be computed automatically and validated lazily and cached
abbreviations = {
"ac": "is_acceptable",
"ch": "choices",
"co": "consequences",
"ins": "information_set",
"la": "labels",
"no": "nodes",
"pl": "player",
"po": "successors",
"pr": "probabilities",
"ro": "root",
"su": "successors",
"subs": "substitutions",
"ou": "outcome",
"tr": "transitions",
}
class _AbstractObject (object):
"""Parent class for objects that have a name and optionally a description,
such as Agent, Group, Action, Outcome, Node."""
_c_symbols = []
    # Note: stacking @property on top of @classmethod does not produce a
    # working descriptor, so the class-level symbol list is exposed as an
    # instance property instead.
    @property
    def symbols(self):
        """list of symbols for generic objects of this type"""
        return type(self)._c_symbols
_i_name = ""
@property
def name(self):
"""short and unique name of object"""
return self._i_name
_i_desc = ""
@property
def desc(self):
"""optional longer description"""
return self._i_desc
def __init__(self, name, **kwargs):
assert isinstance(name, str)
self._i_name = name
for attr, value in kwargs.items():
if attr in abbreviations: attr = abbreviations[attr]
assert hasattr(self, "_i_"+attr)
if isinstance(value, set):
value = frozenset(value)
setattr(self, "_i_"+attr, value)
self.validate()
def validate(self): pass
def clone(self):
return self.__class__(self.name, desc=self.desc)
def __str__(self): return self._i_name
def __repr__(self): return self._i_name
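# Usage sketch of abbreviated keyword arguments (the _ExampleNode subclass and
# its _i_labels attribute are hypothetical, defined only for illustration):
class _ExampleNode(_AbstractObject):
    _i_labels = ()
_n = _ExampleNode("n1", la=("start",), desc="entry node")
assert _n.name == "n1" and _n._i_labels == ("start",)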
# helper functions:
def hasname(ob):
"""Does the object have a name that begins with a letter?"""
return hasattr(ob, "name") and isinstance(ob.name, str) and len(ob.name)>0 and ob.name[0].isalpha()
def update_consistently(base, other):
    """Update base dict with other dict until a conflict is found.
    Returns True if the update was fully consistent, False as soon as a
    conflicting key is encountered."""
for key, value in other.items():
if base.setdefault(key, value) != value:
# attempted update is inconsistent with content, so stop here
return False
return True
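# Example (sketch): merging stops and reports failure at the first conflict.
_base = {"a": 1}
assert update_consistently(_base, {"b": 2})      # consistent, so merged
assert not update_consistently(_base, {"a": 3})  # conflict on "a"
assert _base == {"a": 1, "b": 2}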
def global_symbols(*names):
"""Create a sympy symbol for each name listed as an argument
and store it in a global variable of the same name"""
    # Look up the calling module so the new symbols land in its globals.
    module_name = sys._getframe(1).f_globals['__name__']
    module = sys.modules[module_name]
    sy = sp.symbols(",".join(names))
for s in sy if isinstance(sy, tuple) else [sy]:
n = s.name
if getattr(module, n, s) != s:
print("Warning: global var", n, "existed, did not overwrite it.")
else:
setattr(module, n, s)
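# Usage sketch: creates sympy symbols p and q as globals of the calling module.
global_symbols("p", "q")
assert isinstance(p, sp.Symbol) and isinstance(q, sp.Symbol)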
def Min(values):
"""Return the symbolic or numeric minimum of a list of values"""
return (sp.simplify(sp.Min(*values)) if np.any([isinstance(v, sp.Expr) for v in values])
else min(values))
def Max(values):
"""Return the symbolic or numeric maximum of a list of values"""
return (sp.simplify(sp.Max(*values)) if np.any([isinstance(v, sp.Expr) for v in values])
else max(values))
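# Example (sketch): the symbolic path is taken only if a sympy expression is
# present in the list.
_x = sp.Symbol("_x")
assert Min([3, 5]) == 3
assert Max([_x, 3]) == sp.Max(_x, 3)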
|
{"hexsha": "d4fae792c6d20db9d4424b21acf65c1b8ff7d7dc", "size": 3433, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/responsibility/core.py", "max_stars_repo_name": "pik-gane/pyresponsibility", "max_stars_repo_head_hexsha": "e0f43be1a9712754832bb97b3797851cdb24842d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/responsibility/core.py", "max_issues_repo_name": "pik-gane/pyresponsibility", "max_issues_repo_head_hexsha": "e0f43be1a9712754832bb97b3797851cdb24842d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/responsibility/core.py", "max_forks_repo_name": "pik-gane/pyresponsibility", "max_forks_repo_head_hexsha": "e0f43be1a9712754832bb97b3797851cdb24842d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6517857143, "max_line_length": 103, "alphanum_fraction": 0.6131663268, "include": true, "reason": "import numpy,import sympy", "num_tokens": 836}
|
module TestMOIwrapper
using CPLEX
using MathOptInterface
using Test
const MOI = MathOptInterface
const MOIT = MOI.Test
const MOIB = MOI.Bridges
const CONFIG = MOIT.TestConfig(basis = true)
const OPTIMIZER = CPLEX.Optimizer()
MOI.set(OPTIMIZER, MOI.Silent(), true)
# Turn off presolve reductions so CPLEX will generate infeasibility
# certificates.
MOI.set(OPTIMIZER, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
const BRIDGED_OPTIMIZER = MOI.Bridges.full_bridge_optimizer(OPTIMIZER, Float64)
function test_basic_constraint_tests()
MOIT.basic_constraint_tests(BRIDGED_OPTIMIZER, CONFIG; exclude = [
(MOI.VectorOfVariables, MOI.SecondOrderCone),
(MOI.VectorOfVariables, MOI.RotatedSecondOrderCone),
(MOI.VectorOfVariables, MOI.GeometricMeanCone),
(MOI.VectorAffineFunction{Float64}, MOI.SecondOrderCone),
(MOI.VectorAffineFunction{Float64}, MOI.RotatedSecondOrderCone),
(MOI.VectorAffineFunction{Float64}, MOI.GeometricMeanCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.SecondOrderCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.RotatedSecondOrderCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.GeometricMeanCone),
(MOI.VectorAffineFunction{Float64}, MOI.IndicatorSet{MOI.ACTIVATE_ON_ONE, MOI.LessThan{Float64}}),
(MOI.VectorAffineFunction{Float64}, MOI.IndicatorSet{MOI.ACTIVATE_ON_ONE, MOI.GreaterThan{Float64}}),
])
# TODO(odow): bugs deleting SOC variables. See also the
# `delete_soc_variables` test.
MOIT.basic_constraint_tests(
BRIDGED_OPTIMIZER,
CONFIG;
include = [
(MOI.VectorOfVariables, MOI.SecondOrderCone),
(MOI.VectorOfVariables, MOI.RotatedSecondOrderCone),
(MOI.VectorOfVariables, MOI.GeometricMeanCone),
(MOI.VectorAffineFunction{Float64}, MOI.SecondOrderCone),
(MOI.VectorAffineFunction{Float64}, MOI.RotatedSecondOrderCone),
(MOI.VectorAffineFunction{Float64}, MOI.GeometricMeanCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.SecondOrderCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.RotatedSecondOrderCone),
(MOI.VectorQuadraticFunction{Float64}, MOI.GeometricMeanCone),
(MOI.VectorAffineFunction{Float64}, MOI.IndicatorSet{MOI.ACTIVATE_ON_ONE,MOI.LessThan{Float64}}),
(MOI.VectorAffineFunction{Float64}, MOI.IndicatorSet{MOI.ACTIVATE_ON_ONE,MOI.GreaterThan{Float64}}),
],
delete = false
)
end
function test_unittest()
MOIT.unittest(BRIDGED_OPTIMIZER, CONFIG, [
# TODO(odow): bug! We can't delete a vector of variables if one is in
# a second order cone.
"delete_soc_variables",
])
end
function test_modificationtest()
MOIT.modificationtest(BRIDGED_OPTIMIZER, CONFIG)
end
function test_contlineartest()
MOIT.contlineartest(BRIDGED_OPTIMIZER, CONFIG)
end
function test_intlineartest()
    # The SplitInterval bridge is needed so the indicator tests can run.
interval_optimizer = MOIB.LazyBridgeOptimizer(OPTIMIZER)
MOIB.add_bridge(interval_optimizer, MOIB.Constraint.SplitIntervalBridge{Float64})
MOIT.intlineartest(BRIDGED_OPTIMIZER, CONFIG)
MOIT.intlineartest(interval_optimizer, CONFIG)
end
function test_contquadratictest()
MOIT.contquadratictest(
BRIDGED_OPTIMIZER,
MOIT.TestConfig(atol = 1e-3, rtol = 1e-3),
["ncqcp"], # CPLEX doesn't support non-convex problems
)
end
function test_CPXPARAM_OptimalityTarget()
# Test setting CPXPARAM_OptimalityTarget because it changes the problem
# type.
# Max x^2
# s.t. 1 <= x <= 4
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variable(model)
MOI.add_constraint(model, MOI.SingleVariable(x), MOI.Interval(1.0, 4.0))
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(),
MOI.ScalarQuadraticFunction(
MOI.ScalarAffineTerm{Float64}[],
[MOI.ScalarQuadraticTerm(2.0, x, x)],
0.0,
)
)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(
model,
MOI.RawParameter("CPXPARAM_OptimalityTarget"),
CPX_OPTIMALITYTARGET_OPTIMALGLOBAL,
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(model, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ObjectiveValue()) ≈ 16.0 atol=1e-6
@test MOI.get(model, MOI.VariablePrimal(), x) ≈ 4.0 atol=1e-6
end
function test_contconic()
MOIT.lintest(BRIDGED_OPTIMIZER, CONFIG)
soc_config = MOIT.TestConfig(atol=5e-3)
# TODO(odow): investigate why infeasibility certificates not generated for
# SOC.
MOIT.soctest(BRIDGED_OPTIMIZER, soc_config, ["soc3"])
MOIT.soc3test(
BRIDGED_OPTIMIZER,
MOIT.TestConfig(atol = 1e-3, infeas_certificates = false)
)
MOIT.rsoctest(BRIDGED_OPTIMIZER, soc_config, ["rotatedsoc2"])
MOIT.rotatedsoc2test(
BRIDGED_OPTIMIZER,
# Need for `duals = false` fixed by https://github.com/jump-dev/MathOptInterface.jl/pull/1171
# Remove in a future MOI 0.9.18+ release.
MOIT.TestConfig(
atol = 1e-3, infeas_certificates = false, duals = false
),
)
MOIT.geomeantest(BRIDGED_OPTIMIZER, soc_config)
end
function test_solvername()
@test MOI.get(BRIDGED_OPTIMIZER, MOI.SolverName()) == "CPLEX"
end
function test_default_objective_test()
MOIT.default_objective_test(BRIDGED_OPTIMIZER)
end
function test_default_status_test()
MOIT.default_status_test(BRIDGED_OPTIMIZER)
end
function test_nametest()
MOIT.nametest(BRIDGED_OPTIMIZER)
end
function test_validtest()
MOIT.validtest(BRIDGED_OPTIMIZER)
end
function test_emptytest()
MOIT.emptytest(BRIDGED_OPTIMIZER)
end
function test_orderedindicestest()
MOIT.orderedindicestest(BRIDGED_OPTIMIZER)
end
function test_copytest()
MOIT.copytest(
BRIDGED_OPTIMIZER,
MOI.Bridges.full_bridge_optimizer(CPLEX.Optimizer(), Float64)
)
end
function test_scalar_function_constant_not_zero()
MOIT.scalar_function_constant_not_zero(OPTIMIZER)
end
function test_start_values_test()
model = CPLEX.Optimizer()
x = MOI.add_variables(model, 2)
@test MOI.supports(model, MOI.VariablePrimalStart(), MOI.VariableIndex)
@test MOI.get(model, MOI.VariablePrimalStart(), x[1]) === nothing
@test MOI.get(model, MOI.VariablePrimalStart(), x[2]) === nothing
MOI.set(model, MOI.VariablePrimalStart(), x[1], 1.0)
MOI.set(model, MOI.VariablePrimalStart(), x[2], nothing)
@test MOI.get(model, MOI.VariablePrimalStart(), x[1]) == 1.0
@test MOI.get(model, MOI.VariablePrimalStart(), x[2]) === nothing
MOI.optimize!(model)
@test MOI.get(model, MOI.ObjectiveValue()) == 0.0
end
function test_supports_constrainttest()
# supports_constrainttest needs VectorOfVariables-in-Zeros,
# MOIT.supports_constrainttest(CPLEX.Optimizer(), Float64, Float32)
# but supports_constrainttest is broken via bridges:
MOI.empty!(BRIDGED_OPTIMIZER)
MOI.add_variable(BRIDGED_OPTIMIZER)
@test MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.SingleVariable, MOI.EqualTo{Float64})
@test MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64})
# This test is broken for some reason:
@test_broken !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.ScalarAffineFunction{Int}, MOI.EqualTo{Float64})
@test !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.ScalarAffineFunction{Int}, MOI.EqualTo{Int})
@test !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.SingleVariable, MOI.EqualTo{Int})
@test MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.VectorOfVariables, MOI.Zeros)
@test !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.VectorOfVariables, MOI.EqualTo{Float64})
@test !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.SingleVariable, MOI.Zeros)
@test !MOI.supports_constraint(BRIDGED_OPTIMIZER, MOI.VectorOfVariables, MOIT.UnknownVectorSet)
end
function test_set_lower_bound_twice()
MOIT.set_lower_bound_twice(OPTIMIZER, Float64)
end
function test_set_upper_bound_twice()
MOIT.set_upper_bound_twice(OPTIMIZER, Float64)
end
function test_user_provided_env()
env = CPLEX.Env()
model_1 = CPLEX.Optimizer(env)
@test model_1.env === env
model_2 = CPLEX.Optimizer(env)
@test model_2.env === env
# Check that finalizer doesn't touch env when manually provided.
finalize(model_1)
@test env.ptr != C_NULL
end
function test_automatic_env()
model_1 = CPLEX.Optimizer()
model_2 = CPLEX.Optimizer()
@test model_1.env.ptr !== model_2.env.ptr
end
function test_user_provided_env_empty()
env = CPLEX.Env()
model = CPLEX.Optimizer(env)
@test model.env === env
@test env.ptr != C_NULL
MOI.empty!(model)
@test model.env === env
@test env.ptr != C_NULL
end
function test_automatic_env_empty()
model = CPLEX.Optimizer()
env = model.env
MOI.empty!(model)
@test model.env === env
@test env.ptr != C_NULL
end
function test_manual_env()
env = CPLEX.Env()
model = CPLEX.Optimizer(env)
finalize(env)
@test env.finalize_called
finalize(model)
@test env.ptr == C_NULL
end
function test_cont_int_cont()
atol = 1e-5
rtol = 1e-5
model = CPLEX.Optimizer()
MOI.empty!(model)
@test MOI.is_empty(model)
# min -x
# st x + y <= 1.5 (x + y - 1.5 ∈ Nonpositives)
# x, y >= 0 (x, y ∈ Nonnegatives)
v = MOI.add_variables(model, 2)
@test MOI.get(model, MOI.NumberOfVariables()) == 2
cf = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0,1.0], v), 0.0)
c = MOI.add_constraint(model, cf, MOI.LessThan(1.5))
@test MOI.get(model, MOI.NumberOfConstraints{MOI.ScalarAffineFunction{Float64},MOI.LessThan{Float64}}()) == 1
MOI.add_constraint.(model, MOI.SingleVariable.(v), MOI.GreaterThan(0.0))
@test MOI.get(model, MOI.NumberOfConstraints{MOI.SingleVariable,MOI.GreaterThan{Float64}}()) == 2
objf = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-1.0,0.0], v), 0.0)
MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), objf)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
@test MOI.get(model, MOI.ObjectiveSense()) == MOI.MIN_SENSE
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMIZE_NOT_CALLED
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(model, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ObjectiveValue()) ≈ -1.5 atol=atol rtol=rtol
@test MOI.get(model, MOI.VariablePrimal(), v) ≈ [1.5, 0] atol=atol rtol=rtol
@test MOI.get(model, MOI.ConstraintPrimal(), c) ≈ 1.5 atol=atol rtol=rtol
@test MOI.get(model, MOI.DualStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ConstraintDual(), c) ≈ -1.0 atol=atol rtol=rtol
# Add integrality constraints
int = MOI.add_constraint.(model, MOI.SingleVariable.(v), MOI.Integer())
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(model, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ObjectiveValue()) ≈ -1.0 atol=atol rtol=rtol
@test MOI.get(model, MOI.VariablePrimal(), v) ≈ [1.0, 0] atol=atol rtol=rtol
@test MOI.get(model, MOI.ConstraintPrimal(), c) ≈ 1.0 atol=atol rtol=rtol
@test MOI.get(model, MOI.DualStatus()) == MOI.NO_SOLUTION
# Remove integrality constraints
MOI.delete.(model, int)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.OPTIMAL
@test MOI.get(model, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ObjectiveValue()) ≈ -1.5 atol=atol rtol=rtol
@test MOI.get(model, MOI.VariablePrimal(), v) ≈ [1.5, 0] atol=atol rtol=rtol
@test MOI.get(model, MOI.ConstraintPrimal(), c) ≈ 1.5 atol=atol rtol=rtol
@test MOI.get(model, MOI.DualStatus()) == MOI.FEASIBLE_POINT
@test MOI.get(model, MOI.ConstraintDual(), c) ≈ -1.0 atol=atol rtol=rtol
end
function test_conflict_bounds()
# @testset "Variable bounds (SingleVariable and LessThan/GreaterThan)" begin
# Test similar to ../C_API/iis.jl, but ported to MOI.
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
c1 = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.GreaterThan(2.0))
c2 = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.LessThan(1.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_MINIMAL
@test MOI.get(model, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.IN_CONFLICT
end
function test_conflict_scalaraffine()
# @testset "Variable bounds (ScalarAffine)" begin
# Same test as ../C_API/iis.jl, but ported to MOI.
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
c1 = MOI.add_constraint(model, MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0], [x]), 0.0), MOI.GreaterThan(2.0))
c2 = MOI.add_constraint(model, MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0], [x]), 0.0), MOI.LessThan(1.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_MINIMAL
@test MOI.get(model, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.IN_CONFLICT
end
function test_conflict_two_bound()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
y = MOI.add_variable(model)
b1 = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.GreaterThan(0.0))
b2 = MOI.add_constraint(model, MOI.SingleVariable(y), MOI.GreaterThan(0.0))
cf1 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 1.0], [x, y]), 0.0)
c1 = MOI.add_constraint(model, cf1, MOI.LessThan(-1.0))
cf2 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, -1.0], [x, y]), 0.0)
c2 = MOI.add_constraint(model, cf2, MOI.GreaterThan(1.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_MINIMAL
@test MOI.get(model, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
@test MOI.get(model, MOI.ConstraintConflictStatus(), b1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), b2) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.NOT_IN_CONFLICT
end
function test_conflict_two_equalto()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
y = MOI.add_variable(model)
b1 = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.GreaterThan(0.0))
b2 = MOI.add_constraint(model, MOI.SingleVariable(y), MOI.GreaterThan(0.0))
cf1 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 1.0], [x, y]), 0.0)
c1 = MOI.add_constraint(model, cf1, MOI.EqualTo(-1.0))
cf2 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, -1.0], [x, y]), 0.0)
c2 = MOI.add_constraint(model, cf2, MOI.GreaterThan(1.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_MINIMAL
@test MOI.get(model, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
@test MOI.get(model, MOI.ConstraintConflictStatus(), b1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), b2) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.NOT_IN_CONFLICT
end
function test_conflict_variables_outside()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
y = MOI.add_variable(model)
z = MOI.add_variable(model)
b1 = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.GreaterThan(0.0))
b2 = MOI.add_constraint(model, MOI.SingleVariable(y), MOI.GreaterThan(0.0))
b3 = MOI.add_constraint(model, MOI.SingleVariable(z), MOI.GreaterThan(0.0))
cf1 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, 1.0], [x, y]), 0.0)
c1 = MOI.add_constraint(model, cf1, MOI.LessThan(-1.0))
cf2 = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0, -1.0, 1.0], [x, y, z]), 0.0)
c2 = MOI.add_constraint(model, cf2, MOI.GreaterThan(1.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_MINIMAL
@test MOI.get(model, MOI.ConflictStatus()) == MOI.CONFLICT_FOUND
@test MOI.get(model, MOI.ConstraintConflictStatus(), b1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), b2) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), b3) == MOI.NOT_IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.NOT_IN_CONFLICT
end
function test_conflict_no_conflict()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
c1 = MOI.add_constraint(model, MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0], [x]), 0.0), MOI.GreaterThan(1.0))
c2 = MOI.add_constraint(model, MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([1.0], [x]), 0.0), MOI.LessThan(2.0))
# Getting the results before the conflict refiner has been called must return an error.
@test MOI.get(model, CPLEX.ConflictStatus()) === nothing
@test MOI.get(model, MOI.ConflictStatus()) == MOI.COMPUTE_CONFLICT_NOT_CALLED
@test_throws ErrorException MOI.get(model, MOI.ConstraintConflictStatus(), c1)
# Once it's called, no problem.
MOI.compute_conflict!(model)
@test MOI.get(model, CPLEX.ConflictStatus()) == CPLEX.CPX_STAT_CONFLICT_FEASIBLE
@test MOI.get(model, MOI.ConflictStatus()) == MOI.NO_CONFLICT_EXISTS
@test MOI.get(model, MOI.ConstraintConflictStatus(), c1) == MOI.NOT_IN_CONFLICT
@test MOI.get(model, MOI.ConstraintConflictStatus(), c2) == MOI.NOT_IN_CONFLICT
end
function test_ZeroOne_NONE()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
c = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.ZeroOne())
tmp = Ref{Cdouble}()
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 0.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 1.0
MOI.delete(model, c)
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -CPLEX.CPX_INFBOUND
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == CPLEX.CPX_INFBOUND
end
function test_ZeroOne_LESS_THAN()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
MOI.add_constraint(model, MOI.SingleVariable(x), MOI.LessThan(2.0))
c = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.ZeroOne())
tmp = Ref{Cdouble}()
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 0.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 2.0
MOI.delete(model, c)
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -CPLEX.CPX_INFBOUND
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 2.0
end
function test_ZeroOne_GREATER_THAN()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
MOI.add_constraint(model, MOI.SingleVariable(x), MOI.GreaterThan(-2.0))
c = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.ZeroOne())
tmp = Ref{Cdouble}()
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -2.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 1.0
MOI.delete(model, c)
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -2.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == CPLEX.CPX_INFBOUND
end
function test_ZeroOne_INTERVAL()
model = CPLEX.Optimizer()
x = MOI.add_variable(model)
MOI.add_constraint(model, MOI.SingleVariable(x), MOI.Interval(-2.0, 2.0))
c = MOI.add_constraint(model, MOI.SingleVariable(x), MOI.ZeroOne())
tmp = Ref{Cdouble}()
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -2.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 2.0
MOI.delete(model, c)
CPLEX.CPXgetlb(model.env, model.lp, tmp, 0, 0)
@test tmp[] == -2.0
CPLEX.CPXgetub(model.env, model.lp, tmp, 0, 0)
@test tmp[] == 2.0
end
function test_farkas_dual_min()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.SingleVariable}(),
MOI.SingleVariable(x[1]),
)
clb = MOI.add_constraint.(
model, MOI.SingleVariable.(x), MOI.GreaterThan(0.0)
)
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] > 1e-6
@test clb_dual[2] > 1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ -2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ -c_dual atol = 1e-6
end
function test_farkas_dual_min_interval()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.SingleVariable}(),
MOI.SingleVariable(x[1]),
)
clb = MOI.add_constraint.(
model, MOI.SingleVariable.(x), MOI.Interval(0.0, 10.0)
)
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] > 1e-6
@test clb_dual[2] > 1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ -2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ -c_dual atol = 1e-6
end
function test_farkas_dual_min_equalto()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.SingleVariable}(),
MOI.SingleVariable(x[1]),
)
clb = MOI.add_constraint.(model, MOI.SingleVariable.(x), MOI.EqualTo(0.0))
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] > 1e-6
@test clb_dual[2] > 1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ -2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ -c_dual atol = 1e-6
end
function test_farkas_dual_min_ii()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(-1.0, x[1])], 0.0),
)
clb = MOI.add_constraint.(
model, MOI.SingleVariable.(x), MOI.LessThan(0.0)
)
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-2.0, -1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] < -1e-6
@test clb_dual[2] < -1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ 2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ c_dual atol = 1e-6
end
function test_farkas_dual_max()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.SingleVariable}(),
MOI.SingleVariable(x[1]),
)
clb = MOI.add_constraint.(
model, MOI.SingleVariable.(x), MOI.GreaterThan(0.0)
)
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] > 1e-6
@test clb_dual[2] > 1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ -2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ -c_dual atol = 1e-6
end
function test_farkas_dual_max_ii()
model = CPLEX.Optimizer()
MOI.set(model, MOI.Silent(), true)
MOI.set(model, MOI.RawParameter("CPX_PARAM_REDUCE"), 0)
x = MOI.add_variables(model, 2)
MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
MOI.set(
model,
MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(-1.0, x[1])], 0.0),
)
clb = MOI.add_constraint.(
model, MOI.SingleVariable.(x), MOI.LessThan(0.0)
)
c = MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-2.0, -1.0], x), 0.0),
MOI.LessThan(-1.0),
)
MOI.optimize!(model)
@test MOI.get(model, MOI.TerminationStatus()) == MOI.INFEASIBLE
@test MOI.get(model, MOI.DualStatus()) == MOI.INFEASIBILITY_CERTIFICATE
clb_dual = MOI.get.(model, MOI.ConstraintDual(), clb)
c_dual = MOI.get(model, MOI.ConstraintDual(), c)
@show clb_dual, c_dual
@test clb_dual[1] < -1e-6
@test clb_dual[2] < -1e-6
@test c_dual[1] < -1e-6
@test clb_dual[1] ≈ 2 * c_dual atol = 1e-6
@test clb_dual[2] ≈ c_dual atol = 1e-6
end
# Run every function in this module whose name starts with `test_`.
function runtests()
    for name in names(@__MODULE__; all = true)
        if startswith("$(name)", "test_")
            @testset "$(name)" begin
                getfield(@__MODULE__, name)()
            end
        end
    end
end
end # module TestMOIwrapper
TestMOIwrapper.runtests()
|
{"hexsha": "84fc4c6dcc37160f8c30a434ac4fdfa18af73f80", "size": 29284, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/MathOptInterface/MOI_wrapper.jl", "max_stars_repo_name": "henriquebecker91/CPLEX.jl", "max_stars_repo_head_hexsha": "9f28588672c92b96b472653139c263246f0c2a01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/MathOptInterface/MOI_wrapper.jl", "max_issues_repo_name": "henriquebecker91/CPLEX.jl", "max_issues_repo_head_hexsha": "9f28588672c92b96b472653139c263246f0c2a01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/MathOptInterface/MOI_wrapper.jl", "max_forks_repo_name": "henriquebecker91/CPLEX.jl", "max_forks_repo_head_hexsha": "9f28588672c92b96b472653139c263246f0c2a01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0054644809, "max_line_length": 122, "alphanum_fraction": 0.6878158721, "num_tokens": 9478}
|
[STATEMENT]
lemma lprefixes_chain:
"Complete_Partial_Order.chain (\<sqsubseteq>) {ys. lprefix ys xs}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Complete_Partial_Order.chain (\<sqsubseteq>) {ys. ys \<sqsubseteq> xs}
[PROOF STEP]
by(rule chainI)(auto dest: lprefix_down_linear)
|
{"llama_tokens": 108, "file": "Coinductive_Coinductive_List", "length": 1}
|
Require Import Fiat.BinEncoders.NoEnv.Specs
Fiat.BinEncoders.NoEnv.Libraries.BinCore.
Section BoolBinEncoder.
Definition Bool_encode_inner (b : bool) : bin_t := b :: nil.
Definition Bool_decode (b : bin_t) : bool * bin_t :=
match b with
| nil => (false, nil) (* bogus *)
| x :: xs => (x, xs)
end.
Theorem Bool_encode_correct : bin_encode_correct Bool_encode_inner Bool_decode.
Proof.
unfold bin_encode_correct, Bool_encode_inner, Bool_decode.
eauto.
Qed.
End BoolBinEncoder.
Definition Bool_encode :=
bin_encode_transform_pair Bool_encode_inner.
Global Instance Bool_decoder
: decoder (fun _ => True) Bool_encode :=
bin_encode_transform_pair_decoder Bool_encode_correct.
|
{"author": "proofskiddie", "repo": "CoqStuff", "sha": "fc8ecdf8045bc835bb10b2e4791f041d82451b5d", "save_path": "github-repos/coq/proofskiddie-CoqStuff", "path": "github-repos/coq/proofskiddie-CoqStuff/CoqStuff-fc8ecdf8045bc835bb10b2e4791f041d82451b5d/idontevnkno/src/BinEncoders/NoEnv/Libraries/Bool.v"}
|
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class STN3d(nn.Module):
def __init__(self, num_points = 2048):
super(STN3d, self).__init__()
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.mp1 = nn.MaxPool1d(num_points)
def forward(self, x):
batch_size = x.size()[0]
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.mp1(x)
x = x.view(-1, 1024)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
I = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1, 9).repeat(batch_size, 1)
if x.is_cuda:
I = I.cuda()
x = x + I
x = x.view(-1, 3, 3)
return x
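# Quick shape check (sketch): STN3d maps a (B, 3, N) point cloud to a (B, 3, 3)
# spatial transform; N must equal num_points because of the fixed MaxPool1d.
if __name__ == '__main__':
    _T = STN3d(num_points=2048)(torch.rand(4, 3, 2048))
    assert _T.size() == (4, 3, 3)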
class PointNetFeature(nn.Module):
def __init__(self, num_points = 2048):
super(PointNetFeature, self).__init__()
self.num_points = num_points
self.stn = STN3d(num_points)
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.mp1 = nn.MaxPool1d(num_points)
def forward(self, x):
batch_size = x.size()[0]
T = self.stn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, T)
x = x.transpose(2, 1)
x = F.relu(self.bn1(self.conv1(x)))
point_features = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = self.mp1(x)
x = x.view(-1, 1024)
x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
return torch.cat([x, point_features], 1), T
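# Shape sketch: the output concatenates the tiled 1024-d global descriptor with
# the 64-d per-point features, giving (B, 1088, N) plus the (B, 3, 3) transform.
if __name__ == '__main__':
    _out, _T2 = PointNetFeature(num_points=2048)(torch.rand(4, 3, 2048))
    assert _out.size() == (4, 1088, 2048) and _T2.size() == (4, 3, 3)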
class PointDenseClassifier(nn.Module):
def __init__(self, num_points = 2048, k = 2):
super(PointDenseClassifier, self).__init__()
self.num_points = num_points
self.k = k
self.feat = PointNetFeature(num_points)
self.conv1 = torch.nn.Conv1d(1088, 512, 1)
self.conv2 = torch.nn.Conv1d(512, 256, 1)
self.conv3 = torch.nn.Conv1d(256, 128, 1)
self.conv4 = torch.nn.Conv1d(128, self.k, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
def forward(self, x):
batch_size = x.size()[0]
x, T = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
x = x.transpose(2,1).contiguous()
x = F.log_softmax(x.view(-1,self.k), dim=-1)
x = x.view(batch_size, self.num_points, self.k)
return x, T
if __name__ == '__main__':
data = Variable(torch.rand(32, 3, 4096))
model = PointDenseClassifier(num_points=4096, k=3)
output, _ = model(data)
print(output.size())
|
{"hexsha": "1996d3a42b6333c5a067cb6c125e21e265c853af", "size": 3241, "ext": "py", "lang": "Python", "max_stars_repo_path": "scene_seg/pointnet/models.py", "max_stars_repo_name": "scenenn/pointwise", "max_stars_repo_head_hexsha": "8ce1eeb73c3bfbd26e5a14d5c47fcdae163d4ed4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 91, "max_stars_repo_stars_event_min_datetime": "2018-04-05T04:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-24T06:40:44.000Z", "max_issues_repo_path": "scene_seg/pointnet/models.py", "max_issues_repo_name": "scenenn/pointwise", "max_issues_repo_head_hexsha": "8ce1eeb73c3bfbd26e5a14d5c47fcdae163d4ed4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2018-04-07T15:03:15.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-25T00:13:07.000Z", "max_forks_repo_path": "scene_seg/pointnet/models.py", "max_forks_repo_name": "scenenn/pointwise", "max_forks_repo_head_hexsha": "8ce1eeb73c3bfbd26e5a14d5c47fcdae163d4ed4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-04-07T10:50:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-26T14:05:30.000Z", "avg_line_length": 32.7373737374, "max_line_length": 121, "alphanum_fraction": 0.5674174637, "include": true, "reason": "import numpy", "num_tokens": 1025}
|
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import argparse
import os
import numpy as np
from sklearn.multiclass import OneVsRestClassifier
from sklearn import svm
from reveal_user_classification.common import get_threads_number
from reveal_graph_embedding.datautil.score_rw_util import write_results
from reveal_graph_embedding.datautil.snow_datautil.snow_read_data import read_adjacency_matrix,\
read_node_label_matrix
from reveal_graph_embedding.embedding.arcte.arcte import arcte
from reveal_graph_embedding.embedding.common import normalize_columns
from reveal_graph_embedding.embedding.community_weighting import chi2_contingency_matrix,\
peak_snr_weight_aggregation, community_weighting
from reveal_graph_embedding.learning.holdout import generate_folds
from reveal_graph_embedding.learning.evaluation import form_node_label_prediction_matrix
from reveal_graph_embedding.learning import evaluation
def main():
"""
Entry point.
"""
# Parse arguments.
parser = argparse.ArgumentParser()
parser.add_argument("-input", "--snow-tweets-folder", dest="snow_tweets_folder",
help="Folder that contains the SNOW tweets.",
type=str, required=True,)
parser.add_argument("-output", "--prototype-output-folder", dest="prototype_output_folder",
help="Folder to which the results from the execution will be written.",
type=str, required=True)
parser.add_argument("-rp", "--restart-probability", dest="restart_probability",
help="Random walk restart probability.",
type=float, required=False, default=0.2)
parser.add_argument("-nt", "--number-of-threads", dest="number_of_threads",
help="The number of parallel threads for feature extraction and classification.",
type=int, required=False, default=None)
args = parser.parse_args()
snow_tweets_folder = args.snow_tweets_folder
prototype_output_folder = args.prototype_output_folder
restart_probability = args.restart_probability
number_of_threads = args.number_of_threads
run_prototype(snow_tweets_folder=snow_tweets_folder,
prototype_output_folder=prototype_output_folder,
restart_probability=restart_probability,
number_of_threads=number_of_threads)
def run_prototype(snow_tweets_folder,
prototype_output_folder,
restart_probability,
number_of_threads):
"""
This is a sample execution of the User Network Profile Classifier Prototype.
Specifically:
- Reads a set of tweets from a local folder.
- Forms graphs and text-based vector representation for the users involved.
- Fetches Twitter lists for influential users.
- Extracts keywords from Twitter lists and thus annotates these users as experts in these topics.
- Extracts graph-based features using the ARCTE algorithm.
- Performs user classification for the rest of the users.
"""
if number_of_threads is None:
number_of_threads = get_threads_number()
####################################################################################################################
# Read data.
####################################################################################################################
# Read graphs.
edge_list_path = os.path.normpath(snow_tweets_folder + "/graph.tsv")
adjacency_matrix = read_adjacency_matrix(file_path=edge_list_path,
separator='\t')
number_of_nodes = adjacency_matrix.shape[0]
# Read labels.
node_label_list_path = os.path.normpath(snow_tweets_folder + "/user_label_matrix.tsv")
user_label_matrix, number_of_categories, labelled_node_indices = read_node_label_matrix(node_label_list_path,
'\t')
####################################################################################################################
# Extract features.
####################################################################################################################
features = arcte(adjacency_matrix,
restart_probability,
0.00001,
number_of_threads=number_of_threads)
features = normalize_columns(features)
    percentages = np.arange(1, 11, dtype=int)
trial_num = 10
####################################################################################################################
# Perform user classification.
####################################################################################################################
    mean_macro_precision = np.zeros(percentages.size, dtype=float)
    std_macro_precision = np.zeros(percentages.size, dtype=float)
    mean_micro_precision = np.zeros(percentages.size, dtype=float)
    std_micro_precision = np.zeros(percentages.size, dtype=float)
    mean_macro_recall = np.zeros(percentages.size, dtype=float)
    std_macro_recall = np.zeros(percentages.size, dtype=float)
    mean_micro_recall = np.zeros(percentages.size, dtype=float)
    std_micro_recall = np.zeros(percentages.size, dtype=float)
    mean_macro_F1 = np.zeros(percentages.size, dtype=float)
    std_macro_F1 = np.zeros(percentages.size, dtype=float)
    mean_micro_F1 = np.zeros(percentages.size, dtype=float)
    std_micro_F1 = np.zeros(percentages.size, dtype=float)
    F1 = np.zeros((percentages.size, number_of_categories), dtype=float)
for p in np.arange(percentages.size):
percentage = percentages[p]
# Initialize the metric storage arrays to zero
        macro_precision = np.zeros(trial_num, dtype=float)
        micro_precision = np.zeros(trial_num, dtype=float)
        macro_recall = np.zeros(trial_num, dtype=float)
        micro_recall = np.zeros(trial_num, dtype=float)
        macro_F1 = np.zeros(trial_num, dtype=float)
        micro_F1 = np.zeros(trial_num, dtype=float)
        trial_F1 = np.zeros((trial_num, number_of_categories), dtype=float)
folds = generate_folds(user_label_matrix,
labelled_node_indices,
number_of_categories,
percentage,
trial_num)
for trial in np.arange(trial_num):
train, test = next(folds)
########################################################################################################
# Separate train and test sets
########################################################################################################
X_train, X_test, y_train, y_test = features[train, :],\
features[test, :],\
user_label_matrix[train, :],\
user_label_matrix[test, :]
contingency_matrix = chi2_contingency_matrix(X_train, y_train)
community_weights = peak_snr_weight_aggregation(contingency_matrix)
X_train, X_test = community_weighting(X_train, X_test, community_weights)
####################################################################################################
# Train model
####################################################################################################
# Train classifier
model = OneVsRestClassifier(svm.LinearSVC(C=1,
random_state=None,
dual=False,
fit_intercept=True),
n_jobs=number_of_threads)
model.fit(X_train, y_train)
####################################################################################################
# Make predictions
####################################################################################################
y_pred = model.decision_function(X_test)
y_pred = form_node_label_prediction_matrix(y_pred, y_test)
########################################################################################################
# Calculate measures
########################################################################################################
measures = evaluation.calculate_measures(y_pred, y_test)
macro_recall[trial] = measures[0]
micro_recall[trial] = measures[1]
macro_precision[trial] = measures[2]
micro_precision[trial] = measures[3]
macro_F1[trial] = measures[4]
micro_F1[trial] = measures[5]
trial_F1[trial, :] = measures[6]
mean_macro_precision[p] = np.mean(macro_precision)
std_macro_precision[p] = np.std(macro_precision)
mean_micro_precision[p] = np.mean(micro_precision)
std_micro_precision[p] = np.std(micro_precision)
mean_macro_recall[p] = np.mean(macro_recall)
std_macro_recall[p] = np.std(macro_recall)
mean_micro_recall[p] = np.mean(micro_recall)
std_micro_recall[p] = np.std(micro_recall)
mean_macro_F1[p] = np.mean(macro_F1)
std_macro_F1[p] = np.std(macro_F1)
mean_micro_F1[p] = np.mean(micro_F1)
std_micro_F1[p] = np.std(micro_F1)
F1[p, :] = np.mean(trial_F1, axis=0)
measure_list = [(mean_macro_precision, std_macro_precision),
(mean_micro_precision, std_micro_precision),
(mean_macro_recall, std_macro_recall),
(mean_micro_recall, std_micro_recall),
(mean_macro_F1, std_macro_F1),
(mean_micro_F1, std_micro_F1),
F1]
write_results(measure_list,
os.path.normpath(prototype_output_folder + "/F1_average_scores.txt"))
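# Standard script entry point (a sketch; the installed package may also expose
# `main` through a setuptools console_scripts entry instead):
if __name__ == "__main__":
    main()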
|
{"hexsha": "9c7d90a83fac3f23883725607cdef972146ef892", "size": 10341, "ext": "py", "lang": "Python", "max_stars_repo_path": "reveal_user_classification/entry_points/prototype_user_network_profile_classifier.py", "max_stars_repo_name": "MKLab-ITI/reveal-user-classification", "max_stars_repo_head_hexsha": "4433e265ca692220bda4c499370fa665ec79b364", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2015-04-06T09:42:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-09T00:16:08.000Z", "max_issues_repo_path": "reveal_user_classification/entry_points/prototype_user_network_profile_classifier.py", "max_issues_repo_name": "twishmay/reveal-user-classification", "max_issues_repo_head_hexsha": "4433e265ca692220bda4c499370fa665ec79b364", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reveal_user_classification/entry_points/prototype_user_network_profile_classifier.py", "max_forks_repo_name": "twishmay/reveal-user-classification", "max_forks_repo_head_hexsha": "4433e265ca692220bda4c499370fa665ec79b364", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-07-29T10:46:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-09T00:16:17.000Z", "avg_line_length": 50.443902439, "max_line_length": 120, "alphanum_fraction": 0.5389227347, "include": true, "reason": "import numpy", "num_tokens": 1773}
|
from __future__ import absolute_import
import pytest
import sagemaker
import os
from mock import (
Mock,
PropertyMock,
)
from sagemaker.processing import (
Processor,
ProcessingInput,
ScriptProcessor,
)
from botocore.exceptions import ValidationError
from sagemaker.network import NetworkConfig
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.properties import Properties, PropertyFile
from sagemaker.workflow.parameters import ParameterString, ParameterInteger
from sagemaker.workflow.steps import (
ProcessingStep,
ConfigurableRetryStep,
StepTypeEnum,
CacheConfig,
)
from sagemaker.predictor import Predictor
from sagemaker.model import FrameworkModel
from tests.unit import DATA_DIR
DUMMY_SCRIPT_PATH = os.path.join(DATA_DIR, "dummy_script.py")
REGION = "us-west-2"
BUCKET = "my-bucket"
IMAGE_URI = "fakeimage"
ROLE = "DummyRole"
MODEL_NAME = "gisele"
class CustomStep(ConfigurableRetryStep):
def __init__(self, name, display_name=None, description=None, retry_policies=None):
super(CustomStep, self).__init__(
name, StepTypeEnum.TRAINING, display_name, description, None, retry_policies
)
self._properties = Properties(path=f"Steps.{name}")
@property
def arguments(self):
return dict()
@property
def properties(self):
return self._properties
class DummyFrameworkModel(FrameworkModel):
def __init__(self, sagemaker_session, **kwargs):
super(DummyFrameworkModel, self).__init__(
"s3://bucket/model_1.tar.gz",
"mi-1",
ROLE,
os.path.join(DATA_DIR, "dummy_script.py"),
sagemaker_session=sagemaker_session,
**kwargs,
)
def create_predictor(self, endpoint_name):
return Predictor(endpoint_name, self.sagemaker_session)
@pytest.fixture
def boto_session():
role_mock = Mock()
type(role_mock).arn = PropertyMock(return_value=ROLE)
resource_mock = Mock()
resource_mock.Role.return_value = role_mock
session_mock = Mock(region_name=REGION)
session_mock.resource.return_value = resource_mock
return session_mock
@pytest.fixture
def client():
"""Mock client.
Considerations when appropriate:
* utilize botocore.stub.Stubber
* separate runtime client from client
"""
client_mock = Mock()
client_mock._client_config.user_agent = (
"Boto3/1.14.24 Python/3.8.5 Linux/5.4.0-42-generic Botocore/1.17.24 Resource"
)
return client_mock
@pytest.fixture
def sagemaker_session(boto_session, client):
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=client,
sagemaker_runtime_client=client,
default_bucket=BUCKET,
)
@pytest.fixture
def script_processor(sagemaker_session):
return ScriptProcessor(
role=ROLE,
image_uri="012345678901.dkr.ecr.us-west-2.amazonaws.com/my-custom-image-uri",
command=["python3"],
instance_type="ml.m4.xlarge",
instance_count=1,
volume_size_in_gb=100,
volume_kms_key="arn:aws:kms:us-west-2:012345678901:key/volume-kms-key",
output_kms_key="arn:aws:kms:us-west-2:012345678901:key/output-kms-key",
max_runtime_in_seconds=3600,
base_job_name="my_sklearn_processor",
env={"my_env_variable": "my_env_variable_value"},
tags=[{"Key": "my-tag", "Value": "my-tag-value"}],
network_config=NetworkConfig(
subnets=["my_subnet_id"],
security_group_ids=["my_security_group_id"],
enable_network_isolation=True,
encrypt_inter_container_traffic=True,
),
sagemaker_session=sagemaker_session,
)
def test_processing_step_inputs_exceed_max(sagemaker_session):
processing_input_data_uri_parameter = ParameterString(
name="ProcessingInputDataUri", default_value=f"s3://{BUCKET}/processing_manifest"
)
instance_type_parameter = ParameterString(name="InstanceType", default_value="ml.m4.4xlarge")
instance_count_parameter = ParameterInteger(name="InstanceCount", default_value=1)
processor = Processor(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=instance_count_parameter,
instance_type=instance_type_parameter,
sagemaker_session=sagemaker_session,
)
inputs = []
for _ in range(11):
inputs.append(
ProcessingInput(
source=processing_input_data_uri_parameter,
destination="processing_manifest",
)
)
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
evaluation_report = PropertyFile(
name="EvaluationReport", output_name="evaluation", path="evaluation.json"
)
step = ProcessingStep(
name="MyProcessingStep",
description="ProcessingStep description",
display_name="MyProcessingStep",
depends_on=["TestStep", "SecondTestStep"],
processor=processor,
inputs=inputs,
outputs=[],
cache_config=cache_config,
property_files=[evaluation_report],
)
step.add_depends_on(["ThirdTestStep"])
pipeline = Pipeline(
name="MyPipeline",
parameters=[
processing_input_data_uri_parameter,
instance_type_parameter,
instance_count_parameter,
],
steps=[step],
sagemaker_session=sagemaker_session,
)
with pytest.raises(ValidationError) as error:
pipeline.create(role_arn=ROLE)
assert (
str(error.value)
== f"Invalid value ('{len(inputs)}') for param {inputs} of type {type(inputs)} "
)
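# To run just this test in isolation (sketch of a typical pytest invocation):
#   pytest tests/unit/sagemaker/workflow/test_pipeline_validation.py -k inputs_exceed_max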
|
{"hexsha": "5b6dab78ef455009a5297ae372a01e0753ba92e3", "size": 5730, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/sagemaker/workflow/test_pipeline_validation.py", "max_stars_repo_name": "svia3/sagemaker-python-sdk", "max_stars_repo_head_hexsha": "5fec604481338511c5866422675b55439c72bf8f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/sagemaker/workflow/test_pipeline_validation.py", "max_issues_repo_name": "svia3/sagemaker-python-sdk", "max_issues_repo_head_hexsha": "5fec604481338511c5866422675b55439c72bf8f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/sagemaker/workflow/test_pipeline_validation.py", "max_forks_repo_name": "svia3/sagemaker-python-sdk", "max_forks_repo_head_hexsha": "5fec604481338511c5866422675b55439c72bf8f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3846153846, "max_line_length": 97, "alphanum_fraction": 0.6853403141, "include": true, "reason": "import sage,from sage", "num_tokens": 1265}
|
import pyzbar.pyzbar as pyzbar
import cv2
import numpy as np
import math
from shapely import geometry
import logging.config
from LabTable.TableOutputStream import TableOutputStream, TableOutputChannel
from LabTable.ExtentTracker import ExtentTracker
from LabTable.Model.Extent import Extent
from LabTable.Model.Board import Board
# configure logging
logger = logging.getLogger(__name__)
# Objects whose distance to the board lies outside (1 ± CLIP) times the board
# distance will be excluded from processing
# Number of frames for computing background average
MIN_LOOP_NUMBER = 5
MAX_LOOP_NUMBER = 30
# accumulate weighted parameter
INPUT_WEIGHT = 0.5
# Maximum (white) value used when thresholding with cv2.threshold
MAX_VALUE = 255
# This class detects the board in the video stream and manages the extent of
# the board relative to the video frame
class BoardDetector:
background = None
last_color_image = None
def __init__(self, config):
self.config = config
# Initialize the board
self.board = Board()
# Array with all polygons of QR-Codes for board corners
self.all_codes_polygons_points = [None, None, None, None]
self.found_codes_number = 0
self.code_found_flag = False
# Get the resolution from config file
self.frame_width = self.config.get("video_resolution", "width")
self.frame_height = self.config.get("video_resolution", "height")
self.current_loop = 0
self.detect_corners_frames_number = 0
# Compute pythagoras value
@staticmethod
def pythagoras(value_x, value_y):
value = math.sqrt(value_x ** 2 + value_y ** 2)
# Return pythagoras value
return value
# Compute distance between two points
@staticmethod
def calculate_distance(value1_x, value1_y, value2_x, value2_y):
value_x = value1_x - value2_x
value_y = value1_y - value2_y
distance = BoardDetector.pythagoras(value_x, value_y)
# Return distance between two points
return distance
# Compute normal vector
@staticmethod
def normal_vector(point1, point2):
distance = BoardDetector.calculate_distance(point1[0], point1[1], point2[0], point2[1])
normal_vector_x = (point1[0] - point2[0]) / distance
normal_vector_y = (point1[1] - point2[1]) / distance
# Return normal vector
return normal_vector_x, normal_vector_y
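    # Example (sketch): calculate_distance(0, 0, 3, 4) == 5.0 and
    # normal_vector((3, 4), (0, 0)) == (0.6, 0.8), the unit vector pointing
    # from point2 towards point1.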
# Compute position of diagonal board corners, based on position of qr-codes centroids
def set_corners(self, qrcode_points, centroid1, centroid2, centroid_corner_distance):
corner1 = [0, 0]
corner2 = [0, 0]
# TODO: write a function
# Compute vector of length 1 for diagonal of board corners (between two centroids)
board_diagonal_normal_vector = self.normal_vector(centroid1, centroid2)
# Compute vector of length 1 for both diagonals of a qr_code square and choose the right one
# Compute vector of length 1 for the first diagonal
qrcode_diagonal1_normal_vector = self.normal_vector(qrcode_points[0], qrcode_points[2])
# Compute vector of length 1 for the second diagonal
qrcode_diagonal2_normal_vector = self.normal_vector(qrcode_points[1], qrcode_points[3])
# Choose the right diagonal, must have the same sign as the diagonal of given board corners
# Minus * Minus -> Plus
# Plus * Plus -> Plus
# Plus * Minus -> Minus
# If the product is > 0, it is the right diagonal of the qrcode
if board_diagonal_normal_vector[0] * qrcode_diagonal1_normal_vector[0] * \
board_diagonal_normal_vector[1] * qrcode_diagonal1_normal_vector[1] > 0:
diagonal = qrcode_diagonal1_normal_vector
else:
diagonal = qrcode_diagonal2_normal_vector
# Compute vectors of length = centroid_corner_distance for diagonal direction
diagonal_vector_x = diagonal[0] * centroid_corner_distance
diagonal_vector_y = diagonal[1] * centroid_corner_distance
# Ensure that vectors are positive in x and y directions
diagonal_vector_x = self.pythagoras(diagonal_vector_x, 0)
diagonal_vector_y = self.pythagoras(diagonal_vector_y, 0)
# Compute position of corners (distance between corners must be bigger than between centroids)
if centroid1[0] < centroid2[0]:
corner1[0] = centroid1[0] - diagonal_vector_x
corner2[0] = centroid2[0] + diagonal_vector_x
else:
corner1[0] = centroid1[0] + diagonal_vector_x
corner2[0] = centroid2[0] - diagonal_vector_x
if centroid1[1] < centroid2[1]:
corner1[1] = centroid1[1] - diagonal_vector_y
corner2[1] = centroid2[1] + diagonal_vector_y
else:
corner1[1] = centroid1[1] + diagonal_vector_y
corner2[1] = centroid2[1] - diagonal_vector_y
# Cast to int
corner1[0] = int(corner1[0])
corner1[1] = int(corner1[1])
corner2[0] = int(corner2[0])
corner2[1] = int(corner2[1])
# Do not allow for position lower than 0
if corner1[0] < 0:
corner1[0] = 0
if corner1[1] < 0:
corner1[1] = 0
if corner2[0] < 0:
corner2[0] = 0
if corner2[1] < 0:
corner2[1] = 0
return corner1, corner2
    # Save the four polygons of QR codes decoded over a couple of frames and read metadata
def read_qr_codes(self, decoded_codes):
# Check all found codes
for code in decoded_codes:
# Decode binary data which is saved in QR-code
code_data = code.data.decode()
            # If the corresponding slot in the array [top left, top right,
            # bottom right, bottom left] is not set yet, store the newly found polygon
if "TL" in code_data and self.all_codes_polygons_points[0] is None:
self.all_codes_polygons_points[0] = code.polygon
logger.debug("detected TL at {}".format(code.polygon))
if "TR" in code_data and self.all_codes_polygons_points[1] is None:
self.all_codes_polygons_points[1] = code.polygon
logger.debug("detected TR at {}".format(code.polygon))
if "BR" in code_data and self.all_codes_polygons_points[2] is None:
self.all_codes_polygons_points[2] = code.polygon
logger.debug("detected BL at {}".format(code.polygon))
if "BL" in code_data and self.all_codes_polygons_points[3] is None:
self.all_codes_polygons_points[3] = code.polygon
logger.debug("detected BR at {}".format(code.polygon))
# Detect the board using four QR-Codes in the board corners
def detect_board(self, color_image, output_stream: TableOutputStream):
# Compute difference between background and the current frame
diff = cv2.absdiff(color_image, self.background.astype("uint8"))
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
ret_val, diff = cv2.threshold(diff, 0, MAX_VALUE, cv2.THRESH_OTSU)
# Invert image
looking_for_qr_code_image = 255 - diff
        # Decode QR or bar codes from the inverted black-and-white image
decoded_codes = pyzbar.decode(looking_for_qr_code_image)
        # Mark found QR codes on the BGR-converted detection image and display it in the QR channel
looking_for_qr_code_image = cv2.cvtColor(looking_for_qr_code_image, cv2.COLOR_GRAY2BGR)
self.display_found_codes(looking_for_qr_code_image, decoded_codes)
output_stream.write_to_channel(TableOutputChannel.CHANNEL_QR_DETECTION, looking_for_qr_code_image)
# Read codes which were decoded in this frame:
        # save polygons in the array self.all_codes_polygons_points and read metadata
self.read_qr_codes(decoded_codes)
top_left_corner = None
top_right_corner = None
bottom_right_corner = None
bottom_left_corner = None
centroids = []
all_board_corners_found = False
centroid_corner_distance = None
# Count found qr-codes
self.board.found_codes_number = sum(code is not None for code in self.all_codes_polygons_points)
# Update the flag
if self.board.found_codes_number > self.found_codes_number:
self.found_codes_number = self.board.found_codes_number
if self.code_found_flag is False:
self.code_found_flag = True
self.detect_corners_frames_number = 0
# Continue if all needed data is available
if self.found_codes_number == 4:
# Iterate through the array with four sets of points for polygons
for points_idx in range(len(self.all_codes_polygons_points)):
# Compute the centroid (middle) of the single code
code_polygon = geometry.Polygon([[point.x, point.y]
for point in self.all_codes_polygons_points[points_idx]])
code_centroid = int(code_polygon.centroid.x), int(code_polygon.centroid.y)
logger.debug("QR-code centroid found: {}".format(code_centroid))
            # Compute the distance between the centroid and the first corner of the code polygon
centroid_corner_distance = BoardDetector.calculate_distance \
(self.all_codes_polygons_points[points_idx][0].x,
self.all_codes_polygons_points[points_idx][0].y,
code_polygon.centroid.x, code_polygon.centroid.y)
# Save all centroids in an array -> [top left, top right, bottom right, bottom left]
centroids.append(code_centroid)
# Compute corners position
if centroid_corner_distance is not None:
            # Compute position of the top left and bottom right board corners
top_left_corner, bottom_right_corner = \
self.set_corners(self.all_codes_polygons_points[0],
centroids[0], centroids[2], int(centroid_corner_distance))
logger.debug("TL corner: {}, BR corner: {}".format(top_left_corner, bottom_right_corner))
            # Compute position of the top right and bottom left board corners
top_right_corner, bottom_left_corner = \
self.set_corners(self.all_codes_polygons_points[1],
centroids[1], centroids[3], int(centroid_corner_distance))
logger.debug("TR corner: {}, BL corner: {}".format(top_right_corner, bottom_left_corner))
# If all corners are found, save them in the right order
if top_left_corner is not None and top_right_corner is not None and \
bottom_right_corner is not None and bottom_left_corner is not None:
self.board.corners = [top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner]
self.compute_board_size(self.board.corners)
logger.info("all board corners found: {}".format(self.board.corners))
all_board_corners_found = True
return all_board_corners_found
# Find min and max for x and y position of the board
@staticmethod
def find_min_max(corners):
x = []
y = []
# FIXME: x & y are sometimes switched?!
for corner in corners:
x.append(corner[0])
y.append(corner[1])
# x.append(corner[1])
# y.append(corner[0])
return min(x), min(y), max(x), max(y)
    # Warp the frame perspective to a top-down view (rectangle)
def rectify(self, image, corners):
# Save given corners in a numpy array
source_corners = np.zeros((4, 2), dtype="float32")
source_corners[0] = corners[0]
source_corners[1] = corners[1]
source_corners[2] = corners[2]
source_corners[3] = corners[3]
# Construct destination points which will be used to map the board to a top-down view
destination_corners = np.array([
[0, 0],
[self.board.width - 1, 0],
[self.board.width - 1, self.board.height - 1],
[0, self.board.height - 1]], dtype="float32")
# Calculate the perspective transform matrix
matrix = cv2.getPerspectiveTransform(source_corners, destination_corners)
rectified_image = cv2.warpPerspective(image, matrix, (self.board.width, self.board.height))
return rectified_image
# Compute board size and set in configs
def compute_board_size(self, corners):
min_x, min_y, max_x, max_y = self.find_min_max(corners)
# Compute board size
self.board.width = max_x - min_x
self.board.height = max_y - min_y
ExtentTracker.get_instance().board = Extent.from_rectangle(0, 0, self.board.width, self.board.height)
logger.info('board has been set to {}'.format(ExtentTracker.get_instance().board))
# Display QR-codes location
@staticmethod
def display_found_codes(frame, decoded_objects):
# Loop over all decoded objects
for decoded_object in decoded_objects:
points = decoded_object.polygon
# If the points do not form a quad, find convex hull
if len(points) > 4:
hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))
hull = list(map(tuple, np.squeeze(hull)))
else:
hull = points
# Number of points in the convex hull
n = len(hull)
# Draw the convex hull
for j in range(0, n):
cv2.line(frame, hull[j], hull[(j + 1) % n], (0, 255, 0), 3)
# Compute region of interest (board area) from the color image
def rectify_image(self, region_of_interest, color_image):
        # Check that all found QR-code marker positions lie within the frame
        if all(0 <= corner[0] < color_image.shape[1] and 0 <= corner[1] < color_image.shape[0]
               for corner in self.board.corners):
# Eliminate perspective transformations and show only the board
rectified_image = self.rectify(color_image, self.board.corners)
region_of_interest[0:self.board.height, 0:self.board.width] = rectified_image
# return the clipped board
return region_of_interest[0:self.board.height, 0:self.board.width]
    # Saves the average image over a certain time period; returns True once enough iterations were done
# FIXME: as the background currently is only used for qr-code detection we might try it without it
# FIXME: or integrate the qr-code check in the iterative background generation
def compute_background(self, color_image):
# Save background
        if self.current_loop == 0:
            self.background = color_image.copy().astype("float")
        # Snapshot the previous average so convergence can be detected below
        self.last_color_image = self.background.copy()
        if self.current_loop < MAX_LOOP_NUMBER:
            # Update a running average
            cv2.accumulateWeighted(color_image, self.background, INPUT_WEIGHT)
            self.current_loop += 1
            # finish white balance if no more change from the average can be detected
            # (element-wise comparison; comparing the boolean .all() results was a no-op)
            if self.current_loop > MIN_LOOP_NUMBER and np.allclose(self.last_color_image, self.background):
                logger.info("found a stable white balance after {} iterations".format(self.current_loop))
                return True
else:
logger.info("white balance reached maximum number of iterations ({})".format(MAX_LOOP_NUMBER))
return True
return False
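# Minimal usage sketch (illustrative only; assumes a config object exposing
# "video_resolution" and a capture loop supplying frames -- grab_frame() and
# roi_buffer are placeholder names, not part of this file):
#
#   detector = BoardDetector(config)
#   while not detector.compute_background(grab_frame()):
#       pass
#   frame = grab_frame()
#   if detector.detect_board(frame, output_stream):
#       board_view = detector.rectify_image(roi_buffer, frame)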
|
{"hexsha": "26cd0d7994c9bee9daaabfe82adb1e08c3cc49fa", "size": 15710, "ext": "py", "lang": "Python", "max_stars_repo_path": "LabTable/BrickDetection/BoardDetector.py", "max_stars_repo_name": "boku-ilen/legoboard", "max_stars_repo_head_hexsha": "ec9bcd6467b83f7bc873639911480d65caf2f813", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LabTable/BrickDetection/BoardDetector.py", "max_issues_repo_name": "boku-ilen/legoboard", "max_issues_repo_head_hexsha": "ec9bcd6467b83f7bc873639911480d65caf2f813", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LabTable/BrickDetection/BoardDetector.py", "max_forks_repo_name": "boku-ilen/legoboard", "max_forks_repo_head_hexsha": "ec9bcd6467b83f7bc873639911480d65caf2f813", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5608465608, "max_line_length": 112, "alphanum_fraction": 0.6512412476, "include": true, "reason": "import numpy", "num_tokens": 3566}
|
/-
Copyright (c) 2020 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import topology.opens
import ring_theory.ideal.prod
import ring_theory.ideal.over
import linear_algebra.finsupp
import algebra.punit_instances
/-!
# Prime spectrum of a commutative ring
The prime spectrum of a commutative ring is the type of all prime ideals.
It is naturally endowed with a topology: the Zariski topology.
(It is also naturally endowed with a sheaf of rings,
which is constructed in `algebraic_geometry.structure_sheaf`.)
## Main definitions
* `prime_spectrum R`: The prime spectrum of a commutative ring `R`,
i.e., the set of all prime ideals of `R`.
* `zero_locus s`: The zero locus of a subset `s` of `R`
is the subset of `prime_spectrum R` consisting of all prime ideals that contain `s`.
* `vanishing_ideal t`: The vanishing ideal of a subset `t` of `prime_spectrum R`
is the intersection of points in `t` (viewed as prime ideals).
## Conventions
We denote subsets of rings with `s`, `s'`, etc...
whereas we denote subsets of prime spectra with `t`, `t'`, etc...
## Inspiration/contributors
The contents of this file draw inspiration from
<https://github.com/ramonfmir/lean-scheme>
which has contributions from Ramon Fernandez Mir, Kevin Buzzard, Kenny Lau,
and Chris Hughes (on an earlier repository).
-/
noncomputable theory
open_locale classical
universes u v
variables (R : Type u) [comm_ring R]
/-- The prime spectrum of a commutative ring `R`
is the type of all prime ideals of `R`.
It is naturally endowed with a topology (the Zariski topology),
and a sheaf of commutative rings (see `algebraic_geometry.structure_sheaf`).
It is a fundamental building block in algebraic geometry. -/
@[nolint has_inhabited_instance]
def prime_spectrum := {I : ideal R // I.is_prime}
variable {R}
namespace prime_spectrum
/-- A method to view a point in the prime spectrum of a commutative ring
as an ideal of that ring. -/
abbreviation as_ideal (x : prime_spectrum R) : ideal R := x.val
instance is_prime (x : prime_spectrum R) :
x.as_ideal.is_prime := x.2
/--
The prime spectrum of the zero ring is empty.
-/
lemma punit (x : prime_spectrum punit) : false :=
x.1.ne_top_iff_one.1 x.2.1 $ subsingleton.elim (0 : punit) 1 ▸ x.1.zero_mem
section
variables (R) (S : Type v) [comm_ring S]
/-- The prime spectrum of `R × S` is in bijection with the disjoint unions of the prime spectrum of
`R` and the prime spectrum of `S`. -/
noncomputable def prime_spectrum_prod :
prime_spectrum (R × S) ≃ prime_spectrum R ⊕ prime_spectrum S :=
ideal.prime_ideals_equiv R S
variables {R S}
@[simp] lemma prime_spectrum_prod_symm_inl_as_ideal (x : prime_spectrum R) :
((prime_spectrum_prod R S).symm (sum.inl x)).as_ideal = ideal.prod x.as_ideal ⊤ :=
by { cases x, refl }
@[simp] lemma prime_spectrum_prod_symm_inr_as_ideal (x : prime_spectrum S) :
((prime_spectrum_prod R S).symm (sum.inr x)).as_ideal = ideal.prod ⊤ x.as_ideal :=
by { cases x, refl }
end
@[ext] lemma ext {x y : prime_spectrum R} :
x = y ↔ x.as_ideal = y.as_ideal :=
subtype.ext_iff_val
/-- The zero locus of a set `s` of elements of a commutative ring `R`
is the set of all prime ideals of the ring that contain the set `s`.
An element `f` of `R` can be thought of as a dependent function
on the prime spectrum of `R`.
At a point `x` (a prime ideal)
the function (i.e., element) `f` takes values in the quotient ring `R` modulo the prime ideal `x`.
In this manner, `zero_locus s` is exactly the subset of `prime_spectrum R`
where all "functions" in `s` vanish simultaneously.
-/
def zero_locus (s : set R) : set (prime_spectrum R) :=
{x | s ⊆ x.as_ideal}
@[simp] lemma mem_zero_locus (x : prime_spectrum R) (s : set R) :
x ∈ zero_locus s ↔ s ⊆ x.as_ideal := iff.rfl
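-- Worked example (informal, not part of mathlib): in `ℤ`, `zero_locus {12}`
-- consists of the prime ideals containing `12`, namely `(2)` and `(3)`.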
@[simp] lemma zero_locus_span (s : set R) :
zero_locus (ideal.span s : set R) = zero_locus s :=
by { ext x, exact (submodule.gi R R).gc s x.as_ideal }
/-- The vanishing ideal of a set `t` of points
of the prime spectrum of a commutative ring `R`
is the intersection of all the prime ideals in the set `t`.
An element `f` of `R` can be thought of as a dependent function
on the prime spectrum of `R`.
At a point `x` (a prime ideal)
the function (i.e., element) `f` takes values in the quotient ring `R` modulo the prime ideal `x`.
In this manner, `vanishing_ideal t` is exactly the ideal of `R`
consisting of all "functions" that vanish on all of `t`.
-/
def vanishing_ideal (t : set (prime_spectrum R)) : ideal R :=
⨅ (x : prime_spectrum R) (h : x ∈ t), x.as_ideal
lemma coe_vanishing_ideal (t : set (prime_spectrum R)) :
(vanishing_ideal t : set R) = {f : R | ∀ x : prime_spectrum R, x ∈ t → f ∈ x.as_ideal} :=
begin
ext f,
rw [vanishing_ideal, set_like.mem_coe, submodule.mem_infi],
apply forall_congr, intro x,
rw [submodule.mem_infi],
end
lemma mem_vanishing_ideal (t : set (prime_spectrum R)) (f : R) :
f ∈ vanishing_ideal t ↔ ∀ x : prime_spectrum R, x ∈ t → f ∈ x.as_ideal :=
by rw [← set_like.mem_coe, coe_vanishing_ideal, set.mem_set_of_eq]
@[simp] lemma vanishing_ideal_singleton (x : prime_spectrum R) :
vanishing_ideal ({x} : set (prime_spectrum R)) = x.as_ideal :=
by simp [vanishing_ideal]
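-- Worked example (informal): in `ℤ`, the vanishing ideal of `{(2), (3)}`
-- is the intersection `(2) ⊓ (3) = (6)`.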
lemma subset_zero_locus_iff_le_vanishing_ideal (t : set (prime_spectrum R)) (I : ideal R) :
t ⊆ zero_locus I ↔ I ≤ vanishing_ideal t :=
⟨λ h f k, (mem_vanishing_ideal _ _).mpr (λ x j, (mem_zero_locus _ _).mpr (h j) k), λ h,
λ x j, (mem_zero_locus _ _).mpr (le_trans h (λ f h, ((mem_vanishing_ideal _ _).mp h) x j))⟩
section gc
variable (R)
/-- `zero_locus` and `vanishing_ideal` form a galois connection. -/
lemma gc : @galois_connection
(ideal R) (order_dual (set (prime_spectrum R))) _ _
(λ I, zero_locus I) (λ t, vanishing_ideal t) :=
λ I t, subset_zero_locus_iff_le_vanishing_ideal t I
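-- Informally: `t ⊆ zero_locus I ↔ I ≤ vanishing_ideal t` (the set side carries
-- the reverse-inclusion order), so the generic Galois-connection lemmas used
-- below (`le_u_l`, `l_u_le`, monotonicity, ...) come for free.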
/-- `zero_locus` and `vanishing_ideal` form a galois connection. -/
lemma gc_set : @galois_connection
(set R) (order_dual (set (prime_spectrum R))) _ _
(λ s, zero_locus s) (λ t, vanishing_ideal t) :=
have ideal_gc : galois_connection (ideal.span) coe := (submodule.gi R R).gc,
by simpa [zero_locus_span, function.comp] using ideal_gc.compose (gc R)
lemma subset_zero_locus_iff_subset_vanishing_ideal (t : set (prime_spectrum R)) (s : set R) :
t ⊆ zero_locus s ↔ s ⊆ vanishing_ideal t :=
(gc_set R) s t
end gc
lemma subset_vanishing_ideal_zero_locus (s : set R) :
s ⊆ vanishing_ideal (zero_locus s) :=
(gc_set R).le_u_l s
lemma le_vanishing_ideal_zero_locus (I : ideal R) :
I ≤ vanishing_ideal (zero_locus I) :=
(gc R).le_u_l I
@[simp] lemma vanishing_ideal_zero_locus_eq_radical (I : ideal R) :
vanishing_ideal (zero_locus (I : set R)) = I.radical := ideal.ext $ λ f,
begin
rw [mem_vanishing_ideal, ideal.radical_eq_Inf, submodule.mem_Inf],
exact ⟨(λ h x hx, h ⟨x, hx.2⟩ hx.1), (λ h x hx, h x.1 ⟨hx, x.2⟩)⟩
end
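-- Worked example (informal): in `ℤ`, `vanishing_ideal (zero_locus (4))` is the
-- radical of `(4)`, i.e. `(2)`: the only prime containing `4` is `(2)`.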
@[simp] lemma zero_locus_radical (I : ideal R) : zero_locus (I.radical : set R) = zero_locus I :=
vanishing_ideal_zero_locus_eq_radical I ▸ (gc R).l_u_l_eq_l I
lemma subset_zero_locus_vanishing_ideal (t : set (prime_spectrum R)) :
t ⊆ zero_locus (vanishing_ideal t) :=
(gc R).l_u_le t
lemma zero_locus_anti_mono {s t : set R} (h : s ⊆ t) : zero_locus t ⊆ zero_locus s :=
(gc_set R).monotone_l h
lemma zero_locus_anti_mono_ideal {s t : ideal R} (h : s ≤ t) :
zero_locus (t : set R) ⊆ zero_locus (s : set R) :=
(gc R).monotone_l h
lemma vanishing_ideal_anti_mono {s t : set (prime_spectrum R)} (h : s ⊆ t) :
vanishing_ideal t ≤ vanishing_ideal s :=
(gc R).monotone_u h
lemma zero_locus_subset_zero_locus_iff (I J : ideal R) :
zero_locus (I : set R) ⊆ zero_locus (J : set R) ↔ J ≤ I.radical :=
⟨λ h, ideal.radical_le_radical_iff.mp (vanishing_ideal_zero_locus_eq_radical I ▸
vanishing_ideal_zero_locus_eq_radical J ▸ vanishing_ideal_anti_mono h),
λ h, zero_locus_radical I ▸ zero_locus_anti_mono_ideal h⟩
lemma zero_locus_subset_zero_locus_singleton_iff (f g : R) :
zero_locus ({f} : set R) ⊆ zero_locus {g} ↔ g ∈ (ideal.span ({f} : set R)).radical :=
by rw [← zero_locus_span {f}, ← zero_locus_span {g}, zero_locus_subset_zero_locus_iff,
ideal.span_le, set.singleton_subset_iff, set_like.mem_coe]
lemma zero_locus_bot :
zero_locus ((⊥ : ideal R) : set R) = set.univ :=
(gc R).l_bot
@[simp] lemma zero_locus_singleton_zero :
zero_locus ({0} : set R) = set.univ :=
zero_locus_bot
@[simp] lemma zero_locus_empty :
zero_locus (∅ : set R) = set.univ :=
(gc_set R).l_bot
@[simp] lemma vanishing_ideal_univ :
vanishing_ideal (∅ : set (prime_spectrum R)) = ⊤ :=
by simpa using (gc R).u_top
lemma zero_locus_empty_of_one_mem {s : set R} (h : (1:R) ∈ s) :
zero_locus s = ∅ :=
begin
rw set.eq_empty_iff_forall_not_mem,
intros x hx,
rw mem_zero_locus at hx,
have x_prime : x.as_ideal.is_prime := by apply_instance,
have eq_top : x.as_ideal = ⊤, { rw ideal.eq_top_iff_one, exact hx h },
apply x_prime.ne_top eq_top,
end
@[simp] lemma zero_locus_singleton_one :
zero_locus ({1} : set R) = ∅ :=
zero_locus_empty_of_one_mem (set.mem_singleton (1 : R))
lemma zero_locus_empty_iff_eq_top {I : ideal R} :
zero_locus (I : set R) = ∅ ↔ I = ⊤ :=
begin
split,
{ contrapose!,
intro h,
apply set.ne_empty_iff_nonempty.mpr,
rcases ideal.exists_le_maximal I h with ⟨M, hM, hIM⟩,
exact ⟨⟨M, hM.is_prime⟩, hIM⟩ },
{ rintro rfl, apply zero_locus_empty_of_one_mem, trivial }
end
@[simp] lemma zero_locus_univ :
zero_locus (set.univ : set R) = ∅ :=
zero_locus_empty_of_one_mem (set.mem_univ 1)
lemma zero_locus_sup (I J : ideal R) :
zero_locus ((I ⊔ J : ideal R) : set R) = zero_locus I ∩ zero_locus J :=
(gc R).l_sup
lemma zero_locus_union (s s' : set R) :
zero_locus (s ∪ s') = zero_locus s ∩ zero_locus s' :=
(gc_set R).l_sup
lemma vanishing_ideal_union (t t' : set (prime_spectrum R)) :
vanishing_ideal (t ∪ t') = vanishing_ideal t ⊓ vanishing_ideal t' :=
(gc R).u_inf
lemma zero_locus_supr {ι : Sort*} (I : ι → ideal R) :
zero_locus ((⨆ i, I i : ideal R) : set R) = (⋂ i, zero_locus (I i)) :=
(gc R).l_supr
lemma zero_locus_Union {ι : Sort*} (s : ι → set R) :
zero_locus (⋃ i, s i) = (⋂ i, zero_locus (s i)) :=
(gc_set R).l_supr
lemma zero_locus_bUnion (s : set (set R)) :
zero_locus (⋃ s' ∈ s, s' : set R) = ⋂ s' ∈ s, zero_locus s' :=
by simp only [zero_locus_Union]
lemma vanishing_ideal_Union {ι : Sort*} (t : ι → set (prime_spectrum R)) :
vanishing_ideal (⋃ i, t i) = (⨅ i, vanishing_ideal (t i)) :=
(gc R).u_infi
lemma zero_locus_inf (I J : ideal R) :
zero_locus ((I ⊓ J : ideal R) : set R) = zero_locus I ∪ zero_locus J :=
set.ext $ λ x, by simpa using x.2.inf_le
lemma union_zero_locus (s s' : set R) :
zero_locus s ∪ zero_locus s' = zero_locus ((ideal.span s) ⊓ (ideal.span s') : ideal R) :=
by { rw zero_locus_inf, simp }
lemma zero_locus_mul (I J : ideal R) :
zero_locus ((I * J : ideal R) : set R) = zero_locus I ∪ zero_locus J :=
set.ext $ λ x, by simpa using x.2.mul_le
lemma zero_locus_singleton_mul (f g : R) :
zero_locus ({f * g} : set R) = zero_locus {f} ∪ zero_locus {g} :=
set.ext $ λ x, by simpa using x.2.mul_mem_iff_mem_or_mem
@[simp] lemma zero_locus_pow (I : ideal R) {n : ℕ} (hn : 0 < n) :
zero_locus ((I ^ n : ideal R) : set R) = zero_locus I :=
zero_locus_radical (I ^ n) ▸ (I.radical_pow n hn).symm ▸ zero_locus_radical I
@[simp] lemma zero_locus_singleton_pow (f : R) (n : ℕ) (hn : 0 < n) :
zero_locus ({f ^ n} : set R) = zero_locus {f} :=
set.ext $ λ x, by simpa using x.2.pow_mem_iff_mem n hn
lemma sup_vanishing_ideal_le (t t' : set (prime_spectrum R)) :
vanishing_ideal t ⊔ vanishing_ideal t' ≤ vanishing_ideal (t ∩ t') :=
begin
intros r,
rw [submodule.mem_sup, mem_vanishing_ideal],
rintro ⟨f, hf, g, hg, rfl⟩ x ⟨hxt, hxt'⟩,
rw mem_vanishing_ideal at hf hg,
apply submodule.add_mem; solve_by_elim
end
lemma mem_compl_zero_locus_iff_not_mem {f : R} {I : prime_spectrum R} :
I ∈ (zero_locus {f} : set (prime_spectrum R))ᶜ ↔ f ∉ I.as_ideal :=
by rw [set.mem_compl_eq, mem_zero_locus, set.singleton_subset_iff]; refl
/-- The Zariski topology on the prime spectrum of a commutative ring
is defined via the closed sets of the topology:
they are exactly those sets that are the zero locus of a subset of the ring. -/
instance zariski_topology : topological_space (prime_spectrum R) :=
topological_space.of_closed (set.range prime_spectrum.zero_locus)
(⟨set.univ, by simp⟩)
begin
intros Zs h,
rw set.sInter_eq_Inter,
let f : Zs → set R := λ i, classical.some (h i.2),
have hf : ∀ i : Zs, ↑i = zero_locus (f i) := λ i, (classical.some_spec (h i.2)).symm,
simp only [hf],
exact ⟨_, zero_locus_Union _⟩
end
(by { rintro _ _ ⟨s, rfl⟩ ⟨t, rfl⟩, exact ⟨_, (union_zero_locus s t).symm⟩ })
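-- Informally, `of_closed` needs exactly: `∅ = zero_locus set.univ`, arbitrary
-- intersections of zero loci are zero loci (via `zero_locus_Union`), and
-- binary unions of zero loci are zero loci (via `union_zero_locus`).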
lemma is_open_iff (U : set (prime_spectrum R)) :
is_open U ↔ ∃ s, Uᶜ = zero_locus s :=
by simp only [@eq_comm _ Uᶜ]; refl
lemma is_closed_iff_zero_locus (Z : set (prime_spectrum R)) :
is_closed Z ↔ ∃ s, Z = zero_locus s :=
by rw [← is_open_compl_iff, is_open_iff, compl_compl]
lemma is_closed_zero_locus (s : set R) :
is_closed (zero_locus s) :=
by { rw [is_closed_iff_zero_locus], exact ⟨s, rfl⟩ }
lemma is_closed_singleton_iff_is_maximal (x : prime_spectrum R) :
is_closed ({x} : set (prime_spectrum R)) ↔ x.as_ideal.is_maximal :=
begin
refine (is_closed_iff_zero_locus _).trans ⟨λ h, _, λ h, _⟩,
{ obtain ⟨s, hs⟩ := h,
rw [eq_comm, set.eq_singleton_iff_unique_mem] at hs,
refine ⟨⟨x.2.1, λ I hI, not_not.1 (mt (ideal.exists_le_maximal I) $
not_exists.2 (λ J, not_and.2 $ λ hJ hIJ,_))⟩⟩,
exact ne_of_lt (lt_of_lt_of_le hI hIJ) (symm $ congr_arg prime_spectrum.as_ideal
(hs.2 ⟨J, hJ.is_prime⟩ (λ r hr, hIJ (le_of_lt hI $ hs.1 hr)))) },
{ refine ⟨x.as_ideal.1, _⟩,
rw [eq_comm, set.eq_singleton_iff_unique_mem],
refine ⟨λ _ h, h, λ y hy, prime_spectrum.ext.2 (h.eq_of_le y.2.ne_top hy).symm⟩ }
end
lemma zero_locus_vanishing_ideal_eq_closure (t : set (prime_spectrum R)) :
zero_locus (vanishing_ideal t : set R) = closure t :=
begin
apply set.subset.antisymm,
{ rintro x hx t' ⟨ht', ht⟩,
obtain ⟨fs, rfl⟩ : ∃ s, t' = zero_locus s,
by rwa [is_closed_iff_zero_locus] at ht',
rw [subset_zero_locus_iff_subset_vanishing_ideal] at ht,
exact set.subset.trans ht hx },
{ rw (is_closed_zero_locus _).closure_subset_iff,
exact subset_zero_locus_vanishing_ideal t }
end
lemma vanishing_ideal_closure (t : set (prime_spectrum R)) :
vanishing_ideal (closure t) = vanishing_ideal t :=
zero_locus_vanishing_ideal_eq_closure t ▸ (gc R).u_l_u_eq_u t
lemma t1_space_iff_is_field [is_domain R] :
t1_space (prime_spectrum R) ↔ is_field R :=
begin
refine ⟨_, λ h, _⟩,
{ introI h,
have hbot : ideal.is_prime (⊥ : ideal R) := ideal.bot_prime,
exact not_not.1 (mt (ring.ne_bot_of_is_maximal_of_not_is_field $
(is_closed_singleton_iff_is_maximal _).1 (t1_space.t1 ⟨⊥, hbot⟩)) (not_not.2 rfl)) },
{ refine ⟨λ x, (is_closed_singleton_iff_is_maximal x).2 _⟩,
by_cases hx : x.as_ideal = ⊥,
{ exact hx.symm ▸ @ideal.bot_is_maximal R (@field.to_division_ring _ $ is_field.to_field R h) },
{ exact absurd h (ring.not_is_field_iff_exists_prime.2 ⟨x.as_ideal, ⟨hx, x.2⟩⟩) } }
end
section comap
variables {S : Type v} [comm_ring S] {S' : Type*} [comm_ring S']
lemma preimage_comap_zero_locus_aux (f : R →+* S) (s : set R) :
(λ y, ⟨ideal.comap f y.as_ideal, infer_instance⟩ :
prime_spectrum S → prime_spectrum R) ⁻¹' (zero_locus s) = zero_locus (f '' s) :=
begin
ext x,
simp only [mem_zero_locus, set.image_subset_iff],
refl
end
/-- The function between prime spectra of commutative rings induced by a ring homomorphism.
This function is continuous. -/
def comap (f : R →+* S) : C(prime_spectrum S, prime_spectrum R) :=
{ to_fun := λ y, ⟨ideal.comap f y.as_ideal, infer_instance⟩,
continuous_to_fun :=
begin
simp only [continuous_iff_is_closed, is_closed_iff_zero_locus],
rintro _ ⟨s, rfl⟩,
exact ⟨_, preimage_comap_zero_locus_aux f s⟩
end }
variables (f : R →+* S)
@[simp] lemma comap_as_ideal (y : prime_spectrum S) :
(comap f y).as_ideal = ideal.comap f y.as_ideal :=
rfl
@[simp] lemma comap_id : comap (ring_hom.id R) = continuous_map.id := by { ext, refl }
@[simp] lemma comap_comp (f : R →+* S) (g : S →+* S') :
comap (g.comp f) = (comap f).comp (comap g) :=
rfl
@[simp] lemma preimage_comap_zero_locus (s : set R) :
(comap f) ⁻¹' (zero_locus s) = zero_locus (f '' s) :=
preimage_comap_zero_locus_aux f s
lemma comap_injective_of_surjective (f : R →+* S) (hf : function.surjective f) :
function.injective (comap f) :=
λ x y h, prime_spectrum.ext.2 (ideal.comap_injective_of_surjective f hf
(congr_arg prime_spectrum.as_ideal h : (comap f x).as_ideal = (comap f y).as_ideal))
lemma comap_singleton_is_closed_of_surjective (f : R →+* S) (hf : function.surjective f)
(x : prime_spectrum S) (hx : is_closed ({x} : set (prime_spectrum S))) :
is_closed ({comap f x} : set (prime_spectrum R)) :=
begin
haveI : x.as_ideal.is_maximal := (is_closed_singleton_iff_is_maximal x).1 hx,
exact (is_closed_singleton_iff_is_maximal _).2 (ideal.comap_is_maximal_of_surjective f hf)
end
lemma comap_singleton_is_closed_of_is_integral (f : R →+* S) (hf : f.is_integral)
(x : prime_spectrum S) (hx : is_closed ({x} : set (prime_spectrum S))) :
is_closed ({comap f x} : set (prime_spectrum R)) :=
(is_closed_singleton_iff_is_maximal _).2 (ideal.is_maximal_comap_of_is_integral_of_is_maximal'
f hf x.as_ideal $ (is_closed_singleton_iff_is_maximal x).1 hx)
variable S
lemma localization_comap_inducing [algebra R S] (M : submonoid R)
[is_localization M S] : inducing (comap (algebra_map R S)) :=
begin
constructor,
rw topological_space_eq_iff,
intro U,
simp_rw ← is_closed_compl_iff,
generalize : Uᶜ = Z,
simp_rw [is_closed_induced_iff, is_closed_iff_zero_locus],
split,
{ rintro ⟨s, rfl⟩,
refine ⟨_,⟨(algebra_map R S) ⁻¹' (ideal.span s),rfl⟩,_⟩,
rw [preimage_comap_zero_locus, ← zero_locus_span, ← zero_locus_span s],
congr' 1,
exact congr_arg submodule.carrier (is_localization.map_comap M S (ideal.span s)) },
{ rintro ⟨_, ⟨t, rfl⟩, rfl⟩, simp }
end
lemma localization_comap_injective [algebra R S] (M : submonoid R)
[is_localization M S] : function.injective (comap (algebra_map R S)) :=
begin
intros p q h,
replace h := congr_arg (λ (x : prime_spectrum R), ideal.map (algebra_map R S) x.as_ideal) h,
dsimp only at h,
erw [is_localization.map_comap M S, is_localization.map_comap M S] at h,
ext1,
exact h
end
lemma localization_comap_embedding [algebra R S] (M : submonoid R)
[is_localization M S] : embedding (comap (algebra_map R S)) :=
⟨localization_comap_inducing S M, localization_comap_injective S M⟩
lemma localization_comap_range [algebra R S] (M : submonoid R)
[is_localization M S] :
set.range (comap (algebra_map R S)) = { p | disjoint (M : set R) p.as_ideal } :=
begin
ext x,
split,
{ rintro ⟨p, rfl⟩ x ⟨hx₁, hx₂⟩,
exact (p.2.1 : ¬ _)
(p.as_ideal.eq_top_of_is_unit_mem hx₂ (is_localization.map_units S ⟨x, hx₁⟩)) },
{ intro h,
use ⟨x.as_ideal.map (algebra_map R S),
is_localization.is_prime_of_is_prime_disjoint M S _ x.2 h⟩,
ext1,
exact is_localization.comap_map_of_is_prime_disjoint M S _ x.2 h }
end
end comap
section basic_open
/-- `basic_open r` is the open subset containing all prime ideals not containing `r`. -/
def basic_open (r : R) : topological_space.opens (prime_spectrum R) :=
{ val := { x | r ∉ x.as_ideal },
property := ⟨{r}, set.ext $ λ x, set.singleton_subset_iff.trans $ not_not.symm⟩ }
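-- Worked example (informal): in `ℤ`, `basic_open 6` contains exactly the
-- primes not containing `6`, i.e. every point except `(2)` and `(3)`.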
@[simp] lemma mem_basic_open (f : R) (x : prime_spectrum R) :
x ∈ basic_open f ↔ f ∉ x.as_ideal := iff.rfl
lemma is_open_basic_open {a : R} : is_open ((basic_open a) : set (prime_spectrum R)) :=
(basic_open a).property
@[simp] lemma basic_open_eq_zero_locus_compl (r : R) :
(basic_open r : set (prime_spectrum R)) = (zero_locus {r})ᶜ :=
set.ext $ λ x, by simpa only [set.mem_compl_eq, mem_zero_locus, set.singleton_subset_iff]
@[simp] lemma basic_open_one : basic_open (1 : R) = ⊤ :=
topological_space.opens.ext $ by {simp, refl}
@[simp] lemma basic_open_zero : basic_open (0 : R) = ⊥ :=
topological_space.opens.ext $ by {simp, refl}
lemma basic_open_le_basic_open_iff (f g : R) :
basic_open f ≤ basic_open g ↔ f ∈ (ideal.span ({g} : set R)).radical :=
by rw [topological_space.opens.le_def, basic_open_eq_zero_locus_compl,
basic_open_eq_zero_locus_compl, set.le_eq_subset, set.compl_subset_compl,
zero_locus_subset_zero_locus_singleton_iff]
lemma basic_open_mul (f g : R) : basic_open (f * g) = basic_open f ⊓ basic_open g :=
topological_space.opens.ext $ by {simp [zero_locus_singleton_mul]}
lemma basic_open_mul_le_left (f g : R) : basic_open (f * g) ≤ basic_open f :=
by { rw basic_open_mul f g, exact inf_le_left }
lemma basic_open_mul_le_right (f g : R) : basic_open (f * g) ≤ basic_open g :=
by { rw basic_open_mul f g, exact inf_le_right }
@[simp] lemma basic_open_pow (f : R) (n : ℕ) (hn : 0 < n) : basic_open (f ^ n) = basic_open f :=
topological_space.opens.ext $ by simpa using zero_locus_singleton_pow f n hn
lemma is_topological_basis_basic_opens : topological_space.is_topological_basis
(set.range (λ (r : R), (basic_open r : set (prime_spectrum R)))) :=
begin
apply topological_space.is_topological_basis_of_open_of_nhds,
{ rintros _ ⟨r, rfl⟩,
exact is_open_basic_open },
{ rintros p U hp ⟨s, hs⟩,
rw [← compl_compl U, set.mem_compl_eq, ← hs, mem_zero_locus, set.not_subset] at hp,
obtain ⟨f, hfs, hfp⟩ := hp,
refine ⟨basic_open f, ⟨f, rfl⟩, hfp, _⟩,
rw [← set.compl_subset_compl, ← hs, basic_open_eq_zero_locus_compl, compl_compl],
exact zero_locus_anti_mono (set.singleton_subset_iff.mpr hfs) }
end
lemma is_basis_basic_opens :
topological_space.opens.is_basis (set.range (@basic_open R _)) :=
begin
unfold topological_space.opens.is_basis,
convert is_topological_basis_basic_opens,
rw ← set.range_comp,
end
lemma is_compact_basic_open (f : R) : is_compact (basic_open f : set (prime_spectrum R)) :=
is_compact_of_finite_subfamily_closed $ λ ι Z hZc hZ,
begin
let I : ι → ideal R := λ i, vanishing_ideal (Z i),
have hI : ∀ i, Z i = zero_locus (I i) := λ i,
by simpa only [zero_locus_vanishing_ideal_eq_closure] using (hZc i).closure_eq.symm,
rw [basic_open_eq_zero_locus_compl f, set.inter_comm, ← set.diff_eq,
set.diff_eq_empty, funext hI, ← zero_locus_supr] at hZ,
obtain ⟨n, hn⟩ : f ∈ (⨆ (i : ι), I i).radical,
{ rw ← vanishing_ideal_zero_locus_eq_radical,
apply vanishing_ideal_anti_mono hZ,
exact (subset_vanishing_ideal_zero_locus {f} (set.mem_singleton f)) },
rcases submodule.exists_finset_of_mem_supr I hn with ⟨s, hs⟩,
use s,
-- Using simp_rw here, because `hI` and `zero_locus_supr` need to be applied underneath binders
simp_rw [basic_open_eq_zero_locus_compl f, set.inter_comm, ← set.diff_eq,
set.diff_eq_empty, hI, ← zero_locus_supr],
rw ← zero_locus_radical, -- this one can't be in `simp_rw` because it would loop
apply zero_locus_anti_mono,
rw set.singleton_subset_iff,
exact ⟨n, hs⟩
end
lemma localization_away_comap_range (S : Type v) [comm_ring S] [algebra R S] (r : R)
[is_localization.away r S] : set.range (comap (algebra_map R S)) = basic_open r :=
begin
rw localization_comap_range S (submonoid.powers r),
ext,
simp only [mem_zero_locus, basic_open_eq_zero_locus_compl, set_like.mem_coe, set.mem_set_of_eq,
set.singleton_subset_iff, set.mem_compl_eq],
split,
{ intros h₁ h₂,
exact h₁ ⟨submonoid.mem_powers r, h₂⟩ },
{ rintros h₁ _ ⟨⟨n, rfl⟩, h₃⟩,
exact h₁ (x.2.mem_of_pow_mem _ h₃) },
end
lemma localization_away_open_embedding (S : Type v) [comm_ring S] [algebra R S] (r : R)
[is_localization.away r S] : open_embedding (comap (algebra_map R S)) :=
{ to_embedding := localization_comap_embedding S (submonoid.powers r),
open_range := by { rw localization_away_comap_range S r, exact is_open_basic_open } }
end basic_open
/-- The prime spectrum of a commutative ring is a compact topological space. -/
instance : compact_space (prime_spectrum R) :=
{ compact_univ := by { convert is_compact_basic_open (1 : R), rw basic_open_one, refl } }
section order
/-!
## The specialization order
We endow `prime_spectrum R` with a partial order,
where `x ≤ y` if and only if `y ∈ closure {x}`.
TODO: maybe define sober topological spaces, and generalise this instance to those
-/
instance : partial_order (prime_spectrum R) :=
subtype.partial_order _
@[simp] lemma as_ideal_le_as_ideal (x y : prime_spectrum R) :
x.as_ideal ≤ y.as_ideal ↔ x ≤ y :=
subtype.coe_le_coe
@[simp] lemma as_ideal_lt_as_ideal (x y : prime_spectrum R) :
x.as_ideal < y.as_ideal ↔ x < y :=
subtype.coe_lt_coe
lemma le_iff_mem_closure (x y : prime_spectrum R) :
x ≤ y ↔ y ∈ closure ({x} : set (prime_spectrum R)) :=
by rw [← as_ideal_le_as_ideal, ← zero_locus_vanishing_ideal_eq_closure,
mem_zero_locus, vanishing_ideal_singleton, set_like.coe_subset_coe]
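-- Informally, smaller primes are more generic: in an integral domain the zero
-- ideal `⊥` lies below every point, so its closure is all of `prime_spectrum R`.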
end order
end prime_spectrum
namespace local_ring
variables (R) [local_ring R]
/--
The closed point in the prime spectrum of a local ring.
-/
def closed_point : prime_spectrum R :=
⟨maximal_ideal R, (maximal_ideal.is_maximal R).is_prime⟩
variable {R}
lemma local_hom_iff_comap_closed_point {S : Type v} [comm_ring S] [local_ring S]
{f : R →+* S} : is_local_ring_hom f ↔ prime_spectrum.comap f (closed_point S) = closed_point R :=
by { rw [(local_hom_tfae f).out 0 4, subtype.ext_iff], refl }
end local_ring
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/algebraic_geometry/prime_spectrum/basic.lean"}
|
module JungleHelperSwingCassetteBlock
using ..Ahorn, Maple
@mapdef Entity "JungleHelper/SwingCassetteBlock" SwingCassetteBlock(x::Integer, y::Integer, width::Integer=Maple.defaultBlockWidth, height::Integer=Maple.defaultBlockHeight, index::Integer=0, tempo::Number=1.0)
const colorNames = Dict{String, Int}(
"Blue" => 0,
"Rose" => 1,
"Bright Sun" => 2,
"Malachite" => 3
)
const placements = Ahorn.PlacementDict(
"Swing Cassette Block ($index - $color) (Jungle Helper)" => Ahorn.EntityPlacement(
SwingCassetteBlock,
"rectangle",
Dict{String, Any}(
"index" => index,
)
) for (color, index) in colorNames
)
Ahorn.editingOptions(entity::SwingCassetteBlock) = Dict{String, Any}(
"index" => colorNames
)
Ahorn.minimumSize(entity::SwingCassetteBlock) = 16, 16
Ahorn.resizable(entity::SwingCassetteBlock) = true, true
Ahorn.selection(entity::SwingCassetteBlock) = Ahorn.getEntityRectangle(entity)
const colors = Dict{Int, Ahorn.colorTupleType}(
1 => (240, 73, 190, 255) ./ 255,
2 => (252, 220, 58, 255) ./ 255,
3 => (56, 224, 78, 255) ./ 255,
)
const defaultColor = (73, 170, 240, 255) ./ 255
const borderMultiplier = (0.9, 0.9, 0.9, 1)
const frame = "objects/cassetteblock/solid"
function getCassetteBlockRectangles(room::Maple.Room)
entities = filter(e -> e.name == "cassetteBlock" || e.name == "JungleHelper/SwingCassetteBlock", room.entities)
rects = Dict{Int, Array{Ahorn.Rectangle, 1}}()
for e in entities
index = get(e.data, "index", 0)
rectList = get!(rects, index) do
Ahorn.Rectangle[]
end
push!(rectList, Ahorn.Rectangle(
Int(get(e.data, "x", 0)),
Int(get(e.data, "y", 0)),
Int(get(e.data, "width", 8)),
Int(get(e.data, "height", 8))
))
end
return rects
end
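# Example (illustrative): two index-0 blocks and one index-1 block in a room
# give Dict(0 => [rectA, rectB], 1 => [rectC]); blocks only connect visually
# to blocks sharing the same index.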
# Returns true if there is no cassette block to connect to at the given offset
function notAdjacent(entity::SwingCassetteBlock, ox, oy, rects)
x, y = Ahorn.position(entity)
rect = Ahorn.Rectangle(x + ox + 4, y + oy + 4, 1, 1)
for r in rects
if Ahorn.checkCollision(r, rect)
return false
end
end
return true
end
function drawCassetteBlock(ctx::Ahorn.Cairo.CairoContext, entity::SwingCassetteBlock, room::Maple.Room)
cassetteBlockRectangles = getCassetteBlockRectangles(room)
x, y = Ahorn.position(entity)
width = Int(get(entity.data, "width", 32))
height = Int(get(entity.data, "height", 32))
tileWidth = ceil(Int, width / 8)
tileHeight = ceil(Int, height / 8)
index = Int(get(entity.data, "index", 0))
color = get(colors, index, defaultColor)
rect = Ahorn.Rectangle(x, y, width, height)
rects = get(cassetteBlockRectangles, index, Ahorn.Rectangle[])
if !(rect in rects)
push!(rects, rect)
end
for x in 1:tileWidth, y in 1:tileHeight
drawX, drawY = (x - 1) * 8, (y - 1) * 8
closedLeft = !notAdjacent(entity, drawX - 8, drawY, rects)
closedRight = !notAdjacent(entity, drawX + 8, drawY, rects)
closedUp = !notAdjacent(entity, drawX, drawY - 8, rects)
closedDown = !notAdjacent(entity, drawX, drawY + 8, rects)
completelyClosed = closedLeft && closedRight && closedUp && closedDown
if completelyClosed
if notAdjacent(entity, drawX + 8, drawY - 8, rects)
Ahorn.drawImage(ctx, frame, drawX, drawY, 24, 0, 8, 8, tint=color)
elseif notAdjacent(entity, drawX - 8, drawY - 8, rects)
Ahorn.drawImage(ctx, frame, drawX, drawY, 24, 8, 8, 8, tint=color)
elseif notAdjacent(entity, drawX + 8, drawY + 8, rects)
Ahorn.drawImage(ctx, frame, drawX, drawY, 24, 16, 8, 8, tint=color)
elseif notAdjacent(entity, drawX - 8, drawY + 8, rects)
Ahorn.drawImage(ctx, frame, drawX, drawY, 24, 24, 8, 8, tint=color)
else
Ahorn.drawImage(ctx, frame, drawX, drawY, 8, 8, 8, 8, tint=color)
end
else
if closedLeft && closedRight && !closedUp && closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 8, 0, 8, 8, tint=color)
elseif closedLeft && closedRight && closedUp && !closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 8, 16, 8, 8, tint=color)
elseif closedLeft && !closedRight && closedUp && closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 16, 8, 8, 8, tint=color)
elseif !closedLeft && closedRight && closedUp && closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 0, 8, 8, 8, tint=color)
elseif closedLeft && !closedRight && !closedUp && closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 16, 0, 8, 8, tint=color)
elseif !closedLeft && closedRight && !closedUp && closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 0, 0, 8, 8, tint=color)
elseif !closedLeft && closedRight && closedUp && !closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 0, 16, 8, 8, tint=color)
elseif closedLeft && !closedRight && closedUp && !closedDown
Ahorn.drawImage(ctx, frame, drawX, drawY, 16, 16, 8, 8, tint=color)
end
end
end
end
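# The (offsetX, offsetY) arguments to drawImage select an 8x8 tile from the
# "objects/cassetteblock/solid" sprite matching each tile's open edges;
# fully enclosed tiles use the fourth column (x = 24) when a diagonal
# neighbour is missing (inner corners), otherwise the centre tile (8, 8).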
Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::SwingCassetteBlock, room::Maple.Room) = drawCassetteBlock(ctx, entity, room)
end
|
{"hexsha": "c226034f4912c5088af4d5d65fabe63b9a177a52", "size": 5662, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Ahorn/entities/swingCassetteBlock.jl", "max_stars_repo_name": "Jackal-Celeste/JungleHelper", "max_stars_repo_head_hexsha": "c98f58b78c2c1855556d556b86e0959199ab2db7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-13T22:20:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-08T14:57:08.000Z", "max_issues_repo_path": "Ahorn/entities/swingCassetteBlock.jl", "max_issues_repo_name": "Jackal-Celeste/JungleHelper", "max_issues_repo_head_hexsha": "c98f58b78c2c1855556d556b86e0959199ab2db7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-06-06T16:50:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T19:37:47.000Z", "max_forks_repo_path": "Ahorn/entities/swingCassetteBlock.jl", "max_forks_repo_name": "Jackal-Celeste/JungleHelper", "max_forks_repo_head_hexsha": "c98f58b78c2c1855556d556b86e0959199ab2db7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-10T20:38:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-11T17:07:42.000Z", "avg_line_length": 35.6100628931, "max_line_length": 211, "alphanum_fraction": 0.5943129636, "num_tokens": 1660}
|
[STATEMENT]
lemma Spy_see_priK [simp]:
"evs \<in> zg ==> (Key (priK A) \<in> parts (spies evs)) = (A \<in> bad)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. evs \<in> zg \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy evs)) = (A \<in> bad)
[PROOF STEP]
apply (erule zg.induct)
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. (Key (priEK A) \<in> parts (knows Spy [])) = (A \<in> bad)
2. \<And>evsf X B. \<lbrakk>evsf \<in> zg; (Key (priEK A) \<in> parts (knows Spy evsf)) = (A \<in> bad); X \<in> synth (analz (knows Spy evsf))\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Says Spy B X # evsf))) = (A \<in> bad)
3. \<And>evsr Aa B X. \<lbrakk>evsr \<in> zg; (Key (priEK A) \<in> parts (knows Spy evsr)) = (A \<in> bad); Says Aa B X \<in> set evsr\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Gets B X # evsr))) = (A \<in> bad)
4. \<And>evs1 L C K m NRO Aa B. \<lbrakk>evs1 \<in> zg; (Key (priEK A) \<in> parts (knows Spy evs1)) = (A \<in> bad); Nonce L \<notin> used evs1; C = Crypt K (Number m); K \<in> symKeys; NRO = Crypt (priEK Aa) \<lbrace>Number f_nro, Agent B, Nonce L, C\<rbrace>\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Says Aa B \<lbrace>Number f_nro, Agent B, Nonce L, C, NRO\<rbrace> # evs1))) = (A \<in> bad)
5. \<And>evs2 B L C NRO Aa NRR. \<lbrakk>evs2 \<in> zg; (Key (priEK A) \<in> parts (knows Spy evs2)) = (A \<in> bad); Gets B \<lbrace>Number f_nro, Agent B, Nonce L, C, NRO\<rbrace> \<in> set evs2; NRO = Crypt (priEK Aa) \<lbrace>Number f_nro, Agent B, Nonce L, C\<rbrace>; NRR = Crypt (priEK B) \<lbrace>Number f_nrr, Agent Aa, Nonce L, C\<rbrace>\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Says B Aa \<lbrace>Number f_nrr, Agent Aa, Nonce L, NRR\<rbrace> # evs2))) = (A \<in> bad)
6. \<And>evs3 C K M Aa B L NRO NRR sub_K. \<lbrakk>evs3 \<in> zg; (Key (priEK A) \<in> parts (knows Spy evs3)) = (A \<in> bad); C = Crypt K M; K \<in> symKeys; Says Aa B \<lbrace>Number f_nro, Agent B, Nonce L, C, NRO\<rbrace> \<in> set evs3; Gets Aa \<lbrace>Number f_nrr, Agent Aa, Nonce L, NRR\<rbrace> \<in> set evs3; NRR = Crypt (priEK B) \<lbrace>Number f_nrr, Agent Aa, Nonce L, C\<rbrace>; sub_K = Crypt (priEK Aa) \<lbrace>Number f_sub, Agent B, Nonce L, Key K\<rbrace>\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Says Aa TTP \<lbrace>Number f_sub, Agent B, Nonce L, Key K, sub_K\<rbrace> # evs3))) = (A \<in> bad)
7. \<And>evs4 K B L sub_K Aa con_K. \<lbrakk>evs4 \<in> zg; (Key (priEK A) \<in> parts (knows Spy evs4)) = (A \<in> bad); K \<in> symKeys; Gets TTP \<lbrace>Number f_sub, Agent B, Nonce L, Key K, sub_K\<rbrace> \<in> set evs4; sub_K = Crypt (priEK Aa) \<lbrace>Number f_sub, Agent B, Nonce L, Key K\<rbrace>; con_K = Crypt (priEK TTP) \<lbrace>Number f_con, Agent Aa, Agent B, Nonce L, Key K\<rbrace>\<rbrakk> \<Longrightarrow> (Key (priEK A) \<in> parts (knows Spy (Says TTP Spy con_K # Notes TTP \<lbrace>Number f_con, Agent Aa, Agent B, Nonce L, Key K, con_K\<rbrace> # evs4))) = (A \<in> bad)
[PROOF STEP]
apply (frule_tac [5] ZG2_msg_in_parts_spies, auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1485, "file": null, "length": 3}
|
"""
Tests on matrix multiply. As the underlying code is now playing with
the TRANSA TRANSB parameters to minimize copying, several tests are
needed to make sure that all cases are handled correctly as its logic
is rather complex.
"""
from __future__ import print_function
from unittest import TestCase, skipIf, skip
import numpy as np
from numpy.testing import run_module_suite, assert_allclose
from pkg_resources import parse_version
import gulinalg
M = 75
N = 50
K = 100
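# A minimal sanity check of the pattern exercised below (illustrative):
#
#   res = gulinalg.matrix_multiply(np.eye(3), np.eye(3))
#   assert_allclose(res, np.eye(3))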
# This class tests the cases that the code can handle without copy-rearranging
# any of the input/output arguments.
class TestNoCopy(TestCase):
# no output specified (operation allocated) ================================
def test_matrix_multiply_cc(self):
"""matrix multiply two C layout matrices"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = gulinalg.matrix_multiply(a,b)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cc_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = gulinalg.matrix_multiply(a, b, workers=workers)
assert_allclose(res, ref)
def test_matrix_multiply_cf(self):
"""matrix multiply C layout by FORTRAN layout matrices"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = gulinalg.matrix_multiply(a,b)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cf_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = gulinalg.matrix_multiply(a, b, workers=workers)
assert_allclose(res, ref)
def test_matrix_multiply_fc(self):
"""matrix multiply FORTRAN layout by C layout matrices"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = gulinalg.matrix_multiply(a,b)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_fc_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = gulinalg.matrix_multiply(a, b, workers=workers)
assert_allclose(res, ref)
def test_matrix_multiply_ff(self):
"""matrix multiply two FORTRAN layout matrices"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = gulinalg.matrix_multiply(a,b)
ref = np.dot(a,b)
assert_allclose(res, ref)
    def test_matrix_multiply_ff_batch(self):
        """matrix multiply two FORTRAN layout matrices, batched"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = gulinalg.matrix_multiply(a, b, workers=workers)
assert_allclose(res, ref)
# C explicit outputs =======================================================
def test_matrix_multiply_cc_c(self):
"""matrix multiply two C layout matrices, explicit C array output"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K), order='C')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cc_c_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_cf_c(self):
"""matrix multiply C layout by FORTRAN layout matrices, explicit C array output"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = np.zeros((M,K), order='C')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cf_c_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_fc_c(self):
"""matrix multiply FORTRAN layout by C layout matrices, explicit C array output"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K), order='C')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_fc_c_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_ff_c(self):
"""matrix multiply two FORTRAN layout matrices, explicit C array output"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = np.zeros((M,K), order='C')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_ff_c_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
# FORTRAN explicit outputs =================================================
def test_matrix_multiply_cc_f(self):
"""matrix multiply two C layout matrices, explicit FORTRAN array output"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K), order='F')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cc_f_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='F')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_cf_f(self):
"""matrix multiply C layout by FORTRAN layout matrices, explicit FORTRAN array output"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = np.zeros((M,K), order='F')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_cf_f_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='F')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_fc_f(self):
"""matrix multiply FORTRAN layout by C layout matrices, explicit FORTRAN array output"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K), order='F')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_fc_f_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='F')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_matrix_multiply_ff_f(self):
"""matrix multiply two FORTRAN layout matrices, explicit FORTRAN array output"""
a = np.asfortranarray(np.random.randn(M,N))
b = np.asfortranarray(np.random.randn(N,K))
res = np.zeros((M,K), order='F')
gulinalg.matrix_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matrix_multiply_ff_f_batch(self):
"""matrix multiply two C layout matrices"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a,b)
a = np.asfortranarray(np.stack((a,) * n_outer, axis=0))
b = np.asfortranarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='F')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
# This class tests the cases where at least one operand/output requires
# copying/rearranging.
class TestWithCopy(TestCase):
# No output specified (operation allocated) ================================
def test_input_non_contiguous_1(self):
"""first input not contiguous"""
a = np.ascontiguousarray(np.random.randn(M,N,2))[:,:,0]
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K), order='C')
assert not a.flags.c_contiguous and not a.flags.f_contiguous
gulinalg.matrix_multiply(a, b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_input_non_contiguous_1_batch(self):
"""first input not contiguous"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N, 2)
b = np.random.randn(N, K)
ref = np.dot(a[..., 0], b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))[..., 0]
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
assert not a.flags.c_contiguous and not a.flags.f_contiguous
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_input_non_contiguous_2(self):
"""second input not contiguous"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K,2))[:,:,0]
res = np.zeros((M,K), order='C')
assert not b.flags.c_contiguous and not b.flags.f_contiguous
gulinalg.matrix_multiply(a, b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_input_non_contiguous_2_batch(self):
"""second input not contiguous"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K, 2)
ref = np.dot(a, b[..., 0])
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))[..., 0]
assert not b.flags.c_contiguous and not b.flags.f_contiguous
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_input_non_contiguous_3(self):
"""neither input contiguous"""
a = np.ascontiguousarray(np.random.randn(M,N,2))[:,:,0]
b = np.ascontiguousarray(np.random.randn(N,K,2))[:,:,0]
res = np.zeros((M,K), order='C')
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
gulinalg.matrix_multiply(a, b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_input_non_contiguous_3_batch(self):
"""neither input contiguous"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N, 2)
b = np.random.randn(N, K, 2)
ref = np.dot(a[..., 0], b[..., 0])
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))[..., 0]
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))[..., 0]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K), order='C')
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_output_non_contiguous(self):
"""output not contiguous"""
a = np.ascontiguousarray(np.random.randn(M,N))
b = np.ascontiguousarray(np.random.randn(N,K))
res = np.zeros((M,K,2), order='C')[:,:,0]
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matrix_multiply(a, b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_output_non_contiguous_batch(self):
"""output not contiguous"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N)
b = np.random.randn(N, K)
ref = np.dot(a, b)
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K, 2), order='C')[..., 0]
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_all_non_contiguous(self):
"""neither input nor output contiguous"""
a = np.ascontiguousarray(np.random.randn(M,N,2))[:,:,0]
b = np.ascontiguousarray(np.random.randn(N,K,2))[:,:,0]
res = np.zeros((M,K,2), order='C')[:,:,0]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matrix_multiply(a, b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_all_non_contiguous_batch(self):
"""neither input nor output contiguous"""
n_outer = 4
for workers in [1, -1]:
a = np.random.randn(M, N, 2)
b = np.random.randn(N, K, 2)
ref = np.dot(a[..., 0], b[..., 0])
a = np.ascontiguousarray(np.stack((a,) * n_outer, axis=0))[..., 0]
b = np.ascontiguousarray(np.stack((b,) * n_outer, axis=0))[..., 0]
ref = np.stack((ref,) * n_outer, axis=0)
res = np.zeros((n_outer, M, K, 2), order='C')[..., 0]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matrix_multiply(a, b, workers=workers, out=res)
assert_allclose(res, ref)
def test_stride_tricks(self):
"""test that matrices that are contiguous but have their dimension
overlapped *copy*, as BLAS does not support them"""
a = np.ascontiguousarray(np.random.randn(M + N))
a = np.lib.stride_tricks.as_strided(a, shape=(M,N),
strides=(a.itemsize, a.itemsize))
b = np.ascontiguousarray(np.random.randn(N,K))
res = gulinalg.matrix_multiply(a,b)
ref = np.dot(a,b)
assert_allclose(res, ref)
# Some simple tests showing that the gufunc vectorization and broadcasting work
class TestVector(TestCase):
def test_vector(self):
"""test vectorized matrix multiply"""
a = np.ascontiguousarray(np.random.randn(10, M, N))
b = np.ascontiguousarray(np.random.randn(10, N, K))
res = gulinalg.matrix_multiply(a,b)
assert res.shape == (10, M, K)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
def test_broadcast(self):
"""test broadcast matrix multiply"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(10, N, K))
res = gulinalg.matrix_multiply(a,b)
assert res.shape == (10, M, K)
ref = np.stack([np.dot(a, b[i]) for i in range(len(b))])
assert_allclose(res, ref)
def test_vectorized_matvec(self):
"""test vectorized matrix multiply"""
a = np.ascontiguousarray(np.random.randn(10, M, N))
b = np.ascontiguousarray(np.random.randn(10, N))
res = gulinalg.matvec_multiply(a,b)
assert res.shape == (10, M)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
def test_broadcast_matvec(self):
"""test broadcast matrix multiply"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(10, N))
res = gulinalg.matvec_multiply(a,b)
assert res.shape == (10, M)
ref = np.stack([np.dot(a, b[i]) for i in range(len(b))])
assert_allclose(res, ref)
class TestMatvecNoCopy(TestCase):
def test_matvec_multiply_cf_c(self):
"""matrix multiply C layout by FORTRAN layout vector, explicit C array output"""
a = np.ascontiguousarray(np.random.randint(100, size=(M,N)))
b = np.asfortranarray(np.random.randint(N, size=(N)))
res = np.zeros(M, order='C')
gulinalg.matvec_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
def test_matvec_multiply_fc_f(self):
"""matrix multiply FORTRAN layout by C layout vector, explicit FORTRAN array output"""
a = np.asfortranarray(np.random.randint(100, size=(M,N)))
b = np.ascontiguousarray(np.random.randint(N, size=(N)))
res = np.zeros(M, order='F')
gulinalg.matvec_multiply(a,b, out=res)
ref = np.dot(a,b)
assert_allclose(res, ref)
if __name__ == '__main__':
run_module_suite()
#!/usr/bin/env python
#
# Copyright (c) 2014 10X Genomics, Inc. All rights reserved.
#
import os
# 10X chemistry types
GEMCODE = 'GemCode'
CHROMIUM = 'Chromium'
# Mass of 1 bp in nanograms
NG_PER_BP = 1.1454e-12
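# Illustrative sketch (an assumed helper, not part of the original module):
# converting a dsDNA fragment length in base pairs to an approximate mass in
# nanograms, using the per-bp mass constant above.
def _example_fragment_mass_ng(length_bp):
    """Approximate mass in ng of a double-stranded fragment of `length_bp`."""
    return length_bp * NG_PER_BP
# e.g. _example_fragment_mass_ng(50000) -> ~5.7e-8 ng for a 50 kb fragment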
# Where the code sits
CODE_PATH=os.path.dirname(os.path.abspath(__file__)) + '/'
# Where alarm definitions live
ALARMS_LOCATION = CODE_PATH + 'alarms/'
# Code subfolders
TEST_FILE_IN_DIR = CODE_PATH + 'test_files/inputs/'
TEST_FILE_OUT_DIR = CODE_PATH + 'test_files/outputs/'
# What is considered a high confidence mapped read pair
HIGH_CONF_MAPQ = 60
# Minimum distance to the mate required by the duplicate filter
MIN_MATE_OFFSET_DUP_FILTER = 20
# Sequencing settings
ILLUMINA_MAX_QUAL = 50 # higher than possible right now
ILLUMINA_QUAL_OFFSET = 33
DEFAULT_HIGH_MAPQ = 60
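# Illustrative sketch (an assumed helper, not part of the original module):
# decoding a FASTQ quality string into Phred scores with the offset above.
def _example_phred_scores(qual_string):
    """Convert an ASCII-encoded quality string to a list of Phred scores."""
    return [ord(c) - ILLUMINA_QUAL_OFFSET for c in qual_string]
# e.g. _example_phred_scores('II5') -> [40, 40, 20]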
# Demultiplex settings
DEMULTIPLEX_DEFAULT_SAMPLE_INDEX_LENGTH = 8
DEMULTIPLEX_BARCODE_LENGTH = 14
DEMULTIPLEX_INVALID_SAMPLE_INDEX = 'X'
# Tail trim fraction when computing depth CV
COVERAGE_TRIM_TAIL = 0.01
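# Illustrative sketch (an assumed helper, not part of the original module):
# a tail-trimmed coefficient of variation of depth, dropping the top and
# bottom COVERAGE_TRIM_TAIL fraction of values before taking std/mean.
def _example_trimmed_depth_cv(depths):
    """Coefficient of variation of coverage depth after trimming both tails."""
    import numpy as np
    depths = np.sort(np.asarray(depths, dtype=float))
    n_trim = int(len(depths) * COVERAGE_TRIM_TAIL)
    trimmed = depths[n_trim:len(depths) - n_trim]
    return trimmed.std() / trimmed.mean()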
# Subsampling coverages for dup rate
DUPLICATE_SUBSAMPLE_COVERAGES = [16.0, 8.0, 4.0, 2.0, 1.0, 0.5, 0.25, 0.125]
# Phasing confidence required to call a fragment as phased
FRAGMENT_PHASING_THRESHOLD = 0.995
# TAG names
RAW_BARCODE_TAG = 'RX'
PROCESSED_BARCODE_TAG = 'BX'
RAW_BARCODE_QUAL_TAG = 'QX'
SAMPLE_INDEX_TAG = 'BC'
SAMPLE_INDEX_QUAL_TAG = 'QT'
PHASE_SET_BAM_TAG = 'PS'
HAPLOTYPE_BAM_TAG = 'HP'
PHASING_CONF_BAM_TAG = 'PC'
TRIM_TAG = 'TR'
TRIM_QUAL_TAG = 'TQ'
MOLECULE_ID_BAM_TAG = 'MI'
# Parallelization settings
PARALLEL_LOCUS_SIZE = int(4E7)
PARALLEL_NUM_READS_SIZE = 1000000
# Single partition fragment calling
FRAGMENT_LINK_DISTANCE = 30000
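# Illustrative sketch (an assumed helper, not the pipeline's actual fragment
# caller): grouping sorted read positions from one partition into fragments,
# breaking whenever the gap to the previous read exceeds the link distance.
def _example_link_reads_to_fragments(sorted_positions):
    """Group sorted read positions into fragments, breaking at large gaps."""
    fragments, current = [], []
    for pos in sorted_positions:
        if current and pos - current[-1] > FRAGMENT_LINK_DISTANCE:
            fragments.append(current)
            current = []
        current.append(pos)
    if current:
        fragments.append(current)
    return fragments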
#
# Settings for metrics computation
#
# Quality cutoffs for bcs
BC_QUAL_CUTOFFS = [15, 20, 25, 30]
# Map quality cutoffs for insert sizes
INSERT_MAPQ_CUTOFFS = [0, 30, 60]
# Map quality cutoffs for target distances
TARGET_MAPQ_CUTOFFS = [0, 30, 60]
# What is considered a moderately confident mapped single read
MODERATE_CONF_MAPQ = 29
# Longest insert size to tabulate
MAX_INSERT_SIZE = 10000
# Longest target distance to tabulate
MAX_TARGET_DIST = 10000
# Distance to consider reads far away for far chimeras
READ_MATE_FAR_DIST = 5000
# Distance to exclude directions of reads as untrustworthy for outer and same-dir chimeras
READ_MATE_CHIM_TOO_CLOSE_DIST = 20
# left coverage tail cutoff for customers
CUSTOMER_LEFT_TAIL_COVERAGE = 5
# Bin size for fragment length histogram passed to loupe
FRAG_LEN_HIST_BIN_SIZE = 100
# Bin size for calculating length-weighted mode of fragment size histogram
FRAG_LEN_HIST_BIN_SIZE_FOR_LW_MODE = 2000
# Variant Calling filter string
QUAL_FILTER = '(%QUAL <= 15 || (AF[0] > 0.5 && %QUAL < 50))'
LARIAT_RESCUE_FILTER = '(((RESCUED+NOT_RESCUED) > 0 & RESCUED/(RESCUED+NOT_RESCUED) > 0.1) & (MMD == -1 | MMD >= 3.0)) '
ALLELE_FRACTION_FILTER = '(AO[0] < 2 || AO[0]/(AO[0] + RO) < 0.15)'
VARIANT_CALL_FILTER = {'10X_RESCUED_MOLECULE_HIGH_DIVERSITY':LARIAT_RESCUE_FILTER,'10X_QUAL_FILTER':QUAL_FILTER, '10X_ALLELE_FRACTION_FILTER': ALLELE_FRACTION_FILTER}
VCF_WHITE_LIST_INFO_FIELDS = {'AA','AN','CIGAR','END','DB','H2', 'H3', '1000G','SOMATIC','VALIDATED', 'TENX'}
# Preflight constants
MIN_PROCESS_NOFILE = 1024
MIN_GLOBAL_NOFILE = 2**15
REQUIRED_MIN_READS = 3
REQUIRED_NUM_READS = 3
GLOBAL_NOFILE_PATH = '/proc/sys/fs/file-max'
BCL_PROCESSOR_FASTQ_MODE = 'BCL_PROCESSOR'
ILMN_BCL2FASTQ_FASTQ_MODE = 'ILMN_BCL2FASTQ'
PACKAGE_VERSION_CMDS = [
{
'name': 'mrc',
'cmd' : 'mrc --version',
},
{
'name': 'mrp',
'cmd' : 'mrp --version',
},
{
'name': 'Anaconda',
'cmd' : 'python --version 2>&1 | cat ',
},
{
'name': 'numpy',
'cmd' : 'python -c "import numpy; print(numpy.__version__)"'
},
{
'name': 'scipy',
'cmd' : 'python -c "import scipy; print(scipy.__version__)"'
},
{
'name': 'pysam',
'cmd' : 'python -c "import pysam; print(pysam.__version__)"'
},
{
'name': 'PyVCF',
'cmd' : 'python -c "import vcf; print(vcf.VERSION)"'
},
{
'name': 'h5py',
'cmd' : 'python -c "import h5py; print(h5py.__version__)"'
},
{
'name': 'pandas',
'cmd' : 'python -c "import pandas; print(pandas.__version__)"'
},
{
'name': 'bwa',
'cmd' : 'bwa 2>&1 | grep "^ *Version"'
},
{
'name': 'samtools',
'cmd' : 'samtools 2>&1 | grep "^ *Version"'
},
{
'name': 'freebayes',
'cmd' : 'freebayes -h | grep ^version',
},
]
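# Illustrative sketch (an assumed helper, not part of the original module):
# running the version commands above and collecting the first output line.
def _example_collect_versions(cmds=PACKAGE_VERSION_CMDS):
    """Run each version command in a shell; return {name: version string}."""
    import subprocess
    versions = {}
    for entry in cmds:
        try:
            out = subprocess.check_output(
                entry['cmd'], shell=True, stderr=subprocess.STDOUT)
            lines = out.decode('utf-8', 'replace').strip().splitlines()
            versions[entry['name']] = lines[0] if lines else ''
        except Exception:
            versions[entry['name']] = 'unknown'
    return versions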
# Product microfluidics settings
FLUIDICS_PARAMS = {
'GemCode': {
'z2_vol_per_gem': 144e-12, # 144pL
'total_z2_vol_input': 65e-6, # 65uL
},
'Chromium': {
'z2_vol_per_gem': 24.5e-12, # 24.5pL
'total_z2_vol_input': 90e-6, # 90uL
}
}
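# Illustrative sketch (an assumed helper, not part of the original module):
# an upper bound on GEM count if the entire z2 input volume were partitioned.
def _example_max_gems(chemistry):
    """Upper bound on GEMs for a chemistry name (GEMCODE or CHROMIUM)."""
    params = FLUIDICS_PARAMS[chemistry]
    return params['total_z2_vol_input'] / params['z2_vol_per_gem']
# e.g. _example_max_gems(CHROMIUM) -> ~3.67e6 GEMs (90 uL / 24.5 pL per GEM)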
# Sample index map
# GemCode Tubes
SI_001 = ['TCGCCATA', 'GTATACAC', 'AATGGTGG', 'CGCATGCT']
SI_002 = ['TATCCTCG', 'GCGAGGTC', 'CGCTTCAA', 'ATAGAAGT']
SI_003 = ['TGACGTCG', 'CTTGTGTA', 'ACGACCGT', 'GACTAAAC']
SI_004 = ['ATCTAGCT', 'GAGCGTAC', 'TCAGCCTG', 'CGTATAGA']
SI_005 = ['CCGTTCCC', 'ATACAGTT', 'TGTAGTAA', 'GACGCAGG']
SI_006 = ['TCAATTGG', 'AGTTAGAA', 'GAGCGCTT', 'CTCGCACC']
SI_007 = ['CTGCCTTG', 'ACTAGCCC', 'GGCTAGAT', 'TAAGTAGA']
SI_008 = ['GGCAGAAA', 'ACGGTTCT', 'CATTCGTC', 'TTACACGG']
# GemCode Plate
SI_P01_A1 = ['TTGTAAGA', 'GGCGTTTC', 'CCTACCAT', 'AAACGGCG']
SI_P01_B1 = ['CTAGCTGT', 'GCCAACAA', 'AGGCTACC', 'TATTGGTG']
SI_P01_C1 = ['GATGCAGT', 'AGACTTTC', 'TTCTACAG', 'CCGAGGCA']
SI_P01_D1 = ['AGCTGCGT', 'GTGGAGCA', 'TCTATTAG', 'CAACCATC']
SI_P01_E1 = ['CGCCCGTA', 'GTTTGCCT', 'TAAGTTAG', 'ACGAAAGC']
SI_P01_F1 = ['TGACTAGT', 'GATAGGTA', 'CCGTACAG', 'ATCGCTCC']
SI_P01_G1 = ['CATATGCG', 'ATGCGATT', 'TCCGCTAC', 'GGATACGA']
SI_P01_H1 = ['TCTTGTCC', 'CGGGAGTA', 'GTCACAGG', 'AAACTCAT']
SI_P01_A2 = ['AGCCCTTT', 'TCTTAGGC', 'GTGAGAAG', 'CAAGTCCA']
SI_P01_B2 = ['GGTCGAGC', 'TTAGATTG', 'CCCACCCA', 'AAGTTGAT']
SI_P01_C2 = ['CCGAGAAC', 'TGCTCTGT', 'GTAGTGCG', 'AATCACTA']
SI_P01_D2 = ['ACATTCCG', 'GACACAAT', 'CTGCGGTA', 'TGTGATGC']
SI_P01_E2 = ['TCATCAAG', 'GTTGGTCC', 'AGGCTGGT', 'CACAACTA']
SI_P01_F2 = ['TGCCCGCT', 'GCAAACGC', 'CATTGATA', 'ATGGTTAG']
SI_P01_G2 = ['CGGTGAGC', 'ATAACCTA', 'TCCGAGCG', 'GATCTTAT']
SI_P01_H2 = ['CCGAACTC', 'AACGGTCA', 'TTATTGGT', 'GGTCCAAG']
SI_P01_A3 = ['AAAGCATA', 'GCCTTTAT', 'CTGCAGCC', 'TGTAGCGG']
SI_P01_B3 = ['TCATCCTT', 'ATTGGACG', 'CAGCTTAC', 'GGCAAGGA']
SI_P01_C3 = ['ACGTTACA', 'TTACCTAC', 'GACGACGG', 'CGTAGGTT']
SI_P01_D3 = ['GAGCACGC', 'CGAAGTTG', 'TTCGTGAA', 'ACTTCACT']
SI_P01_E3 = ['TCTGCAGG', 'CGGCTCCA', 'AACAAGTC', 'GTATGTAT']
SI_P01_F3 = ['TTAGGACC', 'AGTCTGTA', 'GCCTCCGT', 'CAGAATAG']
SI_P01_G3 = ['TACGAGTT', 'ATGTCCAG', 'GCTATAGC', 'CGACGTCA']
SI_P01_H3 = ['TTGGGCTT', 'GACAAACC', 'ACACCTAA', 'CGTTTGGG']
SI_P01_A4 = ['CATGGCAG', 'AGAACGCC', 'GTCTTTGA', 'TCGCAATT']
SI_P01_B4 = ['GACAGGCT', 'CCTCTAAC', 'AGGGACTG', 'TTATCTGA']
SI_P01_C4 = ['ACATTGGC', 'GAGCCCAT', 'CTTAGTCA', 'TGCGAATG']
SI_P01_D4 = ['AAATCGTC', 'GCGATCGG', 'CTTCGAAT', 'TGCGATCA']
SI_P01_E4 = ['GTAAACAT', 'TATCCTGA', 'AGCTGACG', 'CCGGTGTC']
SI_P01_F4 = ['GCATGATA', 'CGTCCTCT', 'AACGACAC', 'TTGATGGG']
SI_P01_G4 = ['CCTGCGGT', 'GTACAACG', 'AGCTTCTC', 'TAGAGTAA']
SI_P01_H4 = ['TTCCATCT', 'ACTGGAGC', 'CGGTCGTG', 'GAAATCAA']
SI_P01_A5 = ['CAGTCTGG', 'TCACACTC', 'ATTGGGAA', 'GGCATACT']
SI_P01_B5 = ['TCATGCGA', 'ATCGTACT', 'CATCAGTG', 'GGGACTAC']
SI_P01_C5 = ['TCAGTCAA', 'CACTGACT', 'ATGCATTC', 'GGTACGGG']
SI_P01_D5 = ['GTCGACTC', 'AGACGGAT', 'CCTTTAGA', 'TAGACTCG']
SI_P01_E5 = ['CCGTTGAA', 'TATGCTCT', 'ATCCAAGG', 'GGAAGCTC']
SI_P01_F5 = ['TCTGACTA', 'GTACGGCT', 'CGGTTTAG', 'AACACAGC']
SI_P01_G5 = ['ATGAAGTA', 'GAAGCTCG', 'TCTTTCGT', 'CGCCGAAC']
SI_P01_H5 = ['ATAGTATG', 'TATAAGGA', 'GGCTCCAC', 'CCGCGTCT']
SI_P01_A6 = ['CTTTCGAC', 'ACGGGACT', 'TGCATCTG', 'GAACATGA']
SI_P01_B6 = ['GCGCACCT', 'AACGCGAA', 'CTATTTGG', 'TGTAGATC']
SI_P01_C6 = ['CGCTCAGG', 'GAGGTTTA', 'ACTCAGAC', 'TTAAGCCT']
SI_P01_D6 = ['GAAGTCTT', 'TGCAGGGC', 'ATGCCAAA', 'CCTTATCG']
SI_P01_E6 = ['TGATGGCT', 'GCCATTTG', 'ATTGAAAC', 'CAGCCCGA']
SI_P01_F6 = ['GACTTCCT', 'TGAGGAAG', 'ATGCCGGC', 'CCTAATTA']
SI_P01_G6 = ['GTGCGACA', 'TCAGTGTT', 'AGCACTGG', 'CATTACAC']
SI_P01_H6 = ['AAGCATAA', 'CCCATCGC', 'TTAGCGCT', 'GGTTGATG']
SI_P01_A7 = ['CTCATCAT', 'TAACGTCC', 'AGGTCATA', 'GCTGAGGG']
SI_P01_B7 = ['TCCACACG', 'CTTCTGTT', 'GAATGCAC', 'AGGGATGA']
SI_P01_C7 = ['GGCGGAAT', 'ACACCGGG', 'CATAATCC', 'TTGTTCTA']
SI_P01_D7 = ['CCGGATCC', 'GGTCGCAT', 'TTAACGTG', 'AACTTAGA']
SI_P01_E7 = ['AAGACGTG', 'CCATGTGT', 'GTTCACAA', 'TGCGTACC']
SI_P01_F7 = ['GGTTAGAC', 'CAAACTTT', 'ACCCGAGA', 'TTGGTCCG']
SI_P01_G7 = ['GCCGGTAA', 'TGACTGCC', 'ATTACCGG', 'CAGTAATT']
SI_P01_H7 = ['TGGCACGA', 'AACGGGTG', 'CTAATTCT', 'GCTTCAAC']
SI_P01_A8 = ['GACTGTTC', 'ATGATACG', 'CCACAGAA', 'TGTGCCGT']
SI_P01_B8 = ['ACGTTCAC', 'TGCCCAGA', 'CAAGGTCT', 'GTTAAGTG']
SI_P01_C8 = ['TTTATCCC', 'GCACGTTT', 'CAGGAAGA', 'AGCTCGAG']
SI_P01_D8 = ['AATCTTTG', 'GGATGAGT', 'CTCAAGAC', 'TCGGCCCA']
SI_P01_E8 = ['GCTTACAT', 'TAGGGTGC', 'AGCCTATG', 'CTAACGCA']
SI_P01_F8 = ['AGTTGGGA', 'TACATTCT', 'CCAGAAAG', 'GTGCCCTC']
SI_P01_G8 = ['AAGTACTC', 'GGAACTCT', 'TCCCTGAG', 'CTTGGAGA']
SI_P01_H8 = ['AAGAGCGG', 'TCATAGCA', 'GGCCCATC', 'CTTGTTAT']
SI_P01_A9 = ['GAGTGCGT', 'CTCCAACA', 'ACAACTTG', 'TGTGTGAC']
SI_P01_B9 = ['AAGCGTGT', 'CTTGACCG', 'TGATTAAC', 'GCCACGTA']
SI_P01_C9 = ['AGATCGGT', 'CATCGTCG', 'GTCATATA', 'TCGGACAC']
SI_P01_D9 = ['CAAGGGAC', 'ACCTACTG', 'GGGACACA', 'TTTCTTGT']
SI_P01_E9 = ['AGTAAGCA', 'TACCGCGG', 'CCGGTAAT', 'GTATCTTC']
SI_P01_F9 = ['AGTTAGTT', 'GTACTTAA', 'CACGCACG', 'TCGAGCGC']
SI_P01_G9 = ['TTGACTTC', 'GCCGAAGT', 'CAATGGCA', 'AGTCTCAG']
SI_P01_H9 = ['GGAATATG', 'ACCTGCCA', 'CTTCATAC', 'TAGGCGGT']
SI_P01_A10 = ['ACAGCAAC', 'TTTCGCGA', 'CGCAATTT', 'GAGTTGCG']
SI_P01_B10 = ['ACCATTAA', 'CTGGACGT', 'GAACGGTC', 'TGTTCACG']
SI_P01_C10 = ['CGTGCTAA', 'TCACTCCT', 'ATCTGATC', 'GAGAAGGG']
SI_P01_D10 = ['CTTATTTG', 'GCGGGCAT', 'AGATAACA', 'TACCCGGC']
SI_P01_E10 = ['GCACCAGT', 'CGCAGGAG', 'TAGTACCA', 'ATTGTTTC']
SI_P01_F10 = ['TCGACAAT', 'GAATACTG', 'ATTCGTGC', 'CGCGTGCA']
SI_P01_G10 = ['CGGAGACT', 'TCCTATGA', 'ATACTGAG', 'GATGCCTC']
SI_P01_H10 = ['GACCGCCA', 'TCGAATTG', 'ATTTCAGC', 'CGAGTGAT']
SI_P01_A11 = ['CTTTCCTT', 'TAGGTAAA', 'ACCAGTCC', 'GGACAGGG']
SI_P01_B11 = ['TCCAGATA', 'GATTCGCT', 'CGACATAG', 'ATGGTCGC']
SI_P01_C11 = ['GTTTGTGG', 'ACCGAACA', 'TAGACGAC', 'CGACTCTT']
SI_P01_D11 = ['GCTACTTC', 'CACCTCAG', 'ATATGAGA', 'TGGGAGCT']
SI_P01_E11 = ['ATCGCCAT', 'TCACGGTA', 'GGGTTTCC', 'CATAAAGG']
SI_P01_F11 = ['GAACCCGG', 'AGCAGTTA', 'TCGTAGAT', 'CTTGTACC']
SI_P01_G11 = ['AGGGCGTT', 'CTATACGC', 'TACATAAG', 'GCTCGTCA']
SI_P01_H11 = ['TCTCGACT', 'AGGATCGA', 'CACGATTC', 'GTATCGAG']
SI_P01_A12 = ['TTATGGAA', 'ACTACTGT', 'CGGGAACG', 'GACCTCTC']
SI_P01_B12 = ['GAAAGACA', 'CGCTACAT', 'ACGCTTGG', 'TTTGCGTC']
SI_P01_C12 = ['TAAGCCAC', 'CCGTTATG', 'GGTAATGT', 'ATCCGGCA']
SI_P01_D12 = ['GCTGTGTA', 'AGAAACGT', 'CACTCAAC', 'TTGCGTCG']
SI_P01_E12 = ['CGCTATCC', 'ACGCGGAA', 'TAAATCGT', 'GTTGCATG']
SI_P01_F12 = ['AATTGAAC', 'TGGACCCT', 'CCAGTGGA', 'GTCCATTG']
SI_P01_G12 = ['CATGCGTA', 'ACCCGCAC', 'TGATATCG', 'GTGATAGT']
SI_P01_H12 = ['TGTGTATA', 'GTCCAGGC', 'CAATCCCT', 'ACGAGTAG']
# WGS+ Tubes
SI_T2_1 = ['GGGTGATC', 'TTACCGAT', 'AATGACGA', 'CCCATTCG']
SI_T2_2 = ['GGGTCGAA', 'ATCCGCCC', 'TCTATAGT', 'CAAGATTG']
SI_T2_3 = ['GCTGATAT', 'TGCCGAGC', 'AAATTGCG', 'CTGACCTA']
SI_T2_4 = ['ACTTCTGA', 'TTCATCTT', 'CGACGACG', 'GAGGAGAC']
SI_T2_5 = ['GAATACAA', 'AGCATACC', 'TCGGGTTT', 'CTTCCGGG']
SI_T2_6 = ['TATTGAGA', 'GTAGTCAG', 'CGCCATTC', 'ACGACGCT']
SI_T2_7 = ['AAATCTGT', 'GTCCAACC', 'TCTGGCTG', 'CGGATGAA']
SI_T2_8 = ['CCTTGAAC', 'GAAATCGG', 'TGGCCTCT', 'ATCGAGTA']
# WGS+ Plate
SI_P2_A1 = ['GGTTTACT', 'CTAAACGG', 'TCGGCGTC', 'AACCGTAA']
SI_P2_A2 = ['TTTCATGA', 'ACGTCCCT', 'CGCATGTG', 'GAAGGAAC']
SI_P2_A3 = ['CAGTACTG', 'AGTAGTCT', 'GCAGTAGA', 'TTCCCGAC']
SI_P2_A4 = ['TATGATTC', 'CCCACAGT', 'ATGCTGAA', 'GGATGCCG']
SI_P2_A5 = ['CTAGGTGA', 'TCGTTCAG', 'AGCCAATT', 'GATACGCC']
SI_P2_A6 = ['CGCTATGT', 'GCTGTCCA', 'TTGAGATC', 'AAACCGAG']
SI_P2_A7 = ['ACAGAGGT', 'TATAGTTG', 'CGGTCCCA', 'GTCCTAAC']
SI_P2_A8 = ['GCATCTCC', 'TGTAAGGT', 'CTGCGATG', 'AACGTCAA']
SI_P2_A9 = ['TCTTAAAG', 'CGAGGCTC', 'GTCCTTCT', 'AAGACGGA']
SI_P2_A10 = ['GAAACCCT', 'TTTCTGTC', 'CCGTGTGA', 'AGCGAAAG']
SI_P2_A11 = ['GTCCGGTC', 'AAGATCAT', 'CCTGAAGG', 'TGATCTCA']
SI_P2_A12 = ['AGTGGAAC', 'GTCTCCTT', 'TCACATCA', 'CAGATGGG']
SI_P2_B1 = ['GTAATCTT', 'TCCGGAAG', 'AGTTCGGC', 'CAGCATCA']
SI_P2_B2 = ['TACTCTTC', 'CCTGTGCG', 'GGACACGT', 'ATGAGAAA']
SI_P2_B3 = ['GTGTATTA', 'TGTGCGGG', 'ACCATAAC', 'CAACGCCT']
SI_P2_B4 = ['ACTTCATA', 'GAGATGAC', 'TGCCGTGG', 'CTAGACCT']
SI_P2_B5 = ['AATAATGG', 'CCAGGGCA', 'TGCCTCAT', 'GTGTCATC']
SI_P2_B6 = ['CGTTAATC', 'GCCACGCT', 'TTACTCAG', 'AAGGGTGA']
SI_P2_B7 = ['AAACCTCA', 'GCCTTGGT', 'CTGGACTC', 'TGTAGAAG']
SI_P2_B8 = ['AAAGTGCT', 'GCTACCTG', 'TGCTGTAA', 'CTGCAAGC']
SI_P2_B9 = ['CTGTAACT', 'TCTAGCGA', 'AGAGTGTG', 'GACCCTAC']
SI_P2_B10 = ['ACCGTATG', 'GATTAGAT', 'CTGACTGA', 'TGACGCCC']
SI_P2_B11 = ['GTTCCTCA', 'AGGTACGC', 'TAAGTATG', 'CCCAGGAT']
SI_P2_B12 = ['TACCACCA', 'CTAAGTTT', 'GGGTCAAG', 'ACTGTGGC']
SI_P2_C1 = ['CCACTTAT', 'AACTGGCG', 'TTGGCATA', 'GGTAACGC']
SI_P2_C2 = ['CCTAGACC', 'ATCTCTGT', 'TAGCTCTA', 'GGAGAGAG']
SI_P2_C3 = ['TCAGCCGT', 'CAGAGGCC', 'GGTCAATA', 'ATCTTTAG']
SI_P2_C4 = ['ACAATTCA', 'TGCGCAGC', 'CATCACTT', 'GTGTGGAG']
SI_P2_C5 = ['CGACTTGA', 'TACAGACT', 'ATTGCGTG', 'GCGTACAC']
SI_P2_C6 = ['ATTACTTC', 'TGCGAACT', 'GCATTCGG', 'CAGCGGAA']
SI_P2_C7 = ['GTCTCTCG', 'AATCTCTC', 'CGGAGGGA', 'TCAGAAAT']
SI_P2_C8 = ['GTTGAGAA', 'AGATCTGG', 'TCGATACT', 'CACCGCTC']
SI_P2_C9 = ['GCGCAGAA', 'ATCTTACC', 'TATGGTGT', 'CGAACCTG']
SI_P2_C10 = ['TCTCAGTG', 'GAGACTAT', 'CGCTTAGC', 'ATAGGCCA']
SI_P2_C11 = ['GAGGATCT', 'AGACCATA', 'TCCTGCGC', 'CTTATGAG']
SI_P2_C12 = ['TCTCGTTT', 'GGCTAGCG', 'ATGACCGC', 'CAAGTAAA']
SI_P2_D1 = ['CACTCGGA', 'GCTGAATT', 'TGAAGTAC', 'ATGCTCCG']
SI_P2_D2 = ['TAACAAGG', 'GGTTCCTC', 'ATCATGCA', 'CCGGGTAT']
SI_P2_D3 = ['ACATTACT', 'TTTGGGTA', 'CAGCCCAC', 'GGCAATGG']
SI_P2_D4 = ['CCCTAACA', 'ATTCCGAT', 'TGGATTGC', 'GAAGGCTG']
SI_P2_D5 = ['CTCGTCAC', 'GATCAGCA', 'ACAACAGG', 'TGGTGTTT']
SI_P2_D6 = ['CATGCGAT', 'TGATATTC', 'GTGATCGA', 'ACCCGACG']
SI_P2_D7 = ['ATTTGCTA', 'TAGACACC', 'CCACAGGG', 'GGCGTTAT']
SI_P2_D8 = ['GCAACAAA', 'TAGTTGTC', 'CGCCATCG', 'ATTGGCGT']
SI_P2_D9 = ['AGGAGATG', 'GATGTGGT', 'CTACATCC', 'TCCTCCAA']
SI_P2_D10 = ['CAATACCC', 'TGTCTATG', 'ACCACGAA', 'GTGGGTGT']
SI_P2_D11 = ['CTTTGCGG', 'TGCACAAA', 'AAGCAGTC', 'GCAGTTCT']
SI_P2_D12 = ['GCACAATG', 'CTTGGTAC', 'TGCACCGT', 'AAGTTGCA']
SI_P2_E1 = ['TGGTAAAC', 'GAAAGGGT', 'ACTGCTCG', 'CTCCTCTA']
SI_P2_E2 = ['GTGGTACC', 'TACTATAG', 'ACAAGGTA', 'CGTCCCGT']
SI_P2_E3 = ['AGGTATTG', 'CTCCTAGT', 'TCAAGGCC', 'GATGCCAA']
SI_P2_E4 = ['TTCGCCCT', 'GGATGGGC', 'AATCAATG', 'CCGATTAA']
SI_P2_E5 = ['CATTAGCG', 'TTCGCTGA', 'ACAAGAAT', 'GGGCTCTC']
SI_P2_E6 = ['CTGCGGCT', 'GACTCAAA', 'AGAAACTC', 'TCTGTTGG']
SI_P2_E7 = ['CACGCCTT', 'GTATATAG', 'TCTCGGGC', 'AGGATACA']
SI_P2_E8 = ['ATAGTTAC', 'TGCTGAGT', 'CCTACGTA', 'GAGCACCG']
SI_P2_E9 = ['TTGTTTCC', 'GGAGGAGG', 'CCTAACAA', 'AACCCGTT']
SI_P2_E10 = ['AAATGTGC', 'GGGCAAAT', 'TCTATCCG', 'CTCGCGTA']
SI_P2_E11 = ['AAGCGCTG', 'CGTTTGAT', 'GTAGCACA', 'TCCAATGC']
SI_P2_E12 = ['ACCGGCTC', 'GAGTTAGT', 'CGTCCTAG', 'TTAAAGCA']
SI_P2_F1 = ['GTTGCAGC', 'TGGAATTA', 'CAATGGAG', 'ACCCTCCT']
SI_P2_F2 = ['TTTACATG', 'CGCGATAC', 'ACGCGGGT', 'GAATTCCA']
SI_P2_F3 = ['TTCAGGTG', 'ACGGACAT', 'GATCTTGA', 'CGATCACC']
SI_P2_F4 = ['CCCAATAG', 'GTGTCGCT', 'AGAGTCGC', 'TATCGATA']
SI_P2_F5 = ['GACTACGT', 'CTAGCGAG', 'TCTATATC', 'AGGCGTCA']
SI_P2_F6 = ['CGGAGCAC', 'GACCTATT', 'ACTTAGGA', 'TTAGCTCG']
SI_P2_F7 = ['CGTGCAGA', 'AACAAGAT', 'TCGCTTCG', 'GTATGCTC']
SI_P2_F8 = ['CATGAACA', 'TCACTCGC', 'AGCTGGAT', 'GTGACTTG']
SI_P2_F9 = ['CAAGCTCC', 'GTTCACTG', 'TCGTGAAA', 'AGCATGGT']
SI_P2_F10 = ['GCTTGGCT', 'AAACAAAC', 'CGGGCTTA', 'TTCATCGG']
SI_P2_F11 = ['GCGAGAGT', 'TACGTTCA', 'AGTCCCAC', 'CTATAGTG']
SI_P2_F12 = ['TGATGCAT', 'GCTACTGA', 'CACCTGCC', 'ATGGAATG']
SI_P2_G1 = ['ATGAATCT', 'GATCTCAG', 'CCAGGAGC', 'TGCTCGTA']
SI_P2_G2 = ['TGATTCTA', 'ACTAGGAG', 'CAGCCACT', 'GTCGATGC']
SI_P2_G3 = ['CCTCATTC', 'AGCATCCG', 'GTGGCAAT', 'TAATGGGA']
SI_P2_G4 = ['GCGATGTG', 'AGATACAA', 'TTTCCACT', 'CACGGTGC']
SI_P2_G5 = ['GAGCAAGA', 'TCTGTGAT', 'CGCAGTTC', 'ATATCCCG']
SI_P2_G6 = ['CTGACGCG', 'GGTCGTAC', 'TCCTTCTT', 'AAAGAAGA']
SI_P2_G7 = ['GGTATGCA', 'CTCGAAAT', 'ACACCTTC', 'TAGTGCGG']
SI_P2_G8 = ['TATGAGCT', 'CCGATAGC', 'ATACCCAA', 'GGCTGTTG']
SI_P2_G9 = ['TAGGACGT', 'ATCCCACA', 'GGAATGTC', 'CCTTGTAG']
SI_P2_G10 = ['TCGCCAGC', 'AATGTTAG', 'CGATAGCT', 'GTCAGCTA']
SI_P2_G11 = ['TTATCGTT', 'AGCAGAGC', 'CATCTCCA', 'GCGGATAG']
SI_P2_G12 = ['ATTCTAAG', 'CCCGATTA', 'TGGAGGCT', 'GAATCCGC']
SI_P2_H1 = ['GTATGTCA', 'TGTCAGAC', 'CACGTCGG', 'ACGACATT']
SI_P2_H2 = ['TAATGACC', 'ATGCCTTA', 'GCCGAGAT', 'CGTATCGG']
SI_P2_H3 = ['CCAAGATG', 'AGGCCCGA', 'TACGTGAC', 'GTTTATCT']
SI_P2_H4 = ['GCCATTCC', 'CAAGAATT', 'TTGCCGGA', 'AGTTGCAG']
SI_P2_H5 = ['CCACTACA', 'GATTCTGG', 'TGCGGCTT', 'ATGAAGAC']
SI_P2_H6 = ['TAGGATAA', 'CCTTTGTC', 'GTACGCGG', 'AGCACACT']
SI_P2_H7 = ['AGCTATCA', 'CATATAAC', 'TCAGGGTG', 'GTGCCCGT']
SI_P2_H8 = ['TTGTTGAT', 'GCTCAACC', 'CAAAGTGG', 'AGCGCCTA']
SI_P2_H9 = ['ACACTGTT', 'CAGGATGG', 'GGCTGAAC', 'TTTACCCA']
SI_P2_H10 = ['GTAATTGC', 'AGTCGCTT', 'CACGAGAA', 'TCGTCACG']
SI_P2_H11 = ['GGCGAGTA', 'ACTTCTAT', 'CAAATACG', 'TTGCGCGC']
SI_P2_H12 = ['GACAGCAT', 'TTTGTACA', 'AGGCCGTG', 'CCATATGC']
SI_NA_A1 = SI_3A_A1 = SI_P03_A1 = ['AAACGGCG', 'CCTACCAT', 'GGCGTTTC', 'TTGTAAGA']
SI_NA_B1 = SI_3A_B1 = SI_P03_B1 = ['AGGCTACC', 'CTAGCTGT', 'GCCAACAA', 'TATTGGTG']
SI_NA_C1 = SI_3A_C1 = SI_P03_C1 = ['AGACTTTC', 'CCGAGGCA', 'GATGCAGT', 'TTCTACAG']
SI_NA_D1 = SI_3A_D1 = SI_P03_D1 = ['AGCTGCGT', 'CAACCATC', 'GTGGAGCA', 'TCTATTAG']
SI_NA_E1 = SI_3A_E1 = SI_P03_E1 = ['ACGAAAGC', 'CGCCCGTA', 'GTTTGCCT', 'TAAGTTAG']
SI_NA_F1 = SI_3A_F1 = SI_P03_F1 = ['ATCGCTCC', 'CCGTACAG', 'GATAGGTA', 'TGACTAGT']
SI_NA_G1 = SI_3A_G1 = SI_P03_G1 = ['ATGCGATT', 'CATATGCG', 'GGATACGA', 'TCCGCTAC']
SI_NA_H1 = SI_3A_H1 = SI_P03_H1 = ['AAACTCAT', 'CGGGAGTA', 'GTCACAGG', 'TCTTGTCC']
SI_NA_A2 = SI_3A_A2 = SI_P03_A2 = ['AGCCCTTT', 'CAAGTCCA', 'GTGAGAAG', 'TCTTAGGC']
SI_NA_B2 = SI_3A_B2 = SI_P03_B2 = ['AAGTTGAT', 'CCCACCCA', 'GGTCGAGC', 'TTAGATTG']
SI_NA_C2 = SI_3A_C2 = SI_P03_C2 = ['AATCACTA', 'CCGAGAAC', 'GTAGTGCG', 'TGCTCTGT']
SI_NA_D2 = SI_3A_D2 = SI_P03_D2 = ['ACATTCCG', 'CTGCGGTA', 'GACACAAT', 'TGTGATGC']
SI_NA_E2 = SI_3A_E2 = SI_P03_E2 = ['AGGCTGGT', 'CACAACTA', 'GTTGGTCC', 'TCATCAAG']
SI_NA_F2 = SI_3A_F2 = SI_P03_F2 = ['ATGGTTAG', 'CATTGATA', 'GCAAACGC', 'TGCCCGCT']
SI_NA_G2 = SI_3A_G2 = SI_P03_G2 = ['ATAACCTA', 'CGGTGAGC', 'GATCTTAT', 'TCCGAGCG']
SI_NA_H2 = SI_3A_H2 = SI_P03_H2 = ['AACGGTCA', 'CCGAACTC', 'GGTCCAAG', 'TTATTGGT']
SI_NA_A3 = SI_3A_A3 = SI_P03_A3 = ['AAAGCATA', 'CTGCAGCC', 'GCCTTTAT', 'TGTAGCGG']
SI_NA_B3 = SI_3A_B3 = SI_P03_B3 = ['ATTGGACG', 'CAGCTTAC', 'GGCAAGGA', 'TCATCCTT']
SI_NA_C3 = SI_3A_C3 = SI_P03_C3 = ['ACGTTACA', 'CGTAGGTT', 'GACGACGG', 'TTACCTAC']
SI_NA_D3 = SI_3A_D3 = SI_P03_D3 = ['ACTTCACT', 'CGAAGTTG', 'GAGCACGC', 'TTCGTGAA']
SI_NA_E3 = SI_3A_E3 = SI_P03_E3 = ['AACAAGTC', 'CGGCTCCA', 'GTATGTAT', 'TCTGCAGG']
SI_NA_F3 = SI_3A_F3 = SI_P03_F3 = ['AGTCTGTA', 'CAGAATAG', 'GCCTCCGT', 'TTAGGACC']
SI_NA_G3 = SI_3A_G3 = SI_P03_G3 = ['ATGTCCAG', 'CGACGTCA', 'GCTATAGC', 'TACGAGTT']
SI_NA_H3 = SI_3A_H3 = SI_P03_H3 = ['ACACCTAA', 'CGTTTGGG', 'GACAAACC', 'TTGGGCTT']
SI_NA_A4 = SI_3A_A4 = SI_P03_A4 = ['AGAACGCC', 'CATGGCAG', 'GTCTTTGA', 'TCGCAATT']
SI_NA_B4 = SI_3A_B4 = SI_P03_B4 = ['AGGGACTG', 'CCTCTAAC', 'GACAGGCT', 'TTATCTGA']
SI_NA_C4 = SI_3A_C4 = SI_P03_C4 = ['ACATTGGC', 'CTTAGTCA', 'GAGCCCAT', 'TGCGAATG']
SI_NA_D4 = SI_3A_D4 = SI_P03_D4 = ['AAATCGTC', 'CTTCGAAT', 'GCGATCGG', 'TGCGATCA']
SI_NA_E4 = SI_3A_E4 = SI_P03_E4 = ['AGCTGACG', 'CCGGTGTC', 'GTAAACAT', 'TATCCTGA']
SI_NA_F4 = SI_3A_F4 = SI_P03_F4 = ['AACGACAC', 'CGTCCTCT', 'GCATGATA', 'TTGATGGG']
SI_NA_G4 = SI_3A_G4 = SI_P03_G4 = ['AGCTTCTC', 'CCTGCGGT', 'GTACAACG', 'TAGAGTAA']
SI_NA_H4 = SI_3A_H4 = SI_P03_H4 = ['ACTGGAGC', 'CGGTCGTG', 'GAAATCAA', 'TTCCATCT']
SI_NA_A5 = SI_3A_A5 = SI_P03_A5 = ['ATTGGGAA', 'CAGTCTGG', 'GGCATACT', 'TCACACTC']
SI_NA_B5 = SI_3A_B5 = SI_P03_B5 = ['ATCGTACT', 'CATCAGTG', 'GGGACTAC', 'TCATGCGA']
SI_NA_C5 = SI_3A_C5 = SI_P03_C5 = ['ATGCATTC', 'CACTGACT', 'GGTACGGG', 'TCAGTCAA']
SI_NA_D5 = SI_3A_D5 = SI_P03_D5 = ['AGACGGAT', 'CCTTTAGA', 'GTCGACTC', 'TAGACTCG']
SI_NA_E5 = SI_3A_E5 = SI_P03_E5 = ['ATCCAAGG', 'CCGTTGAA', 'GGAAGCTC', 'TATGCTCT']
SI_NA_F5 = SI_3A_F5 = SI_P03_F5 = ['AACACAGC', 'CGGTTTAG', 'GTACGGCT', 'TCTGACTA']
SI_NA_G5 = SI_3A_G5 = SI_P03_G5 = ['ATGAAGTA', 'CGCCGAAC', 'GAAGCTCG', 'TCTTTCGT']
SI_NA_H5 = SI_3A_H5 = SI_P03_H5 = ['ATAGTATG', 'CCGCGTCT', 'GGCTCCAC', 'TATAAGGA']
SI_NA_A6 = SI_3A_A6 = SI_P03_A6 = ['ACGGGACT', 'CTTTCGAC', 'GAACATGA', 'TGCATCTG']
SI_NA_B6 = SI_3A_B6 = SI_P03_B6 = ['AACGCGAA', 'CTATTTGG', 'GCGCACCT', 'TGTAGATC']
SI_NA_C6 = SI_3A_C6 = SI_P03_C6 = ['ACTCAGAC', 'CGCTCAGG', 'GAGGTTTA', 'TTAAGCCT']
SI_NA_D6 = SI_3A_D6 = SI_P03_D6 = ['ATGCCAAA', 'CCTTATCG', 'GAAGTCTT', 'TGCAGGGC']
SI_NA_E6 = SI_3A_E6 = SI_P03_E6 = ['ATTGAAAC', 'CAGCCCGA', 'GCCATTTG', 'TGATGGCT']
SI_NA_F6 = SI_3A_F6 = SI_P03_F6 = ['ATGCCGGC', 'CCTAATTA', 'GACTTCCT', 'TGAGGAAG']
SI_NA_G6 = SI_3A_G6 = SI_P03_G6 = ['AGCACTGG', 'CATTACAC', 'GTGCGACA', 'TCAGTGTT']
SI_NA_H6 = SI_3A_H6 = SI_P03_H6 = ['AAGCATAA', 'CCCATCGC', 'GGTTGATG', 'TTAGCGCT']
SI_NA_A7 = SI_3A_A7 = SI_P03_A7 = ['AGGTCATA', 'CTCATCAT', 'GCTGAGGG', 'TAACGTCC']
SI_NA_B7 = SI_3A_B7 = SI_P03_B7 = ['AGGGATGA', 'CTTCTGTT', 'GAATGCAC', 'TCCACACG']
SI_NA_C7 = SI_3A_C7 = SI_P03_C7 = ['ACACCGGG', 'CATAATCC', 'GGCGGAAT', 'TTGTTCTA']
SI_NA_D7 = SI_3A_D7 = SI_P03_D7 = ['AACTTAGA', 'CCGGATCC', 'GGTCGCAT', 'TTAACGTG']
SI_NA_E7 = SI_3A_E7 = SI_P03_E7 = ['AAGACGTG', 'CCATGTGT', 'GTTCACAA', 'TGCGTACC']
SI_NA_F7 = SI_3A_F7 = SI_P03_F7 = ['ACCCGAGA', 'CAAACTTT', 'GGTTAGAC', 'TTGGTCCG']
SI_NA_G7 = SI_3A_G7 = SI_P03_G7 = ['ATTACCGG', 'CAGTAATT', 'GCCGGTAA', 'TGACTGCC']
SI_NA_H7 = SI_3A_H7 = SI_P03_H7 = ['AACGGGTG', 'CTAATTCT', 'GCTTCAAC', 'TGGCACGA']
SI_NA_A8 = SI_3A_A8 = SI_P03_A8 = ['ATGATACG', 'CCACAGAA', 'GACTGTTC', 'TGTGCCGT']
SI_NA_B8 = SI_3A_B8 = SI_P03_B8 = ['ACGTTCAC', 'CAAGGTCT', 'GTTAAGTG', 'TGCCCAGA']
SI_NA_C8 = SI_3A_C8 = SI_P03_C8 = ['AGCTCGAG', 'CAGGAAGA', 'GCACGTTT', 'TTTATCCC']
SI_NA_D8 = SI_3A_D8 = SI_P03_D8 = ['AATCTTTG', 'CTCAAGAC', 'GGATGAGT', 'TCGGCCCA']
SI_NA_E8 = SI_3A_E8 = SI_P03_E8 = ['AGCCTATG', 'CTAACGCA', 'GCTTACAT', 'TAGGGTGC']
SI_NA_F8 = SI_3A_F8 = SI_P03_F8 = ['AGTTGGGA', 'CCAGAAAG', 'GTGCCCTC', 'TACATTCT']
SI_NA_G8 = SI_3A_G8 = SI_P03_G8 = ['AAGTACTC', 'CTTGGAGA', 'GGAACTCT', 'TCCCTGAG']
SI_NA_H8 = SI_3A_H8 = SI_P03_H8 = ['AAGAGCGG', 'CTTGTTAT', 'GGCCCATC', 'TCATAGCA']
SI_NA_A9 = SI_3A_A9 = SI_P03_A9 = ['ACAACTTG', 'CTCCAACA', 'GAGTGCGT', 'TGTGTGAC']
SI_NA_B9 = SI_3A_B9 = SI_P03_B9 = ['AAGCGTGT', 'CTTGACCG', 'GCCACGTA', 'TGATTAAC']
SI_NA_C9 = SI_3A_C9 = SI_P03_C9 = ['AGATCGGT', 'CATCGTCG', 'GTCATATA', 'TCGGACAC']
SI_NA_D9 = SI_3A_D9 = SI_P03_D9 = ['ACCTACTG', 'CAAGGGAC', 'GGGACACA', 'TTTCTTGT']
SI_NA_E9 = SI_3A_E9 = SI_P03_E9 = ['AGTAAGCA', 'CCGGTAAT', 'GTATCTTC', 'TACCGCGG']
SI_NA_F9 = SI_3A_F9 = SI_P03_F9 = ['AGTTAGTT', 'CACGCACG', 'GTACTTAA', 'TCGAGCGC']
SI_NA_G9 = SI_3A_G9 = SI_P03_G9 = ['AGTCTCAG', 'CAATGGCA', 'GCCGAAGT', 'TTGACTTC']
SI_NA_H9 = SI_3A_H9 = SI_P03_H9 = ['ACCTGCCA', 'CTTCATAC', 'GGAATATG', 'TAGGCGGT']
SI_NA_A10 = SI_3A_A10 = SI_P03_A10 = ['ACAGCAAC', 'CGCAATTT', 'GAGTTGCG', 'TTTCGCGA']
SI_NA_B10 = SI_3A_B10 = SI_P03_B10 = ['ACCATTAA', 'CTGGACGT', 'GAACGGTC', 'TGTTCACG']
SI_NA_C10 = SI_3A_C10 = SI_P03_C10 = ['ATCTGATC', 'CGTGCTAA', 'GAGAAGGG', 'TCACTCCT']
SI_NA_D10 = SI_3A_D10 = SI_P03_D10 = ['AGATAACA', 'CTTATTTG', 'GCGGGCAT', 'TACCCGGC']
SI_NA_E10 = SI_3A_E10 = SI_P03_E10 = ['ATTGTTTC', 'CGCAGGAG', 'GCACCAGT', 'TAGTACCA']
SI_NA_F10 = SI_3A_F10 = SI_P03_F10 = ['ATTCGTGC', 'CGCGTGCA', 'GAATACTG', 'TCGACAAT']
SI_NA_G10 = SI_3A_G10 = SI_P03_G10 = ['ATACTGAG', 'CGGAGACT', 'GATGCCTC', 'TCCTATGA']
SI_NA_H10 = SI_3A_H10 = SI_P03_H10 = ['ATTTCAGC', 'CGAGTGAT', 'GACCGCCA', 'TCGAATTG']
SI_NA_A11 = SI_3A_A11 = SI_P03_A11 = ['ACCAGTCC', 'CTTTCCTT', 'GGACAGGG', 'TAGGTAAA']
SI_NA_B11 = SI_3A_B11 = SI_P03_B11 = ['ATGGTCGC', 'CGACATAG', 'GATTCGCT', 'TCCAGATA']
SI_NA_C11 = SI_3A_C11 = SI_P03_C11 = ['ACCGAACA', 'CGACTCTT', 'GTTTGTGG', 'TAGACGAC']
SI_NA_D11 = SI_3A_D11 = SI_P03_D11 = ['ATATGAGA', 'CACCTCAG', 'GCTACTTC', 'TGGGAGCT']
SI_NA_E11 = SI_3A_E11 = SI_P03_E11 = ['ATCGCCAT', 'CATAAAGG', 'GGGTTTCC', 'TCACGGTA']
SI_NA_F11 = SI_3A_F11 = SI_P03_F11 = ['AGCAGTTA', 'CTTGTACC', 'GAACCCGG', 'TCGTAGAT']
SI_NA_G11 = SI_3A_G11 = SI_P03_G11 = ['AGGGCGTT', 'CTATACGC', 'GCTCGTCA', 'TACATAAG']
SI_NA_H11 = SI_3A_H11 = SI_P03_H11 = ['AGGATCGA', 'CACGATTC', 'GTATCGAG', 'TCTCGACT']
SI_NA_A12 = SI_3A_A12 = SI_P03_A12 = ['ACTACTGT', 'CGGGAACG', 'GACCTCTC', 'TTATGGAA']
SI_NA_B12 = SI_3A_B12 = SI_P03_B12 = ['ACGCTTGG', 'CGCTACAT', 'GAAAGACA', 'TTTGCGTC']
SI_NA_C12 = SI_3A_C12 = SI_P03_C12 = ['ATCCGGCA', 'CCGTTATG', 'GGTAATGT', 'TAAGCCAC']
SI_NA_D12 = SI_3A_D12 = SI_P03_D12 = ['AGAAACGT', 'CACTCAAC', 'GCTGTGTA', 'TTGCGTCG']
SI_NA_E12 = SI_3A_E12 = SI_P03_E12 = ['ACGCGGAA', 'CGCTATCC', 'GTTGCATG', 'TAAATCGT']
SI_NA_F12 = SI_3A_F12 = SI_P03_F12 = ['AATTGAAC', 'CCAGTGGA', 'GTCCATTG', 'TGGACCCT']
SI_NA_G12 = SI_3A_G12 = SI_P03_G12 = ['ACCCGCAC', 'CATGCGTA', 'GTGATAGT', 'TGATATCG']
SI_NA_H12 = SI_3A_H12 = SI_P03_H12 = ['ACGAGTAG', 'CAATCCCT', 'GTCCAGGC', 'TGTGTATA']
# Chromium WGS Plate
SI_GA_A1 = SI_P2_A1 = ['GGTTTACT', 'CTAAACGG', 'TCGGCGTC', 'AACCGTAA']
SI_GA_A2 = SI_P2_A2 = ['TTTCATGA', 'ACGTCCCT', 'CGCATGTG', 'GAAGGAAC']
SI_GA_A3 = SI_P2_A3 = ['CAGTACTG', 'AGTAGTCT', 'GCAGTAGA', 'TTCCCGAC']
SI_GA_A4 = SI_P2_A4 = ['TATGATTC', 'CCCACAGT', 'ATGCTGAA', 'GGATGCCG']
SI_GA_A5 = SI_P2_A5 = ['CTAGGTGA', 'TCGTTCAG', 'AGCCAATT', 'GATACGCC']
SI_GA_A6 = SI_P2_A6 = ['CGCTATGT', 'GCTGTCCA', 'TTGAGATC', 'AAACCGAG']
SI_GA_A7 = SI_P2_A7 = ['ACAGAGGT', 'TATAGTTG', 'CGGTCCCA', 'GTCCTAAC']
SI_GA_A8 = SI_P2_A8 = ['GCATCTCC', 'TGTAAGGT', 'CTGCGATG', 'AACGTCAA']
SI_GA_A9 = SI_P2_A9 = ['TCTTAAAG', 'CGAGGCTC', 'GTCCTTCT', 'AAGACGGA']
SI_GA_A10 = SI_P2_A10 = ['GAAACCCT', 'TTTCTGTC', 'CCGTGTGA', 'AGCGAAAG']
SI_GA_A11 = SI_P2_A11 = ['GTCCGGTC', 'AAGATCAT', 'CCTGAAGG', 'TGATCTCA']
SI_GA_A12 = SI_P2_A12 = ['AGTGGAAC', 'GTCTCCTT', 'TCACATCA', 'CAGATGGG']
SI_GA_B1 = SI_P2_B1 = ['GTAATCTT', 'TCCGGAAG', 'AGTTCGGC', 'CAGCATCA']
SI_GA_B2 = SI_P2_B2 = ['TACTCTTC', 'CCTGTGCG', 'GGACACGT', 'ATGAGAAA']
SI_GA_B3 = SI_P2_B3 = ['GTGTATTA', 'TGTGCGGG', 'ACCATAAC', 'CAACGCCT']
SI_GA_B4 = SI_P2_B4 = ['ACTTCATA', 'GAGATGAC', 'TGCCGTGG', 'CTAGACCT']
SI_GA_B5 = SI_P2_B5 = ['AATAATGG', 'CCAGGGCA', 'TGCCTCAT', 'GTGTCATC']
SI_GA_B6 = SI_P2_B6 = ['CGTTAATC', 'GCCACGCT', 'TTACTCAG', 'AAGGGTGA']
SI_GA_B7 = SI_P2_B7 = ['AAACCTCA', 'GCCTTGGT', 'CTGGACTC', 'TGTAGAAG']
SI_GA_B8 = SI_P2_B8 = ['AAAGTGCT', 'GCTACCTG', 'TGCTGTAA', 'CTGCAAGC']
SI_GA_B9 = SI_P2_B9 = ['CTGTAACT', 'TCTAGCGA', 'AGAGTGTG', 'GACCCTAC']
SI_GA_B10 = SI_P2_B10 = ['ACCGTATG', 'GATTAGAT', 'CTGACTGA', 'TGACGCCC']
SI_GA_B11 = SI_P2_B11 = ['GTTCCTCA', 'AGGTACGC', 'TAAGTATG', 'CCCAGGAT']
SI_GA_B12 = SI_P2_B12 = ['TACCACCA', 'CTAAGTTT', 'GGGTCAAG', 'ACTGTGGC']
SI_GA_C1 = SI_P2_C1 = ['CCACTTAT', 'AACTGGCG', 'TTGGCATA', 'GGTAACGC']
SI_GA_C2 = SI_P2_C2 = ['CCTAGACC', 'ATCTCTGT', 'TAGCTCTA', 'GGAGAGAG']
SI_GA_C3 = SI_P2_C3 = ['TCAGCCGT', 'CAGAGGCC', 'GGTCAATA', 'ATCTTTAG']
SI_GA_C4 = SI_P2_C4 = ['ACAATTCA', 'TGCGCAGC', 'CATCACTT', 'GTGTGGAG']
SI_GA_C5 = SI_P2_C5 = ['CGACTTGA', 'TACAGACT', 'ATTGCGTG', 'GCGTACAC']
SI_GA_C6 = SI_P2_C6 = ['ATTACTTC', 'TGCGAACT', 'GCATTCGG', 'CAGCGGAA']
SI_GA_C7 = SI_P2_C7 = ['GTCTCTCG', 'AATCTCTC', 'CGGAGGGA', 'TCAGAAAT']
SI_GA_C8 = SI_P2_C8 = ['GTTGAGAA', 'AGATCTGG', 'TCGATACT', 'CACCGCTC']
SI_GA_C9 = SI_P2_C9 = ['GCGCAGAA', 'ATCTTACC', 'TATGGTGT', 'CGAACCTG']
SI_GA_C10 = SI_P2_C10 = ['TCTCAGTG', 'GAGACTAT', 'CGCTTAGC', 'ATAGGCCA']
SI_GA_C11 = SI_P2_C11 = ['GAGGATCT', 'AGACCATA', 'TCCTGCGC', 'CTTATGAG']
SI_GA_C12 = SI_P2_C12 = ['TCTCGTTT', 'GGCTAGCG', 'ATGACCGC', 'CAAGTAAA']
SI_GA_D1 = SI_P2_D1 = ['CACTCGGA', 'GCTGAATT', 'TGAAGTAC', 'ATGCTCCG']
SI_GA_D2 = SI_P2_D2 = ['TAACAAGG', 'GGTTCCTC', 'ATCATGCA', 'CCGGGTAT']
SI_GA_D3 = SI_P2_D3 = ['ACATTACT', 'TTTGGGTA', 'CAGCCCAC', 'GGCAATGG']
SI_GA_D4 = SI_P2_D4 = ['CCCTAACA', 'ATTCCGAT', 'TGGATTGC', 'GAAGGCTG']
SI_GA_D5 = SI_P2_D5 = ['CTCGTCAC', 'GATCAGCA', 'ACAACAGG', 'TGGTGTTT']
SI_GA_D6 = SI_P2_D6 = ['CATGCGAT', 'TGATATTC', 'GTGATCGA', 'ACCCGACG']
SI_GA_D7 = SI_P2_D7 = ['ATTTGCTA', 'TAGACACC', 'CCACAGGG', 'GGCGTTAT']
SI_GA_D8 = SI_P2_D8 = ['GCAACAAA', 'TAGTTGTC', 'CGCCATCG', 'ATTGGCGT']
SI_GA_D9 = SI_P2_D9 = ['AGGAGATG', 'GATGTGGT', 'CTACATCC', 'TCCTCCAA']
SI_GA_D10 = SI_P2_D10 = ['CAATACCC', 'TGTCTATG', 'ACCACGAA', 'GTGGGTGT']
SI_GA_D11 = SI_P2_D11 = ['CTTTGCGG', 'TGCACAAA', 'AAGCAGTC', 'GCAGTTCT']
SI_GA_D12 = SI_P2_D12 = ['GCACAATG', 'CTTGGTAC', 'TGCACCGT', 'AAGTTGCA']
SI_GA_E1 = SI_P2_E1 = ['TGGTAAAC', 'GAAAGGGT', 'ACTGCTCG', 'CTCCTCTA']
SI_GA_E2 = SI_P2_E2 = ['GTGGTACC', 'TACTATAG', 'ACAAGGTA', 'CGTCCCGT']
SI_GA_E3 = SI_P2_E3 = ['AGGTATTG', 'CTCCTAGT', 'TCAAGGCC', 'GATGCCAA']
SI_GA_E4 = SI_P2_E4 = ['TTCGCCCT', 'GGATGGGC', 'AATCAATG', 'CCGATTAA']
SI_GA_E5 = SI_P2_E5 = ['CATTAGCG', 'TTCGCTGA', 'ACAAGAAT', 'GGGCTCTC']
SI_GA_E6 = SI_P2_E6 = ['CTGCGGCT', 'GACTCAAA', 'AGAAACTC', 'TCTGTTGG']
SI_GA_E7 = SI_P2_E7 = ['CACGCCTT', 'GTATATAG', 'TCTCGGGC', 'AGGATACA']
SI_GA_E8 = SI_P2_E8 = ['ATAGTTAC', 'TGCTGAGT', 'CCTACGTA', 'GAGCACCG']
SI_GA_E9 = SI_P2_E9 = ['TTGTTTCC', 'GGAGGAGG', 'CCTAACAA', 'AACCCGTT']
SI_GA_E10 = SI_P2_E10 = ['AAATGTGC', 'GGGCAAAT', 'TCTATCCG', 'CTCGCGTA']
SI_GA_E11 = SI_P2_E11 = ['AAGCGCTG', 'CGTTTGAT', 'GTAGCACA', 'TCCAATGC']
SI_GA_E12 = SI_P2_E12 = ['ACCGGCTC', 'GAGTTAGT', 'CGTCCTAG', 'TTAAAGCA']
SI_GA_F1 = SI_P2_F1 = ['GTTGCAGC', 'TGGAATTA', 'CAATGGAG', 'ACCCTCCT']
SI_GA_F2 = SI_P2_F2 = ['TTTACATG', 'CGCGATAC', 'ACGCGGGT', 'GAATTCCA']
SI_GA_F3 = SI_P2_F3 = ['TTCAGGTG', 'ACGGACAT', 'GATCTTGA', 'CGATCACC']
SI_GA_F4 = SI_P2_F4 = ['CCCAATAG', 'GTGTCGCT', 'AGAGTCGC', 'TATCGATA']
SI_GA_F5 = SI_P2_F5 = ['GACTACGT', 'CTAGCGAG', 'TCTATATC', 'AGGCGTCA']
SI_GA_F6 = SI_P2_F6 = ['CGGAGCAC', 'GACCTATT', 'ACTTAGGA', 'TTAGCTCG']
SI_GA_F7 = SI_P2_F7 = ['CGTGCAGA', 'AACAAGAT', 'TCGCTTCG', 'GTATGCTC']
SI_GA_F8 = SI_P2_F8 = ['CATGAACA', 'TCACTCGC', 'AGCTGGAT', 'GTGACTTG']
SI_GA_F9 = SI_P2_F9 = ['CAAGCTCC', 'GTTCACTG', 'TCGTGAAA', 'AGCATGGT']
SI_GA_F10 = SI_P2_F10 = ['GCTTGGCT', 'AAACAAAC', 'CGGGCTTA', 'TTCATCGG']
SI_GA_F11 = SI_P2_F11 = ['GCGAGAGT', 'TACGTTCA', 'AGTCCCAC', 'CTATAGTG']
SI_GA_F12 = SI_P2_F12 = ['TGATGCAT', 'GCTACTGA', 'CACCTGCC', 'ATGGAATG']
SI_GA_G1 = SI_P2_G1 = ['ATGAATCT', 'GATCTCAG', 'CCAGGAGC', 'TGCTCGTA']
SI_GA_G2 = SI_P2_G2 = ['TGATTCTA', 'ACTAGGAG', 'CAGCCACT', 'GTCGATGC']
SI_GA_G3 = SI_P2_G3 = ['CCTCATTC', 'AGCATCCG', 'GTGGCAAT', 'TAATGGGA']
SI_GA_G4 = SI_P2_G4 = ['GCGATGTG', 'AGATACAA', 'TTTCCACT', 'CACGGTGC']
SI_GA_G5 = SI_P2_G5 = ['GAGCAAGA', 'TCTGTGAT', 'CGCAGTTC', 'ATATCCCG']
SI_GA_G6 = SI_P2_G6 = ['CTGACGCG', 'GGTCGTAC', 'TCCTTCTT', 'AAAGAAGA']
SI_GA_G7 = SI_P2_G7 = ['GGTATGCA', 'CTCGAAAT', 'ACACCTTC', 'TAGTGCGG']
SI_GA_G8 = SI_P2_G8 = ['TATGAGCT', 'CCGATAGC', 'ATACCCAA', 'GGCTGTTG']
SI_GA_G9 = SI_P2_G9 = ['TAGGACGT', 'ATCCCACA', 'GGAATGTC', 'CCTTGTAG']
SI_GA_G10 = SI_P2_G10 = ['TCGCCAGC', 'AATGTTAG', 'CGATAGCT', 'GTCAGCTA']
SI_GA_G11 = SI_P2_G11 = ['TTATCGTT', 'AGCAGAGC', 'CATCTCCA', 'GCGGATAG']
SI_GA_G12 = SI_P2_G12 = ['ATTCTAAG', 'CCCGATTA', 'TGGAGGCT', 'GAATCCGC']
SI_GA_H1 = SI_P2_H1 = ['GTATGTCA', 'TGTCAGAC', 'CACGTCGG', 'ACGACATT']
SI_GA_H2 = SI_P2_H2 = ['TAATGACC', 'ATGCCTTA', 'GCCGAGAT', 'CGTATCGG']
SI_GA_H3 = SI_P2_H3 = ['CCAAGATG', 'AGGCCCGA', 'TACGTGAC', 'GTTTATCT']
SI_GA_H4 = SI_P2_H4 = ['GCCATTCC', 'CAAGAATT', 'TTGCCGGA', 'AGTTGCAG']
SI_GA_H5 = SI_P2_H5 = ['CCACTACA', 'GATTCTGG', 'TGCGGCTT', 'ATGAAGAC']
SI_GA_H6 = SI_P2_H6 = ['TAGGATAA', 'CCTTTGTC', 'GTACGCGG', 'AGCACACT']
SI_GA_H7 = SI_P2_H7 = ['AGCTATCA', 'CATATAAC', 'TCAGGGTG', 'GTGCCCGT']
SI_GA_H8 = SI_P2_H8 = ['TTGTTGAT', 'GCTCAACC', 'CAAAGTGG', 'AGCGCCTA']
SI_GA_H9 = SI_P2_H9 = ['ACACTGTT', 'CAGGATGG', 'GGCTGAAC', 'TTTACCCA']
SI_GA_H10 = SI_P2_H10 = ['GTAATTGC', 'AGTCGCTT', 'CACGAGAA', 'TCGTCACG']
SI_GA_H11 = SI_P2_H11 = ['GGCGAGTA', 'ACTTCTAT', 'CAAATACG', 'TTGCGCGC']
SI_GA_H12 = SI_P2_H12 = ['GACAGCAT', 'TTTGTACA', 'AGGCCGTG', 'CCATATGC']
SAMPLE_INDEX_MAP = {
# GemCode Tube labels
'SI-001': SI_001,
'SI-002': SI_002,
'SI-003': SI_003,
'SI-004': SI_004,
'SI-005': SI_005,
'SI-006': SI_006,
'SI-007': SI_007,
'SI-008': SI_008,
# GemCode Plate labels
'SI-P01-A1': SI_P01_A1,
'SI-P01-B1': SI_P01_B1,
'SI-P01-C1': SI_P01_C1,
'SI-P01-D1': SI_P01_D1,
'SI-P01-E1': SI_P01_E1,
'SI-P01-F1': SI_P01_F1,
'SI-P01-G1': SI_P01_G1,
'SI-P01-H1': SI_P01_H1,
'SI-P01-A2': SI_P01_A2,
'SI-P01-B2': SI_P01_B2,
'SI-P01-C2': SI_P01_C2,
'SI-P01-D2': SI_P01_D2,
'SI-P01-E2': SI_P01_E2,
'SI-P01-F2': SI_P01_F2,
'SI-P01-G2': SI_P01_G2,
'SI-P01-H2': SI_P01_H2,
'SI-P01-A3': SI_P01_A3,
'SI-P01-B3': SI_P01_B3,
'SI-P01-C3': SI_P01_C3,
'SI-P01-D3': SI_P01_D3,
'SI-P01-E3': SI_P01_E3,
'SI-P01-F3': SI_P01_F3,
'SI-P01-G3': SI_P01_G3,
'SI-P01-H3': SI_P01_H3,
'SI-P01-A4': SI_P01_A4,
'SI-P01-B4': SI_P01_B4,
'SI-P01-C4': SI_P01_C4,
'SI-P01-D4': SI_P01_D4,
'SI-P01-E4': SI_P01_E4,
'SI-P01-F4': SI_P01_F4,
'SI-P01-G4': SI_P01_G4,
'SI-P01-H4': SI_P01_H4,
'SI-P01-A5': SI_P01_A5,
'SI-P01-B5': SI_P01_B5,
'SI-P01-C5': SI_P01_C5,
'SI-P01-D5': SI_P01_D5,
'SI-P01-E5': SI_P01_E5,
'SI-P01-F5': SI_P01_F5,
'SI-P01-G5': SI_P01_G5,
'SI-P01-H5': SI_P01_H5,
'SI-P01-A6': SI_P01_A6,
'SI-P01-B6': SI_P01_B6,
'SI-P01-C6': SI_P01_C6,
'SI-P01-D6': SI_P01_D6,
'SI-P01-E6': SI_P01_E6,
'SI-P01-F6': SI_P01_F6,
'SI-P01-G6': SI_P01_G6,
'SI-P01-H6': SI_P01_H6,
'SI-P01-A7': SI_P01_A7,
'SI-P01-B7': SI_P01_B7,
'SI-P01-C7': SI_P01_C7,
'SI-P01-D7': SI_P01_D7,
'SI-P01-E7': SI_P01_E7,
'SI-P01-F7': SI_P01_F7,
'SI-P01-G7': SI_P01_G7,
'SI-P01-H7': SI_P01_H7,
'SI-P01-A8': SI_P01_A8,
'SI-P01-B8': SI_P01_B8,
'SI-P01-C8': SI_P01_C8,
'SI-P01-D8': SI_P01_D8,
'SI-P01-E8': SI_P01_E8,
'SI-P01-F8': SI_P01_F8,
'SI-P01-G8': SI_P01_G8,
'SI-P01-H8': SI_P01_H8,
'SI-P01-A9': SI_P01_A9,
'SI-P01-B9': SI_P01_B9,
'SI-P01-C9': SI_P01_C9,
'SI-P01-D9': SI_P01_D9,
'SI-P01-E9': SI_P01_E9,
'SI-P01-F9': SI_P01_F9,
'SI-P01-G9': SI_P01_G9,
'SI-P01-H9': SI_P01_H9,
'SI-P01-A10': SI_P01_A10,
'SI-P01-B10': SI_P01_B10,
'SI-P01-C10': SI_P01_C10,
'SI-P01-D10': SI_P01_D10,
'SI-P01-E10': SI_P01_E10,
'SI-P01-F10': SI_P01_F10,
'SI-P01-G10': SI_P01_G10,
'SI-P01-H10': SI_P01_H10,
'SI-P01-A11': SI_P01_A11,
'SI-P01-B11': SI_P01_B11,
'SI-P01-C11': SI_P01_C11,
'SI-P01-D11': SI_P01_D11,
'SI-P01-E11': SI_P01_E11,
'SI-P01-F11': SI_P01_F11,
'SI-P01-G11': SI_P01_G11,
'SI-P01-H11': SI_P01_H11,
'SI-P01-A12': SI_P01_A12,
'SI-P01-B12': SI_P01_B12,
'SI-P01-C12': SI_P01_C12,
'SI-P01-D12': SI_P01_D12,
'SI-P01-E12': SI_P01_E12,
'SI-P01-F12': SI_P01_F12,
'SI-P01-G12': SI_P01_G12,
'SI-P01-H12': SI_P01_H12,
'SI-P03-A1': SI_P03_A1,
'SI-P03-B1': SI_P03_B1,
'SI-P03-C1': SI_P03_C1,
'SI-P03-D1': SI_P03_D1,
'SI-P03-E1': SI_P03_E1,
'SI-P03-F1': SI_P03_F1,
'SI-P03-G1': SI_P03_G1,
'SI-P03-H1': SI_P03_H1,
'SI-P03-A2': SI_P03_A2,
'SI-P03-B2': SI_P03_B2,
'SI-P03-C2': SI_P03_C2,
'SI-P03-D2': SI_P03_D2,
'SI-P03-E2': SI_P03_E2,
'SI-P03-F2': SI_P03_F2,
'SI-P03-G2': SI_P03_G2,
'SI-P03-H2': SI_P03_H2,
'SI-P03-A3': SI_P03_A3,
'SI-P03-B3': SI_P03_B3,
'SI-P03-C3': SI_P03_C3,
'SI-P03-D3': SI_P03_D3,
'SI-P03-E3': SI_P03_E3,
'SI-P03-F3': SI_P03_F3,
'SI-P03-G3': SI_P03_G3,
'SI-P03-H3': SI_P03_H3,
'SI-P03-A4': SI_P03_A4,
'SI-P03-B4': SI_P03_B4,
'SI-P03-C4': SI_P03_C4,
'SI-P03-D4': SI_P03_D4,
'SI-P03-E4': SI_P03_E4,
'SI-P03-F4': SI_P03_F4,
'SI-P03-G4': SI_P03_G4,
'SI-P03-H4': SI_P03_H4,
'SI-P03-A5': SI_P03_A5,
'SI-P03-B5': SI_P03_B5,
'SI-P03-C5': SI_P03_C5,
'SI-P03-D5': SI_P03_D5,
'SI-P03-E5': SI_P03_E5,
'SI-P03-F5': SI_P03_F5,
'SI-P03-G5': SI_P03_G5,
'SI-P03-H5': SI_P03_H5,
'SI-P03-A6': SI_P03_A6,
'SI-P03-B6': SI_P03_B6,
'SI-P03-C6': SI_P03_C6,
'SI-P03-D6': SI_P03_D6,
'SI-P03-E6': SI_P03_E6,
'SI-P03-F6': SI_P03_F6,
'SI-P03-G6': SI_P03_G6,
'SI-P03-H6': SI_P03_H6,
'SI-P03-A7': SI_P03_A7,
'SI-P03-B7': SI_P03_B7,
'SI-P03-C7': SI_P03_C7,
'SI-P03-D7': SI_P03_D7,
'SI-P03-E7': SI_P03_E7,
'SI-P03-F7': SI_P03_F7,
'SI-P03-G7': SI_P03_G7,
'SI-P03-H7': SI_P03_H7,
'SI-P03-A8': SI_P03_A8,
'SI-P03-B8': SI_P03_B8,
'SI-P03-C8': SI_P03_C8,
'SI-P03-D8': SI_P03_D8,
'SI-P03-E8': SI_P03_E8,
'SI-P03-F8': SI_P03_F8,
'SI-P03-G8': SI_P03_G8,
'SI-P03-H8': SI_P03_H8,
'SI-P03-A9': SI_P03_A9,
'SI-P03-B9': SI_P03_B9,
'SI-P03-C9': SI_P03_C9,
'SI-P03-D9': SI_P03_D9,
'SI-P03-E9': SI_P03_E9,
'SI-P03-F9': SI_P03_F9,
'SI-P03-G9': SI_P03_G9,
'SI-P03-H9': SI_P03_H9,
'SI-P03-A10': SI_P03_A10,
'SI-P03-B10': SI_P03_B10,
'SI-P03-C10': SI_P03_C10,
'SI-P03-D10': SI_P03_D10,
'SI-P03-E10': SI_P03_E10,
'SI-P03-F10': SI_P03_F10,
'SI-P03-G10': SI_P03_G10,
'SI-P03-H10': SI_P03_H10,
'SI-P03-A11': SI_P03_A11,
'SI-P03-B11': SI_P03_B11,
'SI-P03-C11': SI_P03_C11,
'SI-P03-D11': SI_P03_D11,
'SI-P03-E11': SI_P03_E11,
'SI-P03-F11': SI_P03_F11,
'SI-P03-G11': SI_P03_G11,
'SI-P03-H11': SI_P03_H11,
'SI-P03-A12': SI_P03_A12,
'SI-P03-B12': SI_P03_B12,
'SI-P03-C12': SI_P03_C12,
'SI-P03-D12': SI_P03_D12,
'SI-P03-E12': SI_P03_E12,
'SI-P03-F12': SI_P03_F12,
'SI-P03-G12': SI_P03_G12,
'SI-P03-H12': SI_P03_H12,
'SI-3A-A1': SI_3A_A1,
'SI-3A-B1': SI_3A_B1,
'SI-3A-C1': SI_3A_C1,
'SI-3A-D1': SI_3A_D1,
'SI-3A-E1': SI_3A_E1,
'SI-3A-F1': SI_3A_F1,
'SI-3A-G1': SI_3A_G1,
'SI-3A-H1': SI_3A_H1,
'SI-3A-A2': SI_3A_A2,
'SI-3A-B2': SI_3A_B2,
'SI-3A-C2': SI_3A_C2,
'SI-3A-D2': SI_3A_D2,
'SI-3A-E2': SI_3A_E2,
'SI-3A-F2': SI_3A_F2,
'SI-3A-G2': SI_3A_G2,
'SI-3A-H2': SI_3A_H2,
'SI-3A-A3': SI_3A_A3,
'SI-3A-B3': SI_3A_B3,
'SI-3A-C3': SI_3A_C3,
'SI-3A-D3': SI_3A_D3,
'SI-3A-E3': SI_3A_E3,
'SI-3A-F3': SI_3A_F3,
'SI-3A-G3': SI_3A_G3,
'SI-3A-H3': SI_3A_H3,
'SI-3A-A4': SI_3A_A4,
'SI-3A-B4': SI_3A_B4,
'SI-3A-C4': SI_3A_C4,
'SI-3A-D4': SI_3A_D4,
'SI-3A-E4': SI_3A_E4,
'SI-3A-F4': SI_3A_F4,
'SI-3A-G4': SI_3A_G4,
'SI-3A-H4': SI_3A_H4,
'SI-3A-A5': SI_3A_A5,
'SI-3A-B5': SI_3A_B5,
'SI-3A-C5': SI_3A_C5,
'SI-3A-D5': SI_3A_D5,
'SI-3A-E5': SI_3A_E5,
'SI-3A-F5': SI_3A_F5,
'SI-3A-G5': SI_3A_G5,
'SI-3A-H5': SI_3A_H5,
'SI-3A-A6': SI_3A_A6,
'SI-3A-B6': SI_3A_B6,
'SI-3A-C6': SI_3A_C6,
'SI-3A-D6': SI_3A_D6,
'SI-3A-E6': SI_3A_E6,
'SI-3A-F6': SI_3A_F6,
'SI-3A-G6': SI_3A_G6,
'SI-3A-H6': SI_3A_H6,
'SI-3A-A7': SI_3A_A7,
'SI-3A-B7': SI_3A_B7,
'SI-3A-C7': SI_3A_C7,
'SI-3A-D7': SI_3A_D7,
'SI-3A-E7': SI_3A_E7,
'SI-3A-F7': SI_3A_F7,
'SI-3A-G7': SI_3A_G7,
'SI-3A-H7': SI_3A_H7,
'SI-3A-A8': SI_3A_A8,
'SI-3A-B8': SI_3A_B8,
'SI-3A-C8': SI_3A_C8,
'SI-3A-D8': SI_3A_D8,
'SI-3A-E8': SI_3A_E8,
'SI-3A-F8': SI_3A_F8,
'SI-3A-G8': SI_3A_G8,
'SI-3A-H8': SI_3A_H8,
'SI-3A-A9': SI_3A_A9,
'SI-3A-B9': SI_3A_B9,
'SI-3A-C9': SI_3A_C9,
'SI-3A-D9': SI_3A_D9,
'SI-3A-E9': SI_3A_E9,
'SI-3A-F9': SI_3A_F9,
'SI-3A-G9': SI_3A_G9,
'SI-3A-H9': SI_3A_H9,
'SI-3A-A10': SI_3A_A10,
'SI-3A-B10': SI_3A_B10,
'SI-3A-C10': SI_3A_C10,
'SI-3A-D10': SI_3A_D10,
'SI-3A-E10': SI_3A_E10,
'SI-3A-F10': SI_3A_F10,
'SI-3A-G10': SI_3A_G10,
'SI-3A-H10': SI_3A_H10,
'SI-3A-A11': SI_3A_A11,
'SI-3A-B11': SI_3A_B11,
'SI-3A-C11': SI_3A_C11,
'SI-3A-D11': SI_3A_D11,
'SI-3A-E11': SI_3A_E11,
'SI-3A-F11': SI_3A_F11,
'SI-3A-G11': SI_3A_G11,
'SI-3A-H11': SI_3A_H11,
'SI-3A-A12': SI_3A_A12,
'SI-3A-B12': SI_3A_B12,
'SI-3A-C12': SI_3A_C12,
'SI-3A-D12': SI_3A_D12,
'SI-3A-E12': SI_3A_E12,
'SI-3A-F12': SI_3A_F12,
'SI-3A-G12': SI_3A_G12,
'SI-3A-H12': SI_3A_H12,
'SI-GA-A1': SI_GA_A1,
'SI-GA-B1': SI_GA_B1,
'SI-GA-C1': SI_GA_C1,
'SI-GA-D1': SI_GA_D1,
'SI-GA-E1': SI_GA_E1,
'SI-GA-F1': SI_GA_F1,
'SI-GA-G1': SI_GA_G1,
'SI-GA-H1': SI_GA_H1,
'SI-GA-A2': SI_GA_A2,
'SI-GA-B2': SI_GA_B2,
'SI-GA-C2': SI_GA_C2,
'SI-GA-D2': SI_GA_D2,
'SI-GA-E2': SI_GA_E2,
'SI-GA-F2': SI_GA_F2,
'SI-GA-G2': SI_GA_G2,
'SI-GA-H2': SI_GA_H2,
'SI-GA-A3': SI_GA_A3,
'SI-GA-B3': SI_GA_B3,
'SI-GA-C3': SI_GA_C3,
'SI-GA-D3': SI_GA_D3,
'SI-GA-E3': SI_GA_E3,
'SI-GA-F3': SI_GA_F3,
'SI-GA-G3': SI_GA_G3,
'SI-GA-H3': SI_GA_H3,
'SI-GA-A4': SI_GA_A4,
'SI-GA-B4': SI_GA_B4,
'SI-GA-C4': SI_GA_C4,
'SI-GA-D4': SI_GA_D4,
'SI-GA-E4': SI_GA_E4,
'SI-GA-F4': SI_GA_F4,
'SI-GA-G4': SI_GA_G4,
'SI-GA-H4': SI_GA_H4,
'SI-GA-A5': SI_GA_A5,
'SI-GA-B5': SI_GA_B5,
'SI-GA-C5': SI_GA_C5,
'SI-GA-D5': SI_GA_D5,
'SI-GA-E5': SI_GA_E5,
'SI-GA-F5': SI_GA_F5,
'SI-GA-G5': SI_GA_G5,
'SI-GA-H5': SI_GA_H5,
'SI-GA-A6': SI_GA_A6,
'SI-GA-B6': SI_GA_B6,
'SI-GA-C6': SI_GA_C6,
'SI-GA-D6': SI_GA_D6,
'SI-GA-E6': SI_GA_E6,
'SI-GA-F6': SI_GA_F6,
'SI-GA-G6': SI_GA_G6,
'SI-GA-H6': SI_GA_H6,
'SI-GA-A7': SI_GA_A7,
'SI-GA-B7': SI_GA_B7,
'SI-GA-C7': SI_GA_C7,
'SI-GA-D7': SI_GA_D7,
'SI-GA-E7': SI_GA_E7,
'SI-GA-F7': SI_GA_F7,
'SI-GA-G7': SI_GA_G7,
'SI-GA-H7': SI_GA_H7,
'SI-GA-A8': SI_GA_A8,
'SI-GA-B8': SI_GA_B8,
'SI-GA-C8': SI_GA_C8,
'SI-GA-D8': SI_GA_D8,
'SI-GA-E8': SI_GA_E8,
'SI-GA-F8': SI_GA_F8,
'SI-GA-G8': SI_GA_G8,
'SI-GA-H8': SI_GA_H8,
'SI-GA-A9': SI_GA_A9,
'SI-GA-B9': SI_GA_B9,
'SI-GA-C9': SI_GA_C9,
'SI-GA-D9': SI_GA_D9,
'SI-GA-E9': SI_GA_E9,
'SI-GA-F9': SI_GA_F9,
'SI-GA-G9': SI_GA_G9,
'SI-GA-H9': SI_GA_H9,
'SI-GA-A10': SI_GA_A10,
'SI-GA-B10': SI_GA_B10,
'SI-GA-C10': SI_GA_C10,
'SI-GA-D10': SI_GA_D10,
'SI-GA-E10': SI_GA_E10,
'SI-GA-F10': SI_GA_F10,
'SI-GA-G10': SI_GA_G10,
'SI-GA-H10': SI_GA_H10,
'SI-GA-A11': SI_GA_A11,
'SI-GA-B11': SI_GA_B11,
'SI-GA-C11': SI_GA_C11,
'SI-GA-D11': SI_GA_D11,
'SI-GA-E11': SI_GA_E11,
'SI-GA-F11': SI_GA_F11,
'SI-GA-G11': SI_GA_G11,
'SI-GA-H11': SI_GA_H11,
'SI-GA-A12': SI_GA_A12,
'SI-GA-B12': SI_GA_B12,
'SI-GA-C12': SI_GA_C12,
'SI-GA-D12': SI_GA_D12,
'SI-GA-E12': SI_GA_E12,
'SI-GA-F12': SI_GA_F12,
'SI-GA-G12': SI_GA_G12,
'SI-GA-H12': SI_GA_H12,
# Agora/VDJ-FB
'SI-NA-A1': SI_NA_A1,
'SI-NA-B1': SI_NA_B1,
'SI-NA-C1': SI_NA_C1,
'SI-NA-D1': SI_NA_D1,
'SI-NA-E1': SI_NA_E1,
'SI-NA-F1': SI_NA_F1,
'SI-NA-G1': SI_NA_G1,
'SI-NA-H1': SI_NA_H1,
'SI-NA-A2': SI_NA_A2,
'SI-NA-B2': SI_NA_B2,
'SI-NA-C2': SI_NA_C2,
'SI-NA-D2': SI_NA_D2,
'SI-NA-E2': SI_NA_E2,
'SI-NA-F2': SI_NA_F2,
'SI-NA-G2': SI_NA_G2,
'SI-NA-H2': SI_NA_H2,
'SI-NA-A3': SI_NA_A3,
'SI-NA-B3': SI_NA_B3,
'SI-NA-C3': SI_NA_C3,
'SI-NA-D3': SI_NA_D3,
'SI-NA-E3': SI_NA_E3,
'SI-NA-F3': SI_NA_F3,
'SI-NA-G3': SI_NA_G3,
'SI-NA-H3': SI_NA_H3,
'SI-NA-A4': SI_NA_A4,
'SI-NA-B4': SI_NA_B4,
'SI-NA-C4': SI_NA_C4,
'SI-NA-D4': SI_NA_D4,
'SI-NA-E4': SI_NA_E4,
'SI-NA-F4': SI_NA_F4,
'SI-NA-G4': SI_NA_G4,
'SI-NA-H4': SI_NA_H4,
'SI-NA-A5': SI_NA_A5,
'SI-NA-B5': SI_NA_B5,
'SI-NA-C5': SI_NA_C5,
'SI-NA-D5': SI_NA_D5,
'SI-NA-E5': SI_NA_E5,
'SI-NA-F5': SI_NA_F5,
'SI-NA-G5': SI_NA_G5,
'SI-NA-H5': SI_NA_H5,
'SI-NA-A6': SI_NA_A6,
'SI-NA-B6': SI_NA_B6,
'SI-NA-C6': SI_NA_C6,
'SI-NA-D6': SI_NA_D6,
'SI-NA-E6': SI_NA_E6,
'SI-NA-F6': SI_NA_F6,
'SI-NA-G6': SI_NA_G6,
'SI-NA-H6': SI_NA_H6,
'SI-NA-A7': SI_NA_A7,
'SI-NA-B7': SI_NA_B7,
'SI-NA-C7': SI_NA_C7,
'SI-NA-D7': SI_NA_D7,
'SI-NA-E7': SI_NA_E7,
'SI-NA-F7': SI_NA_F7,
'SI-NA-G7': SI_NA_G7,
'SI-NA-H7': SI_NA_H7,
'SI-NA-A8': SI_NA_A8,
'SI-NA-B8': SI_NA_B8,
'SI-NA-C8': SI_NA_C8,
'SI-NA-D8': SI_NA_D8,
'SI-NA-E8': SI_NA_E8,
'SI-NA-F8': SI_NA_F8,
'SI-NA-G8': SI_NA_G8,
'SI-NA-H8': SI_NA_H8,
'SI-NA-A9': SI_NA_A9,
'SI-NA-B9': SI_NA_B9,
'SI-NA-C9': SI_NA_C9,
'SI-NA-D9': SI_NA_D9,
'SI-NA-E9': SI_NA_E9,
'SI-NA-F9': SI_NA_F9,
'SI-NA-G9': SI_NA_G9,
'SI-NA-H9': SI_NA_H9,
'SI-NA-A10': SI_NA_A10,
'SI-NA-B10': SI_NA_B10,
'SI-NA-C10': SI_NA_C10,
'SI-NA-D10': SI_NA_D10,
'SI-NA-E10': SI_NA_E10,
'SI-NA-F10': SI_NA_F10,
'SI-NA-G10': SI_NA_G10,
'SI-NA-H10': SI_NA_H10,
'SI-NA-A11': SI_NA_A11,
'SI-NA-B11': SI_NA_B11,
'SI-NA-C11': SI_NA_C11,
'SI-NA-D11': SI_NA_D11,
'SI-NA-E11': SI_NA_E11,
'SI-NA-F11': SI_NA_F11,
'SI-NA-G11': SI_NA_G11,
'SI-NA-H11': SI_NA_H11,
'SI-NA-A12': SI_NA_A12,
'SI-NA-B12': SI_NA_B12,
'SI-NA-C12': SI_NA_C12,
'SI-NA-D12': SI_NA_D12,
'SI-NA-E12': SI_NA_E12,
'SI-NA-F12': SI_NA_F12,
'SI-NA-G12': SI_NA_G12,
'SI-NA-H12': SI_NA_H12,
# GemCode Part numbers
'220027': SI_001,
'220028': SI_002,
'220029': SI_003,
'220030': SI_004,
'220031': SI_005,
'220032': SI_006,
'220033': SI_007,
'220034': SI_008,
# WGS+ Tube labels
'SI-T2-1': SI_T2_1,
'SI-T2-2': SI_T2_2,
'SI-T2-3': SI_T2_3,
'SI-T2-4': SI_T2_4,
'SI-T2-5': SI_T2_5,
'SI-T2-6': SI_T2_6,
'SI-T2-7': SI_T2_7,
'SI-T2-8': SI_T2_8,
# WGS+ Plate labels
'SI-P2-A1': SI_P2_A1,
'SI-P2-A2': SI_P2_A2,
'SI-P2-A3': SI_P2_A3,
'SI-P2-A4': SI_P2_A4,
'SI-P2-A5': SI_P2_A5,
'SI-P2-A6': SI_P2_A6,
'SI-P2-A7': SI_P2_A7,
'SI-P2-A8': SI_P2_A8,
'SI-P2-A9': SI_P2_A9,
'SI-P2-A10': SI_P2_A10,
'SI-P2-A11': SI_P2_A11,
'SI-P2-A12': SI_P2_A12,
'SI-P2-B1': SI_P2_B1,
'SI-P2-B2': SI_P2_B2,
'SI-P2-B3': SI_P2_B3,
'SI-P2-B4': SI_P2_B4,
'SI-P2-B5': SI_P2_B5,
'SI-P2-B6': SI_P2_B6,
'SI-P2-B7': SI_P2_B7,
'SI-P2-B8': SI_P2_B8,
'SI-P2-B9': SI_P2_B9,
'SI-P2-B10': SI_P2_B10,
'SI-P2-B11': SI_P2_B11,
'SI-P2-B12': SI_P2_B12,
'SI-P2-C1': SI_P2_C1,
'SI-P2-C2': SI_P2_C2,
'SI-P2-C3': SI_P2_C3,
'SI-P2-C4': SI_P2_C4,
'SI-P2-C5': SI_P2_C5,
'SI-P2-C6': SI_P2_C6,
'SI-P2-C7': SI_P2_C7,
'SI-P2-C8': SI_P2_C8,
'SI-P2-C9': SI_P2_C9,
'SI-P2-C10': SI_P2_C10,
'SI-P2-C11': SI_P2_C11,
'SI-P2-C12': SI_P2_C12,
'SI-P2-D1': SI_P2_D1,
'SI-P2-D2': SI_P2_D2,
'SI-P2-D3': SI_P2_D3,
'SI-P2-D4': SI_P2_D4,
'SI-P2-D5': SI_P2_D5,
'SI-P2-D6': SI_P2_D6,
'SI-P2-D7': SI_P2_D7,
'SI-P2-D8': SI_P2_D8,
'SI-P2-D9': SI_P2_D9,
'SI-P2-D10': SI_P2_D10,
'SI-P2-D11': SI_P2_D11,
'SI-P2-D12': SI_P2_D12,
'SI-P2-E1': SI_P2_E1,
'SI-P2-E2': SI_P2_E2,
'SI-P2-E3': SI_P2_E3,
'SI-P2-E4': SI_P2_E4,
'SI-P2-E5': SI_P2_E5,
'SI-P2-E6': SI_P2_E6,
'SI-P2-E7': SI_P2_E7,
'SI-P2-E8': SI_P2_E8,
'SI-P2-E9': SI_P2_E9,
'SI-P2-E10': SI_P2_E10,
'SI-P2-E11': SI_P2_E11,
'SI-P2-E12': SI_P2_E12,
'SI-P2-F1': SI_P2_F1,
'SI-P2-F2': SI_P2_F2,
'SI-P2-F3': SI_P2_F3,
'SI-P2-F4': SI_P2_F4,
'SI-P2-F5': SI_P2_F5,
'SI-P2-F6': SI_P2_F6,
'SI-P2-F7': SI_P2_F7,
'SI-P2-F8': SI_P2_F8,
'SI-P2-F9': SI_P2_F9,
'SI-P2-F10': SI_P2_F10,
'SI-P2-F11': SI_P2_F11,
'SI-P2-F12': SI_P2_F12,
'SI-P2-G1': SI_P2_G1,
'SI-P2-G2': SI_P2_G2,
'SI-P2-G3': SI_P2_G3,
'SI-P2-G4': SI_P2_G4,
'SI-P2-G5': SI_P2_G5,
'SI-P2-G6': SI_P2_G6,
'SI-P2-G7': SI_P2_G7,
'SI-P2-G8': SI_P2_G8,
'SI-P2-G9': SI_P2_G9,
'SI-P2-G10': SI_P2_G10,
'SI-P2-G11': SI_P2_G11,
'SI-P2-G12': SI_P2_G12,
'SI-P2-H1': SI_P2_H1,
'SI-P2-H2': SI_P2_H2,
'SI-P2-H3': SI_P2_H3,
'SI-P2-H4': SI_P2_H4,
'SI-P2-H5': SI_P2_H5,
'SI-P2-H6': SI_P2_H6,
'SI-P2-H7': SI_P2_H7,
'SI-P2-H8': SI_P2_H8,
'SI-P2-H9': SI_P2_H9,
'SI-P2-H10': SI_P2_H10,
'SI-P2-H11': SI_P2_H11,
'SI-P2-H12': SI_P2_H12,
# WGS+ Plate Label alternate
'SI-P02-A1': SI_P2_A1,
'SI-P02-A2': SI_P2_A2,
'SI-P02-A3': SI_P2_A3,
'SI-P02-A4': SI_P2_A4,
'SI-P02-A5': SI_P2_A5,
'SI-P02-A6': SI_P2_A6,
'SI-P02-A7': SI_P2_A7,
'SI-P02-A8': SI_P2_A8,
'SI-P02-A9': SI_P2_A9,
'SI-P02-A10': SI_P2_A10,
'SI-P02-A11': SI_P2_A11,
'SI-P02-A12': SI_P2_A12,
'SI-P02-B1': SI_P2_B1,
'SI-P02-B2': SI_P2_B2,
'SI-P02-B3': SI_P2_B3,
'SI-P02-B4': SI_P2_B4,
'SI-P02-B5': SI_P2_B5,
'SI-P02-B6': SI_P2_B6,
'SI-P02-B7': SI_P2_B7,
'SI-P02-B8': SI_P2_B8,
'SI-P02-B9': SI_P2_B9,
'SI-P02-B10': SI_P2_B10,
'SI-P02-B11': SI_P2_B11,
'SI-P02-B12': SI_P2_B12,
'SI-P02-C1': SI_P2_C1,
'SI-P02-C2': SI_P2_C2,
'SI-P02-C3': SI_P2_C3,
'SI-P02-C4': SI_P2_C4,
'SI-P02-C5': SI_P2_C5,
'SI-P02-C6': SI_P2_C6,
'SI-P02-C7': SI_P2_C7,
'SI-P02-C8': SI_P2_C8,
'SI-P02-C9': SI_P2_C9,
'SI-P02-C10': SI_P2_C10,
'SI-P02-C11': SI_P2_C11,
'SI-P02-C12': SI_P2_C12,
'SI-P02-D1': SI_P2_D1,
'SI-P02-D2': SI_P2_D2,
'SI-P02-D3': SI_P2_D3,
'SI-P02-D4': SI_P2_D4,
'SI-P02-D5': SI_P2_D5,
'SI-P02-D6': SI_P2_D6,
'SI-P02-D7': SI_P2_D7,
'SI-P02-D8': SI_P2_D8,
'SI-P02-D9': SI_P2_D9,
'SI-P02-D10': SI_P2_D10,
'SI-P02-D11': SI_P2_D11,
'SI-P02-D12': SI_P2_D12,
'SI-P02-E1': SI_P2_E1,
'SI-P02-E2': SI_P2_E2,
'SI-P02-E3': SI_P2_E3,
'SI-P02-E4': SI_P2_E4,
'SI-P02-E5': SI_P2_E5,
'SI-P02-E6': SI_P2_E6,
'SI-P02-E7': SI_P2_E7,
'SI-P02-E8': SI_P2_E8,
'SI-P02-E9': SI_P2_E9,
'SI-P02-E10': SI_P2_E10,
'SI-P02-E11': SI_P2_E11,
'SI-P02-E12': SI_P2_E12,
'SI-P02-F1': SI_P2_F1,
'SI-P02-F2': SI_P2_F2,
'SI-P02-F3': SI_P2_F3,
'SI-P02-F4': SI_P2_F4,
'SI-P02-F5': SI_P2_F5,
'SI-P02-F6': SI_P2_F6,
'SI-P02-F7': SI_P2_F7,
'SI-P02-F8': SI_P2_F8,
'SI-P02-F9': SI_P2_F9,
'SI-P02-F10': SI_P2_F10,
'SI-P02-F11': SI_P2_F11,
'SI-P02-F12': SI_P2_F12,
'SI-P02-G1': SI_P2_G1,
'SI-P02-G2': SI_P2_G2,
'SI-P02-G3': SI_P2_G3,
'SI-P02-G4': SI_P2_G4,
'SI-P02-G5': SI_P2_G5,
'SI-P02-G6': SI_P2_G6,
'SI-P02-G7': SI_P2_G7,
'SI-P02-G8': SI_P2_G8,
'SI-P02-G9': SI_P2_G9,
'SI-P02-G10': SI_P2_G10,
'SI-P02-G11': SI_P2_G11,
'SI-P02-G12': SI_P2_G12,
'SI-P02-H1': SI_P2_H1,
'SI-P02-H2': SI_P2_H2,
'SI-P02-H3': SI_P2_H3,
'SI-P02-H4': SI_P2_H4,
'SI-P02-H5': SI_P2_H5,
'SI-P02-H6': SI_P2_H6,
'SI-P02-H7': SI_P2_H7,
'SI-P02-H8': SI_P2_H8,
'SI-P02-H9': SI_P2_H9,
'SI-P02-H10': SI_P2_H10,
'SI-P02-H11': SI_P2_H11,
'SI-P02-H12': SI_P2_H12,
}
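# Illustrative sketch (an assumed helper, not part of the original module):
# reverse lookup from an observed 8bp index read to the labels whose oligo
# set contains it (several labels can alias the same set, hence a list).
def _example_find_sample_index(seq):
    """Return sorted labels whose oligo set contains the index read `seq`."""
    return sorted(label for label, oligos in SAMPLE_INDEX_MAP.items()
                  if seq in oligos)
# e.g. _example_find_sample_index('GGTTTACT') -> ['SI-GA-A1', 'SI-P02-A1', 'SI-P2-A1']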
# Chromium lot-specific oligos
CHROMIUM_LOT1_PART_A = ['AGAGCGA', 'CGATTGA', 'TAGACCA', 'AAATGCC', 'CTTTGCG', 'TCAGCAA', 'CTCCTAG', 'ATTATCC']
CHROMIUM_LOT2_PART_A = ['GACACTA', 'CCCTCTC', 'ATCGCGG', 'CTGGCAG', 'CCAGCTT', 'CATAGCA', 'CGTGTTC', 'GCACCAG']
CHROMIUM_LOT3_PART_A = ['ATGTGAC', 'GACGTCG', 'ACTGGCG', 'TGGCAAT', 'GAGGGTA', 'GTTTCGC', 'CAAGTGT', 'TTGAAGC']
CHROMIUM_LOT4_PART_A = ['CGATCCT', 'TGTTGCC', 'ACCTATT', 'ACAACTG', 'CTGTGTC', 'CTGGAAT', 'CAGAGTT', 'GGGCTGT']
CHROMIUM_LOT5_PART_A = ['TAGCTCC', 'CAATTTC', 'GCTCGAG', 'GAAGGCA', 'CGGCATG', 'TATTCCA', 'TCTCTGG', 'AGGTACT']
CHROMIUM_LOT6_PART_A = ['ACTTGCC', 'GTGAGTT', 'GTTGTCC', 'CATAACG', 'TCGTAAG', 'TTATCCA', 'GTGGAGA', 'TCCTGCA']
CHROMIUM_LOT7_PART_A = ['TAAGCCA', 'TCGGTGG', 'AAGGTAA', 'GGAACAG', 'GTGGAAG', 'TTAGACG', 'ATCCTAT', 'TTCCGTG']
CHROMIUM_LOT8_PART_A = ['GGTTTAG', 'CGTATAG', 'ATAGGCT', 'CTCTCGA', 'GTCTTAT', 'GATTGCA', 'TGAGCTA', 'ACGCGTG']
CHROMIUM_LOT9_PART_A = ['CGACACG', 'TCTCGTG', 'TGATGAC', 'TGCGTAA', 'TACCCTG', 'AGGTGCC', 'CTTGTGC', 'GCATGGC']
CHROMIUM_LOT10_PART_A = ['CAGCACG', 'CATGATG', 'ATCAACG', 'GATAAGA', 'CTGGTTC', 'CGATTCC', 'AGGTGAG', 'GGCCTGA']
CHROMIUM_LOT11_PART_A = ['ACAGTTG', 'TAAGCAC', 'ATCTTTG', 'TCTTGCG', 'TACATGG', 'CAAGGTT', 'AGGCTGC', 'GGTCGTG']
CHROMIUM_LOT12_PART_A = ['CCATTAT', 'GTTGCGG', 'AGGGTAG', 'GCCCAAG', 'TGTGCCT', 'ATTCTTG', 'GGTGCCA', 'GTATAGC']
CHROMIUM_LOT13_PART_A = ['GGCATCG', 'GACTGAT', 'TGGTGTA', 'TCCGTTG', 'CCTTCAG', 'CAGGCCA', 'GCACCGA', 'AGATCCA']
CHROMIUM_LOT_MAP = {
'Chromium Lot 1': CHROMIUM_LOT1_PART_A,
'Chromium Lot 2': CHROMIUM_LOT2_PART_A,
'Chromium Lot 3': CHROMIUM_LOT3_PART_A,
'Chromium Lot 4': CHROMIUM_LOT4_PART_A,
'Chromium Lot 5': CHROMIUM_LOT5_PART_A,
'Chromium Lot 6': CHROMIUM_LOT6_PART_A,
'Chromium Lot 7': CHROMIUM_LOT7_PART_A,
'Chromium Lot 8': CHROMIUM_LOT8_PART_A,
'Chromium Lot 9': CHROMIUM_LOT9_PART_A,
'Chromium Lot 10': CHROMIUM_LOT10_PART_A,
'Chromium Lot 11': CHROMIUM_LOT11_PART_A,
'Chromium Lot 12': CHROMIUM_LOT12_PART_A,
'Chromium Lot 13': CHROMIUM_LOT13_PART_A,
}
# GemCode lot-specific oligos
# note: lots 1-15 all use the same part As
GEMCODE_LOT1_PART_A = ['GGGTGA', 'TTCATC', 'CACAAC', 'GAAGAT', 'CAGCAT', 'CGTCAA', 'GAAACA', 'TGTTTC']
GEMCODE_LOT16_PART_A = ['CAAGTC', 'ACAAAG', 'CTGGAT', 'TTGTCT', 'AGCCTA', 'GGGAAC', 'TTCCTA', 'CCGTAA']
GEMCODE_LOT17_PART_A = ['AGTCCA', 'CAGGAG', 'CAATGC', 'CAATCG', 'AACAGA', 'TTACTC', 'ACTGAC', 'TAAGCC']
GEMCODE_LOT18_PART_A = ['GCATGT', 'CCAACA', 'TCGGTA', 'ATCGTG', 'ATTCTC', 'CGTTAG', 'TTCACT', 'GGTTTG']
GEMCODE_LOT19_PART_A = ['CTTTCA', 'TTGTTC', 'TAGCCA', 'GCGTAT', 'CGTACA', 'CCTTCG', 'CACACA', 'TACTTC']
GEMCODE_LOT20_PART_A = ['CTTCAT', 'ATTCCT', 'GTCTCC', 'CAGGGA', 'ATCCGA', 'CGAATC', 'AAACCC', 'CGCTAA']
GEMCODE_LOT21_PART_A = ['CAGATC', 'AATCCG', 'TACGTG', 'GAACAA', 'AGAGCG', 'CCAGAT', 'CGCTTC', 'TTATCC']
GEMCODE_LOT_MAP = {
'GemCode Lots 1-15': GEMCODE_LOT1_PART_A,
'GemCode Lot 16': GEMCODE_LOT16_PART_A,
'GemCode Lot 17': GEMCODE_LOT17_PART_A,
'GemCode Lot 18': GEMCODE_LOT18_PART_A,
'GemCode Lot 19': GEMCODE_LOT19_PART_A,
'GemCode Lot 20': GEMCODE_LOT20_PART_A,
'GemCode Lot 21': GEMCODE_LOT21_PART_A,
}
# if a whitelist isn't in this map, then assume it doesn't contain the alts and don't try to do lot detection
WHITELIST_TO_LOT_MAP = {
"884K-november-2015": GEMCODE_LOT_MAP,
"4M-with-alts-february-2016": CHROMIUM_LOT_MAP,
}
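# Illustrative sketch (not part of the original module): one way these maps
# could drive lot detection. `whitelist_name` and `part_a_seq` are
# hypothetical caller-supplied values.
def _example_detect_lot(whitelist_name, part_a_seq):
    lot_map = WHITELIST_TO_LOT_MAP.get(whitelist_name)
    if lot_map is None:
        # whitelist carries no alternate barcodes; skip lot detection
        return None
    for lot_name, part_a_oligos in lot_map.items():
        if part_a_seq in part_a_oligos:
            return lot_name
    return None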
|
{"hexsha": "d2f99762437f5c1e076e47c7ba3a173003bec799", "size": 55723, "ext": "py", "lang": "Python", "max_stars_repo_path": "emptydrops/constants.py", "max_stars_repo_name": "nh3/emptydrops", "max_stars_repo_head_hexsha": "02448048371ddc4e20a691b7a21b9222bcfac67d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-04T18:24:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T10:21:19.000Z", "max_issues_repo_path": "emptydrops/constants.py", "max_issues_repo_name": "nh3/emptydrops", "max_issues_repo_head_hexsha": "02448048371ddc4e20a691b7a21b9222bcfac67d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-08-12T17:22:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T22:07:35.000Z", "max_forks_repo_path": "emptydrops/constants.py", "max_forks_repo_name": "nh3/emptydrops", "max_forks_repo_head_hexsha": "02448048371ddc4e20a691b7a21b9222bcfac67d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3789855072, "max_line_length": 166, "alphanum_fraction": 0.6339572528, "include": true, "reason": "import numpy,import scipy", "num_tokens": 27916}
|
import os, json
import config
import numpy as np
import pandas as pd
from datetime import datetime
from pathlib import Path
class Postor(config.Config):
"""
Create a new postor
"""
def __init__(self, hub_path):
super(Postor, self).__init__()
self.hub_path = hub_path
def merger(self, new_path, out_name = 'merged.json', replace = False):
        # switch to using the dict.update() method!!
with open(self.hub_path, "r", encoding='utf-8') as f:
aqi_hub = json.load(f)
with open(new_path, "r", encoding='utf-8') as f:
aqi_new = json.load(f)
        for prov_name, prov_data in aqi_new.items():
            print(prov_name)
            for city_name, city_data in prov_data.items():
                print(city_name)
                if city_name not in aqi_hub[prov_name]:
                    aqi_hub[prov_name][city_name] = {}
                for mon_name, mon_data in city_data.items():
                    print(mon_name)
                    # only overwrite an existing month when replace=True;
                    # months missing from the hub are always added
                    if mon_name not in aqi_hub[prov_name][city_name] or replace:
                        aqi_hub[prov_name][city_name][mon_name] = mon_data
with open(self.folder_json.joinpath(out_name), "w", encoding='utf-8') as f:
json.dump(aqi_hub, f, ensure_ascii=False, indent=4)
def batch_json2csv(self, prov_name = None, city_name = None):
with open(self.hub_path, 'r', encoding='utf-8') as f:
histaqi = json.load(f)
aqi_dfs = self.retrieve_data(histaqi, prov_name = prov_name, city_name = city_name)
for prov_name, prov_data in aqi_dfs.items():
prov_path = self.folder_csv.joinpath(prov_name)
if not prov_path.exists():
os.makedirs(prov_path)
for city_name, city_data in prov_data.items():
csv_name = prov_path.joinpath(city_name + '.csv')
if not os.path.exists(csv_name.as_posix()): city_data.to_csv(csv_name)
print(f'{prov_name}: {city_name} is successfully transferred to csv.')
def retrieve_data(self, histaqi, prov_name = None, city_name = None):
try:
if city_name:
print("fetching " + city_name)
city_data = histaqi[prov_name][city_name]
results = self.fetch_data(city_data)
else:
print("city name is not specified, fetching " + prov_name)
results = {}
for city_name, city_data in histaqi[prov_name].items():
print(city_name)
result = self.fetch_data(city_data)
results[city_name] = result
except Exception as identifier:
print(identifier)
print("no name is specified, iterating...")
results = {}
for prov_name, prov_data in histaqi.items():
print(prov_name)
results[prov_name] = {}
for city_name, city_data in prov_data.items():
print(city_name)
result = self.fetch_data(city_data)
results[prov_name][city_name] = result
print("iteration is done")
else:
print("retrieval is done.")
finally:
return results
def fetch_data(self, city_data):
result = []
for val in city_data.values():
result.extend(val)
result = np.array(result).reshape(-1, 9)
result = pd.DataFrame(result, columns = ['Date', 'aqi', 'aqi-rank', \
'pm25', 'pm10', 'so2', 'no2', 'co', 'o3'])
        result['Date'] = pd.to_datetime(result['Date'])
        result.set_index("Date", inplace=True)
        # np.float is deprecated; the builtin float gives the same dtype
        result = pd.DataFrame(result, dtype=float).sort_index()
return result
def eliminate_spaces(self):
        '''
        Remove whitespace from the city_name keys
        '''
with open(self.hub_path, 'r', encoding='utf-8') as f:
histaqi = json.load(f)
for prov_name, prov_data in histaqi.items():
print(prov_name)
            for city_name in list(prov_data.keys()):
                print(city_name)
                # iterate over a copied key list so the dict can be mutated safely
                histaqi[prov_name][city_name.strip()] = histaqi[prov_name].pop(city_name)
        with open(self.hub_path, "w", encoding='utf-8') as f:
            json.dump(histaqi, f, ensure_ascii=False, indent=4)
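# Usage sketch (illustrative; the paths below are hypothetical, and
# folder_json/folder_csv come from the inherited config.Config):
#
#     postor = Postor('data/aqi_hub.json')
#     postor.merger('data/aqi_new.json', out_name='merged.json', replace=True)
#     postor.batch_json2csv(prov_name='Beijing')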
|
{"hexsha": "9b7ad98e41280dacbdff14e258b300919af0cdc2", "size": 3748, "ext": "py", "lang": "Python", "max_stars_repo_path": "agrspy/envspy-histaqi/codes/postproc.py", "max_stars_repo_name": "soonyenju/agrspy", "max_stars_repo_head_hexsha": "1c5d11d48933f7392d2246fda487256d5cd5b239", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-10T07:00:25.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-10T07:15:00.000Z", "max_issues_repo_path": "agrspy/envspy-histaqi/codes/postproc.py", "max_issues_repo_name": "soonyenju/arspy", "max_issues_repo_head_hexsha": "1c5d11d48933f7392d2246fda487256d5cd5b239", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agrspy/envspy-histaqi/codes/postproc.py", "max_forks_repo_name": "soonyenju/arspy", "max_forks_repo_head_hexsha": "1c5d11d48933f7392d2246fda487256d5cd5b239", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0341880342, "max_line_length": 88, "alphanum_fraction": 0.6883671291, "include": true, "reason": "import numpy", "num_tokens": 1058}
|
"""STOCHASTIC ROSS plotting module.
This module returns graphs for each type of analyses in st_rotor_assembly.py.
"""
import bokeh.palettes as bp
import matplotlib.pyplot as plt
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from matplotlib import cm
# set bokeh palette of colors
colors1 = bp.Category10[10]
colors2 = bp.Category20c[20]
class ST_CampbellResults:
"""Store stochastic results and provide plots for Campbell Diagram.
It's possible to visualize multiples harmonics in a single plot to check
other speeds which also excite a specific natural frequency.
    Two options for plotting are available: Matplotlib and Bokeh. The user
    chooses between them using the attribute plot_type. The default is Bokeh.
Parameters
----------
speed_range : array
Array with the speed range in rad/s.
wd : array
Array with the damped natural frequencies
log_dec : array
Array with the Logarithmic decrement
Returns
-------
ax : matplotlib axes
Returns the matplotlib axes object with the plot
if plot_type == "matplotlib"
bk_ax : bokeh axes
Returns the bokeh axes object with the plot
if plot_type == "bokeh"
"""
def __init__(self, speed_range, wd, log_dec):
self.speed_range = speed_range
self.wd = wd
self.log_dec = log_dec
def plot_nat_freq(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
"""Plot the damped natural frequencies vs frequency.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
harmonics: list, optional
            List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : Bokeh figure
The bokeh axes object with the plot.
"""
default_values = dict(line_width=3.0, line_alpha=1.0)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = figure(
width=640,
height=480,
tools="pan, box_zoom, wheel_zoom, reset, save",
title="Campbell Diagram",
y_axis_label="Damped Natural Frequencies",
x_axis_label="Rotor Speed",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
for j in range(self.wd.shape[0]):
fig.line(
x=self.speed_range,
y=np.mean(self.wd[j], axis=1),
line_color=colors1[j],
line_alpha=1.0,
line_width=3.0,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="Mean - Mode {}".format(j + 1),
)
if len(percentile):
for i, p in enumerate(percentile):
fig.line(
x=self.speed_range,
y=np.percentile(self.wd[j], p, axis=1),
line_color=colors2[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors2[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
if len(conf_interval):
for i, p in enumerate(conf_interval):
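                    # A p% confidence band around the median spans the
                    # (50 - p/2)th to (50 + p/2)th percentiles; e.g. p=95
                    # gives the 2.5th and 97.5th percentiles.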
fig.line(
x=self.speed_range,
y=np.percentile(self.wd[j], 50 + p / 2, axis=1),
line_color=colors1[j],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="confidence interval: {}% - Mode {}".format(
p, j + 1
),
)
fig.line(
x=self.speed_range,
y=np.percentile(self.wd[j], 50 - p / 2, axis=1),
line_color=colors1[j],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="confidence interval: {}% - Mode {}".format(
p, j + 1
),
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot_log_dec(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
"""Plot the log_dec vs frequency.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
harmonics: list, optional
            List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : Bokeh figure
The bokeh axes object with the plot
"""
default_values = dict(line_width=3.0, line_alpha=1.0)
percentile = np.sort(percentile)
conf_interval = np.sort(conf_interval)
for k, v in default_values.items():
kwargs.setdefault(k, v)
fig = figure(
width=640,
height=480,
tools="pan, box_zoom, wheel_zoom, reset, save",
title="Campbell Diagram",
y_axis_label="Log Dec",
x_axis_label="Rotor speed",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
for j in range(self.log_dec.shape[0]):
fig.line(
x=self.speed_range,
y=np.mean(self.log_dec[j], axis=1),
line_color=colors1[j],
line_alpha=1.0,
line_width=3.0,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="Mean - Mode {}".format(j + 1),
)
if len(percentile):
for i, p in enumerate(percentile):
fig.line(
x=self.speed_range,
y=np.percentile(self.log_dec[j], p, axis=1),
line_color=colors2[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors2[i],
muted_alpha=0.1,
legend_label="percentile {}".format(p),
)
if len(conf_interval):
for i, p in enumerate(conf_interval):
fig.line(
x=self.speed_range,
y=np.percentile(self.log_dec[j], 50 + p / 2, axis=1),
line_color=colors1[j],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="confidence interval: {}% - Mode {}".format(
p, j + 1
),
)
fig.line(
x=self.speed_range,
y=np.percentile(self.log_dec[j], 50 - p / 2, axis=1),
line_color=colors1[j],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[j],
muted_alpha=0.1,
legend_label="confidence interval: {}% - Mode {}".format(
p, j + 1
),
)
        fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot(self, percentile=[], conf_interval=[], *args, **kwargs):
"""Plot Campbell Diagram.
This method plots Campbell Diagram.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
args: optional
harmonics : list, optional
List with the harmonics to be plotted.
The default is to plot 1x.
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
grid_plots : bokeh column
Bokeh column with diagrams for frequency and log dec.
"""
fig0 = self.plot_nat_freq(percentile, conf_interval, **kwargs)
fig1 = self.plot_log_dec(percentile, conf_interval, **kwargs)
grid_plots = gridplot([[fig0, fig1]])
return grid_plots
class ST_FrequencyResponseResults:
"""Store stochastic results and provide plots for Frequency Response.
Parameters
----------
speed_range : array
Array with the speed range in rad/s.
magnitude : array
Array with the frequencies, magnitude (dB) of the frequency
response for each pair input/output.
phase : array
Array with the frequencies, phase of the frequency
response for each pair input/output.
Returns
-------
grid_plots : bokeh column
Bokeh column with amplitude and phase plot.
"""
def __init__(self, speed_range, magnitude, phase):
self.speed_range = speed_range
self.magnitude = magnitude
self.phase = phase
def plot_magnitude(
self, percentile=[], conf_interval=[], units="mic-pk-pk", **kwargs
):
"""Plot frequency response.
This method plots the frequency response magnitude given an output and
an input using Bokeh.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0% and 100% inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk".
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : bokeh figure
Bokeh plot axes with magnitude plot.
"""
if units == "m":
y_axis_label = "Amplitude (m)"
elif units == "mic-pk-pk":
y_axis_label = "Amplitude (μ pk-pk)"
else:
y_axis_label = "Amplitude (dB)"
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Frequency Response - Magnitude",
x_axis_label="Frequency",
y_axis_label=y_axis_label,
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
if len(percentile):
for i, p in enumerate(percentile):
mag_percentile = np.percentile(self.magnitude, p, axis=1)
fig.line(
x=self.speed_range,
y=mag_percentile,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
if len(conf_interval):
for i, p in enumerate(conf_interval):
mag_conf1 = np.percentile(self.magnitude, 50 + p / 2, axis=1)
mag_conf2 = np.percentile(self.magnitude, 50 - p / 2, axis=1)
fig.line(
x=self.speed_range,
y=mag_conf1,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
x=self.speed_range,
y=mag_conf2,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
mag_mean = np.mean(self.magnitude, axis=1)
fig.line(
x=self.speed_range,
y=mag_mean,
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot_phase(self, percentile=[], conf_interval=[], **kwargs):
"""Plot frequency response.
This method plots the phase response given an output and an input
using bokeh.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : bokeh figure
Bokeh plot axes with phase plot.
"""
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Frequency Response - Phase",
x_axis_label="Frequency",
y_axis_label="Phase angle",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
if len(percentile):
for i, p in enumerate(percentile):
phs_percentile = np.percentile(self.phase, p, axis=1)
fig.line(
x=self.speed_range,
y=phs_percentile,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
for i, p in enumerate(conf_interval):
phs_conf1 = np.percentile(self.phase, 50 + p / 2, axis=1)
phs_conf2 = np.percentile(self.phase, 50 - p / 2, axis=1)
fig.line(
x=self.speed_range,
y=phs_conf1,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
x=self.speed_range,
y=phs_conf2,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
legend_label="confidence interval: {}%".format(p),
)
phs_mean = np.mean(self.phase, axis=1)
fig.line(
x=self.speed_range,
y=phs_mean,
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot(self, percentile=[], conf_interval=[], units="mic-pk-pk", *args, **kwargs):
"""Plot frequency response.
This method plots the frequency and phase response given an output
and an input.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk"
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. line_color="blue").
Returns
-------
grid_plots : bokeh column
Bokeh column with amplitude and phase plot
"""
fig0 = self.plot_magnitude(percentile, conf_interval, units, **kwargs)
fig1 = self.plot_phase(percentile, conf_interval, **kwargs)
grid_plots = gridplot([[fig0], [fig1]])
return grid_plots
class ST_TimeResponseResults:
"""Store stochastic results and provide plots for Time Response and Orbit Response.
Parameters
----------
time_range : 1-dimensional array
Time array.
yout : array
System response.
xout : array
Time evolution of the state vector.
nodes_list: array
list with nodes from a rotor model.
nodes_pos: array
Rotor nodes axial positions.
Returns
-------
fig : bokeh figure
Returns the bokeh axes object with the plot
ax : matplotlib.axes
Matplotlib axes with orbit response plot.
"""
def __init__(self, time_range, yout, xout, nodes_list=[], nodes_pos=[]):
self.time_range = time_range
self.yout = yout
self.xout = xout
self.nodes_list = nodes_list
self.nodes_pos = nodes_pos
def _plot_time_response(
self, dof, percentile=[], conf_interval=[], *args, **kwargs
):
"""Plot time response.
This method plots the time response given.
Parameters
----------
dof : int
Degree of freedom that will be observed.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. line_color="blue").
Returns
-------
grid_plots : bokeh figure
Bokeh figure with time response plot.
"""
if dof % 4 == 0:
obs_dof = "x"
elif dof % 4 == 1:
obs_dof = "y"
elif dof % 4 == 2:
obs_dof = "α"
elif dof % 4 == 3:
obs_dof = "β"
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Response for node {} and degree of freedom {}".format(
dof // 4, obs_dof
),
x_axis_label="Time (s)",
y_axis_label="Amplitude",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
for i, p in enumerate(percentile):
tr_percentile = np.percentile(self.yout[..., dof], p, axis=0)
fig.line(
x=self.time_range,
y=tr_percentile,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
for i, p in enumerate(conf_interval):
conf1 = np.percentile(self.yout[..., dof], 50 + p / 2, axis=0)
conf2 = np.percentile(self.yout[..., dof], 50 - p / 2, axis=0)
fig.line(
x=self.time_range,
y=conf1,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
x=self.time_range,
y=conf2,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
legend_label="confidence interval: {}%".format(p),
)
t_mean = np.mean(self.yout[..., dof], axis=0)
fig.line(
x=self.time_range,
y=t_mean,
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def _plot_orbit_2d(self, node, percentile=[], conf_interval=[], *args, **kwargs):
"""Plot orbit response (2D).
This function plots orbits for a given node on the rotor system in a 2D view.
Parameters
----------
node : int
Select the node to display the respective orbit response.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. color="blue").
Returns
-------
ax : matplotlib.axes
Matplotlib axes with orbit response plot.
"""
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Rotor Orbit: node {}".format(node),
x_axis_label="Amplitude",
y_axis_label="Amplitude",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
for i, p in enumerate(percentile):
fig.line(
                np.percentile(self.yout[..., 4 * node], p, axis=0),
                np.percentile(self.yout[..., 4 * node + 1], p, axis=0),
line_color=colors2[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors2[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
for i, p in enumerate(conf_interval):
fig.line(
np.percentile(self.yout[..., 4 * node], 50 + p / 2, axis=0),
np.percentile(self.yout[..., 4 * node + 1], 50 + p / 2, axis=0),
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
np.percentile(self.yout[..., 4 * node], 50 - p / 2, axis=0),
np.percentile(self.yout[..., 4 * node + 1], 50 - p / 2, axis=0),
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
np.mean(self.yout[..., 4 * node], axis=0),
np.mean(self.yout[..., 4 * node + 1], axis=0),
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def _plot_orbit_3d(self, percentile=[], conf_interval=[], *args, **kwargs):
"""Plot orbit response (3D).
This function plots orbits for each node on the rotor system in a 3D view.
Parameters
----------
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. color="blue").
Returns
-------
ax : matplotlib.axes
Matplotlib axes with orbit response plot.
"""
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection="3d")
# plot center line
line = np.zeros(len(self.nodes_pos))
ax.plot(line, line, self.nodes_pos, "k-.", linewidth=1.5, zdir="x")
for i, p in enumerate(percentile):
for n in self.nodes_list:
ax.plot(
np.percentile(self.yout[..., 4 * n], p, axis=0),
np.percentile(self.yout[..., 4 * n + 1], p, axis=0),
self.nodes_pos[n],
color=cm.get_cmap("tab20c")(i),
zdir="x",
label="percentile: {}%".format(p),
)
for i, p in enumerate(conf_interval):
for n in self.nodes_list:
ax.plot(
np.percentile(self.yout[..., 4 * n], 50 + p / 2, axis=0),
np.percentile(self.yout[..., 4 * n + 1], 50 + p / 2, axis=0),
self.nodes_pos[n],
color=cm.get_cmap("tab10")(i),
zdir="x",
label="confidence interval: {}%".format(p),
)
ax.plot(
np.percentile(self.yout[..., 4 * n], 50 - p / 2, axis=0),
np.percentile(self.yout[..., 4 * n + 1], 50 - p / 2, axis=0),
self.nodes_pos[n],
color=cm.get_cmap("tab10")(i),
zdir="x",
label="confidence interval: {}%".format(p),
)
for n in self.nodes_list:
ax.plot(
np.mean(self.yout[..., 4 * n], axis=0),
np.mean(self.yout[..., 4 * n + 1], axis=0),
self.nodes_pos[n],
color="k",
zdir="x",
label="Mean".format(p),
)
ax.set_xlabel("Rotor length (m)", labelpad=16, fontsize=14)
ax.set_ylabel("Amplitude - X direction (m)", labelpad=16, fontsize=14)
ax.set_zlabel("Amplitude - Y direction (m)", labelpad=16, fontsize=14)
ax.set_title("Rotor Orbits", fontsize=16)
ax.tick_params(axis="both", which="major", labelsize=14)
ax.tick_params(axis="both", which="minor", labelsize=14)
return ax
def plot(
self,
plot_type="1d",
node=None,
dof=None,
percentile=[],
conf_interval=[],
*args,
**kwargs
):
"""Plot stochastic time or orbit response.
        This method calls the auxiliary methods to plot the stochastic time
        or orbit response. The plot type options are:
- 1d: plot time response for a given degree of freedom of a rotor system.
- 2d: plot orbit of a selected node of a rotor system.
- 3d: plot orbits for each node on the rotor system in a 3D view.
If plot_type = "1d": input a dof.
If plot_type = "2d": input a node.
if plot_type = "3d": no need to input a dof or node.
Parameters
----------
        plot_type : str, optional
            Defines which type of plot to display.
            Options are: "1d", "2d" or "3d".
            Default is "1d".
node : int, optional
Select the node to display the respective orbit response.
Default is None.
dof : int
Degree of freedom that will be observed.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. color="blue").
        Raises
        ------
        ValueError
            Error raised if no node is specified when plot_type = "2d".
        ValueError
            Error raised if an invalid string is passed to plot_type.
Returns
-------
ax : matplotlib.axes
Matplotlib axes with orbit response plot.
"""
if plot_type == "1d":
return self._plot_time_response(
dof, percentile, conf_interval, *args, **kwargs
)
elif plot_type == "2d":
if node not in self.nodes_list:
raise ValueError("Please insert a valid node.")
else:
return self._plot_orbit_2d(
node, percentile, conf_interval, *args, **kwargs
)
elif plot_type == "3d":
return self._plot_orbit_3d(percentile, conf_interval, *args, **kwargs)
else:
raise ValueError("Plot type not supported. Choose between '2d' or '3d'.")
class ST_ForcedResponseResults:
"""Store stochastic results and provide plots for Forced Response.
Parameters
----------
    forced_resp : array
        Array with the forced response for each node for each frequency.
frequency_range : array
Array with the frequencies
magnitude : array
Magnitude of the frequency response for node for each frequency
phase : array
Phase of the frequency response for node for each frequency
Returns
-------
fig : bokeh figure
Returns the bokeh axes object with the plot
"""
def __init__(self, forced_resp, magnitude, phase, frequency_range):
self.forced_resp = forced_resp
self.magnitude = magnitude
self.phase = phase
self.frequency_range = frequency_range
def plot_magnitude(
self, dof, percentile=[], conf_interval=[], units="mic-pk-pk", **kwargs
):
"""Plot frequency response.
        This method plots the unbalance response magnitude using Bokeh.
Parameters
----------
dof : int
Degree of freedom to observe the response.
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0% and 100% inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk".
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : bokeh figure
Bokeh plot axes with magnitude plot.
"""
if units == "m":
y_axis_label = "Amplitude (m)"
elif units == "mic-pk-pk":
y_axis_label = "Amplitude (μ pk-pk)"
else:
y_axis_label = "Amplitude (dB)"
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Unbalance Response - Magnitude",
x_axis_label="Frequency",
y_axis_label=y_axis_label,
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
if len(percentile):
for i, p in enumerate(percentile):
mag_percentile = np.percentile(self.magnitude[..., dof], p, axis=0)
fig.line(
x=self.frequency_range,
y=mag_percentile,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
if len(conf_interval):
for i, p in enumerate(conf_interval):
mag_conf1 = np.percentile(self.magnitude[..., dof], 50 + p / 2, axis=0)
mag_conf2 = np.percentile(self.magnitude[..., dof], 50 - p / 2, axis=0)
fig.line(
x=self.frequency_range,
y=mag_conf1,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
x=self.frequency_range,
y=mag_conf2,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
mag_mean = np.mean(self.magnitude[..., dof], axis=0)
fig.line(
x=self.frequency_range,
y=mag_mean,
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot_phase(self, dof, percentile=[], conf_interval=[], **kwargs):
"""Plot frequency response.
This method plots the phase response given an output and an input
using bokeh.
Parameters
----------
dof : int
Degree of freedom to observe the response.
percentile : list, optional
Sequence of percentiles to compute, which must be between
0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be between
0 and 100 inclusive.
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. linestyle='--').
Returns
-------
fig : bokeh figure
Bokeh plot axes with phase plot.
"""
fig = figure(
tools="pan, box_zoom, wheel_zoom, reset, save",
width=640,
height=480,
title="Unbalance Response - Phase",
x_axis_label="Frequency",
y_axis_label="Phase angle",
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
if len(percentile):
for i, p in enumerate(percentile):
phs_percentile = np.percentile(self.phase[..., dof], p, axis=0)
fig.line(
x=self.frequency_range,
y=phs_percentile,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="percentile: {}%".format(p),
)
for i, p in enumerate(conf_interval):
phs_conf1 = np.percentile(self.phase[..., dof], 50 + p / 2, axis=0)
phs_conf2 = np.percentile(self.phase[..., dof], 50 - p / 2, axis=0)
fig.line(
x=self.frequency_range,
y=phs_conf1,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
fig.line(
x=self.frequency_range,
y=phs_conf2,
line_color=colors1[i],
line_alpha=0.6,
line_width=2.5,
muted_color=colors1[i],
muted_alpha=0.1,
legend_label="confidence interval: {}%".format(p),
)
phs_mean = np.mean(self.phase[..., dof], axis=0)
fig.line(
x=self.frequency_range,
y=phs_mean,
line_color="black",
line_alpha=1.0,
line_width=3,
muted_color="black",
muted_alpha=0.1,
legend_label="Mean",
)
fig.legend.background_fill_alpha = 0.1
fig.legend.click_policy = "mute"
fig.legend.label_text_font_size = "10pt"
return fig
def plot(
self, dof, percentile=[], conf_interval=[], units="mic-pk-pk", *args, **kwargs
):
"""Plot frequency response.
This method plots the frequency and phase response given an output
and an input.
Parameters
----------
dof : int
Degree of freedom to observe the response.
percentile : list, optional
Sequence of percentiles to compute, which must be
between 0 and 100 inclusive.
conf_interval : list, optional
Sequence of confidence intervals to compute, which must be
between 0 and 100 inclusive.
units : str, optional
Unit system
Default is "mic-pk-pk"
args : optional
Additional plot axes
kwargs : optional
Additional key word arguments can be passed to change
the plot (e.g. line_color="blue").
Returns
-------
grid_plots : bokeh column
Bokeh column with amplitude and phase plot
"""
fig0 = self.plot_magnitude(dof, percentile, conf_interval, units, **kwargs)
fig1 = self.plot_phase(dof, percentile, conf_interval, **kwargs)
grid_plots = gridplot([[fig0], [fig1]])
return grid_plots
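if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module). The array
    # shapes are assumptions inferred from the plotting code: wd and log_dec
    # hold Monte Carlo samples shaped (n_modes, n_speeds, n_samples).
    rng = np.random.RandomState(0)
    speed_range = np.linspace(0, 500, 50)
    wd = np.abs(rng.normal(100.0, 5.0, size=(2, 50, 200)))
    log_dec = np.abs(rng.normal(1.0, 0.1, size=(2, 50, 200)))
    campbell = ST_CampbellResults(speed_range, wd, log_dec)
    grid = campbell.plot(percentile=[50], conf_interval=[90])
    # from bokeh.io import show; show(grid)  # uncomment to render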
|
{"hexsha": "addf19e8035d91bcec7e0a8a77c3e250a3fc9c28", "size": 41078, "ext": "py", "lang": "Python", "max_stars_repo_path": "ross/stochastic/st_results.py", "max_stars_repo_name": "PedroBernardino/ross", "max_stars_repo_head_hexsha": "d8b74aa97b0a02108e15c316b8202964b2f7a532", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ross/stochastic/st_results.py", "max_issues_repo_name": "PedroBernardino/ross", "max_issues_repo_head_hexsha": "d8b74aa97b0a02108e15c316b8202964b2f7a532", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ross/stochastic/st_results.py", "max_forks_repo_name": "PedroBernardino/ross", "max_forks_repo_head_hexsha": "d8b74aa97b0a02108e15c316b8202964b2f7a532", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7529610829, "max_line_length": 88, "alphanum_fraction": 0.5176736939, "include": true, "reason": "import numpy", "num_tokens": 8898}
|
import numpy as np
import requests
import pandas as pd
import os
from bs4 import BeautifulSoup as BS
def search(string,start="", end=""):
lstart=len(start)
lend=len(end)
startpoint=endpoint=0
for i in range(len(string)):
if string[i:i+lstart]==start:
startpoint=i+lstart
for i in range(len(string)-lend, startpoint, -1):
if string[i:i+lend]==end:
endpoint=i
return string[startpoint:endpoint]
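# Example (illustrative): search('<span title="3x3x3 Cube">', 'title="', '"')
# returns '3x3x3 Cube' -- the text between the last `start` marker and the
# first `end` marker that follows it.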
def _cd(name):
if not os.path.isdir(name):
os.mkdir(name)
os.chdir(name)
def chd(name):
for i in name.split('/'):
_cd(i)
def savefile(Name, frame):
chd('comps/'+Name)
frame.to_csv('general.csv', index=False)
pd.DataFrame(columns=['event', 'groupcount', 'overlaps with the next event (if so, type "1")', 'NOTE: If two events overlap, the time-consuming one should be on top of the faster one.']).to_csv('schedule.csv', index=False)
def cubingTW(Name=''):
if Name=='':
Name=input('input the comp name on the cubingTW:')
pre_url='https://cubing-tw.net/event/'+Name+'/competitors'
pre=requests.get(pre_url)
presp=BS(pre.text,'html.parser')
pre_list=presp.find('table').find('tfoot').find_all('tr')[1].find_all('th')
event_list=[]
for th in pre_list:
result=th.find('span')
if result!=None:
event_list.append(search(str(result), 'title="', '"'))
pre_list=presp.find('table').find('tbody').find_all('tr')
frame=pd.DataFrame(np.empty((len(pre_list), len(event_list)+3), dtype=object), columns=['index', 'name', 'newbie']+event_list)
    for index, competitor in enumerate(pre_list):
        competitor = competitor.find_all('td')
        frame.loc[index, 'index'] = int(competitor[0].text)
        frame.loc[index, 'name'] = competitor[1].text
        for i, event in enumerate(competitor[5:]):
            if '-' not in event.text:
                try:
                    frame.loc[index, event_list[i]] = 1
                except IndexError:  # if the registration isn't closed, another column is added, causing this error
                    pass
        if competitor[2].text == "":
            frame.loc[index, 'newbie'] = 1
savefile(Name, frame)
return Name
def WCA(Name=''):
if Name=='':
Name=input('input the comp name on the WCA site:')
pre_url='https://www.worldcubeassociation.org/competitions/'+Name+'/registrations'
pre=requests.get(pre_url)
presp=BS(pre.text,'html.parser')
pre=presp.find('table')
head=pre.find('thead').find_all('span')
competitors=pre.find('tbody').find_all('tr')
event_list=[]
for i in head:
event_list.append(i.attrs['title'])
emp_list=np.empty((len(competitors), len(event_list)+3), dtype=object)
frame=pd.DataFrame(emp_list, columns=['id', 'name', 'newbie']+event_list)
    for index, competitor in enumerate(competitors):
        competitor = competitor.find_all('td')
        name = competitor[0]
        if len(name.find_all('a')) == 0:
            frame.loc[index, 'newbie'] = 1
        frame.loc[index, 'name'] = name.text.strip()
        frame.loc[index, 'id'] = index + 1
        for i, event in enumerate(competitor[2:-2]):
            if len(event.find_all('span')) == 0:
                continue
            frame.loc[index, event_list[i]] = 1
savefile(Name, frame)
return Name
if __name__=='__main__':
WCA()
|
{"hexsha": "fcc19fb875e14db97c576eac2c4a66f2cc91f346", "size": 3354, "ext": "py", "lang": "Python", "max_stars_repo_path": "Web2csv.py", "max_stars_repo_name": "aoaaceai/CompAssistant", "max_stars_repo_head_hexsha": "727833926a9cbae15a4def32d8731c7f23b5fdd7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Web2csv.py", "max_issues_repo_name": "aoaaceai/CompAssistant", "max_issues_repo_head_hexsha": "727833926a9cbae15a4def32d8731c7f23b5fdd7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Web2csv.py", "max_forks_repo_name": "aoaaceai/CompAssistant", "max_forks_repo_head_hexsha": "727833926a9cbae15a4def32d8731c7f23b5fdd7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8571428571, "max_line_length": 226, "alphanum_fraction": 0.6165772212, "include": true, "reason": "import numpy", "num_tokens": 856}
|
# Pytest customization
from __future__ import division, absolute_import, print_function
import os
import pytest
import warnings
from distutils.version import LooseVersion
from scipy._lib._fpumode import get_fpu_mode
from scipy._lib._testutils import FPUModeChangeWarning
def pytest_configure(config):
config.addinivalue_line("markers",
"slow: Tests that are very slow.")
config.addinivalue_line("markers",
"xslow: mark test as extremely slow (not run unless explicitly requested)")
def pytest_runtest_setup(item):
if LooseVersion(pytest.__version__) >= LooseVersion("3.6.0"):
mark = item.get_closest_marker("xslow")
else:
mark = item.get_marker("xslow")
if mark is not None:
try:
v = int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
v = False
if not v:
pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it")
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
"""
Check FPU mode was not changed during the test.
"""
old_mode = get_fpu_mode()
yield
new_mode = get_fpu_mode()
if old_mode != new_mode:
warnings.warn("FPU mode changed from {0:#x} to {1:#x} during "
"the test".format(old_mode, new_mode),
category=FPUModeChangeWarning, stacklevel=0)
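# Usage sketch (illustrative, not part of this file): mark a test as
# extremely slow, then opt in through the environment variable:
#
#     @pytest.mark.xslow
#     def test_exhaustive_case():
#         ...
#
#     $ SCIPY_XSLOW=1 pytest scipy/some/test_module.py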
|
{"hexsha": "736e88aa38f5eb0336cd318cee7aac144c45731b", "size": 1407, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/conftest.py", "max_stars_repo_name": "gitter-badger/scipy", "max_stars_repo_head_hexsha": "0d10fea581d5044bbecc8b4fbe8c11fc102f6592", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-14T23:22:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-14T23:22:16.000Z", "max_issues_repo_path": "scipy/conftest.py", "max_issues_repo_name": "danilo-augusto/scipy", "max_issues_repo_head_hexsha": "4d0d8958ad3f788a1a1c0bcac5cec1af9db26804", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-25T15:36:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-25T15:36:38.000Z", "max_forks_repo_path": "scipy/conftest.py", "max_forks_repo_name": "danilo-augusto/scipy", "max_forks_repo_head_hexsha": "4d0d8958ad3f788a1a1c0bcac5cec1af9db26804", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-06T04:56:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-06T04:56:58.000Z", "avg_line_length": 29.9361702128, "max_line_length": 91, "alphanum_fraction": 0.6695095949, "include": true, "reason": "from scipy", "num_tokens": 341}
|
#!/usr/bin/env python
from __future__ import print_function
import jinja2
import argparse
import cv2
import os
import fnmatch
import numpy as np
import rospkg
import pylab as pl
import scipy
from scipy import interpolate
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('filename')
args = parser.parse_args()
env = jinja2.Environment()
rospack = rospkg.RosPack()
fla_path= rospack.get_path('fla_description')
texture_path = os.path.join('materials', 'textures')
script_path = os.path.join('materials', 'scripts')
script_dir = os.path.dirname(os.path.realpath(__file__))
with open('material.jinja') as f:
material_template_str = f.read()
material_template = env.from_string(material_template_str)
with open('model.jinja') as f:
model_template_str = f.read()
model_template = env.from_string(model_template_str)
data = {}
# create texture images for yard markers
try:
os.makedirs(texture_path)
except os.error as e:
pass
for i in range(10, 60, 5):
# texture
img = np.zeros((300, 400, 4), dtype=np.uint8)
img[:, :, 0] = 0
img[:, :, 1] = 0
img[:, :, 2] = 0
img[:, :, 3] = 0
img = cv2.putText(img, str(i), (0, 270), cv2.FONT_HERSHEY_SIMPLEX,
10, (255, 255, 255, 255), 30, cv2.LINE_AA)
name = 'yard_{:d}'.format(i)
texture_file = os.path.join('{:s}.png'.format(name))
cv2.imwrite(os.path.join(texture_path, texture_file), img)
# material
data_material = {
'name': name,
'texture_file': texture_file,
}
material_file = os.path.join('{:s}.material'.format(name))
result = material_template.render(data_material)
filename_out = os.path.join(script_path, material_file)
with open(filename_out, 'w') as f_out:
f_out.write(result)
result = model_template.render(data)
filename_out = os.path.join(script_dir, 'stadium.sdf')
with open(filename_out, 'w') as f_out:
f_out.write(result)
# vim: set et fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :
|
{"hexsha": "ce9d10280ddd9ca8c1f88cd1e5f999c3d327e2bc", "size": 2207, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/stadium/stadium.sdf.py", "max_stars_repo_name": "dronecrew/gazebo_fla", "max_stars_repo_head_hexsha": "ae4310a62a52e013674895f89fbef3ca8d121e21", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/stadium/stadium.sdf.py", "max_issues_repo_name": "dronecrew/gazebo_fla", "max_issues_repo_head_hexsha": "ae4310a62a52e013674895f89fbef3ca8d121e21", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/stadium/stadium.sdf.py", "max_forks_repo_name": "dronecrew/gazebo_fla", "max_forks_repo_head_hexsha": "ae4310a62a52e013674895f89fbef3ca8d121e21", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6623376623, "max_line_length": 74, "alphanum_fraction": 0.6270956049, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 579}
|
\documentclass{beamer}
\usepackage{beamerthemevictor,comment,verbatim,graphicx,amssymb}
\usepackage[noeepic]{qtree}
\input{tutmacs}
\input{slidemacs}
\input{idxmacs}
\begin{document}
\title{Parsing}
\author{Victor Eijkhout}
\date{Notes for CS 594 -- Fall 2004}
\frame{\titlepage}
\section{Introduction}
\frame[containsverbatim]{
\frametitle{What is parsing?}
\begin{itemize}
\item Check for correctness: is this a legal program
\item Uncover meaning: convert to internal representation
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Levels of parsing}
\begin{itemize}
\item Check for illegal characters
\item Build tokens (identifiers, numbers, operators~\&c) from
characters (lexical analysis)
\item Statements tokens (syntactical analysis)
\item Semantical restrictions: define/use~\&c
\end{itemize}
}
\frame[containsverbatim]{
\begin{verbatim}
my_array[ii] = 3+sin(1.0);
\end{verbatim}
\begin{itemize}
\item Lexical analysis: `\n{my_array}', `\verb+[+', `\n{ii}'~\&c.
\item Syntactical: this is an assignment; lhs is something you can
assign to, rhs is arithmetic expression
\item Semantics: \n{my_array} is array, \n{ii}~is integer, \n{sin}~is
defined function
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Mixing of levels}
In Fortran:
\begin{verbatim}
X = SOMENAME( Y+3 )
\end{verbatim}
\begin{itemize}
\item Lexical analysis simple
\item Syntax unclear: rhs can be function call or array element
\item Solution: give lexer access to symbol table
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Correctness}
\begin{itemize}
\item Lexical analysis finds identifiers: \n{5ab} is illegal
\item Syntactical analysis finds expressions: \n{array[ii)} is illegal
\item In \TeX?
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Parsing by automaton}
\begin{itemize}
\item Lexical analysis by Finite State Automaton
\item Syntactical analysis by Pushdown Automaton
\item In practice some mixing of levels
\end{itemize}
}
\subsection{Automata theory}
\frame[containsverbatim]{
\frametitle{Terminology}
\begin{itemize}
\item Language: a set of words \[\{a^n|\hbox{$n$ is prime}\}\]
\item Grammar: set of rules that produces a language
\item Automaton: abstract device that can recognize a language
\item Derivation: actual sequence of rules or transitions used to
derive a string
\item Parse tree: 2D way of writing derivation
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{to be precise}
\begin{itemize}
\item Grammar:
\begin{itemize}
\item Start symbol $S$
\item Terminal symbols $a,b,c,...$ from the alphabet
\item Non-terminals $A,B,C,...$, ultimately to be replaced
\item Rules $\alpha\rightarrow\beta$ where $\alpha,\beta$ strings of
terminals and non-terminals
\end{itemize}
\item Automaton:
\begin{itemize}
\item Starting state
\item Accepting state
\item Work storage
\item Transition diagram
\end{itemize}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Types of languages}
\begin{itemize}
\item Languages differ in types of grammar rules
$\alpha\rightarrow\beta$
\item Automata differ in amount of workspace
\item Four levels Chomsky hierarchy; many other types
\end{itemize}
}
\frame{
\frametitle{Type 0}
\begin{itemize}
\item Recursively enumerable languages
\item<2-> Turing machine: infinite tape
\item<2-> No restriction on grammar rules
\end{itemize}
}
\frame{
\frametitle{Type 1}
\begin{itemize}
\item Context-sensitive languages
\item<2-> Linear-bounded automata
\item<2-> No rules $\alpha\rightarrow\epsilon$
\item<3-> Normal form: $AB\rightarrow BA$,
$AB\rightarrow A\beta$
\end{itemize}
}
\frame{
\frametitle{Type 2}
\begin{itemize}
\item Context-free languages
\item<2-> Push Down Automata
\item<2-> Only rules $A\rightarrow\alpha$
\item<3-> Normal form: $A\rightarrow b\alpha$ or $A\rightarrow b$
\end{itemize}
}
\frame{
\frametitle{Type 3}
\begin{itemize}
\item Regular languages
\item<2-> Finite State Automata
\item<2-> Only rules $A\rightarrow bC$, $A\rightarrow b$
\end{itemize}
}
\sectionframe{Lexical analysis}
\frame[containsverbatim]{
\frametitle{Function of a lexer}
\begin{itemize}
\item Recognize identifiers, numbers
\item Also side effects: store names of functions
\end{itemize}
}
\subsection{Regular languages}
\frame[containsverbatim]{
\frametitle{Definition}
Inductively, through regular expressions
\begin{itemize}
\item $\epsilon$ is the empty language
\item `$a$' denotes the language~$\{a\}$ ($a$~in alphabet)
\item if $\alpha,\beta$ denote languages~$A,\nobreak B$, then
\begin{itemize}
\item $\alpha\beta$ or $\alpha\cdot\beta$ denotes $\{xy|x\in A,y\in B\}$
\item $\alpha|\beta$ denotes the language $A\cup B$.
\item $\alpha^*$ denotes the language $\cup_{n\geq 0}A^n$.
\end{itemize}
\end{itemize}
}
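% Added illustration (not in the original deck): a regular expression
% built from these operations.
\frame[containsverbatim]{
\frametitle{Example: identifiers}
A letter followed by letters or digits:
\[ \ell\cdot(\ell|d)^* \qquad \ell=a|\cdots|z|A|\cdots|Z,\quad d=0|\cdots|9 \]
In \lex\ notation: \verb+[a-zA-Z][a-zA-Z0-9]*+
}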
\frame{
\frametitle{Finite state automata}
\begin{itemize}
\item Starting state $S_0$
\item other states $S_i$; subset: accepting states
\item input alphabet~$I$; output alphabet~$O$
\item transition diagram $I\times S\rightarrow S$
\item<2-> non-deterministic: $(I\cup\{\epsilon\})\times S\rightarrow S$
\item<3-> String is accepted if (any) sequence of transitions it
causes leads to an accepting state
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{The NFA of a regular language}
Automaton that accepts~$\epsilon$:\\
\convertMPtoPDF{eps.1}{1}{1}
Automaton that accepts~$a$:\\
\convertMPtoPDF{a.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{The NFA of a regular language}
Automaton that accepts~$A\cdot B$:\\
\convertMPtoPDF{AB.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{The NFA of a regular language}
Automaton that accepts~$A\cup B$:\\
\convertMPtoPDF{AvB.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{The NFA of a regular language}
Automaton that accepts~$A^*$:\\
\convertMPtoPDF{Astar.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{Characterization}
\begin{itemize}
\item Any sufficiently long string can be written $\alpha=uvw$ with $v$ nonempty
\item then $uv^nw$ is also in the language, for every $n\geq0$
\end{itemize}
}
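% Added illustration (not in the original deck): using this pumping
% property to show a language is not regular.
\frame{
\frametitle{Example: $\{a^nb^n\}$ is not regular}
\begin{itemize}
\item Suppose $a^nb^n=uvw$ with $v$ nonempty and $uv$ inside the $a$-block
\item Then $v=a^k$ with $k>0$, so $uv^2w=a^{n+k}b^n$
\item Not of the form $a^mb^m$: contradiction
\item Hence no finite state automaton accepts $\{a^nb^n\}$
\end{itemize}
}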
\subsection{DFAs and NFAs}
\frame{
\frametitle{Example}
Language $a^*|b^*$:
\leavevmode
\convertMPtoPDF{nfa1.1}{1}{1}
$\quad\Rightarrow\quad$
\convertMPtoPDF{dfa1.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{Example: keywords}
A bit like what happens in lexical analysis:
\convertMPtoPDF{begin.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{}
Deterministic version
\convertMPtoPDF{begind.1}{1}{1}
}
\frame{
\frametitle{Converting NFA to DFA}
\begin{itemize}
\item Introduce new states
\item<2-> new state is set of old states
\item<3-> new states closed under $\epsilon$-transitions
\end{itemize}
}
\frame{
\convertMPtoPDF{begin.1}{1}{1}
New $S_0=\{0,1,6\}$
\begin{itemize}
\item<2->$S_0+\n{B}\Rightarrow S_1=\{2,6,7\}$,
\item<3->$S_0+\neg\n{B}\Rightarrow S_6=\{6,7\}$
\item<4->$S_1+\n{E}\Rightarrow S_2=\{3,6,7\}$, et cetera
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{NFA for lexical analysis}
\convertMPtoPDF{compound.1}{1}{1}
}
\frame[containsverbatim]{
\frametitle{small problems}
\begin{itemize}
\item Careful with the $\epsilon$-transition back:
\begin{verbatim}
printf("And then he said ""Boo!""");
\end{verbatim}
the final state is reached three times: only take the transition once the
maximal string has been recognized
\item Not always:
\begin{verbatim}
X = 4.E3
IF (4.EQ.VAR) THEN
\end{verbatim}
$\Rightarrow$ look-ahead needed
\end{itemize}
}
\subsectionframe{\lex}
\frame[containsverbatim]{
\frametitle{A tool for lexical analysis}
\begin{itemize}
\item You write regular expressions, and \lex\ reports if it finds any
\item Three sections: definitions, rules, code
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Example}
\footnotesize
\begin{verbatim}
%{
int charcount=0,linecount=0;
%}
%%
. charcount++;
\n {linecount++; charcount++;}
%%
int main()
{
yylex();
printf("There were %d characters in %d lines\n",
charcount,linecount);
return 0;
}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Running lex}
\lex\ code gets translated to C:
\begin{verbatim}
lex -t count.l > count.c
cc -c -o count.o count.c
cc -o counter count.o -ll
\end{verbatim}
Executable uses stdin/out, can be changed
}
\frame[containsverbatim]{
\frametitle{Definitions section}
\begin{itemize}
\item C code: between \verb+%{ ... %}+ copied to top of C file
\item Definitions: `\verb+letter [a-zA-Z]+' (like \n{#define})
\item State definitions (later)
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Example 2}
\begin{verbatim}
%{
int charcount=0,linecount=0,wordcount=0;
%}
letter [^ \t\n]
%%
{letter}+ {wordcount++; charcount+=yyleng;}
. charcount++;
\n {linecount++; charcount++;}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Rules section}
\begin{itemize}
\item Input is matched character by character
\item Actions of longest match are taken, earliest if equal length
\item Matched text is \verb+char *yytext+, length \verb+int yyleng+
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Example 2'}
\begin{verbatim}
{letter}+ {wordcount++; charcount+=yyleng;}
[ \t] spacecount++;
. charcount++;
\n linecount++;
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Example 3}
\begin{verbatim}
[0-9]+ process_integer();
[0-9]+\.[0-9]* |
\.[0-9]+ process_real();
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Regular expressions}
\def\titem#1{\item[{\tt #1}]}
\footnotesize
\begin{description}
\titem{.} Match any character except newlines.
\titem{\char`\\n} A newline character.
\titem{\char`\\t} A tab character.
\titem{\char`\^} The beginning of the line.
\titem{\$} The end of the line.
\titem{<expr>*} Zero or more occurrences of the expression.
\titem{<expr>+} One or more occurrences of the expression.
\titem{<expr>?} Zero or one occurrences of the expression.
\titem{(<expr1>|<expr2>)} One expression or the other.
\titem{{[<set>]}} A set of characters or ranges, such as \verb+[,.:;]+
or \verb+[a-zA-Z]+.
\titem{{[\char`\^<set>]}} The complement of the set, for instance
\verb+[^ \t]+.
\end{description}
}
\frame[containsverbatim]{
\frametitle{Example: filtering comments}
\begin{verbatim}
%%
"/*".*"*/" ;
. |
\n ECHO;
\end{verbatim}
works on
\begin{verbatim}
This text /* has a */ comment
in it
\end{verbatim}
}
\frame[containsverbatim]{
Does not work on
\begin{verbatim}
This text /* has */ a /* comment */ in it
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Context}
\begin{itemize}
\item Match in context
\item Left context implemented through states:
\begin{verbatim}
<STATE>(some pattern) {...
\end{verbatim}
State switching:
\begin{verbatim}
<STATE>(some pattern) {some action; BEGIN OTHERSTATE;}
\end{verbatim}
Initial state is \n{INITIAL}; other states are declared as
\begin{verbatim}
%s MYSTATE
%x MYSTATE
\end{verbatim}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Use of states}
\begin{verbatim}
%x COMM
%%
. |
\n ECHO;
"/*" BEGIN COMM;
<COMM>"*/" BEGIN INITIAL;
<COMM>. |
<COMM>\n ;
%%
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Context'}
\begin{itemize}
\item Right context:
\begin{verbatim}
abc/de {some action}
\end{verbatim}
\item context tokens not in \n{yytext}/\n{yyleng}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Example: text cleanup}
Input:
\begin{verbatim}
This text (all of it )has occasional lapses , in
punctuation(sometimes pretty bad) ,( sometimes not so).
(Ha! ) Is this : fun?Or what!
\end{verbatim}
Solution with context more compact than without.
Define:
\begin{verbatim}
punct [,.;:!?]
text [a-zA-Z]
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{need for context}
\begin{itemize}
\item Consider `\n{),}' `\n{) ,}' `\n{)a}' `\n{) a}'
\item Rules \verb!")" " "+ {printf(")");}! depend on context
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{right context solution}
\footnotesize
\begin{verbatim}
")"" "+/{punct} {printf(")");}
")"/{text} {printf(") ");}
{text}+" "+/")" {while (yytext[yyleng-1]==' ') yyleng--; ECHO;}
({punct}|{text}+)/"(" {ECHO; printf(" ");}
"("" "+/{text} {while (yytext[yyleng-1]==' ') yyleng--; ECHO;}
{text}+" "+/{punct} {while (yytext[yyleng-1]==' ') yyleng--; ECHO;}
^" "+ ;
" "+ {printf(" ");}
. {ECHO;}
\n/\n\n ;
\n {ECHO;}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{left context solution}
Use defined states:
\begin{verbatim}
punct [,.;:!?]
text [a-zA-Z]
%s OPEN
%s CLOSE
%s TEXT
%s PUNCT
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{left context solution, cont'd}
\footnotesize
\begin{verbatim}
" "+ ;
<INITIAL>"(" {ECHO; BEGIN OPEN;}
<TEXT>"(" |
<PUNCT>"(" {printf(" "); ECHO; BEGIN OPEN;}
")" {ECHO ; BEGIN CLOSE;}
<INITIAL>{text}+ |
<OPEN>{text}+ {ECHO; BEGIN TEXT;}
<CLOSE>{text}+ |
<TEXT>{text}+ |
<PUNCT>{text}+ {printf(" "); ECHO; BEGIN TEXT;}
{punct}+ {ECHO; BEGIN PUNCT;}
\n {ECHO; BEGIN INITIAL;}
\end{verbatim}
}
\sectionframe{Syntactical analysis}
\frame[containsverbatim]{
\frametitle{Function of syntactical analysis}
\begin{itemize}
\item Recognize statements: loops, assignments~\&c
\item Convert to internal representation: parse trees
\begin{quote}
\hbox{%
\Tree [.* [.+ $2\quad$ $5\quad$ ] $3\quad$ ]
\Tree [.+ $2\quad$ [.* $5\quad$ $3\quad$ ] ]
}
\end{quote}
\item Semantics: define/use sequence~\&c
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Grammars}
\begin{itemize}
\item Backus Naur, or other formalism
\item In \LaTeX: \n{bnf.sty}
\begin{examplewithcode}
\begin{bnf}
Expr: number Tail.
Tail: $\epsilon$ ; + number Tail; * number Tail
\end{bnf}
\end{examplewithcode}
(use my \n{bnf.env})
\item most language constructs are context-free
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Concepts}
\begin{itemize}
\item Grammar rules $A\rightarrow x\alpha$
\item Derivations $abcA\gamma\Rightarrow abc x\alpha\gamma$
\item Parse tree
\begin{quote}
\Tree [.. abc [.A $x\quad$ $\alpha\quad$ ] $\gamma$ ]
\end{quote}
\end{itemize}
}
\subsectionframe{Context-free languages}
\frame[containsverbatim]{
\frametitle{Definition}
\begin{itemize}
\item Grammatical: only rules $A\rightarrow \alpha$
\item From automata: pushdown automata
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Pumping lemma}
\begin{itemize}
\item For every context-free language there is an $n$ such that
\begin{itemize}
\item strings longer than~$n$ can be written $uvwxy$
\item and for all~$k$: $uv^kwx^ky$ also in the language
\end{itemize}
\item Proof: \begin{quote}
\Tree [.S $u$ [.A $v$ [.A $w$ ] $x$ ] $y$ ]
\end{quote}
\item Non-\{context-free\} language: $\{a^nb^nc^n\}$
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Deterministic and non-deterministic}
\begin{itemize}
\item No equivalence
\item Deterministic: $L_c=\{\alpha c\alpha^R|c\not\in\alpha\}$
\item Non-deterministic: $L=\nobreak\{\alpha\alpha^R\}$
\end{itemize}
}
\def\mbx{\mathbf{x}}
\def\mba{\mathbf{a}}
\def\mbb{\mathbf{b}}
\def\mbA{\mathbf{A}}
\def\mbB{\mathbf{B}}
\def\mby{\mathbf{y}}
\def\mbf{\mathbf{f}}
\frame[containsverbatim]{
\frametitle{Algebra of languages}
\begin{itemize}
\item Let expressions $\mbx$ and $\mby$ denote languages; then
\begin{itemize}
\item union: $\mbx+\mby=\mbx\cup\mby$
\item concatenation: $\mbx\mby=\{w=xy|x\in\mbx,y\in\mby\}$
\item repetition: $\mbx^*=\{w=x_1\cdots x_n\mid x_i\in\mbx,n\geq0\}$
\end{itemize}
\end{itemize}
}
\frame{
\frametitle{Algebra: solving equations}
\begin{itemize}
\item Equation: $\mbx=\mba+\mbx\mbb$
\item Interpretation: $\mbx=\mba\cup\{w=xb|x\in\mbx,b\in\mbb\}$
\item Solving:
\begin{itemize}
\item first of all $\mbx\supset\mba$
\item then also $\mbx\supset\mba\cdot\mbb$
\item continuing: $\mbx\supset\mba\mbb\mbb$,\dots
\end{itemize}
\item verify: $\mbx=\mba\mbb^*$
\item<2> Numerically: $x=a/(1-b)$
\end{itemize}
}
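% Added verification (a sketch): substituting the candidate solution back
% into the equation of the previous slide.
\frame{
\frametitle{Algebra: verifying the solution}
\begin{itemize}
\item Substitute $\mbx=\mba\mbb^*$ into the right-hand side of
  $\mbx=\mba+\mbx\mbb$:
\[ \mba+(\mba\mbb^*)\mbb=\mba(\lambda+\mbb^*\mbb)=\mba\mbb^* \]
\item using $\mbb^*=\lambda+\mbb^*\mbb$, so the equation holds
\end{itemize}
}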
\frame{
\frametitle{Derive normal form}
\begin{itemize}
\item Normal form: $A\rightarrow a\alpha$
\item Write grammar of context-free language as $\mbx^t=\mbx^t\mbA+\mbf^t$,
where $\mbx$~non-terminals, $\mbf$~rhs that are of normal form,
$\mbx^t\mbA$~describes normal form rhs
\item<2> Example:\\
\hbox{%
$\displaystyle\begin{array}{l}
S\rightarrow aSb|XY|c\\
X\rightarrow YXc|b\\
Y\rightarrow XS
\end{array}$\ \ %
$\displaystyle [S,X,Y]=[S,X,Y]\left[
\begin{matrix}\phi&\phi&\phi\\
Y&\phi&S\\ \phi&Xc&\phi \end{matrix}\right]
+[aSb+c,b,\phi]$
}
\item<3-> Solution:
\[ \mbx^t = \mbf^t\mbA^* \]
\item<4-> Needed: more explicit expression for~$\mbA^*$.
\end{itemize}
}
\frame{
\begin{itemize}
\item Note $\mbA^*=\lambda+\mbA\mbA^*$
\item then normal form: \[
\mbx^t = \mbf^t+\mbf^t\mbA\mbA^*
= \mbf^t+\mbf^t\mathbf{B} \]
where $\mathbf{B}=\mbA\mbA^*$.
\item<2-> $\mbB$:
\[ \mbB=\mbA\mbA^*=
\mbA+\mbA\mbA\mbA^* =
\mbA+\mbA\mbB \]
not necessarily normal form
\item<3-> Elements of~$\mbA$ that start with a
nonterminal can only start with nonterminals in~$\mbx$. Hence
substitute a rule from equation above.
\end{itemize}
}
\subsectionframe{Parsing strategies}
\frame[containsverbatim]{
\frametitle{Top-down parsing}
\begin{itemize}
\item Start with $S$ on the stack, replace by appropriate rule, guided
by input
\item Example: expression \n{2*5+3},
which is produced by the grammar
\begin{bnf}
Expr: number Tail.
Tail: $\epsilon$ ; + number Tail; * number Tail
\end{bnf}
\end{itemize}
}
\frame[containsverbatim]{
\begin{tabbing}
start symbol on stack:$\quad$\=${}2*5+3{}\quad$\=\kill
initial queue:\>$2*5+3$\\
start symbol on stack:\>\>Expr\\
replace\>\>number Tail\\
match\>${}*5+3$\>Tail\\
replace\>\>* number Tail\\
match\>$5+3$\>number Tail\\
match\>${}+3$\>Tail\\
replace\>\>+ number Tail\\
match\>$3$\>number Tail\\
match\>$\epsilon$\>Tail\\
match
\end{tabbing}
\[ E\Rightarrow n\, T\Rightarrow n*n\,T\Rightarrow
n*n+n\,T\Rightarrow n*n+n \]
$LL(1)$
}
\frame[containsverbatim]{
\frametitle{}
Equivalent grammar:
\begin{bnf}
Expr: number; number + Expr; number * Expr
\end{bnf}
assuming one more token look-ahead:
\begin{tabbing}
start symbol on stack:$\quad$\=${}2*5+3{}\quad$\=\kill
initial queue:\>$2*5+3$\\
start symbol on stack:\>\>Expr\\
replace\>\>number * Expr\\
match\>${}5+3$\>Expr\\
replace\>\>number + Expr\\
match\>$3$\>Expr\\
replace\>$3$\>number\\
match\>$\epsilon$
\end{tabbing}
$LL(2)$
}
\frame[containsverbatim]{
\frametitle{$LL$ is recursive descent}
Finding of proper rule:
\begin{verbatim}
define FindIn(Sym,NonTerm)
for all expansions of NonTerm:
if leftmost symbol == Sym
then found
else if leftmost symbol is nonterminal
then FindIn(Sym,that leftmost symbol)
FindIn(symbol,S);
\end{verbatim}
}
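% Added sketch (not in the original deck): recursive descent for the
% Expr/Tail grammar above, in Python for concreteness.
\frame[containsverbatim]{
\frametitle{Recursive descent in code (sketch)}
\footnotesize
\begin{verbatim}
# tokens: e.g. ['2','*','5','+','3']; pos: current position
def expr(tokens, pos):             # Expr: number Tail
    assert tokens[pos].isdigit()   # match number
    return tail(tokens, pos + 1)

def tail(tokens, pos):    # Tail: eps ; + number Tail ; * number Tail
    if pos == len(tokens):         # epsilon
        return pos
    assert tokens[pos] in '+*'     # match operator
    assert tokens[pos + 1].isdigit()
    return tail(tokens, pos + 2)
\end{verbatim}
One procedure per nonterminal; one token of look-ahead selects the rule.
}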
\frame[containsverbatim]{
\frametitle{Problems with $LL(k)$}
\begin{itemize}
\item Some grammars are not $LL(k)$ for any~$k$:\\
if \n{A<B} and \n{A<B>} are both legal
\item Infinite loop:
\begin{bnf}
Expr: number; Expr + number; Expr * number
\end{bnf}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Bottom-up: Shift-reduce}
\begin{itemize}
\item Recognize productions from terminals
\item Example: expression $2*5+3$ produced by
\begin{bnf}
E: number; E + E; E * E
\end{bnf}
\end{itemize}
}
\frame[containsverbatim]{
\begin{tabbing}
shift, shift, reduce: abcd\=initial state:initial\=\kill
\>stack\>queue\\
initial state:\>\>$2*5+3$\\
shift\>2\>*5+3\\
reduce\>E\>*5+3\\
shift\>E*\>5+3\\
shift\>E*5\>+3\\
reduce\>E*E\>+3\\
reduce\>E\>+3\\
shift, shift, reduce\>E+E\\
reduce\>E\\
\end{tabbing}
\[ E\Rightarrow E+E\Rightarrow E+3\Rightarrow E*E+3
\Rightarrow E*5+3\Rightarrow 2*5+3 \]
$LR(0)$
}
\frame[containsverbatim]{
\frametitle{Where to start reducing?}
\begin{itemize}
\item `Greedy' reducing is not always best
\item Grammar:
\begin{bnf}
S:aAcBe.
A:bA;b.
B:d.
\end{bnf}
and string \n{abbcde}.
\item Derivation 1:
\[ \n{abbcde}\Leftarrow\n{abAcde}\Leftarrow\n{aAcde}\Leftarrow
\n{aAcBe}\Leftarrow\n{S}. \]
\item Derivation 2:
\[ \n{abbcde}\Leftarrow\n{aAbcde}\Leftarrow \n{aAAcde} \Leftarrow ? \]
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Handle}
\begin{quote} If $S\Rightarrow^*\alpha Aw\Rightarrow\alpha\beta w$ is a
right-most derivation, then $A\rightarrow\beta$ at the position
after~$\alpha$ is a handle of~$\alpha Aw$.
\end{quote}
Question: how to find handles
}
\frame[containsverbatim]{
\frametitle{Operator-precedence grammars}
\begin{itemize}
\item Operator grammar: `expr-op-expr'
\item Formally: never two consecutive non-terminals, and no
rules~$A\rightarrow\nobreak\epsilon$.
\item Declare precedences (and associativity)
\begin{tabular}{r|ccc}
&\hbox{number}&$+$&$\times$\\\hline
\hbox{number}&&$\gtrdot$&$\gtrdot$\\
$+$&$\lessdot$&$\gtrdot$&$\lessdot$\\
$\times$&$\lessdot$&$\gtrdot$&$\gtrdot$
\end{tabular}\\
\end{itemize}
}
\frame[containsverbatim]{
\begin{itemize}
\item Annotate expression: $5+2*3$ becomes
$\lessdot5\gtrdot+\lessdot2\gtrdot*\lessdot3\gtrdot$
\item Reducing: $E+E*E$
\item Insert precedences: ${\lessdot}+{\lessdot}*{\gtrdot}$
\item Scan forward to closing, back to open:
${\lessdot}E*E{\gtrdot}$~is handle
\item Reduce: $E+E$
\item $\Rightarrow$ precedences correctly observed
\item (note: no global scanning; still shift-reduce like)
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Definition of $LR$ parser}
An LR parser has the following components
\begin{itemize}
\item Stack and input queue; stack will also contain states
\item Actions `shift', `reduce', `accept', `error'
\item Functions \n{Action} and \n{Goto}
\begin{itemize}
\item With input symbol~$a$ and state on top of the stack~$s$:
\item If \n{Action}$(a,s)$ is `shift', then $a$ and a new state
$s'=\n{Goto}(a,s)$ are pushed on the stack.
\item If \n{Action}$(a,s)$ is `reduce $A\rightarrow\beta$' where
$|\beta|=r$, then $2r$ symbols are popped from the stack, a new
state $s'=\n{Goto}(a,s'')$ is computed based on the newly exposed
state on the top of the stack, and $A$ and~$s'$ are pushed. The
input symbol~$a$ stays in the queue.
\end{itemize}
\end{itemize}
More powerful than simple shift/reduce; states much more complicated
}
\frame[containsverbatim]{
\frametitle{motivating example}
\begin{itemize}
\item Grammar
\begin{bnf}
E: E + E; E * E
\end{bnf}
input string $1+2*3+4$.
\item Define precedences:
\def\op{\mathop{\mathbf{op}}}
$\op(+)=1, \op(\times)=2$
\item Define states by the highest operator precedence on the stack; initially state~0
\item Transitions: push operator precedence, do not change state for numbers
\item Shift/reduce strategy: reduce if the precedence of the input is
  lower than that of the stack top
\end{itemize}
}
\frame[containsverbatim]{
\def\op{\mathop{\mathbf{op}}}
\footnotesize
\begin{tabbing}
1 $S_0$ + $S_1$ 2 $S_1$ * $S_2$ 3 $S_2$ \= $1+2*3+4$ \=\kill
\>$1+2*3+4$\> push symbol; highest precedence is 0\\
1 $S_0$\>$+2*3+4$\>highest precedence now becomes 1\\
1 $S_0$ + $S_1$\>$2*3+4$\\
1 $S_0$ + $S_1$ 2 $S_1$\>$*3+4$\>highest precedence becoming 2\\
1 $S_0$ + $S_1$ 2 $S_1$ * $S_2$\>$3+4$\\
1 $S_0$ + $S_1$ 2 $S_1$ * $S_2$ 3 $S_2$\>$+4$\>reduce because $\op(+){}<2$\\
1 $S_0$ + $S_1$ 6 $S_1$\>$+4$\>the highest exposed precedence is 1\\
1 $S_0$ + $S_1$ 6 $S_1$ + $S_1$\>$4$\\
1 $S_0$ + $S_1$ 6 $S_1$ + $S_1$ 4 $S_1$ \>\>at the end of the queue we reduce\\
1 $S_0$ + $S_1$ 10 $S_1$ \\
11
\end{tabbing}
}
\frame[containsverbatim]{
\frametitle{Parser states}
\begin{description}
\item[item] Grammar rule with location indicated.\\
From \parserule{A}{B C} items:
\parserule{A}{.B C}, \parserule{A}{B .C}, \parserule{A}{B C.}\\
(stack is left of dot, queue right)
\item[closure of an item] The smallest set that
\begin{itemize}
\item Contains that item;
\item If $I$ in closure and $I={}$\parserule{A}{$\alpha$ .B $\beta$}
with~\n{B} nonterminal, then $I$~contains all items \parserule{B}{.$\gamma$}.
\end{itemize}
\item[state] Set of items.
\item[follow] of~\n{A}: set of all terminals that can follow $A$'s~expansions
\end{description}
}
\frame[containsverbatim]{
\frametitle{Motivation: valid items}
\begin{itemize}
\item Recognized so far: $\alpha\beta_1$
\item Consider item \parserule{A}{$\beta_1$.$\beta_2$}
\item Item is called \emph{valid}, if rightmost derivation
\[ S\Rightarrow^*\alpha Aw\Rightarrow \alpha\beta_1\beta_2w \]
\item Case: $\beta_2=\epsilon$: then $A\rightarrow\beta_1$ is a
  handle: reduce
\item Case: $\beta_2\not=\epsilon$, so shift~$\beta_2$.
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{example of valid items}
\begin{itemize}
\item String~\n{E+T*} in grammar:
\begin{bnf}
E:E+T; T.
T:T*F; F.
F:(E); id
\end{bnf}
\item Derivations
\[ E\Rightarrow E+T\Rightarrow E+T*F \]
\[ E\Rightarrow E+T\Rightarrow E+T*F\Rightarrow E+T*(E) \]
\[ E\Rightarrow E+T\Rightarrow E+T*F\Rightarrow E+T*\mathrm{id} \]
give items \parserule{T}{T*.F} \parserule{F}{.(E)} \parserule{F}{.id}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{States and transitions}
\begin{itemize}
\item New start symbol~\n{S'}, added production \parserule{S'}{S}.
\item Starting state is closure of \parserule{S'}{.S}.
\item Transition $d(s,\n{X})$: the closure of
\[ \{ \parserule{A}{$\alpha$ X. $\beta$}
| \hbox{\parserule{A}{$\alpha$ .X $\beta$} is in $s$} \} \]
\end{itemize}
}
\frame{
\footnotesize
Grammar: $S\rightarrow(S)S\mid\epsilon$
%\begin{bnf}S:(S)S;$\epsilon$\end{bnf}
States (after adding \parserule{S'}{.S}):
\begin{enumerate}\setcounter{enumi}{-1}
\item<2-> $\{ \parserule{S'}{.S}, \parserule{S}{.(S)S}, \parserule{S}{.} \}$
\item<3-> $\{ \parserule{S'}{S.} \}$
\item<4-> $\{ \parserule{S}{(.S)S}, \parserule{S}{.(S)S}, \parserule{S}{.} \}$
\item<5-> $\{ \parserule{S}{(S.)S} \}$
\item<6-> $\{ \parserule{S}{(S).S}, \parserule{S}{.(S)S}, \parserule{S}{.} \}$
\item<7-> $\{ \parserule{S}{(S)S.} \}$
\end{enumerate}
with transitions $d(s,X)=\mathit{cl}(\{A\rightarrow\alpha X_\bullet\beta
\mid A\rightarrow\alpha_\bullet X\beta\in s\})$:
\begin{itemize}
\item<8->$d(0,S) = 1 $
\item<9->$d(0,'(') = 2 $
\item<10->$d(2,S) = 3 $
\item<11->$d(2,'(') = 2 $
\item<12->$d(3,')') = 4 $
\item<13->$d(4,S) = 5 $
\item<14->$d(4,'(') = 2$
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Stack handling}
\begin{tabbing}
\textbf{Loop}:\\
(1) \textbf{else} \=\kill
(1) \textbf{if} \>th\=e current state contains \parserule{S'}{S.}\\
\>\>accept the string\\
(2) \textbf{else} \>\textbf{if} the current state %
contains any other final item \parserule{A}{$\alpha$.}\\
\>\>pop all the tokens in $\alpha$ from the stack,\\
\>\>\hspace{20pt}along with the corresponding states; \\
\>\>let $s$ be the state left on top of the stack:\\
\>\>\hspace{20pt} push \n{A}, push \n{d($s$,A)}\\
(3) \textbf{else} \>\textbf{if} the current state contains any item %
\parserule{A}{$\alpha$ .x $\beta$},\\
\>\>$\qquad$ where x is the next input token\\
\>\>let $s$ be the state on top of the stack: %
push \n{x}, push \n{d($s$,x)}\\
(1) \=\kill
\>\textbf{else} report failure
\end{tabbing}
}
\subsectionframe{Ambiguity and conflicts}
\frame[containsverbatim]{
\frametitle{Shift/reduce conflict}
\begin{itemize}
\item Grammar for $2+5*3$:
\begin{bnf}
<expr>: <number>; <expr> + <expr>; <expr> $\times$ <expr>.
\end{bnf}
\item interpretations:
\hbox{%
\Tree [.* [.+ $2\quad$ $5\quad$ ] $3\quad$ ]
\Tree [.+ $2\quad$ [.* $5\quad$ $3\quad$ ] ]
}
\item Parse: reduce $2+5$ to \n{<expr> + <expr>},\\
then reduce to~\n{<expr>}, or~shift the~$\times$?
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{solutions}
\begin{itemize}
\item Reformulate the grammar as
\begin{bnf}
<expr>: <mulex>; <mulex> + <mulex>.
<mulex>: <term>; <term> $\times$ <term>.
<term>: number.
\end{bnf}
\item new parse:
\begin{quote}
\Tree [.expr [.mulex [.term 2 ] ] + [.mulex [.term 5 ] * [.term 3 ] ] ]
\end{quote}
\item Introduce precedence of operators.\\ Possibly more efficient if
large number of operators.
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{`Dangling else'}
\begin{itemize}
\item Consider the grammar
\begin{bnf}
<statement>: if <clause> then <statement>; if <clause> then <statement> else <statement>
\end{bnf}
\item string
\begin{quote}
\tt if c$_1$ then if c$_2$ then s$_1$ else s$_2$
\end{quote}
\item Interpretations:
\hbox{\tiny
\Tree [.S If Then [.S If Then ] Else ]
\hskip-1cm
\Tree [.S If Then [.S If Then Else ] ]
}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Reduce/reduce conflict}
\begin{itemize}
\item Grammar for \n{x y c}
\begin{bnf}
A : B c d ; E c f.
B : x y.
E : x y.
\end{bnf}
\item $LR(1)$ parser: shift \n{x y},\\ then reduce to \n{B} or~\n{E}?
\item $LR(2)$ parser: sees \n{d} or~\n{f}
\item An $LL$ parser: ambiguity not resolved by the first 3 tokens\\
an $LL(4)$ parser sees \n{d} or~\n{f}.
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{}
\begin{itemize}
\item Grammar for $x\,y\,c^n\,\{d|f\}$:
\begin{bnf}
A : B C d ; E C f.
B : x y .
E : x y .
C : c ; C c.
\end{bnf}
\item confusing for any $LR(n)$ or $LL(n)$ parser with a
fixed amount of look-ahead
\item rewrite:
\begin{bnf}
A : BorE c d ; BorE c f.
BorE : x y.
\end{bnf}
or (for an $LL(n)$ parser):
\begin{bnf}
A : BorE c tail.
tail : d ; f.
BorE : x y.
\end{bnf}
\end{itemize}
}
\subsectionframe{\yacc}
\frame[containsverbatim]{
\frametitle{\yacc\ and \lex}
\begin{itemize}
\item \lex\ produces tokens
\item \yacc\ analyzes sequences of tokens
\item lexer returns on recognizing a token
\item main program in \yacc\ code
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{File structure}
\begin{verbatim}
...definitions...
%%
...rules...
%%
...code...
\end{verbatim}
\begin{itemize}
\item Default main calls \n{yyparse}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Example: \yacc\ code header}
\footnotesize
File name \n{words.y}
\begin{verbatim}
%{
#include <stdlib.h>
#include <string.h>
int yylex(void);
#include "words.h"
int nwords=0;
#define MAXWORDS 100
char *words[MAXWORDS];
%}
%token WORD
%%
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{include file}
Generated by running \yacc:
\begin{verbatim}
%% cat words.h
#define WORD 257
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Example: \lex\ code}
\footnotesize
\begin{verbatim}
%{
#include "words.h"
int find_word(char*);
extern int yylval;
%}
%%
[a-zA-Z]+ {yylval = find_word(yytext);
return WORD;}
. ;
\n ;
%%
\end{verbatim}
}
\frame[containsverbatim]{
\footnotesize
\begin{verbatim}
text : /* empty */
     | text WORD {
         if ($2<0) printf("new word\n");
         else printf("matched word %d\n",$2);
       }
     ;
%%
int find_word(char *w)
{ int i;
for (i=0; i<nwords; i++)
if (strcmp(w,words[i])==0) return i;
words[nwords++] = strdup(w); return -1;
}
int main(void)
{
yyparse();
printf("there were %d unique words\n",nwords);
}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Running \lex\ and \yacc}
\begin{verbatim}
# create and compile yacc C file
yacc -d -t -o YACCFILE.c YACCFILE.y
cc -c -o YACCFILE.o YACCFILE.c
# create and compile lex C file
lex -t LEXFILE.l > LEXFILE.c
cc -c -o LEXFILE.o LEXFILE.c
# link together
cc YACCFILE.o LEXFILE.o -o YACCPROGRAM -ly -ll
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Make with suffix rules}
\footnotesize
\begin{verbatim}
# disable normal rules
.SUFFIXES:
.SUFFIXES: .l .y .o
# lex rules
.l.o :
lex -t $*.l > $*.c
cc -c $*.c -o $*.o
# yacc rules
.y.o :
if [ ! -f $*.h ] ; then touch $*.h ; fi
yacc -d -t -o $*.c $*.y
cc -c -o $*.o $*.c ;
rm $*.c
# link lines
lexprogram : $(LEXFILE).o
cc $(LEXFILE).o -o $(LEXFILE) -ll
yaccprogram : $(YACCFILE).o $(LEXFILE).o
cc $(YACCFILE).o $(LEXFILE).o -o $(YACCFILE) -ly -ll
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{\yacc\ definitions section}
\begin{itemize}
\item C code in between \verb+%{ ... %}+
\item Token definitions: the \lex\ return tokens
\item Associativity rules (later)
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Tokens}
\begin{itemize}
\item Definition: \verb+%token FOO+
\item In \n{.h} file: \verb+#define FOO 257+ (or so)
\item \lex\ code: \verb+return FOO+
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Returning values over the stack}
\begin{itemize}
\item \lex\ assigns to \n{yylval}
\item value is put on top of stack
\item if a \yacc\ rule is matched: \verb+$1, $2, $3+ are assigned\\
(as many as elements in rhs)
\item replace stack top: assign to \verb+$$+
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Calculator example: \lex\ code}
\footnotesize
\begin{verbatim}
%{
#include "calc1.h"
void yyerror(char*);
extern int yylval;
%}
%%
[ \t]+ ;
[0-9]+ {yylval = atoi(yytext);
return INTEGER;}
[-+*/] {return *yytext;}
"(" {return *yytext;}
")" {return *yytext;}
\n {return *yytext;}
. {char msg[25];
sprintf(msg,"%s <%s>","invalid character",yytext);
yyerror(msg);}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Calculator example: \yacc\ code}
\footnotesize
\begin{verbatim}
%{
int yylex(void);
#include "calc1.h"
%}
%token INTEGER
%%
program:
line program
| line
line:
expr '\n' { printf("%d\n",$1); }
| '\n'
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Calculator example: \yacc\ code, cont'd}
\footnotesize
\begin{verbatim}
expr:
expr '+' mulex { $$ = $1 + $3; }
| expr '-' mulex { $$ = $1 - $3; }
| mulex { $$ = $1; }
mulex:
mulex '*' term { $$ = $1 * $3; }
| mulex '/' term { $$ = $1 / $3; }
| term { $$ = $1; }
term:
'(' expr ')' { $$ = $2; }
| INTEGER { $$ = $1; }
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Calculator with variables}
\begin{itemize}
\item Simple case: single letter variables
\item more complicated: names
\item Extra rule: assignments
\item \lex\ returns
\begin{itemize}
\item \n{double} values \item \n{int} index of variable
\end{itemize}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Multiple return types}
\footnotesize
\begin{itemize}
\item Declare possible return types:
\begin{verbatim}
%union {int ival; double dval;}
\end{verbatim}
\item Connect types to return tokens:
\begin{verbatim}
%token <ival> NAME
%token <dval> NUMBER
\end{verbatim}
\item The types of non-terminals need to be given:
\begin{verbatim}
%type <dval> expr
%type <dval> mulex
%type <dval> term
\end{verbatim}
\item In \n{.h}~file will now have
\begin{verbatim}
#define NAME 258
#define NUMBER 259
typedef union {int ival; double dval;} YYSTYPE;
extern YYSTYPE yylval;
\end{verbatim}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Multiple return types: \lex\ code}
\begin{verbatim}
[ \t]+ ;
(([0-9]+(\.[0-9]*)?)|([0-9]*\.[0-9]+)) {
yylval.dval = atof(yytext);
return DOUBLE;}
[-+*/=] {return *yytext;}
"(" {return *yytext;}
")" {return *yytext;}
[a-z] {yylval.ivar = *yytext - 'a';
return NAME;} /* more later */
\n {return *yytext;}
. {char msg[25];
sprintf(msg,"%s <%s>","invalid character",yytext);
yyerror(msg);}
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Example: calculator with variables}
Tokens are \n{double} numbers, or variables (\n{int} index in table)
\begin{verbatim}
%{
#define NVARS 100
char *vars[NVARS]; double vals[NVARS]; int nvars=0;
%}
%union { double dval; int ivar; }
%token <dval> DOUBLE
%token <ivar> NAME
%type <dval> expr
%type <dval> mulex
%type <dval> term
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Symbol table handling}
\begin{itemize}
\item \lex\ parses variable names:
\begin{verbatim}
[a-z][a-z0-9]* {
yylval.ivar = varindex(yytext);
return NAME;}
\end{verbatim}
\item names are dynamically stored:
\begin{verbatim}
int varindex(char *var)
{
int i;
for (i=0; i<nvars; i++)
if (strcmp(var,vars[i])==0) return i;
vars[nvars] = strdup(var);
return nvars++;
}
\end{verbatim}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Arithmetic}
Largely as before:
\begin{verbatim}
expr:
expr '+' mulex { $$ = $1 + $3; }
| expr '-' mulex { $$ = $1 - $3; }
| mulex { $$ = $1; }
mulex:
mulex '*' term { $$ = $1 * $3; }
| mulex '/' term { $$ = $1 / $3; }
| term { $$ = $1; }
term:
'(' expr ')' { $$ = $2; }
| NAME { $$ = vals[$1]; }
| DOUBLE { $$ = $1; }
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Assignments}
\begin{verbatim}
line:
expr '\n' { printf("%g\n",$1); }
| NAME '=' expr '\n' { vals[$1] = $3; }
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Operator precedence and associativity}
Increasing precedence order:
\begin{verbatim}
%left '+' '-'
%left '*' '/'
%right '^'
%%
expr:
    expr '+' expr
  | expr '-' expr
  | expr '*' expr
  | expr '/' expr
  | expr '^' expr
  | number
  ;
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Unary operators}
Declare non-associative;\\
indicate presence in rule
\begin{verbatim}
%left '-' '+'
%nonassoc UMINUS
%%
expression : expression '+' expression
| expression '-' expression
| '-' expression %prec UMINUS
\end{verbatim}
}
\frame[containsverbatim]{
\frametitle{Error handling}
\footnotesize
\begin{itemize}
\item Default: \n{yyerror} prints \n{syntax error}
\item Better:\\
\lex\ code:
\begin{verbatim}
\n lineno++;
\end{verbatim}
\yacc\ code:
\begin{verbatim}
void yyerror(char *s)
{
printf("Parsing failed in line %d because of %s\n",lineno,s);
return;
}
\end{verbatim}
\item Your own error messages:
\begin{verbatim}
expr : name '[' name ']'
        {if (!is_array($1)) yyerror("array name expected");}
\end{verbatim}
\end{itemize}
}
\frame[containsverbatim]{
\frametitle{Error recovery}
\begin{itemize}
\item Use of \n{error} token:
\begin{verbatim}
foo : bar baz
    | error baz { printf("Hope for the best\n"); }
    ;
\end{verbatim}
\end{itemize}
}
\end{document}
|
{"hexsha": "9928d7a3543550607d11432c4008c5c0ee9c4018", "size": 38265, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "slides/parsing.tex", "max_stars_repo_name": "wvqusrai/the-science-of-tex-and-latex", "max_stars_repo_head_hexsha": "a96fd5cd0f7a6b9208675ba38ddcaec0264a9e31", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-07T08:21:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T08:21:41.000Z", "max_issues_repo_path": "slides/parsing.tex", "max_issues_repo_name": "wvqusrai/the-science-of-tex-and-latex", "max_issues_repo_head_hexsha": "a96fd5cd0f7a6b9208675ba38ddcaec0264a9e31", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slides/parsing.tex", "max_forks_repo_name": "wvqusrai/the-science-of-tex-and-latex", "max_forks_repo_head_hexsha": "a96fd5cd0f7a6b9208675ba38ddcaec0264a9e31", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4610668302, "max_line_length": 88, "alphanum_fraction": 0.6584607344, "num_tokens": 13046}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import Interpreter
from PIL import Image
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Rock, paper and scissors')
parser.add_argument('--filename', type=str, help='Specify the filename', required=True)
parser.add_argument('--model_path', type=str, help='Specify the model path', required=True)
args = parser.parse_args()
filename = args.filename
model_path = args.model_path
labels = ['Rock', 'Paper', 'Scissors']
# Load TFLite model and allocate tensors
interpreter = Interpreter(model_path=model_path)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Read image with Pillow
img = Image.open(filename).convert('RGB')
# Get input size
input_shape = input_details[0]['shape']
size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3]
# Preprocess image
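# Note (added): input_shape gives (height, width) while PIL's resize
# expects (width, height); these coincide assuming a square model input.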
img = img.resize(size)
img = np.array(img, dtype=np.float32)
img = img / 255.
# Add a batch dimension
input_data = np.expand_dims(img, axis=0)
# Point the data to be used for testing and run the interpreter
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# Obtain results and print the predicted category
predictions = interpreter.get_tensor(output_details[0]['index'])
predicted_label = np.argmax(predictions)
print(labels[predicted_label])
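# A hypothetical invocation (script and file names are placeholders):
#   python classify.py --filename hand.jpg --model_path model.tflite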
|
{"hexsha": "6a672a4e442f6e37b0dac6adb421f03934a13267", "size": 2044, "ext": "py", "lang": "Python", "max_stars_repo_path": "practice/courses/dlaicourse-master/TensorFlow Deployment/Course 2 - TensorFlow Lite/Week 4/Exercise/TFLite_Week4_Exercise_Answer.py", "max_stars_repo_name": "ItamarRocha/AI", "max_stars_repo_head_hexsha": "134e4e39f7034657472d7996ce70f37ff7a6e74b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3266, "max_stars_repo_stars_event_min_datetime": "2017-08-06T16:51:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:34:24.000Z", "max_issues_repo_path": "deep-learning/Tensorflow-2.x/Browser-Based-Models/TensorFlow Deployment/Course 2 - TensorFlow Lite/Week 4/Exercise/TFLite_Week4_Exercise_Answer.py", "max_issues_repo_name": "Yasin-Shah/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials", "max_issues_repo_head_hexsha": "243a2a744ced81b69438e08e981249d7629a1f03", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 150, "max_issues_repo_issues_event_min_datetime": "2017-08-28T14:59:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:21:35.000Z", "max_forks_repo_path": "deep-learning/Tensorflow-2.x/Browser-Based-Models/TensorFlow Deployment/Course 2 - TensorFlow Lite/Week 4/Exercise/TFLite_Week4_Exercise_Answer.py", "max_forks_repo_name": "Yasin-Shah/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials", "max_forks_repo_head_hexsha": "243a2a744ced81b69438e08e981249d7629a1f03", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1449, "max_forks_repo_forks_event_min_datetime": "2017-08-06T17:40:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:03:24.000Z", "avg_line_length": 32.9677419355, "max_line_length": 91, "alphanum_fraction": 0.7725048924, "include": true, "reason": "import numpy", "num_tokens": 447}
|
*###[ ffxdb1:
subroutine ffxdb1(cdb1, p, m1, m2, ier)
***#[*comment:***********************************************************
* *
* DB1 function (derivative of B1) *
* *
* algorithm adapted from Ansgar Denner's bcanew.f *
* *
***#]*comment:***********************************************************
* #[ declarations:
implicit none
*
* arguments
*
DOUBLE COMPLEX cdb1
DOUBLE PRECISION p, m1, m2
integer ier
DOUBLE COMPLEX ffpvf, ffypvf
external ffpvf, ffypvf
DOUBLE COMPLEX xp, xm, yp, ym, r
include 'ff.h'
logical initir
save initir
data initir /.FALSE./
if(abs(p) .gt. precx*(m1 + m2)) then
* IR divergent case
if(m2 .eq. 0 .and. p .eq. m1) then
if(.not. initir) then
initir = .TRUE.
print *, "ffxdb1: IR divergent B1', ",
+ "using cutoff ", lambda2
endif
cdb1 = .5D0*(3 + log(lambda2/p))/p
return
endif
call ffroots(p, m1, m2, xp, xm, yp, ym, r)
if(abs(xp - xm) .gt. sqrt(precx)*abs(xp + xm)) then
cdb1 = (ffypvf(2, xp, yp) - ffypvf(2, xm, ym))/r
else if(abs(xp) .gt. 10) then
cdb1 = DBLE( (2/3D0 +
+ (2 - 3*xp)*ffpvf(3, xp, yp))/xp**2 )/p
else if(abs(yp) .gt. precx) then
cdb1 = DBLE( (3/2D0 +
+ (2 - 3*xp)*ffpvf(1, xp, yp)) )/p
else
call fferr(101, ier)
cdb1 = 999D300
endif
* zero momentum case
else if(abs(m1 - m2) .gt. precx*(m1 + m2)) then
xm = DCMPLX(1D0, -1D-5*precx)*m1/(m1 - m2)
ym = DCMPLX(1D0, -1D-5*precx)*m2/(m2 - m1)
if(abs(xm) .lt. 10) then
cdb1 = -(1/3D0 + ffypvf(2, xm, ym))/(m1 - m2)
else
cdb1 = -(1/3D0 + ffypvf(3, xm, ym))/m1
endif
else
cdb1 = -1/12D0/m1
endif
end
*###[ ffxdb11:
subroutine ffxdb11(cdb11, p, m1, m2, ier)
***#[*comment:***********************************************************
* *
* DB11 function (derivative of B11) *
* *
***#]*comment:***********************************************************
* #[ declarations:
implicit none
*
* arguments
*
DOUBLE COMPLEX cdb11
DOUBLE PRECISION p, m1, m2
integer ier
DOUBLE COMPLEX ffpvf, ffypvf
external ffpvf, ffypvf
DOUBLE COMPLEX xp, xm, yp, ym, r
include 'ff.h'
if(abs(p) .gt. precx*(m1 + m2)) then
call ffroots(p, m1, m2, xp, xm, yp, ym, r)
if(abs(xp - xm) .gt. sqrt(precx)*abs(xp + xm)) then
cdb11 = (ffypvf(3, xm, ym) - ffypvf(3, xp, yp))/r
else if(abs(xp) .gt. 10) then
cdb11 = DBLE( (-3/4D0 +
+ (4*xp - 3)*ffpvf(4, xp, yp))/xp**2 )/p
else if(abs(yp) .gt. precx) then
cdb11 = DBLE( (-4/3D0 +
+ (4*xp - 3)*ffpvf(2, xp, yp))/p )
else
call fferr(102, ier)
cdb11 = 999D300
endif
* zero momentum case
else
call fferr(102, ier)
cdb11 = 999D300
endif
end
*###[ ffroots
subroutine ffroots(p, m1, m2, xp, xm, yp, ym, r)
***#[*comment:***********************************************************
* *
* roots of quadratic equation *
*	p*x^2 + (m2 - m1 - p)*x + m1 - eps =				*
* p*(x - xp)*(x - xm) = p*(x - 1 + yp)*(x - 1 + ym) *
* i.e. x[pm] = 1 - y[pm] *
* *
***#]*comment:***********************************************************
* #[ declarations:
implicit none
*
* arguments
*
DOUBLE PRECISION p, m1, m2
DOUBLE COMPLEX xp, xm, yp, ym, r
DOUBLE PRECISION q
include 'ff.h'
r = sqrt(dcmplx(p*(p - 2*(m1 + m2)) + (m1 - m2)**2))
q = p + m1 - m2
xp = (q + r)/2D0/p
xm = (q - r)/2D0/p
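* (added note) if the two roots differ in magnitude, the smaller one
* is recomputed from the product of roots, m1/p, to avoid the
* cancellation between q and r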
if(abs(xm) .gt. abs(xp)) then
xp = m1/p/xm
else if(abs(xp) .gt. abs(xm)) then
xm = m1/p/xp
endif
xp = xp + DCMPLX(0D0, abs(p*xp)/p*1D-5*precx)
xm = xm + DCMPLX(0D0, -abs(p*xm)/p*1D-5*precx)
q = p - m1 + m2
ym = (q + r)/2D0/p
yp = (q - r)/2D0/p
if(abs(ym) .gt. abs(yp)) then
yp = m2/p/ym
else if(abs(yp) .gt. abs(ym)) then
ym = m2/p/yp
endif
yp = yp + DCMPLX(0D0, -abs(p*yp)/p*1D-5*precx)
ym = ym + DCMPLX(0D0, abs(p*ym)/p*1D-5*precx)
end
*###[ ffpvf
DOUBLE COMPLEX function ffpvf(n, x, y)
***#[*comment:***********************************************************
* *
* Passarino-Veltman function f(n, x) *
* here third arg y = 1 - x *
* *
***#]*comment:***********************************************************
* #[ declarations:
implicit none
*
* arguments
*
integer n
DOUBLE COMPLEX x, y
integer m
DOUBLE COMPLEX xm
include 'ff.h'
if(abs(x) .lt. 10) then
if(n .eq. 0) then
ffpvf = -log(-y/x)
else if(x .eq. 0) then
ffpvf = -1D0/n
else
ffpvf = 0
xm = 1
do 10 m = 0, n - 1
ffpvf = ffpvf - xm/(n - m)
xm = xm*x
10 continue
ffpvf = ffpvf - xm*log(-y/x)
endif
else
ffpvf = 0
xm = 1
do 20 m = 1, 30
xm = xm/x
ffpvf = ffpvf + xm/(m + n)
if(abs(xm/ffpvf) .lt. precx) return
20 continue
endif
end
*###[ ffypvf
DOUBLE COMPLEX function ffypvf(n, x, y)
***#[*comment:***********************************************************
* *
* y*ffpvf(n, x, y) *
* *
***#]*comment:***********************************************************
* #[ declarations:
implicit none
*
* arguments
*
integer n
DOUBLE COMPLEX x, y
DOUBLE COMPLEX ffpvf
external ffpvf
if(abs(y) .eq. 0) then
ffypvf = 0
else
ffypvf = y*ffpvf(n, x, y)
endif
end
|
{"hexsha": "c7d68cb9b7a98a70e37a5f0b16fec2470ce03b63", "size": 5277, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Tauola1_1_5/SANC/LoopTools-2.1/ff/ffxdb1.f", "max_stars_repo_name": "klendathu2k/StarGenerator", "max_stars_repo_head_hexsha": "7dd407c41d4eea059ca96ded80d30bda0bc014a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-24T19:37:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T06:57:20.000Z", "max_issues_repo_path": "Tauola1_1_5/SANC/LoopTools-2.1/ff/ffxdb1.f", "max_issues_repo_name": "klendathu2k/StarGenerator", "max_issues_repo_head_hexsha": "7dd407c41d4eea059ca96ded80d30bda0bc014a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tauola1_1_5/SANC/LoopTools-2.1/ff/ffxdb1.f", "max_forks_repo_name": "klendathu2k/StarGenerator", "max_forks_repo_head_hexsha": "7dd407c41d4eea059ca96ded80d30bda0bc014a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-15T12:22:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T12:22:30.000Z", "avg_line_length": 21.5387755102, "max_line_length": 74, "alphanum_fraction": 0.4728065189, "num_tokens": 2094}
|
push!(LOAD_PATH,"../src/")
using Documenter
using DocumenterCitations
using Plots
using HOODESolver
ENV["GKSwstype"] = "100"
bib = CitationBibliography(joinpath(@__DIR__, "references.bib"))
makedocs(
bib,
sitename = "HOODESolver.jl",
authors="Yves Mocquard",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://ymocquar.github.io/HOODESolver.jl",
assets=String[],
),
modules = [HOODESolver],
pages = ["Documentation" => "index.md",
"Numerical Method" => "numerical_method.md",
"Quickstart" => "quickstart.md",
"Charged Particle" => "charged_particle.md",
"Future work" => "future_work.md",
"Types" => "types.md",
"Functions" => "functions.md"],
repo = "https://github.com/ymocquar/HOODESolver.jl/blob/{commit}{path}#{line}"
)
deploydocs(;
branch = "gh-pages",
devbranch = "master",
repo="github.com/ymocquar/HOODESolver.jl",
)
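# Typically run from the repository root, e.g.
#   julia --project=docs docs/make.jl
# (assuming the docs environment has the packages imported above installed).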
|
{"hexsha": "d9ff0819f47f608cfdfaf83007ccab6eda03f09c", "size": 1044, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "crouseilles/HOODESolver.jl", "max_stars_repo_head_hexsha": "c9087aaf35b7469839a47131c63489759e9da69e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "crouseilles/HOODESolver.jl", "max_issues_repo_head_hexsha": "c9087aaf35b7469839a47131c63489759e9da69e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "crouseilles/HOODESolver.jl", "max_forks_repo_head_hexsha": "c9087aaf35b7469839a47131c63489759e9da69e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 82, "alphanum_fraction": 0.5890804598, "num_tokens": 288}
|
#!/usr/bin/env python
import re
import numpy as np
import pandas as pd
from pd_validator.validator import *
def _fmt_inval_rpt(df, col, schema, invals):
"""
Format rpt rows for column values that violate
schema rules.
Parameters
----------
df : pd.DataFrame
col : str
pd.DataFrame column name
schema : dict
Validation rules
invals : dict
Invalid values by rule violated
Returns
-------
pd.DataFrame
Rpt rows for invalid column values
"""
rows = pd.DataFrame()
for k, v in invals.items():
for inval in v:
row = df[df[col] == inval]
# Add rpt cols
row['inval_line'] = (row.index+1).astype(int)
row['inval_col'] = col
row['inval_val'] = inval
row['err_msg'] = schema[col][k]['err_msg']
rows = rows.append(row, ignore_index=True)
return rows
def _fmt_missing_rpt(col, schema, missing):
"""
Format rpt rows for missing required column values.
Parameters
----------
col : str
Column name
schema : dict
Validation rules
missing : pd.DataFrame
Subset missing required column value
Returns
-------
pd.DataFrame
Rpt rows for missing required values
"""
missing['inval_line'] = (missing.index+1).astype(int)
missing['inval_col'] = col
missing['inval_val'] = np.nan
missing['err_msg'] = schema[col]['required']['err_msg']
return missing
def _fmt_col_rpt(col):
"""
Format rpt row for missing required column.
Parameters
----------
col : str
pd.DataFrame column name
Returns
-------
dict
Rpt row for missing required column
"""
    return {
        'inval_line': 'All',
        'inval_col': col,
        'inval_val': 'All',
        # use the same column name as the other rpt rows
        'err_msg': 'Column %s is missing' % (col)
    }
class Report(object):
"""
Formatted validation rpt for pd.DataFrame objects.
Attributes
----------
df : pd.DataFrame
schema : dict
Validation rules
Methods
-------
__call__
Get rpt of invalid and missing values/columns
    >>> schema = Schema(rules=rules)
    >>> rpt = Report(df=df, schema=schema())
    >>> rpt()
      col_1 col_2  inval_line inval_col inval_val  err_msg
      A     B      1          col_1     A          Invalid 'dtype': int required
      1     BC     2          col_2     BC         Invalid 'length': 1 required
"""
def __init__(self, df, schema):
self.df = df
self.schema = schema
def __call__(self):
rpt = pd.DataFrame()
for col in self.schema.keys():
try:
# get invalid vals in col
invals = self._get_invals(
col,
self.schema[col]['dtype']['rule'],
self.schema[col]['length']['rule'],
self.schema[col]['range']['rule'],
self.schema[col]['codes']['rule'],
self.schema[col]['regex']['rule']
)
# get missing vals in col if required
rule = self.schema[col]['required']['rule']
missing = self._get_missing(col, rule)
except KeyError:
# add missing col to rpt
rows = _fmt_col_rpt(col)
rpt = rpt.append(rows, ignore_index=True)
else:
if invals:
# add invalid rows to report
rows = _fmt_inval_rpt(self.df, col, self.schema, invals)
rpt = rpt.append(rows, ignore_index=True)
# `get_missing` returns df, else None
if isinstance(missing, pd.DataFrame):
# add missing rows to report
rows = _fmt_missing_rpt(col, self.schema, missing)
rpt = rpt.append(rows, ignore_index=True)
return rpt
def _get_invals(self, col, schema_dtype, schema_length=False,
schema_range=False, schema_codes=False,
schema_regex=False):
invals = {}
if self.df[col].dtype != schema_dtype:
invals['dtype'] = check_dtype(self.df, col, schema_dtype)
if schema_length:
invals['length'] = check_length(self.df, col, schema_length)
if schema_range:
invals['range'] = check_range(self.df, col, schema_range)
if schema_codes:
invals['codes'] = check_codes(self.df, col, schema_codes)
if schema_regex:
invals['regex'] = check_regex(self.df, col, schema_regex)
return invals
def _get_missing(self, col, schema_required=False):
if schema_required and self.df[col].isnull().values.any():
return check_missing(self.df, col)
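
# A minimal usage sketch (hypothetical; the rule-dict layout is inferred
# from the lookups in Report, so treat key names and messages as examples):
#
# schema = {'col_1': {
#     'dtype':    {'rule': 'int64', 'err_msg': "Invalid 'dtype': int64 required"},
#     'length':   {'rule': False,   'err_msg': ''},
#     'range':    {'rule': False,   'err_msg': ''},
#     'codes':    {'rule': False,   'err_msg': ''},
#     'regex':    {'rule': False,   'err_msg': ''},
#     'required': {'rule': True,    'err_msg': 'Missing required value'},
# }}
# rpt = Report(df=df, schema=schema)
# print(rpt())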
|
{"hexsha": "28e5bedba534e0ed025e6add295511d760f7c276", "size": 4919, "ext": "py", "lang": "Python", "max_stars_repo_path": "pd_validator/report.py", "max_stars_repo_name": "nrbontha/df-validator", "max_stars_repo_head_hexsha": "3d9b09bbc7f83f65b486bb84b91164b22f715297", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-06-10T20:46:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-21T01:53:41.000Z", "max_issues_repo_path": "pd_validator/report.py", "max_issues_repo_name": "nrbontha/df-validator", "max_issues_repo_head_hexsha": "3d9b09bbc7f83f65b486bb84b91164b22f715297", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-05T01:05:32.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T17:21:32.000Z", "max_forks_repo_path": "pd_validator/report.py", "max_forks_repo_name": "nrbontha/data_validator", "max_forks_repo_head_hexsha": "3d9b09bbc7f83f65b486bb84b91164b22f715297", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0264550265, "max_line_length": 81, "alphanum_fraction": 0.5364911567, "include": true, "reason": "import numpy", "num_tokens": 1098}
|
"""
The MIT License (MIT)
Copyright (c) 2021 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
# Using Keras Tokenizer for simplicity
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.text \
import text_to_word_sequence
from tensorflow.keras.preprocessing.sequence \
import pad_sequences
import numpy as np
import matplotlib.pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
EPOCHS = 20
BATCH_SIZE = 64
MAX_WORDS = 8
EMBEDDING_WIDTH = 4
# Load MNIST dataset.
trainset = MNIST(root='./pt_data', train=True, download=True)
testset = MNIST(root='./pt_data', train=False, download=True)
# Convert to numpy arrays to enable us to create a richer dataset.
train_images = trainset.data.numpy().astype(np.float32)
train_labels = trainset.targets.numpy()
test_images = testset.data.numpy().astype(np.float32)
test_labels = testset.targets.numpy()
# Standardize the data.
mean = np.mean(train_images)
stddev = np.std(train_images)
train_images = (train_images - mean) / stddev
test_images = (test_images - mean) / stddev
# Function to create question and answer text.
def create_question_answer(tokenizer, labels):
text = []
answers = np.zeros(len(labels))
for i, label in enumerate(labels):
question_num = i % 4
if question_num == 0:
text.append('lower half')
if label < 5:
answers[i] = 1.0
elif question_num == 1:
text.append('upper half')
if label >= 5:
answers[i] = 1.0
elif question_num == 2:
text.append('even number')
if label % 2 == 0:
answers[i] = 1.0
elif question_num == 3:
text.append('odd number')
if label % 2 == 1:
answers[i] = 1.0
text = tokenizer.texts_to_sequences(text)
text = pad_sequences(text).astype(np.int_)
answers = answers.reshape((len(labels), 1))
return text, answers
# Create second modality for training and test set.
vocabulary = ['lower', 'upper', 'half', 'even', 'odd', 'number']
tokenizer = Tokenizer(num_words=MAX_WORDS)
tokenizer.fit_on_texts(vocabulary)
train_text, train_answers = create_question_answer(tokenizer,
train_labels)
test_text, test_answers = create_question_answer(tokenizer,
test_labels)
# Create datasets.
trainset = TensorDataset(torch.from_numpy(train_images),
torch.from_numpy(train_text),
torch.from_numpy(train_labels),
torch.from_numpy(train_answers))
testset = TensorDataset(torch.from_numpy(test_images),
torch.from_numpy(test_text),
torch.from_numpy(test_labels),
torch.from_numpy(test_answers))
# Define model.
class MultiTaskModel(nn.Module):
def __init__(self):
super().__init__()
self.embedding_layer = nn.Embedding(MAX_WORDS, EMBEDDING_WIDTH)
        nn.init.uniform_(self.embedding_layer.weight, -0.05, 0.05) # Default init is N(0, 1).
self.lstm_layers = nn.LSTM(EMBEDDING_WIDTH, 8, num_layers=1, batch_first=True)
self.linear_layer = nn.Linear(784+8, 25)
self.relu_layer = nn.ReLU()
self.class_output_layer = nn.Linear(25, 10)
self.answer_output_layer = nn.Linear(25, 1)
def forward(self, inputs):
image_input = inputs[0]
text_input = inputs[1]
# Process textual data.
x0 = self.embedding_layer(text_input)
x0 = self.lstm_layers(x0)
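        # nn.LSTM returns (output, (h_n, c_n)); h_n[0], the final hidden
        # state of the single layer, is concatenated with the image below.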
# Process image data.
# Flatten the image.
x1 = image_input.view(-1, 784)
# Concatenate input branches and build shared trunk.
x = torch.cat((x0[1][0][0], x1), dim=1)
x = self.linear_layer(x)
x = self.relu_layer(x)
# Define two heads.
class_output = self.class_output_layer(x)
answer_output = self.answer_output_layer(x)
return [class_output, answer_output]
model = MultiTaskModel()
# Loss function and optimizer
optimizer = torch.optim.Adam(model.parameters())
loss_function0 = nn.CrossEntropyLoss()
loss_function1 = nn.BCEWithLogitsLoss()
# Training loop for multi-modal multi-task model.
# Transfer model to GPU.
model.to(device)
# Create dataloaders.
trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)
for i in range(EPOCHS):
model.train() # Set model in training mode.
class_train_loss = 0.0
class_train_correct = 0
answer_train_loss = 0.0
answer_train_correct = 0
train_batches = 0
for image_inputs, text_inputs, class_targets, answer_targets in trainloader:
# Move data to GPU.
image_inputs = image_inputs.to(device)
text_inputs = text_inputs.to(device)
class_targets = class_targets.to(device)
answer_targets = answer_targets.to(device)
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward pass.
outputs = model([image_inputs, text_inputs])
class_loss = loss_function0(outputs[0], class_targets)
answer_loss = loss_function1(outputs[1], answer_targets)
loss = 0.5*class_loss + 0.5*answer_loss
# Accumulate metrics.
_, indices = torch.max(outputs[0].data, 1)
class_train_correct += (indices == class_targets).sum().item()
answer_train_correct += ((outputs[1].data > 0.0) == answer_targets).sum().item()
train_batches += 1
class_train_loss += class_loss.item()
answer_train_loss += answer_loss.item()
# Backward pass and update.
loss.backward()
optimizer.step()
class_train_loss = class_train_loss / train_batches
class_train_acc = class_train_correct / (train_batches * BATCH_SIZE)
answer_train_loss = answer_train_loss / train_batches
answer_train_acc = answer_train_correct / (train_batches * BATCH_SIZE)
# Evaluate the model on the test dataset.
model.eval() # Set model in inference mode.
class_test_loss = 0.0
class_test_correct = 0
answer_test_loss = 0.0
answer_test_correct = 0
test_batches = 0
for image_inputs, text_inputs, class_targets, answer_targets in testloader:
image_inputs = image_inputs.to(device)
text_inputs = text_inputs.to(device)
class_targets = class_targets.to(device)
answer_targets = answer_targets.to(device)
outputs = model([image_inputs, text_inputs])
class_loss = loss_function0(outputs[0], class_targets)
answer_loss = loss_function1(outputs[1], answer_targets)
loss = 0.5*class_loss + 0.5*answer_loss
_, indices = torch.max(outputs[0].data, 1)
class_test_correct += (indices == class_targets).sum().item()
answer_test_correct += ((outputs[1].data > 0.0) == answer_targets).sum().item()
test_batches += 1
class_test_loss += class_loss.item()
answer_test_loss += answer_loss.item()
class_test_loss = class_test_loss / test_batches
class_test_acc = class_test_correct / (test_batches * BATCH_SIZE)
answer_test_loss = answer_test_loss / test_batches
answer_test_acc = answer_test_correct / (test_batches * BATCH_SIZE)
print(f'Epoch {i+1}/{EPOCHS} class loss: {class_train_loss:.4f} - answer loss: {answer_train_loss:.4f} - class acc: {class_train_acc:0.4f} - answer acc: {answer_train_acc:0.4f} - class val_loss: {class_test_loss:.4f} - answer val_loss: {answer_test_loss:.4f} - class val_acc: {class_test_acc:0.4f} - answer val_acc: {answer_test_acc:0.4f}')
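# A quick inference sketch (added; index 0 is an arbitrary test example):
model.eval()
with torch.no_grad():
    img = torch.from_numpy(test_images[0:1]).to(device)
    txt = torch.from_numpy(test_text[0:1]).to(device)
    class_logits, answer_logit = model([img, txt])
    print('digit:', class_logits.argmax(dim=1).item(),
          'answer:', (answer_logit > 0).item())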
|
{"hexsha": "0adb617c4b707520ac14f7806f9b2da67b64c4cf", "size": 9007, "ext": "py", "lang": "Python", "max_stars_repo_path": "pt_framework/c17e3_multi_modal_multi_task.py", "max_stars_repo_name": "jpgacrama/DeepLearning", "max_stars_repo_head_hexsha": "db0f7edc918d28d330f4926ef1961dbd01ec9012", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2021-07-17T16:36:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:39:02.000Z", "max_issues_repo_path": "pt_framework/c17e3_multi_modal_multi_task.py", "max_issues_repo_name": "jpgacrama/DeepLearning", "max_issues_repo_head_hexsha": "db0f7edc918d28d330f4926ef1961dbd01ec9012", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-17T03:52:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T05:50:10.000Z", "max_forks_repo_path": "pt_framework/c17e3_multi_modal_multi_task.py", "max_forks_repo_name": "jpgacrama/DeepLearning", "max_forks_repo_head_hexsha": "db0f7edc918d28d330f4926ef1961dbd01ec9012", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2021-08-11T09:00:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T19:19:33.000Z", "avg_line_length": 39.6784140969, "max_line_length": 344, "alphanum_fraction": 0.6852448096, "include": true, "reason": "import numpy", "num_tokens": 2076}
|
import pandas as pd
import seaborn as sns
import numpy as np
from indp import *
import os.path
import operator
import networkx as nx
from infrastructure import *
from indputils import *
import copy
from gurobipy import *
import itertools
import scipy
import sys
def run_judgment_call(params,layers,T=1,saveJC=True,print_cmd=True,saveJCModel=False,validate=False):
""" Solves an INDP problem with specified parameters using a decentralized hueristic called Judgment Call . Outputs to directory specified in params['OUTPUT_DIR'].
:param params: Global parameters.
:param layers: Layers to consider in the infrastructure network.
:param T: Number of time steps per analyses (1 for D-iINDP and T>1 for D-tdINDP)
:param saveJC: If true, the results are saved to files
:param print_cmd: If true, the results are printed to console
:param saveJCModel: If true, optimization models and their solutions are printed to file
:param validate: (Currently not used.)
"""
judgment_type = params["JUDGMENT_TYPE"]
auction_type = params["AUCTION_TYPE"]
valuation_type = params["VALUATION_TYPE"]
# Initialize failure scenario.
InterdepNet=None
if "N" not in params:
InterdepNet=initialize_network(BASE_DIR="../data/INDP_7-20-2015/",sim_number=params['SIM_NUMBER'],magnitude=params["MAGNITUDE"])
else:
InterdepNet=params["N"]
if "NUM_ITERATIONS" not in params:
params["NUM_ITERATIONS"]=1
num_iterations = params["NUM_ITERATIONS"]
v_r=params["V"]
if isinstance(v_r, (int, long)):
v_r = [v_r]
if auction_type:
output_dir = params["OUTPUT_DIR"]+'_L'+`len(layers)`+'_m'+`params["MAGNITUDE"]`+"_v"+`sum(v_r)`+'_auction_'+auction_type+'_'+valuation_type
else:
output_dir = params["OUTPUT_DIR"]+'_L'+`len(layers)`+'_m'+`params["MAGNITUDE"]`+"_v"+`sum(v_r)`+'_uniform_alloc'
Dindp_results={P:INDPResults() for P in layers}
Dindp_results_Real={P:INDPResults() for P in layers}
currentTotalCost = {}
if T == 1:
if auction_type:
print "\n--Running Judgment Call with type "+judgment_type +" with auction "+auction_type+ ' & valuation '+ valuation_type
else:
print "\n--Running Judgment Call with type "+judgment_type +" with uniform allocation "
if print_cmd:
print "Num iters=",params["NUM_ITERATIONS"]
# Initial calculations.
indp_results_initial=indp(InterdepNet,0,1,layers,controlled_layers=layers)
    # Initial costs are only saved to cost_#_sum.csv, not to cost_#_P.csv (to be corrected)
    # Initial components are not saved (to be corrected)
    # Components of all layers are not saved to components_#_sum.csv (to be corrected)
    # Actions of all layers are not saved to actions_#_sum.csv (to be corrected)
    # Percolation of all layers is not saved to percolation_#_sum.csv (to be corrected)
for P in layers:
Dindp_results[P].add_cost(0,'Total',0.0) #Add a zero entry for t=0 for each layer
Dindp_results_Real[P].add_cost(0,'Total',0.0) #Add a zero entry for t=0 for each layer
res_allocate = {}
PoA = {}
valuations={}
for i in range(num_iterations):
print "\n-Iteration "+`i`+"/"+`num_iterations-1`
v_r_applied = []
if auction_type:
res_allocate[i],PoA[i],valuations[i]=auction_resources(sum(v_r),params,
layers=layers,T=1,print_cmd=print_cmd,judgment_type=judgment_type,
auction_type=auction_type,valuation_type=valuation_type)
for key, value in res_allocate[i].items():
v_r_applied.append(len(value))
elif len(v_r)!=len(layers):
v_r_applied = [v_r[0]/len(layers) for x in layers]
for x in range(v_r[0]%len(layers)):
v_r_applied[x]+=1
res_allocate[i] = {P:[] for P in layers}
for P in layers:
res_allocate[i][P]=range(1,1+v_r_applied[P-1])
else:
v_r_applied = v_r
res_allocate[i] = {P:[] for P in layers}
for P in layers:
res_allocate[i][P]=range(1,1+v_r_applied[P-1])
functionality = {p:{} for p in layers}
uncorrectedResults = {}
if print_cmd:
print "Judgment: "
for P in layers:
if print_cmd:
print "Layer-%d"%(P)
negP=[x for x in layers if x != P]
functionality[P] = create_judgment_matrix(InterdepNet,T,negP,v_r_applied,
actions=None,judgment_type=judgment_type)
# Make decision based on judgments before communication
indp_results = indp(InterdepNet,v_r_applied[P-1],1,layers=layers,
controlled_layers=[P],functionality= functionality[P],
print_cmd=print_cmd)
# Save models for re-evaluation after communication
uncorrectedResults[P] = indp_results[1]
# Save results of decisions based on judgments
Dindp_results[P].extend(indp_results[1],t_offset=i+1)
# Save models to file
if saveJCModel:
save_INDP_model_to_file(indp_results[0],output_dir+"/Model",i+1,P)
# Modify network to account for recovery and calculate components.
apply_recovery(InterdepNet,Dindp_results[P],i+1)
Dindp_results[P].add_components(i+1,INDPComponents.calculate_components(indp_results[0],InterdepNet,layers=[P]))
# Re-evaluate judgments based on other agents' decisions
if print_cmd:
print "Re-evaluation: "
for P in layers:
if print_cmd:
print "Layer-%d"%(P)
indp_results_Real,realizations = Decentralized_INDP_Realized_Performance(InterdepNet,i+1,
uncorrectedResults[P],functionality= functionality[P],
T=1,layers=layers,controlled_layers=[P],
print_cmd=print_cmd,saveJCModel=saveJCModel)
Dindp_results_Real[P].extend(indp_results_Real[1],t_offset=i+1)
if saveJCModel:
save_INDP_model_to_file(indp_results_Real[0],output_dir+"/Model",i+1,P,suffix='Real')
output_dir_judgments= output_dir + '/judgments'
write_judgments_csv(output_dir_judgments,realizations,
sample_num=params["SIM_NUMBER"],
agent=P,time=i+1,suffix="")
# Calculate sum of costs
Dindp_results_sum = INDPResults()
Dindp_results_Real_sum = INDPResults()
cost_types = Dindp_results[1][0]['costs'].keys()
for i in range(num_iterations+1):
for cost_type in cost_types:
sumTemp = 0.0
sumTemp_Real = 0.0
if i==0:
sumTemp = indp_results_initial[1][0]['costs'][cost_type]
sumTemp_Real = indp_results_initial[1][0]['costs'][cost_type]
else:
for P in layers:
sumTemp += Dindp_results[P][i]['costs'][cost_type]
sumTemp_Real += Dindp_results_Real[P][i]['costs'][cost_type]
Dindp_results_sum.add_cost(i,cost_type,sumTemp)
Dindp_results_Real_sum.add_cost(i,cost_type,sumTemp_Real)
for P in layers:
for a in Dindp_results[P][i]['actions']:
Dindp_results_sum.add_action(i,a)
output_dir_auction = output_dir + '/auctions'
if auction_type:
write_auction_csv(output_dir_auction,res_allocate,PoA,valuations,sample_num=params["SIM_NUMBER"],suffix="")
else:
write_auction_csv(output_dir_auction,res_allocate,sample_num=params["SIM_NUMBER"],suffix="")
# Save results of D-iINDP run to file.
if saveJC:
output_dir_agents = output_dir + '/agents'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir_agents):
os.makedirs(output_dir_agents)
for P in layers:
Dindp_results[P].to_csv(output_dir_agents,params["SIM_NUMBER"],suffix=`P`)
Dindp_results_Real[P].to_csv(output_dir_agents,params["SIM_NUMBER"],suffix='Real_'+`P`)
Dindp_results_sum.to_csv(output_dir,params["SIM_NUMBER"],suffix='sum')
Dindp_results_Real_sum.to_csv(output_dir,params["SIM_NUMBER"],suffix='Real_sum')
else:
# td-INDP formulations. Includes "DELTA_T" parameter for sliding windows to increase
# efficiency.
# Edit 2/8/16: "Sliding window" now overlaps.
        print "td-INDP formulation (T>1) is not implemented in this version"
'''
This function computes the realized values of flow cost, unbalanced cost, and
demand deficit at the end of each step according to the what the other agent
actually decides (as opposed to according to the guess)
For output items, look at the description of Decentralized_INDP()
'''
def Decentralized_INDP_Realized_Performance(N,iteration,indp_results,functionality,
layers,T=1,controlled_layers=[1],
print_cmd=False,saveJCModel=False):
G_prime_nodes = [n[0] for n in N.G.nodes_iter(data=True) if n[1]['data']['inf_data'].net_id in layers]
G_prime = N.G.subgraph(G_prime_nodes)
interdep_nodes={}
for u,v,a in G_prime.edges_iter(data=True):
if a['data']['inf_data'].is_interdep and G_prime.node[v]['data']['inf_data'].net_id in controlled_layers:
if u not in interdep_nodes:
interdep_nodes[u]=[]
interdep_nodes[u].append(v)
list_interdep_nodes = interdep_nodes.keys()
functionality_realized = copy.deepcopy(functionality)
realizations = {t:{} for t in range(T)}
for t in range(T):
realCount = 0
for u, value in functionality[t].iteritems():
if u in list_interdep_nodes:
realCount += 1
vValues = [G_prime.node[x]['data']['inf_data'].functionality for x in interdep_nodes[u]]
realizations[t][realCount]={'vNames':interdep_nodes[u],'uName':u,
'uJudge':functionality[t][u],'uCorrected':False,'vValues':vValues}
if functionality[t][u]==1.0 and G_prime.node[u]['data']['inf_data'].functionality==0.0:
functionality_realized[t][u] = 0.0
realizations[t][realCount]['uCorrected'] = True
if print_cmd:
print 'Correct '+`u`+' to 0 (affect. '+`vValues`+')'
indp_results_Real = indp(N,v_r=0,T=1,layers=layers,controlled_layers=controlled_layers,
functionality=functionality_realized,
print_cmd=print_cmd)
for t in range(T):
costs = indp_results.results[t]['costs']
nodeCost=costs["Node"]
indp_results_Real[1][t]['costs']["Node"]=nodeCost
arcCost=costs["Arc"]
indp_results_Real[1][t]['costs']["Arc"]=arcCost
spacePrepCost=costs["Space Prep"]
indp_results_Real[1][t]['costs']["Space Prep"]=spacePrepCost
flowCost=indp_results_Real[1][t]['costs']["Flow"]
overSuppCost=indp_results_Real[1][t]['costs']["Over Supply"]
underSuppCost=indp_results_Real[1][t]['costs']["Under Supply"]
# Calculate total costs.
indp_results_Real[1][t]['costs']["Total"]=flowCost+arcCost+nodeCost+overSuppCost+underSuppCost+spacePrepCost
indp_results_Real[1][t]["Total no disconnection"]=spacePrepCost+arcCost+flowCost+nodeCost
return indp_results_Real,realizations
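# Added sketch (not in the original code): a tiny sanity check of the cost
# roll-up used in Decentralized_INDP_Realized_Performance above, where "Total"
# sums all six component costs and "Total no disconnection" drops the supply terms.
def _demo_cost_rollup():
    costs = {"Node": 3.0, "Arc": 1.0, "Space Prep": 0.5,
             "Flow": 2.0, "Over Supply": 0.0, "Under Supply": 4.0}
    total = sum(costs.values())  # 10.5
    total_no_disconnection = costs["Space Prep"]+costs["Arc"]+costs["Flow"]+costs["Node"]  # 6.5
    return total, total_no_disconnection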
def create_judgment_matrix(N,T,layers,v_r=[],actions=[],judgment_type="OPTIMISTIC"):
"""Creates a functionality map for input into the functionality parameter in the indp function.
:param N: An InfrastructureNetwork instance (created in infrastructure.py)
:param T: Number of timesteps to optimize over.
:param layers: Layer IDs of N included in the optimization.
:param actions: An array of actions from a previous optimization. Likely taken from an INDPResults variable 'indp_result[t]['actions']'.
:param judgment_type: If no actions are provided, assigns a default functionality. Options are: "OPTIMISTIC", "PESSIMISTIC" or ...
:returns: A functionality dictionary used for input into indp.
"""
functionality={}
G_prime_nodes = [n[0] for n in N.G.nodes_iter(data=True) if n[1]['data']['inf_data'].net_id in layers]
G_prime = N.G.subgraph(G_prime_nodes)
N_prime = [n for n in G_prime.nodes_iter(data=True) if n[1]['data']['inf_data'].functionality==0.0]
N_prime_nodes = [n[0] for n in G_prime.nodes_iter(data=True) if n[1]['data']['inf_data'].functionality==0.0]
for t in range(T):
functionality[t]={}
functional_nodes = []
        # Generate restoration probabilities and corresponding Bernoulli experiments
        # for demand-based and deterministic demand-based judgments.
        # The Bernoulli experiments (not the restoration probabilities) are updated for each t.
interdepSrc = []
detPriority = []
if judgment_type == 'DEMAND' or judgment_type == 'DET-DEMAND':
priorityList = demand_based_priority_List(N,layers)
if judgment_type == 'DET-DEMAND':
sortedpriorityList = sorted(priorityList.items(),
key=operator.itemgetter(1), reverse=True)
num_layers = 1 #len(layers)
if isinstance(v_r, (int, long)):
resCap = int(v_r/num_layers)
else:
resCap = int(sum(v_r)/num_layers)
for u,v,a in N.G.edges_iter(data=True):
if a['data']['inf_data'].is_interdep and u[1] in layers:
interdepSrc.append(u)
for i in sortedpriorityList:
if (i[0] in N_prime_nodes) and (len(detPriority)<(t+1)*resCap) and (i[0] in interdepSrc):
detPriority.append(i[0])
# Nodes that are judged/known to be functional for t_p<t
for t_p in range(t):
for key in functionality[t_p]:
if functionality[t_p][key]==1.0:
functional_nodes.append(key)
for n,d in G_prime.nodes_iter(data=True):
#print "layers=",layers,"n=",n
if d['data']['inf_data'].net_id in layers:
                # Undamaged nodes
if G_prime.has_node(n) and (n,d) not in N_prime:
functionality[t][n]=1.0
# Nodes that are judged/known to be functional for t_p<t
elif n in functional_nodes:
                    functionality[t][n]=1.0
# Judgments
else:
if judgment_type == "OPTIMISTIC":
functionality[t][n]=1.0
elif judgment_type == "PESSIMISTIC":
functionality[t][n]=0.0
elif judgment_type == "DEMAND":
functionality[t][n]=priorityList[n][1]
elif judgment_type == "DET-DEMAND":
if n in detPriority:
functionality[t][n]=1.0
else:
functionality[t][n]=0.0
elif judgment_type == "RANDOM":
functionality[t][n]=np.random.choice([0, 1], p=[0.5, 0.5])
elif judgment_type == "REALISTIC":
functionality[t][n]=d['data']['inf_data'].functionality
else:
if not n in functionality[t]:
functionality[t][n]=0.0
return functionality
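# Added illustrative sketch (not part of the original module): the default
# functionality each judgment type above assigns to a damaged dependee node.
def _demo_default_judgments(true_functionality=0.0):
    return {'OPTIMISTIC': 1.0,  # assume the dependee is already repaired
            'PESSIMISTIC': 0.0,  # assume it stays damaged
            'RANDOM': np.random.choice([0, 1], p=[0.5, 0.5]),
            'REALISTIC': true_functionality}  # use the true state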
'''
This function generates the priority list for the demand-based guess.
Here, an agent guesses that the (dependee) node in the
other network is repaired until the next time step with the probability that
is equal to the demand/supply value of the node divided by the maximum value
of demand/supply in the other network.
Also, based on the above probability, a guess is generated for the node in the
other network.
The output of this function is employed by guess_generator().
'''
def demand_based_priority_List(N,layers):
G_prime_nodes = {}
G_prime = {}
com={}
maxValues = {}
prob = {}
for P in layers:
com[P] = [0] #assuming single commodity for all layers
for l in com[P]:
G_prime_nodes[P] = [n[0] for n in N.G.nodes_iter(data=True) if n[1]['data']['inf_data'].net_id==P]
G_prime[P] = N.G.subgraph(G_prime_nodes[P])
maxValues[P,l,'Demand'] = min([n[1]['data']['inf_data'].demand for n in G_prime[P].nodes_iter(data=True)])
maxValues[P,l,'Supply'] = max([n[1]['data']['inf_data'].demand for n in G_prime[P].nodes_iter(data=True)])
for n in G_prime[P].nodes_iter(data=True):
if not n[0] in prob.keys():
p = []
for l in com[P]:
value = n[1]['data']['inf_data'].demand
if value>0:
p.append(value/maxValues[P,l,'Supply'])
elif value<=0:
p.append(value/maxValues[P,l,'Demand'])
                else:  # unreachable unless the demand value is NaN
                    print 'Unexpected demand value at node', n[0]
prob[n[0]] = [max(p),np.random.choice([0, 1], p=[1-max(p), max(p)])]
return prob
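# Hypothetical toy example (added) of the demand-based probability above: a
# supply node with demand 30 in a layer whose maximum supply is 50 is judged
# repaired with probability 30/50 = 0.6, followed by a Bernoulli draw.
def _demo_demand_based_probability(value=30.0, max_supply=50.0):
    p = value/max_supply
    return [p, np.random.choice([0, 1], p=[1-p, p])]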
def auction_resources(v_r,params,layers,T=1,print_cmd=True,judgment_type="OPTIMISTIC",auction_type="MDA",valuation_type='DTC_uniform'):
""" allocate resources based on different types of auction and valuatoin.
:param auction_type: Type of the auction: MDA(Multiunit Descending (First-price, Dutch) auction), MAA(Multiunit Ascending (Second-price, English) auction), MCA(Multiunit Combinatorial auction).
    :param valuation_type: Type of the valuation: DTC (Differential Total Cost), DTC_uniform (DTC with uniform distribution), MDDN (Max Demand Damaged Nodes).
"""
# Initialize failure scenario.
InterdepNet=None
if "N" not in params:
InterdepNet=initialize_network(BASE_DIR="../data/INDP_7-20-2015/",sim_number=params['SIM_NUMBER'],magnitude=params["MAGNITUDE"])
else:
InterdepNet=params["N"]
if "NUM_ITERATIONS" not in params:
params["NUM_ITERATIONS"]=1
num_iterations = params["NUM_ITERATIONS"]
#Compute Valuations
if print_cmd:
print "Compute Valuations (" + valuation_type + ")"
valuation, optimal_valuation = compute_valuations(v_r,InterdepNet,layers=layers,
T=1,print_cmd=print_cmd,judgment_type=judgment_type,
valuation_type=valuation_type)
#Auctioning
if print_cmd:
print "Auction (" + auction_type + ")"
resource_allocation = {P:[] for P in layers}
PoA = {}
PoA['optimal'] = optimal_valuation
PoA['winner'] = []
sum_valuation = 0
if auction_type=="MDA":
cur_valuation = {v+1:{} for v in range(v_r)}
for v in range(v_r):
if print_cmd:
print "Resource-%d"%(v+1),
for P in layers:
cur_valuation[v+1][P]= valuation[P][len(resource_allocation[P])]
winner = max(cur_valuation[v+1].iteritems(), key=operator.itemgetter(1))[0]
PoA['winner'].append(cur_valuation[v+1][winner])
# if cur_valuation[v+1][winner]==0:
# for x in layers:
# if len(resource_allocation[x])==0:
# winner = x
# break
## if print_cmd:
# print "Player %d wins (generously)!" % winner
# sum_valuation += cur_valuation[v+1][winner]
# resource_allocation[winner].append(v+1)
if cur_valuation[v+1][winner]>0:
if print_cmd:
print "Player %d wins!" % winner
sum_valuation += cur_valuation[v+1][winner]
resource_allocation[winner].append(v+1)
else:
if print_cmd:
print "No auction winner!"
if auction_type=="MAA":
all_valuations = []
for p,value in valuation.items():
all_valuations.extend(value)
all_valuations.sort()
Q = {0:v_r*len(layers)}
q = {P:{0:v_r} for P in layers}
p = {0: 0.0}
t = 0
while Q[t]>v_r:
t += 1
p[t] = all_valuations[t-1]
Q[t] = 0.0
for P in layers:
q[P][t] = 0
for i in range(len(valuation[P])):
if valuation[P][i] > p[t]:
q[P][t] += 1
else:
break
Q[t] += q[P][t]
sum_valuation = p[t]*Q[t]
PoA['winner'] = [p[t] for v in range(int(Q[t]))]
if Q[t]<v_r:
for v in range(int(v_r-Q[t])):
PoA['winner'].append(0.0)
if print_cmd:
print "No auction winner for resource %d!" %(Q[t]+v+1)
for P in layers:
resource_allocation[P] = range(1,q[P][t]+1)
if auction_type=="MCA":
m=Model('auction')
m.setParam('OutputFlag',False)
# Add allocation variables and populate objective function.
for P in layers:
for v in range(v_r):
m.addVar(name='y_'+`v+1`+","+`P`,vtype=GRB.BINARY,
obj=sum([-valuation[P][vv] for vv in range(v+1)]))
m.update()
# Add constraints
numAllocatedResources=LinExpr()
for P in layers:
eachBidderAllocation=LinExpr()
for v in range(v_r):
numAllocatedResources+=m.getVarByName('y_'+`v+1`+","+`P`)*(v+1)
eachBidderAllocation+=m.getVarByName('y_'+`v+1`+","+`P`)
m.addConstr(eachBidderAllocation,GRB.LESS_EQUAL,1.0,"Bidder "+`P`+" allocation")
m.addConstr(numAllocatedResources,GRB.LESS_EQUAL,v_r,"Total number of resources")
# print "Solving..."
m.update()
m.optimize()
for P in layers:
for v in range(v_r):
if m.getVarByName('y_'+`v+1`+","+`P`).x==1:
resource_allocation[P] = range(1,v+2)
for vv in range(v+1):
PoA['winner'].append(valuation[P][vv])
sum_valuation = sum(PoA['winner'])
# m.write('model.lp')
# m.write('model.sol')
if sum_valuation!=0:
PoA['poa'] = optimal_valuation/sum_valuation
else:
PoA['poa'] = -10
return resource_allocation,PoA,valuation
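# Minimal toy run (added, not original) of the descending MDA logic above: in
# each round every bidder bids its next marginal valuation and the highest
# positive bid wins one resource.
def _demo_mda_toy():
    valuation = {1: [5.0, 3.0, 1.0], 2: [4.0, 2.0, 0.5]}
    allocation = {1: [], 2: []}
    for v in range(3):
        bids = {P: valuation[P][len(allocation[P])] for P in allocation}
        winner = max(bids, key=bids.get)
        if bids[winner] > 0:
            allocation[winner].append(v+1)
    return allocation  # {1: [1, 3], 2: [2]}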
def compute_valuations(v_r,InterdepNet,layers,T=1,print_cmd=True,judgment_type="OPTIMISTIC",valuation_type='DTC_uniform'):
""" computes bidders' valuations for different number of resources
:param valuation_type: Type of the valuation: DTC (Differential Total Cost), DTC_unifrom (DTC with uniform distribution), MDDN (Max Demand Damaged Nodes).
"""
""" Calculating current total cost """
currentTotalCost={}
for P in layers:
'''!!! check what v_r must be for det demand JC'''
indp_results = indp(InterdepNet,v_r=0,T=1,layers=layers,
controlled_layers=[P])
currentTotalCost[P] = indp_results[1][0]['costs']['Total']
""" Optimal Valuation """
indp_results = indp(InterdepNet,v_r=0,T=1,layers=layers,
controlled_layers=layers)
optimal_total_cost_current = indp_results[1][0]['costs']['Total']
indp_results = indp(InterdepNet,v_r=v_r,T=1,layers=layers,
controlled_layers=layers)
optimal_total_cost = indp_results[1][0]['costs']['Total']
optimal_valuation = optimal_total_cost_current - optimal_total_cost
valuation={P:[] for P in layers}
if T == 1: # For iterative INDP formulation
for P in layers:
if print_cmd:
print "Bidder-%d"%(P)
if valuation_type=='DTC':
for v in range(v_r):
indp_results={}
negP=[x for x in layers if x != P]
functionality = create_judgment_matrix(InterdepNet,T,negP,v_r,
actions=None,judgment_type=judgment_type)
'''!!! check what v_r must be for det demand JC'''
indp_results = indp(InterdepNet,v_r=v+1,
T=1,layers=layers,controlled_layers=[P],
functionality=functionality,
print_cmd=print_cmd)
newTotalCost = indp_results[1][0]['costs']['Total']
if indp_results[1][0]['actions']!=[]:
valuation[P].append(currentTotalCost[P]-newTotalCost)
currentTotalCost[P] = newTotalCost
else:
valuation[P].append(0.0)
elif valuation_type=='DTC_uniform':
for v in range(v_r):
indp_results={}
totalCostBounds = []
for jt in ["PESSIMISTIC","OPTIMISTIC"]:
negP=[x for x in layers if x != P]
functionality = create_judgment_matrix(InterdepNet,T,negP,v_r,
actions=None,judgment_type=jt)
'''!!! check what v_r must be for det demand JC'''
indp_results = indp(InterdepNet,v_r=v+1,
T=1,layers=layers,controlled_layers=[P],
functionality=functionality,
print_cmd=print_cmd)
totalCostBounds.append(indp_results[1][0]['costs']['Total'])
newTotalCost = np.random.uniform(min(totalCostBounds),
max(totalCostBounds),1)[0]
if indp_results[1][0]['actions']!=[]:
valuation[P].append(currentTotalCost[P]-newTotalCost)
else:
valuation[P].append(0.0)
elif valuation_type=='MDDN':
G_prime_nodes = [n[0] for n in InterdepNet.G.nodes_iter(data=True) if n[1]['data']['inf_data'].net_id==P]
G_prime = InterdepNet.G.subgraph(G_prime_nodes)
dem_damaged_nodes = [abs(n[1]['data']['inf_data'].demand) for n in G_prime.nodes_iter(data=True) if n[1]['data']['inf_data'].repaired==0.0]
dem_damaged_nodes_reverse_sorted = np.sort(dem_damaged_nodes)[::-1]
if len(dem_damaged_nodes)>=v_r:
valuation[P] = dem_damaged_nodes_reverse_sorted[0:v_r].tolist()
if len(dem_damaged_nodes)<v_r:
valuation[P] = dem_damaged_nodes_reverse_sorted[:].tolist()
for vv in range(v_r-len(dem_damaged_nodes)):
valuation[P].append(0.0)
else:
sys.exit( "Wrong valuation type!!!")
return valuation, optimal_valuation
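# Added sketch of the DTC valuation idea above: the v-th resource is worth the
# drop in total cost it buys relative to holding v-1 resources.
def _demo_dtc_valuation():
    total_cost = [100.0, 80.0, 65.0, 58.0]  # cost with 0, 1, 2, 3 resources
    return [total_cost[v]-total_cost[v+1] for v in range(3)]  # [20.0, 15.0, 7.0]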
def write_auction_csv(outdir,res_allocate,PoA=None,valuations=None,sample_num=1,suffix=""):
if not os.path.exists(outdir):
os.makedirs(outdir)
auction_file=outdir+"/auctions_"+`sample_num`+"_"+suffix+".csv"
header = "t,"
for key,value in res_allocate[0].items():
header += "P"+`key`+","
header += "PoA,optimal_val,winner_val"
if valuations:
for p,value in valuations[0].items():
header += ",bidder_"+`p`+"_valuation"
with open(auction_file,'w') as f:
f.write(header+"\n")
for t,value in res_allocate.items():
row = `t+1`+","
for p,pvalue in value.items():
row += `len(pvalue)`+','
if valuations:
row += `PoA[t]['poa']`+','+`PoA[t]['optimal']`+','
for pitem in PoA[t]['winner']:
row += `pitem`+"|"
for p,pvalue in valuations[t].items():
row += ','
for pitem in pvalue:
row += `pitem`+"|"
f.write(row+"\n")
def read_resource_allocation(df,combinations,optimal_combinations,ref_method='indp',suffix="",root_result_dir='../results/'):
    cols=['t','resource','decision_type','auction_type','valuation_type','sample','Magnitude','layer','no_resources','normalized_resource','PoA']
T = max(df.t.unique().tolist())
df_res = pd.DataFrame(columns=cols, dtype=int)
print '\nResource allocation\n',
for idx,x in enumerate(optimal_combinations):
compare_to_dir= root_result_dir+x[4]+'_results_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`
for t in range(T):
for P in range(1,x[2]+1):
df_res=df_res.append({'t':t+1,'resource':0.0,'normalized_resource':0.0,
'decision_type':x[4],'auction_type':'','valuation_type':'','sample':x[1],
'Magnitude':x[0],'layer':P,'no_resources':x[3],'PoA':1}, ignore_index=True)
# Read optimal resource allocation based on the actions
action_file=compare_to_dir+"/actions_"+`x[1]`+"_"+suffix+".csv"
if os.path.isfile(action_file):
with open(action_file) as f:
lines=f.readlines()[1:]
for line in lines:
                    data = line.strip().split(",")
t=int(data[0])
action=str.strip(data[1])
P = int(action[-1])
if '/' in action:
addition = 0.5
else:
addition = 1.0
row = (df_res['t']==t)&(df_res['decision_type']==x[4])&(df_res['sample']==x[1])&(df_res['Magnitude']==x[0])&(df_res['layer']==P)&(df_res['no_resources']==x[3])
df_res.loc[row,'resource']+=addition
df_res.loc[row,'normalized_resource']+=addition/float(x[3])
if idx%(len(combinations+optimal_combinations)/100+1)==0:
update_progress(idx+1,len(optimal_combinations)+len(combinations))
# Read resource allocation based on auction results
for idx,x in enumerate(combinations):
if x[5] in ['Uniform']:
outdir= root_result_dir+x[4]+'_results_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`+'_uniform_alloc/auctions'
else:
outdir= root_result_dir+x[4]+'_results_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`+'_auction_'+x[5]+'_'+x[6]+'/auctions'
auction_file=outdir+"/auctions_"+`x[1]`+"_"+suffix+".csv"
if os.path.isfile(auction_file):
with open(auction_file) as f:
lines=f.readlines()[1:]
for line in lines:
                    data = line.strip().split(",")
t=int(data[0])
for P in range(1,x[2]+1):
if x[5] in ['Uniform']:
poa = 0.0
else:
poa = float(data[x[2]+1])
df_res=df_res.append({'t':t,'resource':float(data[P]),
'normalized_resource':float(data[P])/float(x[3]),
'decision_type':x[4],'auction_type':x[5],'valuation_type':x[6],'sample':x[1],
'Magnitude':x[0],'layer':P,'no_resources':x[3],'PoA':poa}, ignore_index=True)
if idx%(len(combinations+optimal_combinations)/100+1)==0:
update_progress(len(optimal_combinations)+idx+1,len(optimal_combinations)+len(combinations))
update_progress(len(optimal_combinations)+idx+1,len(optimal_combinations)+len(combinations))
cols=['decision_type','auction_type','valuation_type','sample','Magnitude','layer','no_resources','distance_to_optimal','norm_distance_to_optimal']
T = max(df.t.unique().tolist())
df_res_rel = pd.DataFrame(columns=cols, dtype=int)
print '\nRelative allocation\n',
for idx,x in enumerate(combinations+optimal_combinations):
# Construct vector of resource allocation of reference method
if x[4]!=ref_method:
vector_res_ref = {P:np.zeros(T) for P in range(1,x[2]+1)}
for P in range(1,x[2]+1):
for t in range(T):
vector_res_ref[P][t]= df_res.loc[(df_res['t']==t+1)&
(df_res['decision_type']==ref_method)&
(df_res['sample']==x[1])&(df_res['Magnitude']==x[0])&
(df_res['layer']==P)&(df_res['no_resources']==x[3]),'resource']
# Compute distance of resource allocation vectors
vector_res = {P:np.zeros(T) for P in range(1,x[2]+1)}
for P in range(1,x[2]+1):
row = (df_res['decision_type']==x[4])&(df_res['sample']==x[1])&(df_res['Magnitude']==x[0])&(df_res['layer']==P)&(df_res['no_resources']==x[3])&(df_res['auction_type']==x[5])&(df_res['valuation_type']==x[6])
for t in range(T):
vector_res[P][t] = df_res.loc[(df_res['t']==t+1)&row,'resource']
distance = np.linalg.norm(vector_res[P]-vector_res_ref[P]) #L2 norm
norm_distance = np.linalg.norm(vector_res[P]/float(x[3])-vector_res_ref[P]/float(x[3]))
# distance = sum(abs(vector_res[P]-vector_res_ref[P])) #L1 norm
# distance = 1-scipy.stats.pearsonr(vector_res[P],vector_res_ref[P])[0] # correlation distance
df_res_rel=df_res_rel.append({'decision_type':x[4],'auction_type':x[5],
'valuation_type':x[6],'sample':x[1],'Magnitude':x[0],'layer':P,'no_resources':x[3],
'distance_to_optimal':distance/float(vector_res[P].shape[0]),
'norm_distance_to_optimal':norm_distance/float(vector_res[P].shape[0])}, ignore_index=True)
if idx%(len(combinations+optimal_combinations)/100+1)==0:
update_progress(idx+1,len(combinations+optimal_combinations))
update_progress(idx+1,len(combinations+optimal_combinations))
return df_res,df_res_rel
def write_judgments_csv(outdir,realizations,sample_num=1,agent=1,time=0,suffix=""):
if not os.path.exists(outdir):
os.makedirs(outdir)
judge_file=outdir+'/judge_'+`sample_num`+"_agent"+`agent`+'_time'+`time`+'_'+suffix+".csv"
header = "no.,src node,src layer,src judge,if src corr.,dest Names,dest init. funcs"
with open(judge_file,'w') as f:
f.write(header+"\n")
for t,timeValue in realizations.iteritems():
if timeValue:
for c,Value in timeValue.iteritems():
row = `c`+','+`Value['uName']`+','+`Value['uJudge']`+','+`Value['uCorrected']`+','\
+`Value['vNames']`+','+`Value['vValues']`
f.write(row+'\n')
else:
print '<><><> No judgment by agent '+`agent`+' t:'+`time`+' step:'+`t`
def read_and_aggregate_results(combinations,optimal_combinations,suffixes,root_result_dir='../results/'):
columns = ['t','Magnitude','cost_type','decision_type','auction_type','valuation_type','no_resources','sample','cost','normalized_cost']
optimal_method = ['tdindp','indp','sample_indp_12Node']
agg_results = pd.DataFrame(columns=columns, dtype=int)
print "\nAggregating Results"
joinedlist = combinations + optimal_combinations
for idx,x in enumerate(joinedlist):
if x[4] in optimal_method:
full_suffix = '_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`
elif x[5]=='Uniform':
full_suffix = '_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`+'_uniform_alloc'
else:
full_suffix = '_L'+`x[2]`+'_m'+`x[0]`+'_v'+`x[3]`+'_auction_'+x[5]+'_'+x[6]
result_dir = root_result_dir+x[4]+'_results'+full_suffix
if os.path.exists(result_dir):
# Save all results to Pandas dataframe
sample_result = INDPResults()
for suf in suffixes:
if os.path.exists(result_dir+"/costs_" +`x[1]`+"_"+suf+".csv"):
sample_result=sample_result.from_csv(result_dir,x[1],suffix=suf)
            initial_cost = {}
            norm_cost = 0
            for t in sample_result.results:
                for c in sample_result.cost_types:
                    if t==0:
                        norm_cost = 1.0
                        initial_cost[c] = sample_result[t]['costs'][c]
                    elif initial_cost[c]!=0.0:
                        norm_cost = sample_result[t]['costs'][c]/initial_cost[c]
else:
norm_cost = -1.0
values = [t,x[0],c,x[4],x[5],x[6],x[3],x[1],
float(sample_result[t]['costs'][c]),norm_cost]
agg_results = agg_results.append(dict(zip(columns,values)), ignore_index=True)
if idx%(len(joinedlist)/100+1)==0:
update_progress(idx+1,len(joinedlist))
else:
sys.exit('Error: The combination or folder does not exist')
update_progress(idx+1,len(joinedlist))
return agg_results
def correct_tdindp_results(df,optimal_combinations):
# correct total cost of td-indp
print '\nCorrecting td-INDP Results\n',
tVector = df['t'].unique().tolist()
for t in tVector:
for idx,x in enumerate(optimal_combinations):
if x[4]=='tdindp':
rows = df[(df['t']==t)&(df['Magnitude']==x[0])&
(df['decision_type']=='tdindp')&(df['no_resources']==x[3])&
(df['sample']==x[1])]
if t!=int(tVector[-1]) and t!=0:
rowsNext = df[(df['t']==t+1)&(df['Magnitude']==x[0])&
(df['decision_type']=='tdindp')&(df['no_resources']==x[3])&
(df['sample']==x[1])]
nodeCost=rows[rows['cost_type']=='Node']['cost'].values
arcCost=rows[rows['cost_type']=='Arc']['cost'].values
flowCost=rowsNext[rowsNext['cost_type']=='Flow']['cost'].values
overSuppCost=rowsNext[rowsNext['cost_type']=='Over Supply']['cost'].values
underSuppCost=rowsNext[rowsNext['cost_type']=='Under Supply']['cost'].values
spacePrepCost=rows[rows['cost_type']=='Space Prep']['cost'].values
totalCost = flowCost+arcCost+nodeCost+overSuppCost+underSuppCost+spacePrepCost
df.loc[(df['t']==t)&(df['Magnitude']==x[0])&(df['decision_type']=='tdindp')&
(df['no_resources']==x[3])&(df['sample']==x[1])&
(df['cost_type']=='Total'),'cost'] = totalCost
initial_cost=df[(df['t']==0)&(df['Magnitude']==x[0])&(df['decision_type']=='tdindp')&
(df['no_resources']==x[3])&(df['sample']==x[1])&
(df['cost_type']=='Total')]['cost'].values
df.loc[(df['t']==t)&(df['Magnitude']==x[0])&(df['decision_type']=='tdindp')&
(df['no_resources']==x[3])&(df['sample']==x[1])&
(df['cost_type']=='Total'),'normalized_cost'] = totalCost/initial_cost
update_progress(t+1,len(tVector))
return df
def relative_performance(df,combinations,optimal_combinations,ref_method='indp',ref_at='',ref_vt='',cost_type='Total'):
columns = ['Magnitude','cost_type','decision_type','auction_type','valuation_type','no_resources','sample','Area','lambda_TC']
lambda_df = pd.DataFrame(columns=columns, dtype=int)
# Computing reference area for lambda
# Check if the method in optimal combination is the reference method #!!!
print '\nRef area calculation\n',
for idx,x in enumerate(optimal_combinations):
if x[4]==ref_method:
rows = df[(df['Magnitude']==x[0])&(df['decision_type']==ref_method)&
(df['sample']==x[1])&(df['cost_type']==cost_type)&
(df['auction_type']==ref_at)&(df['valuation_type']==ref_vt)&
(df['no_resources']==x[3])]
if not rows.empty:
area = np.trapz(rows.cost[:20],dx=1)
values = [x[0],cost_type,x[4],ref_at,ref_vt,x[3],x[1],area,'nan']
lambda_df = lambda_df.append(dict(zip(columns,values)), ignore_index=True)
if idx%(len(optimal_combinations)/100+1)==0:
update_progress(idx+1,len(optimal_combinations))
update_progress(idx+1,len(optimal_combinations))
    # Computing area and lambda
print '\nLambda calculation\n',
for idx,x in enumerate(combinations+optimal_combinations):
if x[4]!=ref_method:
# Check if reference area exists
cond = ((lambda_df['Magnitude']==x[0])&(lambda_df['decision_type']==ref_method)&
(lambda_df['auction_type']==ref_at)&(lambda_df['valuation_type']==ref_vt)&
(lambda_df['cost_type']==cost_type)&(lambda_df['sample']==x[1])&
(lambda_df['no_resources']==x[3]))
if not cond.any():
sys.exit('Error:Reference type is not here! for %s m %d|resource %d' %(x[4],x[0],x[3]))
            ref_area = float(lambda_df.loc[cond, 'Area'])
rows = df[(df['Magnitude']==x[0])&(df['decision_type']==x[4])&
(df['sample']==x[1])&(df['cost_type']==cost_type)&
(df['auction_type']==x[5])&(df['valuation_type']==x[6])&
(df['no_resources']==x[3])]
if not rows.empty:
area = np.trapz(rows.cost[:20],dx=1)
lambda_TC = 'nan'
if ref_area != 0.0 and area != 'nan':
lambda_TC = (ref_area-float(area))/ref_area
elif area == 0.0:
lambda_TC = 0.0
else:
pass
values = [x[0],cost_type,x[4],x[5],x[6],x[3],x[1],area,lambda_TC]
lambda_df = lambda_df.append(dict(zip(columns,values)), ignore_index=True)
else:
sys.exit('Error: No entry for %s %s %s m %d|resource %d,...' %(x[4],x[5],x[6],x[0],x[3]))
if idx%(len(combinations+optimal_combinations)/100+1)==0:
update_progress(idx+1,len(combinations+optimal_combinations))
update_progress(idx+1,len(combinations+optimal_combinations))
return lambda_df
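# Hypothetical illustration (added) of the lambda_TC measure above: areas under
# the reference and candidate total-cost curves via the trapezoidal rule, then
# the relative improvement; negative values mean worse than the reference.
def _demo_lambda_tc():
    ref_cost = np.array([10.0, 6.0, 3.0, 1.0])
    cost = np.array([10.0, 8.0, 5.0, 2.0])
    ref_area = np.trapz(ref_cost, dx=1)  # 14.5
    area = np.trapz(cost, dx=1)  # 19.0
    return (ref_area-area)/ref_area  # about -0.31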
def generate_combinations(database,mags,sample,layers,no_resources,decision_type,auction_type,valuation_type,listHDadd=None,synthetic_dir=None):
combinations = []
optimal_combinations = []
optimal_method = ['tdindp','indp','sample_indp_12Node']
print '\nCombination Generation\n',
idx=0
no_total = len(mags)*len(sample)
if database=='shelby':
if listHDadd:
listHD = pd.read_csv(listHDadd)
L = len(layers)
for m,s in itertools.product(mags,sample):
if listHDadd==None or len(listHD.loc[(listHD.set == s) & (listHD.sce == m)].index):
for rc in no_resources:
for dt,at,vt in itertools.product(decision_type,auction_type,valuation_type):
if (dt in optimal_method) and not [m,s,L,rc,dt,'',''] in optimal_combinations:
optimal_combinations.append([m,s,L,rc,dt,'',''])
elif (dt not in optimal_method) and (at not in ['Uniform']):
combinations.append([m,s,L,rc,dt,at,vt])
elif (dt not in optimal_method) and (at in ['Uniform']):
combinations.append([m,s,L,rc,dt,at,''])
idx+=1
update_progress(idx,no_total)
elif database=='synthetic':
# Read net configurations
        if synthetic_dir is None:
            sys.exit('Error: Provide the address of the synthetic database')
with open(synthetic_dir+'List_of_Configurations.txt') as f:
config_data = pd.read_csv(f, delimiter='\t',header=None)
for m,s in itertools.product(mags,sample):
config_param = config_data.iloc[m]
L = 2 #!!! int(config_param.loc[' No. Layers']) #!!!
no_resources = [int(config_param[5])] #!!! int(config_param.loc[' Resource Cap'])
for rc in no_resources:
for dt,at,vt in itertools.product(decision_type,auction_type,valuation_type):
if (dt in optimal_method) and not [m,s,L,rc,dt,'',''] in optimal_combinations:
optimal_combinations.append([m,s,L,rc,dt,'',''])
elif (dt not in optimal_method) and (at not in ['Uniform']):
combinations.append([m,s,L,rc,dt,at,vt])
elif (dt not in optimal_method) and (at in ['Uniform']):
combinations.append([m,s,L,rc,dt,at,''])
idx+=1
update_progress(idx,no_total)
else:
sys.exit('Error: Wrong database type')
return combinations,optimal_combinations
def update_progress(progress,total):
print '\r[%s] %1.1f%%' % ('#'*int(progress/float(total)*20), (progress/float(total)*100)),
sys.stdout.flush()
|
{"hexsha": "9f9bc12fdecde6bc96b659401d996e95a596ae5a", "size": 47129, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/Archive/Dindp_old_synthetic_nets.py", "max_stars_repo_name": "htalebiyan/Dec2py", "max_stars_repo_head_hexsha": "8c4181eb92d6e52aef8cc804c485865516cee200", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codes/Archive/Dindp_old_synthetic_nets.py", "max_issues_repo_name": "htalebiyan/Dec2py", "max_issues_repo_head_hexsha": "8c4181eb92d6e52aef8cc804c485865516cee200", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/Archive/Dindp_old_synthetic_nets.py", "max_forks_repo_name": "htalebiyan/Dec2py", "max_forks_repo_head_hexsha": "8c4181eb92d6e52aef8cc804c485865516cee200", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.1915836102, "max_line_length": 222, "alphanum_fraction": 0.5502132445, "include": true, "reason": "import numpy,import scipy,import networkx", "num_tokens": 11248}
|
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator, clone
from sklearn.linear_model import LogisticRegression
from mne.parallel import parallel_func
from nose.tools import assert_true
class _BaseEstimator(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def baseline(X, mode, tslice):
if X.shape[-1] > 0:
mean = np.mean(X[..., tslice], axis=-1)[..., None]
else:
mean = 0 # otherwise we get an ugly nan
    if mode == 'mean':
        X -= mean
    elif mode == 'logratio':
        X /= mean
        X = np.log10(X)  # a value of 1 means 10 times bigger
    elif mode == 'ratio':
        X /= mean
    elif mode == 'zscore':
        std = np.std(X[..., tslice], axis=-1)[..., None]
        X -= mean
        X /= std
    elif mode == 'percent':
        X -= mean
        X /= mean
    elif mode == 'zlogratio':
        X /= mean
        X = np.log10(X)
        std = np.std(X[..., tslice], axis=-1)[..., None]
        X /= std
return X
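# Minimal usage sketch (added, not part of the original module): z-score a toy
# epochs array against its first 5 samples. Note that baseline() modifies X in
# place, hence the copy.
def _demo_baseline():
    X = np.random.RandomState(0).randn(2, 3, 20)  # epochs x channels x times
    return baseline(X.copy(), 'zscore', slice(0, 5))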
class EpochsBaseliner(_BaseEstimator):
def __init__(self, tslice=None, mode='mean'):
self.mode = mode
self.tslice = slice(None) if tslice is None else tslice
assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
'percent', 'zlogratio'])
assert_true(isinstance(self.tslice, (slice, int)))
def transform(self, X):
return baseline(X, self.mode, self.tslice)
class TimeFreqBaseliner(_BaseEstimator):
def __init__(self, tslice=None, mode='mean'):
self.mode = mode
self.tslice = slice(None) if tslice is None else tslice
assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
'percent', 'zlogratio'])
def transform(self, X):
return baseline(X, self.mode, self.tslice)
class TimePadder(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, n_sample, value=0.):
self.n_sample = n_sample
assert_true(isinstance(self.n_sample, int))
self.value = value
assert_true(isinstance(value, (int, float)) or (value == 'median'))
def transform(self, X):
if self.value == 'median':
coefs = np.median(X, axis=2)
else:
coefs = self.value * np.ones(X.shape[:2])
coefs = np.tile(coefs, [self.n_sample, 1, 1]).transpose([1, 2, 0])
X = np.concatenate((coefs, X, coefs), axis=2)
return X
def inverse_transform(self, X):
X = X[:, :, self.n_sample:-self.n_sample]
return X
class TimeSelector(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, tslice):
self.tslice = tslice
assert_true(isinstance(self.tslice, (slice, int)))
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X):
X = X[:, :, self.tslice]
return X
class TimeFreqSelector(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, tslice=None, fslice=None):
self.tslice = slice(None) if tslice is None else tslice
self.fslice = slice(None) if fslice is None else fslice
assert_true(isinstance(self.tslice, (slice, int)))
assert_true(isinstance(self.fslice, (slice, int)))
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X):
X = X[:, :, :, self.tslice]
X = X[:, :, self.fslice, :]
return X
class MyXDawn(_BaseEstimator):
"""Wrapper for pyriemann Xdawn + robust.
Will eventually need to clean both MNE and pyriemann with refactorings"""
def __init__(self, n_filter=4, estimator='scm'):
from pyriemann.estimation import Xdawn
self.n_filter = n_filter
assert_true(isinstance(self.n_filter, int))
self.estimator = estimator
assert_true(isinstance(estimator, str))
self._xdawn = Xdawn(nfilter=n_filter, estimator=estimator)
def fit(self, X, y):
# only apply on channels who std > 0 across time on at least one trial
self.picks_ = np.where(np.mean(np.std(X, axis=2) ** 2, axis=0))[0]
self._xdawn.fit(X[:, self.picks_, :], y)
return self
def transform(self, X):
return self._xdawn.transform(X[:, self.picks_, :])
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
class SpatialFilter(_BaseEstimator):
def __init__(self, estimator):
self.estimator = estimator
assert_true(isinstance(estimator, TransformerMixin))
def fit(self, X, y=None):
n_epoch, n_chan, n_time = X.shape
# trial as time
X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
self.estimator.fit(X)
return self
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
n_epoch, n_chan, n_time = X.shape
# trial as time
X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
X = self.estimator.transform(X)
X = np.reshape(X.T, [-1, n_epoch, n_time]).transpose([1, 0, 2])
return X
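# Added sketch assuming scikit-learn's PCA as the spatial estimator: epochs are
# stacked along time so the filter is fit on samples of shape (n_chan,), and
# the output keeps the (n_epochs, n_components, n_times) layout.
def _demo_spatial_filter():
    from sklearn.decomposition import PCA
    X = np.random.RandomState(0).randn(5, 8, 20)
    return SpatialFilter(PCA(n_components=8)).fit_transform(X).shape  # (5, 8, 20)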
class Reshaper(_BaseEstimator):
"""Transpose, concatenate and/or reshape data.
Parameters
----------
concatenate : int | None
Reshaping feature dimension e.g. np.concatenate(X, axis=concatenate).
Defaults to None.
transpose : array of int, shape(1 + n_dims) | None
Reshaping feature dimension e.g. X.transpose(transpose).
Defaults to None.
reshape : array, shape(n_dims) | None
Reshaping feature dimension e.g. X.reshape(np.r_[len(X), shape]).
Defaults to -1 if concatenate or transpose is None, else defaults
to None.
"""
def __init__(self, reshape=None, transpose=None, concatenate=None,
verbose=False):
if (reshape is None) and (transpose is None) and (concatenate is None):
reshape = [-1]
self.reshape = reshape
self.transpose = transpose
self.concatenate = concatenate
self.verbose = verbose
def fit(self, X, y=None):
self.shape_ = X.shape[1:]
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def transform(self, X, y=None):
if self.transpose is not None:
X = X.transpose(self.transpose)
if self.concatenate:
X = np.concatenate(X, self.concatenate)
if self.reshape is not None:
X = np.reshape(X, np.hstack((X.shape[0], self.reshape)))
if self.verbose:
print(self.shape_, '->', (X.shape[1:]))
return X
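# Added usage sketch for Reshaper above: the default reshape=[-1] flattens the
# channel/time features of each epoch.
def _demo_reshaper():
    X = np.zeros((4, 3, 10))
    return Reshaper(reshape=[-1]).fit_transform(X).shape  # (4, 30)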
class LightTimeDecoding(_BaseEstimator):
def __init__(self, estimator=None, method='predict', n_jobs=1):
self.estimator = (LogisticRegression() if estimator is None
else estimator)
self.method = method
assert_true(self.method in ['predict', 'predict_proba'])
assert_true(hasattr(self.estimator, method))
self.n_jobs = n_jobs
assert_true(isinstance(self.n_jobs, int))
def fit_transform(self, X, y):
return self.fit(X, y).transform(X)
def fit(self, X, y):
self.estimators_ = list()
parallel, p_func, n_jobs = parallel_func(_fit, self.n_jobs)
estimators = parallel(
p_func(self.estimator, split, y)
for split in np.array_split(X, n_jobs, axis=2))
self.estimators_ = np.concatenate(estimators, 0)
return self
def transform(self, X):
parallel, p_func, n_jobs = parallel_func(_predict_decod, self.n_jobs)
X_splits = np.array_split(X, n_jobs, axis=2)
est_splits = np.array_split(self.estimators_, n_jobs)
y_pred = parallel(
p_func(est_split, x_split, self.method)
for (est_split, x_split) in zip(est_splits, X_splits))
if n_jobs > 1:
y_pred = np.concatenate(y_pred, axis=1)
else:
y_pred = y_pred[0]
return y_pred
def predict(self, X):
return self.transform(X)
def predict_proba(self, X):
return self.transform(X)
def _fit(estimator, X, y):
estimators_ = list()
for ii in range(X.shape[2]):
est = clone(estimator)
est.fit(X[:, :, ii], y)
estimators_.append(est)
return estimators_
def _predict_decod(estimators, X, method):
    n_sample, n_chan, n_time = X.shape
    y_pred = None  # allocated with the right shape at the first time point
for ii, est in enumerate(estimators):
if method == 'predict':
_y_pred = est.predict(X[:, :, ii])
elif method == 'predict_proba':
_y_pred = est.predict_proba(X[:, :, ii])
# init
if ii == 0:
y_pred = _init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
return y_pred
def _init_pred(y_pred, X):
n_sample, n_chan, n_time = X.shape
if y_pred.ndim == 2:
y_pred = np.zeros((n_sample, n_time, y_pred.shape[-1]))
else:
y_pred = np.zeros((n_sample, n_time))
return y_pred
class LightGAT(LightTimeDecoding):
def transform(self, X):
parallel, p_func, n_jobs = parallel_func(_predict_gat, self.n_jobs)
y_pred = parallel(
p_func(self.estimators_, x_split, self.method)
for x_split in np.array_split(X, n_jobs, axis=2))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def _predict_gat(estimators, X, method):
n_sample, n_chan, n_time = X.shape
for ii, est in enumerate(estimators):
X_stack = np.transpose(X, [1, 0, 2])
X_stack = np.reshape(X_stack, [n_chan, n_sample * n_time]).T
if method == 'predict':
_y_pred = est.predict(X_stack)
_y_pred = np.reshape(_y_pred, [n_sample, n_time])
elif method == 'predict_proba':
_y_pred = est.predict_proba(X_stack)
n_dim = _y_pred.shape[-1]
_y_pred = np.reshape(_y_pred, [n_sample, n_time, n_dim])
# init
if ii == 0:
y_pred = _init_pred_gat(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
return y_pred
def _init_pred_gat(y_pred, X, n_train):
n_sample, n_chan, n_time = X.shape
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_time, y_pred.shape[-1]))
else:
y_pred = np.zeros((n_sample, n_train, n_time))
return y_pred
class CustomEnsemble(TransformerMixin):
def __init__(self, estimators, method='predict'):
self.estimators = estimators
self.method = method
assert_true(method in ['predict', 'predict_proba'])
def fit(self, X, y=None):
for estimator in self.estimators:
estimator.fit(X, y)
return self
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
def transform(self, X):
all_Xt = list()
for estimator in self.estimators:
if self.method == 'predict':
Xt = estimator.predict(X)
elif self.method == 'predict_proba':
Xt = estimator.predict_proba(X)
all_Xt.append(Xt)
all_Xt = np.c_[all_Xt].T
return all_Xt
def get_params(self, deep=True):
return dict(estimators=self.estimators, method=self.method)
class GenericTransformer(_BaseEstimator):
def __init__(self, function, **fit_params):
self.function = function
self.fit_params = fit_params
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return self.function(X, **self.fit_params)
def fit_transform(self, X, y=None):
return self.transform(X, y)
class TimeEmbedder(_BaseEstimator):
def __init__(self, delays=2):
self.delays = delays
def transform(self, X, y=None):
if not isinstance(X, np.ndarray):
epochs = X
X = epochs._data
if isinstance(self.delays, int):
delays = range(1, self.delays)
else:
delays = self.delays
X2 = []
for x in X:
tmp = x
for d in delays:
tmp = np.r_[tmp, np.roll(x, d, axis=-1)]
X2.append(tmp)
X2 = np.array(X2)
return X2
def fit_transform(self, X, y=None):
return self.fit(X).transform(X, y)
class Windower(TransformerMixin, BaseEstimator):
"""To make sliding windows
Parameters
----------
size : int
The window size.
step : int
The window step.
vectorize : bool
Returns arrays or vector.
"""
def __init__(self, size=1, step=1, vectorize=False):
self.size = size
self.step = step
self.vectorize = vectorize
def fit(self, X, y=None):
"""Does nothing, for sklearn compatibility purposes
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
self : self
"""
if X.ndim != 3:
raise ValueError('expects 3D array')
return self
def transform(self, X, y=None):
"""Generate windows from X.
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
The transformed data. If vectorize is True, then shape is
(n_epochs, -1).
"""
Xt = list()
for time in range(0, X.shape[2] - self.size, self.step):
Xt.append(X[:, :, time:(time + self.size)])
Xt = np.transpose(Xt, [1, 2, 3, 0]) # trial chan window time
if self.vectorize:
Xt = Xt.reshape([len(Xt), -1, Xt.shape[-1]])
return Xt
def fit_transform(self, X, y=None):
"""Generate windows from X.
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
The transformed data. If vectorize is True, then shape is
(n_epochs, -1).
"""
return self.fit(X).transform(X)
def test_windower():
    # Smoke test: (n_epochs, n_channels, n_times) -> (n_epochs, n_channels, size, n_windows).
    Xt = Windower(3, 2, False).transform(np.zeros((2, 30, 100)))
    assert Xt.shape == (2, 30, 3, 49)
|
{"hexsha": "b47764e575cf6c12e504f8ca0e52b1b67a83a41d", "size": 14857, "ext": "py", "lang": "Python", "max_stars_repo_path": "jr/gat/transformers.py", "max_stars_repo_name": "kingjr/jr-tools", "max_stars_repo_head_hexsha": "8a4c9c42a9e36e224279566945e798869904c4c8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-01-21T22:41:28.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-07T12:55:18.000Z", "max_issues_repo_path": "jr/gat/transformers.py", "max_issues_repo_name": "kingjr/jr-tools", "max_issues_repo_head_hexsha": "8a4c9c42a9e36e224279566945e798869904c4c8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-12-12T14:25:47.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-07T18:57:42.000Z", "max_forks_repo_path": "jr/gat/transformers.py", "max_forks_repo_name": "kingjr/jr-tools", "max_forks_repo_head_hexsha": "8a4c9c42a9e36e224279566945e798869904c4c8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2016-03-15T17:34:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-15T00:31:14.000Z", "avg_line_length": 30.7598343685, "max_line_length": 79, "alphanum_fraction": 0.5807363532, "include": true, "reason": "import numpy", "num_tokens": 3736}
|
{-# OPTIONS --universe-polymorphism #-}
module Categories.Free where
open import Categories.Category
open import Categories.Free.Core
open import Categories.Free.Functor
open import Categories.Graphs.Underlying
open import Categories.Functor
using (Functor)
open import Graphs.Graph
open import Graphs.GraphMorphism
using (GraphMorphism; module GraphMorphism)
open import Data.Star
-- Exports from other modules:
-- Free₀, Free₁ and Free
open Categories.Free.Core public
using (Free₀)
open Categories.Free.Functor public
using (Free)
-- TODO:
-- Prove Free⊣Underlying : Adjunction Free Underlying
-- Define Adjunction.left and Adjunction.right as conveniences
-- (or whatever other names make sense for the hom-set maps
-- C [ F _ , _ ] → D [ _ , G _ ] and inverse, respectively)
-- Let Cata = Adjunction.left Free⊣Underlying
Cata : ∀{o₁ ℓ₁ e₁}{G : Graph o₁ ℓ₁ e₁}
{o₂ ℓ₂ e₂}{C : Category o₂ ℓ₂ e₂}
→ (F : GraphMorphism G (Underlying₀ C))
→ Functor (Free₀ G) C
Cata {G = G} {C = C} F = record
{ F₀ = F₀
; F₁ = F₁*
; identity = refl
; homomorphism = λ{X}{Y}{Z}{f}{g} → homomorphism {X}{Y}{Z}{f}{g}
; F-resp-≡ = F₁*-resp-≡
}
where
open Category C
open GraphMorphism F
open Equiv
open HomReasoning
open PathEquality using (ε-cong; _◅-cong_)
F₁* : ∀ {A B} → Free₀ G [ A , B ] → C [ F₀ A , F₀ B ]
F₁* ε = id
F₁* (f ◅ fs) = F₁* fs ∘ F₁ f
.homomorphism : ∀ {X Y Z} {f : Free₀ G [ X , Y ]} {g : Free₀ G [ Y , Z ]}
→ C [ F₁* (Free₀ G [ g ∘ f ]) ≡ C [ F₁* g ∘ F₁* f ] ]
homomorphism {f = ε} = sym identityʳ
homomorphism {f = f ◅ fs}{gs} =
begin
F₁* (fs ◅◅ gs) ∘ F₁ f
↓⟨ homomorphism {f = fs}{gs} ⟩∘⟨ refl ⟩
(F₁* gs ∘ F₁* fs) ∘ F₁ f
↓⟨ assoc ⟩
F₁* gs ∘ F₁* fs ∘ F₁ f
∎
.F₁*-resp-≡ : ∀ {A B} {f g : Free₀ G [ A , B ]} → Free₀ G [ f ≡ g ] → C [ F₁* f ≡ F₁* g ]
F₁*-resp-≡ {f = ε}{.ε} ε-cong = refl
F₁*-resp-≡ {f = f ◅ fs}{g ◅ gs} (f≈g ◅-cong fs≈gs) =
begin
F₁* fs ∘ F₁ f
↓⟨ F₁*-resp-≡ fs≈gs ⟩∘⟨ F-resp-≈ f≈g ⟩
F₁* gs ∘ F₁ g
∎
F₁*-resp-≡ {f = f ◅ fs}{ε} ()
|
{"hexsha": "acd1abd5288a6e060bb1dd458367fb0ca822a1ba", "size": 2205, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Categories/Free.agda", "max_stars_repo_name": "copumpkin/categories", "max_stars_repo_head_hexsha": "36f4181d751e2ecb54db219911d8c69afe8ba892", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 98, "max_stars_repo_stars_event_min_datetime": "2015-04-15T14:57:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T05:20:36.000Z", "max_issues_repo_path": "Categories/Free.agda", "max_issues_repo_name": "p-pavel/categories", "max_issues_repo_head_hexsha": "e41aef56324a9f1f8cf3cd30b2db2f73e01066f2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2015-05-23T06:47:10.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-09T16:31:40.000Z", "max_forks_repo_path": "Categories/Free.agda", "max_forks_repo_name": "p-pavel/categories", "max_forks_repo_head_hexsha": "e41aef56324a9f1f8cf3cd30b2db2f73e01066f2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2015-02-05T13:03:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T13:50:56.000Z", "avg_line_length": 30.625, "max_line_length": 93, "alphanum_fraction": 0.5523809524, "num_tokens": 870}
|
//from: "c:\cpp\boost_1_68_0\boost/spirit/home/x3\support\traits\attribute_of.hpp"
/*=============================================================================
Copyright (c) 2001-2014 Joel de Guzman
Copyright (c) 2013 Agustin Berge
http://spirit.sourceforge.net/
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(BOOST_SPIRIT_X3_ATTRIBUTE_OF_JAN_7_2012_0914AM)
#define BOOST_SPIRIT_X3_ATTRIBUTE_OF_JAN_7_2012_0914AM
#include "../../support/utility/sfinae.hpp"
#include <boost/mpl/identity.hpp>
#include <boost/utility/enable_if.hpp>
namespace boost { namespace spirit { namespace x3 { namespace traits
{
///////////////////////////////////////////////////////////////////////////
// Get the attribute type of a component. By default, this gets the
// Component's attribute_type typedef or instantiates a nested attribute
// metafunction. Components may specialize this if such an attribute_type
// is not readily available (e.g. expensive to compute at compile time).
///////////////////////////////////////////////////////////////////////////
template <typename Component, typename Context, typename Enable = void>
struct attribute_of;
namespace detail
{
template <typename Component, typename Context, typename Enable = void>
struct default_attribute_of;
template <typename Component, typename Context>
struct default_attribute_of<Component, Context,
typename disable_if_substitution_failure<
typename Component::attribute_type>::type>
: mpl::identity<typename Component::attribute_type> {};
template <typename Component, typename Context>
struct default_attribute_of<Component, Context,
typename disable_if_substitution_failure<
typename Component::template attribute<Context>::type>::type>
: Component::template attribute<Context> {};
template <typename Component, typename Context>
struct default_attribute_of<Component, Context,
typename enable_if_c<Component::is_pass_through_unary>::type>
: attribute_of<typename Component::subject_type, Context>{};
}
template <typename Component, typename Context, typename Enable>
struct attribute_of : detail::default_attribute_of<Component, Context> {};
}}}}
#endif
|
{"hexsha": "4165dada8ff1a84e318c9e89bcc71dfe1f1bc653", "size": 2583, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "x3/support/traits/attribute_of.hpp", "max_stars_repo_name": "lakeweb/bxlreader", "max_stars_repo_head_hexsha": "9b99d79f1bac3747a9e3bb51d0ffd0004ef32f73", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "x3/support/traits/attribute_of.hpp", "max_issues_repo_name": "lakeweb/bxlreader", "max_issues_repo_head_hexsha": "9b99d79f1bac3747a9e3bb51d0ffd0004ef32f73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2018-04-24T11:46:48.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-25T15:11:30.000Z", "max_forks_repo_path": "x3/support/traits/attribute_of.hpp", "max_forks_repo_name": "lakeweb/BXLReader", "max_forks_repo_head_hexsha": "9b99d79f1bac3747a9e3bb51d0ffd0004ef32f73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-06-21T16:17:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-04T13:41:01.000Z", "avg_line_length": 43.7796610169, "max_line_length": 82, "alphanum_fraction": 0.6260162602, "num_tokens": 496}
|
import numpy as np
import matplotlib.pyplot as plt
ar = 0.9
br = 0.04
cr = -2
dr = 1
au = 0.1
bu = 0.02
cu = 1
du = 1
av = 0.05
bv = -0.1
cv = 0.7
dv = 1.3
#av = 0.05
#bv = 0.1
#cv = 0.7
#dv = 1.3
#av = 0.1
#bv = 0.02
#cv = 1
#dv = 1
ap = 1
bp = 0.05
cp = 2
dp = -1
def rho_a(x, y):
return ar + br*np.sin(cr*x + dr*y)
def u_a(x, y):
return au + bu*np.cos(cu*x + du*y)
def v_a(x, y):
return av + bv*np.cos(cv*x + dv*y)
def p_a(x, y):
return ap + bp*np.sin(cp*x + dp*y)
def rE_a(x, y):
return p_a(x, y)/(1.4 - 1) + 0.5*rho_a(x, y)*(u_a(x, y)**2 + v_a(x, y)**2)
def E_a(x, y):
return p_a(x, y)/(rho_a(x, y)*(1.4 - 1)) + 0.5*(u_a(x, y)**2 + v_a(x, y)**2)
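# Added sanity check (not in the original file): rE_a should equal
# rho_a * E_a pointwise, since E_a is the specific total energy.
def _check_manufactured_energy(x=0.3, y=-0.2):
    return np.isclose(rE_a(x, y), rho_a(x, y)*E_a(x, y))  # True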
#-----------------------------------------------------------
def getField(U, field):
r = U[:,0]
ru = U[:,1]
rv = U[:,2]
rE = U[:,3]
g = 1.4
s = field.lower()
if (s == 'mach'):
V = np.sqrt(ru**2 + rv**2)/r
p = (g-1.)*(rE-0.5*r*V**2)
c = np.sqrt(g*p/r)
return V/c
elif (s == 'pressure'):
V = np.sqrt(ru**2 + rv**2)/r
p = (g-1.)*(rE-0.5*r*V**2)
return p
elif (s == 'density'):
return r
elif (s == 'xmomentum'):
return ru
elif (s == 'ymomentum'):
return rv
elif (s == 'energy'):
return rE/r
elif (s == 'renergy'):
return rE
elif (s == 'xvelocity'):
return ru/r
elif (s == 'yvelocity'):
        return rv/r
    else:
        raise ValueError("Unknown field: %s" % field)
#-----------------------------------------------------------
def plotstate(Mesh, U, field, fname):
V = Mesh['V']
E = Mesh['E']
BE = Mesh['BE']
f = plt.figure(figsize=(12,6))
F = getField(U, field)
plt.tripcolor(V[:,0], V[:,1], triangles=E, facecolors=F, shading='flat')
for i in range(len(BE)):
x = [V[BE[i,0],0], V[BE[i,1],0]]
y = [V[BE[i,0],1], V[BE[i,1],1]]
plt.plot(x, y, '-', linewidth=2, color='black')
dosave = (len(fname) != 0)
plt.axis('equal')
plt.axis([-100, 100,-100, 100])
#plt.axis([-2, 10,-4, 4])
plt.colorbar()
#plt.clim(0, 0.7)
#plt.clim(9, 12)
plt.title(field, fontsize=16)
f.tight_layout()
plt.show(block=(not dosave))
if (dosave):
plt.savefig(fname)
plt.close(f)
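# Hedged usage sketch (illustrative; the variable names and shapes are
# assumptions -- V: node coordinates [nv,2], E: element connectivity [ne,3],
# BE: boundary edges [nb,2], U: element states [ne,4]):
# Mesh = {'V': V, 'E': E, 'BE': BE}
# plotstate(Mesh, U, 'mach', 'mach.png')   # non-empty fname -> save to file
# plotstate(Mesh, U, 'pressure', '')       # empty fname -> interactive window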
|
{"hexsha": "3a433e25a62c519722f75d8d7a91cd620f35bb8f", "size": 2033, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/python/PlotUtils.py", "max_stars_repo_name": "Rob-Rau/EbbCFD", "max_stars_repo_head_hexsha": "093a562920039754f6f59c0966b4820329e6ad38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-16T01:43:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-16T01:43:24.000Z", "max_issues_repo_path": "scripts/python/PlotUtils.py", "max_issues_repo_name": "Rob-Rau/EbbCFD", "max_issues_repo_head_hexsha": "093a562920039754f6f59c0966b4820329e6ad38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-04-19T17:24:35.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-23T16:52:02.000Z", "max_forks_repo_path": "scripts/python/PlotUtils.py", "max_forks_repo_name": "Rob-Rau/EbbCFD", "max_forks_repo_head_hexsha": "093a562920039754f6f59c0966b4820329e6ad38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0840336134, "max_line_length": 77, "alphanum_fraction": 0.4953271028, "include": true, "reason": "import numpy", "num_tokens": 833}
|
import os
import numpy as np
import tensorflow as tf
import scipy.io as sio
from open3d import *
import random
from tf_ops.nn_distance import tf_nndistance
import time
import pdb
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
# view num
#view_num = 33
view_num = 40
# path
data_type = 'output'
NBV_file= "/home/cuda/Alex/trai/PC-NBV/data/Data_external/NBV_data/shapenet_33_views_640x480/"
save_dir = "/home/cuda/Alex/trai/PC-NBV/data/Data_external/NBV_data/shapenet_33_views_640x480/"+ data_type +'/'
save_type=data_type+'/'
save_dir_final= NBV_file+save_type
if not os.path.exists(save_dir_final):
os.makedirs(save_dir_final)
# for calculating surface coverage and register
model_list = os.listdir(os.path.join(save_dir))
for model in model_list:
if not os.path.exists(os.path.join(save_dir_final, model)):
os.makedirs(os.path.join(save_dir_final, model))
for i in range(1):
if not os.path.exists(os.path.join(save_dir_final, model,str(i))):
os.makedirs(os.path.join(save_dir_final, model,str(i)))
score_path = os.path.join(save_dir,model, str(i) , "0_target_value.npy")
state_path = os.path.join(save_dir,model, str(i) , "0_viewstate.npy")
Viewstate=np.load(state_path, mmap_mode='r')
pos=0
for j in range(view_num):
if(Viewstate[j]==1):
pos=j
#print(pos)
            View_state_init_permuted = np.zeros(view_num, dtype=int)  # np.int was removed in NumPy >= 1.24
View_state_init_permuted[0]=1
View_scores = np.load(score_path, mmap_mode='r')
View_scores_permuted = (np.roll(View_scores, -pos))
np.save(os.path.join(save_dir_final, model,str(i),"0_target_value_permuted.npy"),View_scores_permuted)
np.save(os.path.join(save_dir_final, model,str(i),"0_viewstate_permuted.npy"),View_state_init_permuted)
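# Worked illustration of the permutation above (hedged; values hypothetical):
# if View_scores = [s0, s1, s2, s3] and the single initially-observed view is
# pos = 2, then np.roll(View_scores, -2) -> [s2, s3, s0, s1], so the observed
# view's score lands at index 0, matching View_state_init_permuted = [1,0,0,0].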
|
{"hexsha": "ea232457a01b2ddd7e03372a5998c06f8bc862d4", "size": 2196, "ext": "py", "lang": "Python", "max_stars_repo_path": "PC-NBV/Permutation_Outputs_synthetic_40.py", "max_stars_repo_name": "tamaslevente/trai", "max_stars_repo_head_hexsha": "4bf68463b941f305d9b25a9374b6c2a2d51a8046", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PC-NBV/Permutation_Outputs_synthetic_40.py", "max_issues_repo_name": "tamaslevente/trai", "max_issues_repo_head_hexsha": "4bf68463b941f305d9b25a9374b6c2a2d51a8046", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PC-NBV/Permutation_Outputs_synthetic_40.py", "max_forks_repo_name": "tamaslevente/trai", "max_forks_repo_head_hexsha": "4bf68463b941f305d9b25a9374b6c2a2d51a8046", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7974683544, "max_line_length": 123, "alphanum_fraction": 0.5969945355, "include": true, "reason": "import numpy,import scipy", "num_tokens": 507}
|
import sys
import time
import numpy as np
import readers.utils as utils
from readers.Mention import Mention
from readers.config import Config
from readers.vocabloader import VocabLoader
import ccg_nlpy
from ccg_nlpy.core.text_annotation import TextAnnotation
start_word = "<s>"
end_word = "<eos>"
# Reader for Text Annotations
class TextAnnoTestReader(object):
def __init__(
self,
config,
vocabloader,
num_cands,
batch_size,
strict_context=True,
pretrain_wordembed=True,
coherence=True,
nerviewname="NER_CONLL",
):
self.typeOfReader = "inference"
self.start_word = start_word
self.end_word = end_word
self.unk_word = "unk" # In tune with word2vec
self.unk_wid = "<unk_wid>"
self.tr_sup = "tr_sup"
self.tr_unsup = "tr_unsup"
self.pretrain_wordembed = pretrain_wordembed
self.coherence = coherence
self.nerviewname = nerviewname
# Word Vocab
(self.word2idx, self.idx2word) = vocabloader.getGloveWordVocab()
self.num_words = len(self.idx2word)
# Label Vocab
(self.label2idx, self.idx2label) = vocabloader.getLabelVocab()
self.num_labels = len(self.idx2label)
# Known WID Vocab
(self.knwid2idx, self.idx2knwid) = vocabloader.getKnwnWidVocab()
self.num_knwn_entities = len(self.idx2knwid)
# Wid2Wikititle Map
self.wid2WikiTitle = vocabloader.getWID2Wikititle()
# Coherence String Vocab
print("Loading Coherence Strings Dicts ... ")
(self.cohG92idx, self.idx2cohG9) = utils.load(config.cohstringG9_vocab_pkl)
self.num_cohstr = len(self.idx2cohG9)
# Crosswikis
print("Loading Crosswikis dict. (takes ~2 mins to load)")
self.crosswikis = utils.load(config.crosswikis_pruned_pkl)
print("Crosswikis loaded. Size: {}".format(len(self.crosswikis)))
if self.pretrain_wordembed:
stime = time.time()
self.word2vec = vocabloader.loadGloveVectors()
print("[#] Glove Vectors loaded!")
ttime = (time.time() - stime) / float(60)
# print("[#] Test Mentions File : {}".format(test_mens_file))
# print("[#] Loading test file and preprocessing ... ")
# with open(test_mens_file, 'r') as f:
# tajsonstr = f.read()
# ta = TextAnnotation(json_str=tajsonstr)
#
# (sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)
#
# self.mention_lines = self.convertSent2NerToMentionLines(
# sentences_tokenized, modified_ner_cons_list)
#
# self.mentions = []
# for line in self.mention_lines:
# m = Mention(line)
# self.mentions.append(m)
self.men_idx = 0
# self.num_mens = len(self.mentions)
self.epochs = 0
# print( "[#] Test Mentions : {}".format(self.num_mens))
self.batch_size = batch_size
print("[#] Batch Size: %d" % self.batch_size)
self.num_cands = num_cands
self.strict_context = strict_context
print("\n[#]LOADING COMPLETE")
# ******************* END __init__ *********************************
def new_test_file(self, test_mens_file):
self.test_mens_file = test_mens_file
with open(test_mens_file, "r") as f:
tajsonstr = f.read()
ta = TextAnnotation(json_str=tajsonstr)
self.textanno = ta
(sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)
self.mention_lines = self.convertSent2NerToMentionLines(
sentences_tokenized, modified_ner_cons_list
)
self.mentions = []
for line in self.mention_lines:
m = Mention(line)
self.mentions.append(m)
self.men_idx = 0
self.num_mens = len(self.mentions)
self.epochs = 0
def new_tajsonstr(self, tajsonstr):
""" tajsonstr is a json str of a TA """
ta = TextAnnotation(json_str=tajsonstr)
self.new_ta(ta)
def new_ta(self, ta):
self.textanno = ta
(sentences_tokenized, modified_ner_cons_list) = self.processTestDoc(ta)
self.mention_lines = self.convertSent2NerToMentionLines(
sentences_tokenized, modified_ner_cons_list
)
self.mentions = []
for line in self.mention_lines:
m = Mention(line)
self.mentions.append(m)
self.men_idx = 0
self.num_mens = len(self.mentions)
self.epochs = 0
def get_vector(self, word):
if word in self.word2vec:
return self.word2vec[word]
else:
return self.word2vec["unk"]
def reset_test(self):
self.men_idx = 0
self.epochs = 0
def processTestDoc(self, ccgdoc):
doc_tokens = ccgdoc.get_tokens
# sent_end_token_indices : contains index for the starting of the
# next sentence.
sent_end_token_indices = ccgdoc.get_sentence_end_token_indices
# List of tokenized sentences
sentences_tokenized = []
for i in range(0, len(sent_end_token_indices)):
start = sent_end_token_indices[i - 1] if i != 0 else 0
end = sent_end_token_indices[i]
sent_tokens = doc_tokens[start:end]
sentences_tokenized.append(sent_tokens)
# List of ner dicts from ccg pipeline
ner_cons_list = []
try:
ner_cons_list = ccgdoc.get_view(self.nerviewname).cons_list
if ner_cons_list is None:
ner_cons_list = []
        except Exception:
            print("NO NAMED ENTITIES IN THE DOC. CONTINUING WITH AN EMPTY LIST")
modified_ner_cons_list = []
for orig_ner in ner_cons_list:
ner = orig_ner.copy()
# ner['end'] = ner['end'] + 1
# ner['tokens'] = ' '.join(doc_tokens[ner['start']:ner['end']])
found = False
# idx = sentIdx, j = sentEndTokenIdx
for idx, j in enumerate(sent_end_token_indices):
sent_start_token = sent_end_token_indices[idx - 1] if idx != 0 else 0
# ner['end'] is the idx of the token after ner
if ner["end"] <= j:
ner["start"] = ner["start"] - sent_start_token
ner["end"] = ner["end"] - sent_start_token - 1
ner["sent_idx"] = idx
modified_ner_cons_list.append(ner)
found = True
if found:
break
return (sentences_tokenized, modified_ner_cons_list)
def convertSent2NerToMentionLines(
self, sentences_tokenized, modified_ner_cons_list
):
"""Convert NERs from document to list of mention strings"""
mentions = []
# Make Document Context String for whole document
cohStr = ""
# for sent_idx, s_nerDicts in sentidx2ners.items():
# for s, ner in s_nerDicts:
# cohStr += ner['tokens'].replace(' ', '_') + ' '
for ner_men in modified_ner_cons_list:
cohStr += ner_men["tokens"].replace(" ", "_") + " "
cohStr = cohStr.strip()
for ner_men in modified_ner_cons_list:
idx = ner_men["sent_idx"]
sentence = " ".join(sentences_tokenized[idx])
mention = "%s\t%s\t%s" % ("unk_mid", "unk_wid", "unkWT")
mention = mention + "\t" + str(ner_men["start"])
mention = mention + "\t" + str(ner_men["end"])
mention = mention + "\t" + str(ner_men["tokens"])
mention = mention + "\t" + sentence
mention = mention + "\t" + "UNK_TYPES"
mention = mention + "\t" + cohStr
mentions.append(mention)
return mentions
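    # Hedged illustration of one mention line built above (fields are
    # tab-separated; mid/wid/wikititle are placeholders at inference time):
    #   unk_mid<TAB>unk_wid<TAB>unkWT<TAB><start><TAB><end><TAB><surface>
    #   <TAB><sentence><TAB>UNK_TYPES<TAB><coherence string>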
def bracketMentionInSentence(self, s, nerDict):
tokens = s.split(" ")
start = nerDict["start"]
end = nerDict["end"]
tokens.insert(start, "[[")
tokens.insert(end + 2, "]]")
return " ".join(tokens)
def _read_mention(self):
if self.num_mens == 0:
return None
mention = self.mentions[self.men_idx]
self.men_idx += 1
if self.men_idx == self.num_mens:
self.men_idx = 0
self.epochs += 1
return mention
def _next_batch(self):
""" Data : wikititle \t mid \t wid \t start \t end \t tokens \t labels
start and end are inclusive
"""
# Sentence = s1 ... m1 ... mN, ... sN.
# Left Batch = s1 ... m1 ... mN
# Right Batch = sN ... mN ... m1
(left_batch, right_batch) = ([], [])
coh_indices = []
coh_values = []
if self.coherence:
coh_matshape = [self.batch_size, self.num_cohstr]
else:
coh_matshape = []
# Candidate WID idxs and their cprobs
# First element is always true wid
(wid_idxs_batch, wid_cprobs_batch) = ([], [])
while len(left_batch) < self.batch_size:
batch_el = len(left_batch)
m = self._read_mention()
if m is None:
return None, None, None, None, None
# for label in m.types:
# if label in self.label2idx:
# labelidx = self.label2idx[label]
# labels_batch[batch_el][labelidx] = 1.0
cohFound = False # If no coherence mention is found, add unk
if self.coherence:
cohidxs = [] # Indexes in the [B, NumCoh] matrix
cohvals = [] # 1.0 to indicate presence
for cohstr in m.coherence:
if cohstr in self.cohG92idx:
cohidx = self.cohG92idx[cohstr]
cohidxs.append([batch_el, cohidx])
cohvals.append(1.0)
cohFound = True
if cohFound:
coh_indices.extend(cohidxs)
coh_values.extend(cohvals)
else:
cohidx = self.cohG92idx[self.unk_word]
coh_indices.append([batch_el, cohidx])
coh_values.append(1.0)
# Left and Right context includes mention surface
left_tokens = m.sent_tokens[0 : m.end_token + 1]
right_tokens = m.sent_tokens[m.start_token :][::-1]
# Strict left and right context
if self.strict_context:
left_tokens = m.sent_tokens[0 : m.start_token]
right_tokens = m.sent_tokens[m.end_token + 1 :][::-1]
# Left and Right context includes mention surface
else:
left_tokens = m.sent_tokens[0 : m.end_token + 1]
right_tokens = m.sent_tokens[m.start_token :][::-1]
if not self.pretrain_wordembed:
left_idxs = [self.convert_word2idx(word) for word in left_tokens]
right_idxs = [self.convert_word2idx(word) for word in right_tokens]
else:
left_idxs = left_tokens
right_idxs = right_tokens
left_batch.append(left_idxs)
right_batch.append(right_idxs)
# wids : [true_knwn_idx, cand1_idx, cand2_idx, ..., unk_idx]
# wid_cprobs : [cwikis probs or 0.0 for unks]
(wid_idxs, wid_cprobs) = self.make_candidates_cprobs(m)
wid_idxs_batch.append(wid_idxs)
wid_cprobs_batch.append(wid_cprobs)
coherence_batch = (coh_indices, coh_values, coh_matshape)
return (
left_batch,
right_batch,
coherence_batch,
wid_idxs_batch,
wid_cprobs_batch,
)
def print_test_batch(self, mention, wid_idxs, wid_cprobs):
print(
"Surface : {} WID : {} WT: {}".format(
mention.surface, mention.wid, self.wid2WikiTitle[mention.wid]
)
)
print(mention.wid in self.knwid2idx)
for (idx, cprob) in zip(wid_idxs, wid_cprobs):
print(
"({} : {:0.5f})".format(self.wid2WikiTitle[self.idx2knwid[idx]], cprob),
end=" ",
)
print("\n")
def make_candidates_cprobs(self, m):
# Fill num_cands now
surface = utils._getLnrm(m.surface)
wid_idxs = []
wid_cprobs = []
# print(surface)
if surface in self.crosswikis:
# Pruned crosswikis has only known wids and 30 cands at max
candwids_cprobs = self.crosswikis[surface][0 : self.num_cands - 1]
(wids, wid_cprobs) = candwids_cprobs
wid_idxs = [self.knwid2idx[wid] for wid in wids]
# All possible candidates added now. Pad with unks
# assert len(wid_idxs) == len(wid_cprobs)
remain = self.num_cands - len(wid_idxs)
wid_idxs.extend([0] * remain)
remain = self.num_cands - len(wid_cprobs)
wid_cprobs.extend([0.0] * remain)
return (wid_idxs, wid_cprobs)
def embed_batch(self, batch):
""" Input is a padded batch of left or right contexts containing words
Dimensions should be [B, padded_length]
Output:
Embed the word idxs using pretrain word embedding
"""
output_batch = []
for sent in batch:
word_embeddings = [self.get_vector(word) for word in sent]
output_batch.append(word_embeddings)
return output_batch
def embed_mentions_batch(self, mentions_batch):
""" Input is batch of mention tokens as a list of list of tokens.
Output: For each mention, average word embeddings """
embedded_mentions_batch = []
for m_tokens in mentions_batch:
outvec = np.zeros(300, dtype=float)
for word in m_tokens:
outvec += self.get_vector(word)
outvec = outvec / len(m_tokens)
embedded_mentions_batch.append(outvec)
return embedded_mentions_batch
def pad_batch(self, batch):
if not self.pretrain_wordembed:
pad_unit = self.word2idx[self.unk_word]
else:
pad_unit = self.unk_word
lengths = [len(i) for i in batch]
max_length = max(lengths)
for i in range(0, len(batch)):
batch[i].extend([pad_unit] * (max_length - lengths[i]))
return (batch, lengths)
def _next_padded_batch(self):
(
left_batch,
right_batch,
coherence_batch,
wid_idxs_batch,
wid_cprobs_batch,
) = self._next_batch()
if left_batch is None:
return (None, None, None, None, None, None, None)
(left_batch, left_lengths) = self.pad_batch(left_batch)
(right_batch, right_lengths) = self.pad_batch(right_batch)
if self.pretrain_wordembed:
left_batch = self.embed_batch(left_batch)
right_batch = self.embed_batch(right_batch)
return (
left_batch,
left_lengths,
right_batch,
right_lengths,
coherence_batch,
wid_idxs_batch,
wid_cprobs_batch,
)
def convert_word2idx(self, word):
if word in self.word2idx:
return self.word2idx[word]
else:
return self.word2idx[self.unk_word]
def next_test_batch(self):
return self._next_padded_batch()
def widIdx2WikiTitle(self, widIdx):
wid = self.idx2knwid[widIdx]
wikiTitle = self.wid2WikiTitle[wid]
return wikiTitle
if __name__ == "__main__":
sttime = time.time()
batch_size = 2
num_batch = 1000
configpath = "configs/all_mentions_config.ini"
config = Config(configpath, verbose=False)
vocabloader = VocabLoader(config)
b = TextAnnoTestReader(
config=config,
vocabloader=vocabloader,
num_cands=30,
batch_size=batch_size,
strict_context=False,
pretrain_wordembed=True,
coherence=True,
)
stime = time.time()
i = 0
total_instances = 0
while b.epochs < 1:
(
left_batch,
left_lengths,
right_batch,
right_lengths,
coherence_batch,
wid_idxs_batch,
wid_cprobs_batch,
) = b.next_test_batch()
if i % 100 == 0:
etime = time.time()
t = etime - stime
print("{} done. Time taken : {} seconds".format(i, t))
i += 1
etime = time.time()
t = etime - stime
tt = etime - sttime
print("Total Instances : {}".format(total_instances))
print(
"Batching time (in secs) to make %d batches of size %d : %7.4f seconds"
% (i, batch_size, t)
)
print(
"Total time (in secs) to make %d batches of size %d : %7.4f seconds"
% (i, batch_size, tt)
)
|
{"hexsha": "d2ed50c0fd3a69a8e92a72f4b1e7bc27c33602df", "size": 17058, "ext": "py", "lang": "Python", "max_stars_repo_path": "readers/textanno_test_reader.py", "max_stars_repo_name": "EntilZha/neural-el", "max_stars_repo_head_hexsha": "bab6659e1653909d911201cf33b340616cc59b99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "readers/textanno_test_reader.py", "max_issues_repo_name": "EntilZha/neural-el", "max_issues_repo_head_hexsha": "bab6659e1653909d911201cf33b340616cc59b99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "readers/textanno_test_reader.py", "max_forks_repo_name": "EntilZha/neural-el", "max_forks_repo_head_hexsha": "bab6659e1653909d911201cf33b340616cc59b99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9800796813, "max_line_length": 88, "alphanum_fraction": 0.568237777, "include": true, "reason": "import numpy", "num_tokens": 4164}
|
module SCF
#Julia Libraries/Modules
using Printf
using LinearAlgebra
#LearnHatreeFock.jl Modules
using TypesBasis,TypesParticles
using CoulombExchange
export runscf
@doc raw"""
function scf()
description: Self-consistent field soultion approach.
The Hatree-Fock operator for non-interacting electrons in an orthonormal basis set is give as:
$H_o C = \epsilon S C$ where $C,epsilon$ are the eigenvalues,eigenvectors and $S$ is the overlap matrix.
The first step is to find the eigenvalues and vectors of the overlap matrix. This will be used to transform from
atomic centered basis-set to a orthonormal molecular basis-set, for example, $H_{MO}=
"""
function runscf(numelec::Int,overlap::Array{Float64,2},
ho::Array{Float64,2},elecelecrepul::Array{Float64,4};
maxcycle=30::Int,tolerance=1.0e-6::Float64)
convergflag = false;
#Orthogonal overlap matrix, i.e., molecular orbitals
oevals,oevecs = eigen(overlap);
oevalsmat = Diagonal(oevals);
#Try to invert eigenvalues matrix assuming non-singular, if singular
# promote datatype ComplexF64
invoevalsmat = returnevalsinvhalf(oevalsmat);
orthoverlap = oevecs*invoevalsmat*adjoint(oevecs);
orthoverlapstar = adjoint(orthoverlap);
#Fock matrix construction
fock = copy(ho);
fockprime = orthoverlapstar*fock*orthoverlap;
#HF Roothaan eq.
#Find molecular orbital single electron solutions
evals,evecsprime = eigen(fockprime);
#Go back to atomic orbitals
evecs = orthoverlap*evecsprime;
    #Sort lowest eigenvalues for atomic orbitals
sorteigen!(evals,evecs);
    #Build electron density for the Hartree potential
density = builddensity(evecs,numelec)
totalenergy = zeros(Float64,maxcycle+1);
#SCF cycle
for cycle=2:maxcycle+1
#Get exchange and fock potential
exchangepot = getcoulexchange(density,elecelecrepul)
fock = ho + exchangepot;
#find solutions
fockprime = orthoverlapstar*fock*orthoverlap;
evals,evecsprime = eigen(fockprime);
evecs = orthoverlap*evecsprime;
sorteigen!(evals,evecs);
density = builddensity(evecs,numelec);
totalenergy[cycle] = getfockenergy(ho,fock,density);
#Check conditions
delta = abs(totalenergy[cycle]-totalenergy[cycle-1]);
printscf(cycle-1,delta,totalenergy[cycle])
if delta < tolerance
return totalenergy[cycle]
end
end #cycle=2:maxcycle+1
return totalenergy[end];
end #runscf
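# Hedged usage sketch (illustrative; the matrices below are hypothetical toy
# inputs, not data shipped with this package): a 2-basis-function, 2-electron
# system with an orthonormal basis and zero electron-electron repulsion should
# converge within a couple of cycles.
#   overlap = [1.0 0.0; 0.0 1.0]
#   ho = [-1.0 -0.2; -0.2 -0.5]
#   eri = zeros(Float64, 2, 2, 2, 2)
#   energy = runscf(2, overlap, ho, eri; maxcycle=10, tolerance=1.0e-8)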
@doc raw"""
function returnevalsinvhalf(evals)
Try to invert eigenvalues matrix assuming non-singular, if singular promote
datatype ComplexF64
"""
function returnevalsinvhalf(evals)
try
invevalsmat = evals^-0.5e0;
catch
invevalsmat = (convert(Array{ComplexF64},evals))^(-0.5e0);
end
end #returnevalsinvhalf
@doc raw"""
    function builddensity(evecs::Matrix, numelec::Int)
Restricted HF density
"""
function builddensity(evecs::Matrix,numelec::Int)
#Restricted HF density
nhalf = Int(numelec/2);
esize = size(evecs)[1];
    if eltype(evecs) <: Complex
        density = zeros(eltype(evecs),esize,esize)
else
density = zeros(esize,esize)
end
for n=1:esize
for m=1:esize
for i=1:nhalf
density[n,m] += evecs[n,i]*evecs[m,i];
end #i=1:nhalf
end
end #n=1:esize
return density
end #builddensity
@doc raw"""
    function sorteigen!(evals::Vector, evecs::Matrix)
description: sort the eigenvalues and eigenvectors.
NOTES: I would like to rework this so that its more
succinct.
"""
function sorteigen!(evals::Vector{T},evecs::Matrix{T}) where {T<:Real}
ncol = size(evecs)[2];
#Make a shallow copy and force local scope
    #CHECK: I'm not sure what the most idiomatic Julia way to do this is.
local sortedevals = copy(evals);
local sortedevecs = copy(evecs);
#Get sorted indexes for eigenvals
sortedindex = sortperm(evals);
evals[:] = sortedevals[sortedindex];
#Get sorted eigenvectors
for i=1:ncol
sortedevecs[:,i] = evecs[:,sortedindex[i]];
end
evecs[:,:] = sortedevecs[:,:];
end #sortevals
function sorteigen!(evals::Vector{T},evecs::Matrix{T}) where {T<:Complex}
ncol = size(evecs)[2];
#Make a shallow copy and force local scope
    #CHECK: I'm not sure what the most idiomatic Julia way to do this is.
local sortedevals = copy(evals);
local sortedevecs = copy(evecs);
magevals = adjoint(evals)*evals;
println(magevals);
#Get sorted indexes for eigenvals
sortedindex = sortperm(real(magevals));
evals[:] = sortedevals[sortedindex];
#Get sorted eigenvectors
for i=1:ncol
sortedevecs[:,i] = evecs[:,sortedindex[i]];
end
evecs[:,:] = sortedevecs[:,:];
end #sortevals
@doc raw"""
    sorteigen(evals, evecs)
Non-mutating variant: returns the eigenvalues and the eigenvectors (as columns)
sorted by ascending eigenvalue.
"""
function sorteigen(evals::Vector{T},evecs::Matrix{T}) where {T<:Real}
p = sortperm(evals);
    return evals[p], evecs[:,p]  # eigenvectors are columns; sort columns, not rows
end
@doc raw"""
function getfockenergy()
"""
function getfockenergy(ho::Array,fock::Array,density::Array)
numbasis = size(density)[1];
energy = 0.00e0;
for n=1:numbasis
for m=1:numbasis
energy += density[n,m] * (ho[n,m]+fock[n,m]);
end
end#n=1:numbasis
return energy
end #getfockenergy
@doc raw"""
function printscf()
"""
function printscf(iteration::Int,delta::Float64,energy::Float64)
if iteration < 2
@printf(" HF SCF \n");
@printf("--------------------------------------------------\n");
@printf("Iteration ΔE [Ha] HF-Energy [Ha] \n");
@printf("--------------------------------------------------\n");
@printf("%i %0.6f %0.6e \n",
iteration,delta,energy);
else
@printf("%i %0.6f %0.6e \n",
iteration,delta,energy);
end
end #printscf
end #SCF
|
{"hexsha": "6c7d53b2c54ab93cd890e17f06f0d382fedfc420", "size": 6057, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SCF.jl", "max_stars_repo_name": "JuliaMatSci/LearnHartreeFock.jl", "max_stars_repo_head_hexsha": "ff91cff63d6ae4039b26b864f14fd86743ae6037", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-04-26T03:03:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:14:31.000Z", "max_issues_repo_path": "src/SCF.jl", "max_issues_repo_name": "JuliaMatSci/LearnHatreeFock.jl", "max_issues_repo_head_hexsha": "ff91cff63d6ae4039b26b864f14fd86743ae6037", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/SCF.jl", "max_forks_repo_name": "JuliaMatSci/LearnHatreeFock.jl", "max_forks_repo_head_hexsha": "ff91cff63d6ae4039b26b864f14fd86743ae6037", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-24T23:31:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T23:31:50.000Z", "avg_line_length": 26.1077586207, "max_line_length": 112, "alphanum_fraction": 0.6326564306, "num_tokens": 1709}
|
[STATEMENT]
lemma rec_unique:
"f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g> \<Longrightarrow>
g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g> \<Longrightarrow> f = rec1 s1 s2 \<and> g = rec2 s1 s2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> f = rec1 s1 s2 \<and> g = rec2 s1 s2
[PROOF STEP]
unfolding rec1_def rec2_def convol_expand_snd'[OF fst_rec1_pair] convol_expand_snd'[OF fst_rec2_pair]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <id , f> = fold1 <ctor1 \<circ> F1map id fst fst , s1> <ctor2 \<circ> F2map id fst fst , s2> \<and> <id , g> = fold2 <ctor1 \<circ> F1map id fst fst , s1> <ctor2 \<circ> F2map id fst fst , s2>
[PROOF STEP]
apply (rule fold_unique)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <id , f> \<circ> ctor1 = <ctor1 \<circ> F1map id fst fst , s1> \<circ> F1map id <id , f> <id , g>
2. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <id , g> \<circ> ctor2 = <ctor2 \<circ> F2map id fst fst , s2> \<circ> F2map id <id , f> <id , g>
[PROOF STEP]
apply (unfold convol_o id_o o_id F1.map_comp0[symmetric] F2.map_comp0[symmetric]
F1.map_id0 F2.map_id0 o_assoc[symmetric] fst_convol)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <ctor1 , f \<circ> ctor1> = <ctor1 , s1 \<circ> F1map id <id , f> <id , g>>
2. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <ctor2 , g \<circ> ctor2> = <ctor2 , s2 \<circ> F2map id <id , f> <id , g>>
[PROOF STEP]
apply (erule arg_cong2[of _ _ _ _ BNF_Def.convol, OF refl])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>f \<circ> ctor1 = s1 \<circ> F1map id <id , f> <id , g>; g \<circ> ctor2 = s2 \<circ> F2map id <id , f> <id , g>\<rbrakk> \<Longrightarrow> <ctor2 , g \<circ> ctor2> = <ctor2 , s2 \<circ> F2map id <id , f> <id , g>>
[PROOF STEP]
apply (erule arg_cong2[of _ _ _ _ BNF_Def.convol, OF refl])
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1250, "file": "BNF_Operations_LFP", "length": 6}
|
struct ScratchThermalGlobal{T}
ndim::T # model dimension
nvert::T # number of vertices per element
nnodel::T # number of nodes per element
nel::T # number of elements
    nip::T # number of integration points per element
end
struct ShapeFunctionsThermal{T}
N::Vector{SMatrix{1,6,T,6}}
∇N::Vector{SMatrix{2,6,T,12}}
dN3ds::SMatrix{2,3,T,6}
w_ip::SVector{7,T}
N3::Vector{SMatrix{1,3,T,3}}
end
struct ShapeFunctionsStokes{T}
N::Vector{SMatrix{1,6,T,6}}
∇N::Vector{SMatrix{2,6,T,12}}
    NP::Vector{SMatrix{1,3,T,3}}
dN3ds::SMatrix{2,3,T,6}
w_ip::SVector{7,T}
N3::Vector{SMatrix{1,3,T,3}}
end
struct ShapeFunctionsStress{A,B,C,D,E,F}
N::A
∇N::B
NP::C
dN3ds::D
w_ip::E
N3::F
end
@inline function _get_SF(nip, nnodel)
x_ip, w_ip = ip_triangle(nip)
# local coordinates and weights of points for integration of
# velocity/pressure matrices
N,dNds = shape_functions_triangles(x_ip,nnodel)
# velocity shape functions and their derivatives
dN3ds = @SMatrix [-1.0 1.0 0.0 # w.r.t. r
-1.0 0.0 1.0]
# derivatives of linear (3-node) shape functions; used to calculate
# each element's Jacobian
return N,dNds,dN3ds,w_ip
end
function ip_triangle(nip)
if nip === 3
ipx,ipw = ip_triangle3()
elseif nip === 6
ipx,ipw = ip_triangle6()
elseif nip === 7
ipx,ipw = ip_triangle7()
    else
        error("Unsupported number of integration points: $nip (expected 3, 6 or 7)")
    end
return ipx, ipw
end
ip_triangle(::Val{3}) = ip_triangle3()
ip_triangle(::Val{6}) = ip_triangle6()
ip_triangle(::Val{7}) = ip_triangle7()
function ip_triangle3()
# -- Integration point coordinates
ipx = @SMatrix [
1/6 1/6
2/3 1/6
1/6 2/3
]
# -- Weights
ipw = @SVector [
1/6,
1/6,
1/6,
]
return ipx, ipw
end
function ip_triangle6()
# -- Integration point coordinates
g1 = (8.0-sqrt(10.0) + sqrt(38.0-44.0*sqrt(2.0/5.0)))/18.0
g2 = (8.0-sqrt(10.0) - sqrt(38.0-44.0*sqrt(2.0/5.0)))/18.0
ipx = @SMatrix [
1.0-2.0*g1 g1
g1 1.0-2.0*g1
g1 g1
1.0-2.0*g2 g2
g2 1.0-2.0*g2
g2 g2
]
# -- Weights
w1 = (620.0 + sqrt(213125.0-53320.0*sqrt(10.0)))/3720.0;
w2 = (620.0 - sqrt(213125.0-53320.0*sqrt(10.0)))/3720.0;
ipw = @SVector [
w1, #0.223381589678011;
w1, #0.223381589678011;
w1, #0.223381589678011;
w2, #0.109951743655322;
w2, #0.109951743655322;
w2, #0.109951743655322;
]
return ipx, 0.5*ipw
end
function ip_triangle7()
# -- Integration point coordinates
g1 = (6.0 - sqrt(15.0))/21.0;
g2 = (6.0 + sqrt(15.0))/21.0;
ipx = @SMatrix [
1.0/3.0 1.0/3.0
1.0-2.0*g1 g1
g1 1.0-2.0*g1
g1 g1
1.0-2.0*g2 g2
g2 1.0-2.0*g2
g2 g2
]
# -- Weights
w1 = (155.0 - sqrt(15.0))/1200.0;
w2 = (155.0 + sqrt(15.0))/1200.0;
ipw = @SVector [
0.225,
w1, #0.223381589678011;
w1, #0.223381589678011;
w1, #0.223381589678011;
w2, #0.109951743655322;
w2, #0.109951743655322;
w2, #0.109951743655322;
]
return ipx, 0.5*ipw
end
function shape_functions_triangles(lc,nnodel)
r = lc[:,1]
s = lc[:,2]
npt = length(r)
N, dN = get_N_∇N(r,s,npt,nnodel)
return N, dN
end
function get_N_∇N(r,s,npt,::Val{3})
N = [sf_N_tri3(r[ip],s[ip]) for ip in 1:npt]
dN = [sf_dN_tri3(r[ip],s[ip]) for ip in 1:npt]
return N, dN
end
function get_N_∇N(r,s,npt,::Val{6})
N = [sf_N_tri6(r[ip],s[ip]) for ip in 1:npt]
dN = [sf_dN_tri6(r[ip],s[ip]) for ip in 1:npt]
return N, dN
end
function get_N_∇N(r,s,npt,::Val{7})
N = [sf_N_tri7(r[ip],s[ip]) for ip in 1:npt]
dN = [sf_dN_tri7(r[ip],s[ip]) for ip in 1:npt]
return N, dN
end
function sf_N_tri3(r,s)
# Find shape functions and their derivatives at given points on the
# master element for 3 node triangle
# 3-node triangle (node numbering is important)
#
# 3
# | \
# s-axis | \
# | \
# 1 - - - 2
# r axis -->
t = 1.0-r-s;
N = @SMatrix [t r s] # N3 at coordinate (r,s)
return N
end
function sf_dN_tri3(r,s)
# Find shape functions and their derivatives at given points on the
# master element for 3 node triangle
# 3-node triangle (node numbering is important)
#
# 3
# | \
# s-axis | \
# | \
# 1 - - - 2
# r axis -->
t = 1.0-r-s;
dN = @SMatrix [-1.0 1.0 0.0 # w.r.t. r
-1.0 0.0 1.0]; # w.r.t. s
return dN
end
function sf_N_tri7(r,s)
# Find shape functions and their derivatives at given points on the
# master element for a 7 node triangle
# 7-node triangle (node numbering is important)
#
# 3
# | \
# s-axis 6 5
# | 7 \
# 1 - 4 - 2
# r-axis
t = 1-r-s
N = @SMatrix [t*(2*t-1)+3*r*s*t r*(2*r-1)+3*r*s*t s*(2*s-1)+3*r*s*t 4*r*t-12*r*s*t 4*r*s-12*r*s*t 4*s*t-12*r*s*t 27*r*s*t]
return N
end
function sf_dN_tri7(r,s)
# Find shape functions and their derivatives at given points on the
# master element for a 7 node triangle
# 7-node triangle (node numbering is important)
#
# 3
# | \
# s-axis 6 5
# | 7 \
# 1 - 4 - 2
# r-axis
t = 1-r-s
dN= @SMatrix [1-4*t+3*s*t-3*r*s -1+4*r+3*s*t-3*r*s 3*s*t-3*r*s 4*t-4*r+12*r*s-12*s*t 4*s+12*r*s-12*s*t -4*s+12*r*s-12*s*t -27*r*s+27*s*t
1-4*t+3*r*t-3*r*s 3*r*t-3*r*s -1+4*s+3*r*t-3*r*s -4*r-12*r*t+12*r*s 4*r-12*r*t+12*r*s 4*t-4*s-12*r*t+12*r*s 27*r*t-27*r*s]
return dN
end
function sf_N_tri6(r,s)
# Find shape functions and their derivatives at given points on the
# master element for a 6 node triangle (node numbering is important)
#
# 3
# | \
# s-axis 6 5
# | \
# 1 - 4 - 2
# r-axis
#
t = 1.0 - r - s
# N1 at coordinate (r,s), N2 at coordinate (r,s), etc
N = @SMatrix [t*(2.0*t-1.0) r*(2.0*r-1.0) s*(2.0*s-1.0) 4.0*r*t 4.0*r*s 4.0*s*t]
# dN1 dN2 dN3 dN4 dN5 dN6
return N
end
function sf_dN_tri6(r,s)
# Find shape functions and their derivatives at given points on the
# master element for a 6 node triangle (node numbering is important)
#
# 3
# | \
# s-axis 6 5
# | \
# 1 - 4 - 2
# r-axis
#
t = 1.0 - r - s
# N1 at coordinate (r,s), N2 at coordinate (r,s), etc
# dN1 dN2 dN3 dN4 dN5 dN6
dN = @SMatrix [-(4.0*t-1.0) 4.0*r-1 0.0 4.0*(t-r) 4.0*s -4.0*s # w.r.t. r
-(4.0*t-1.0) 0.0 4.0*s-1.0 -4.0*r 4.0*r 4.0*(t -s)]; # w.r.t. s
return dN
end
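# Hedged sanity check (an illustrative addition, not part of the original API):
# Lagrange shape functions must partition unity, and their derivatives must
# sum to zero along each row, at any point of the master element.
function _check_partition_of_unity(r=0.2, s=0.3)
    pu = isapprox(sum(sf_N_tri6(r, s)), 1.0; atol=1e-12)
    dz = all(isapprox.(sum(sf_dN_tri6(r, s), dims=2), 0.0; atol=1e-12))
    return pu && dz
end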
|
{"hexsha": "3ca0b09e96f26dae869795fa99d034a0979d88f4", "size": 7420, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Algebra/Quadrature.jl", "max_stars_repo_name": "albert-de-montserrat/Persephone", "max_stars_repo_head_hexsha": "ddd4a7029be0fa5d5cb9c9914023fe3a6fbb1907", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Algebra/Quadrature.jl", "max_issues_repo_name": "albert-de-montserrat/Persephone", "max_issues_repo_head_hexsha": "ddd4a7029be0fa5d5cb9c9914023fe3a6fbb1907", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Algebra/Quadrature.jl", "max_forks_repo_name": "albert-de-montserrat/Persephone", "max_forks_repo_head_hexsha": "ddd4a7029be0fa5d5cb9c9914023fe3a6fbb1907", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.690647482, "max_line_length": 152, "alphanum_fraction": 0.4892183288, "num_tokens": 2843}
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# atlas.py
# Created by Disa Mhembere on 2014-01-08.
# Email: disa@jhu.edu
from zindex import MortonXYZ
import scipy.io as sio
import nibabel as nib
import os
class Atlas(object):
def __init__(self, atlas, label_fn=None):
"""
The use of this ctor is such that the `atlas` arg can either
be a nifti image or the path to a nifti image from which
I may obtain the nifti image and associated metadata.
@param atlas: The atlas filename or nibabel nifti object
@param label_fn: If there is a atlas region label file
"""
if isinstance(atlas, str) or isinstance(atlas, unicode):
self.data = nib.load(os.path.abspath(atlas)).get_data()
else:
self.data = atlas.get_data()
if label_fn:
label_file = open(label_fn, "rb")
self.region_names = label_file.read().splitlines()
else:
self.region_names = None
def max(self):
return self.data.max()
def get_region_num(self, vertex):
x,y,z = MortonXYZ(vertex)
return (self.data[x, y, z])
def get_region_name(self, region_num):
assert self.region_names, "No atlas region names file specified"
return self.region_names[int(region_num) - 1] # -1 for the 1-based indexing
def get_all_mappings(self, vertices):
region_nums = []
region_names = []
for vertex in vertices:
region_num = self.get_region_num(vertex)
region_nums.append(region_num)
if self.region_names: region_names.append(self.get_region_name(region_num))
return region_nums, region_names
def get_region_nums(self, vertices):
"""
Get a bunch of region numbers given vertex index
"""
keys = []
for vertex in vertices:
keys.append(self.get_region_num(int(vertex))) # TODO: Verify me
return keys
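# Hedged usage sketch (illustrative; the file paths are hypothetical):
# atlas = Atlas("/path/to/atlas.nii", label_fn="/path/to/region_names.txt")
# region_nums, region_names = atlas.get_all_mappings([0, 1024, 4096])
# print(atlas.get_region_name(region_nums[0]))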
|
{"hexsha": "a5bf09869c85b8594b0ba03dd551ace04cf62aec", "size": 2409, "ext": "py", "lang": "Python", "max_stars_repo_path": "MR-OCP/mrcap/atlas.py", "max_stars_repo_name": "justi/m2g", "max_stars_repo_head_hexsha": "09e8b889889ee8d8fb08b9b6fcd726fb3d901644", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-03-11T22:07:17.000Z", "max_stars_repo_stars_event_max_datetime": "2016-01-29T21:24:29.000Z", "max_issues_repo_path": "MR-OCP/mrcap/atlas.py", "max_issues_repo_name": "youngmook/m2g", "max_issues_repo_head_hexsha": "09e8b889889ee8d8fb08b9b6fcd726fb3d901644", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 213, "max_issues_repo_issues_event_min_datetime": "2015-01-30T16:02:57.000Z", "max_issues_repo_issues_event_max_datetime": "2016-01-29T21:45:02.000Z", "max_forks_repo_path": "MR-OCP/mrcap/atlas.py", "max_forks_repo_name": "youngmook/m2g", "max_forks_repo_head_hexsha": "09e8b889889ee8d8fb08b9b6fcd726fb3d901644", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-02-04T13:58:12.000Z", "max_forks_repo_forks_event_max_datetime": "2016-01-29T21:24:46.000Z", "avg_line_length": 31.2857142857, "max_line_length": 81, "alphanum_fraction": 0.703611457, "include": true, "reason": "import scipy", "num_tokens": 591}
|
"""
Copyright (c) 2004-2016 Zementis, Inc.
Copyright (c) 2016-2021 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys, os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
from PMML44 import *
from datetime import datetime
import metadata
import warnings
from base.constants import *
import numpy as np
class ExponentialSmoothingToPMML:
"""
Write a PMML file using model-object, model-parameters and time series data. Models are built using Statsmodels.
Parameters:
-----------
results_obj:
Instance of HoltWintersResults from statsmodels
pmml_file_name: string
        Name of the PMML file
model_name : string (optional)
Name of the model
description : string (optional)
Description of the model
"""
def __init__(self, results_obj=None, pmml_file_name="from_ExponentialSmoothing.pmml", model_name=None, description=None):
def get_time_value_objs():
"""
            Create TimeValue objects from the model's endogenous data. The
            series carries no explicit time attribute, so a running integer
            index is used instead.
Returns:
--------
time_value_objs: list
Instances of TimeValue
"""
ts_int_index = range(len(results_obj.model.endog))
time_value_objs = list()
for int_idx in ts_int_index:
time_value_objs.append(TimeValue(index=int_idx, value=results_obj.model.endog[int_idx]))
return time_value_objs
def get_pmml_datatype_optype(series_obj):
pmml_data_type = None
pmml_op_type = 'continuous'
if str(series_obj.dtype) in ['datetime64[ns]', 'datetime64[ns, tz]', 'timedelta[ns]']:
pmml_data_type = DATATYPE.DATETIME
elif str(series_obj.dtype) == 'float32':
pmml_data_type = DATATYPE.FLOAT
elif str(series_obj.dtype) == 'float64':
pmml_data_type = DATATYPE.DOUBLE
elif str(series_obj.dtype) in ['int64', 'int32']:
pmml_data_type = DATATYPE.INTEGER
return pmml_data_type, pmml_op_type
def get_data_field_objs():
"""
Create a list with instances of DataField
"""
data_field_objs = list()
index_name = results_obj.data.orig_endog.index.name
idx_data_type, idx_op_type = get_pmml_datatype_optype(results_obj.model._index)
data_field_objs.append(DataField(name=index_name, dataType=idx_data_type, optype=idx_op_type))
if results_obj.data.orig_endog.__class__.__name__ == 'DataFrame':
ts_name = results_obj.data.orig_endog.columns[0]
elif results_obj.data.orig_endog.__class__.__name__ == 'Series':
ts_name = results_obj.data.orig_endog.name
else:
ts_name = 'input'
ts_data_type, ts_op_type = get_pmml_datatype_optype(results_obj.model.endog)
data_field_objs.append(DataField(name=ts_name, dataType=ts_data_type, optype=ts_op_type))
return data_field_objs
def get_mining_field_objs():
"""
Create a list with instances of MiningField
"""
mining_field_objs = list()
if results_obj.data.orig_endog.__class__.__name__ == 'DataFrame':
ts_name = results_obj.data.orig_endog.columns[0]
elif results_obj.data.orig_endog.__class__.__name__ == 'Series':
ts_name = results_obj.data.orig_endog.name
else:
ts_name = 'input'
idx_name = results_obj.data.orig_endog.index.name
idx_usage_type = FIELD_USAGE_TYPE.ORDER
mining_field_objs.append(MiningField(name=idx_name, usageType=idx_usage_type))
ts_usage_type = FIELD_USAGE_TYPE.TARGET
mining_field_objs.append(MiningField(name=ts_name, usageType=ts_usage_type))
return mining_field_objs
n_samples = results_obj.model.nobs
n_columns = 1 # because we are dealing with Series object
function_name = MINING_FUNCTION.TIMESERIES
best_fit = TIMESERIES_ALGORITHM.EXPONENTIAL_SMOOTHING
extension_objs = list()
alpha = results_obj.params['smoothing_level'] # alpha is smoothing parameter for level
level_smooth_val = results_obj.level[-1] # smoothed level at last time-index
initial_level = results_obj.params['initial_level']
# extension_objs.append(Extension(name='initialLevel', value=initial_level))
if np.isnan(results_obj.params['smoothing_slope']):
gamma = None
else:
gamma = results_obj.params['smoothing_slope'] # gamma is smoothing parameter for trend
if np.isnan(results_obj.params['smoothing_seasonal']):
delta = None
else:
delta = results_obj.params['smoothing_seasonal'] # delta is smoothing parameter for seasonality
if np.isnan(results_obj.params['damping_slope']):
phi = 1
else:
phi = results_obj.params['damping_slope'] # damping parameter; which is applied on trend/slope
if results_obj.model.trend: # model_obj.trend can take values in {'add', 'mul', None}
trend_smooth_val = results_obj.slope[-1]
initial_trend = results_obj.params['initial_slope']
if results_obj.model.trend == 'add':
if results_obj.model.damped:
trend_type = EXPONENTIAL_SMOOTHING_TREND.DAMPED_ADDITIVE
else:
trend_type = EXPONENTIAL_SMOOTHING_TREND.ADDITIVE
else: # model_obj.trend == 'mul':
if results_obj.model.damped:
trend_type = EXPONENTIAL_SMOOTHING_TREND.DAMPED_MULTIPLICATIVE
else:
trend_type = EXPONENTIAL_SMOOTHING_TREND.MULTIPLICATIVE
trend_obj = Trend_ExpoSmooth(trend=trend_type, gamma=gamma, phi=phi, smoothedValue=trend_smooth_val)
# extension_objs.append(Extension(name='initialTrend', value=initial_trend))
else:
trend_obj = None
if results_obj.model.seasonal: # model_obj.seasonal can take values in {'add', 'mul', None}
period = results_obj.model.seasonal_periods
initial_seasons = ArrayType(n=period, type_ = ARRAY_TYPE.REAL)
content_value = ' '.join([str(i) for i in results_obj.params['initial_seasons']])
initial_seasons.content_[0].value = content_value
if results_obj.model.seasonal == 'add':
seasonal_type = EXPONENTIAL_SMOOTHING_SEASONALITY.ADDITIVE
else: # model_obj.seasonal == 'mul':
seasonal_type = EXPONENTIAL_SMOOTHING_SEASONALITY.MULTIPLICATIVE
season_obj = Seasonality_ExpoSmooth(type_=seasonal_type, period=period, delta=delta,
Array=initial_seasons)
else:
season_obj = None
pmml = PMML(
version=PMML_SCHEMA.VERSION,
Header=Header(
copyright=HEADER_INFO.COPYRIGHT,
description=description if description else HEADER_INFO.DEFAULT_DESCRIPTION,
Timestamp=Timestamp(datetime.now()),
Application=Application(name=HEADER_INFO.APPLICATION_NAME,version=HEADER_INFO.APPLICATION_VERSION)
),
DataDictionary=DataDictionary(numberOfFields=n_columns, DataField=get_data_field_objs()),
TimeSeriesModel=[TimeSeriesModel(
modelName= model_name if model_name else 'simple exponential smoothing',
functionName=function_name, bestFit=best_fit, isScorable=True,
MiningSchema=MiningSchema(MiningField=get_mining_field_objs()),
TimeSeries=[TimeSeries(
usage=TIMESERIES_USAGE.ORIGINAL, startTime=0, endTime=n_samples - 1, interpolationMethod='none',
TimeValue=get_time_value_objs()
)],
ExponentialSmoothing=ExponentialSmoothing(
Level=Level(alpha=alpha, smoothedValue=level_smooth_val),
Trend_ExpoSmooth=trend_obj,
Seasonality_ExpoSmooth=season_obj,
),
# Extension=extension_objs
)]
)
pmml.export(outfile=open(pmml_file_name, "w"), level=0)
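# Hedged usage sketch (illustrative; assumes statsmodels with the pre-0.12
# Holt-Winters API, whose fitted params expose keys such as 'smoothing_level'
# and 'smoothing_slope' as read above; 'values' is a placeholder):
# import pandas as pd
# from statsmodels.tsa.holtwinters import ExponentialSmoothing
# ts = pd.Series(values, index=pd.date_range("2020-01-01", periods=24, freq="M"))
# results = ExponentialSmoothing(ts, trend="add", seasonal="add",
#                                seasonal_periods=12).fit()
# ExponentialSmoothingToPMML(results, pmml_file_name="exp_smoothing.pmml")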
|
{"hexsha": "97b8de4846494dc70b7b08b482794398094af344", "size": 9173, "ext": "py", "lang": "Python", "max_stars_repo_path": "nyoka/statsmodels/exponential_smoothing.py", "max_stars_repo_name": "vishalbelsare/nyoka", "max_stars_repo_head_hexsha": "c08e83db2863a963d586b5853b82ef9d8cf799b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2020-08-24T07:59:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T08:36:35.000Z", "max_issues_repo_path": "nyoka/statsmodels/exponential_smoothing.py", "max_issues_repo_name": "vishalbelsare/nyoka", "max_issues_repo_head_hexsha": "c08e83db2863a963d586b5853b82ef9d8cf799b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-09-02T10:27:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T05:37:12.000Z", "max_forks_repo_path": "nyoka/statsmodels/exponential_smoothing.py", "max_forks_repo_name": "vishalbelsare/nyoka", "max_forks_repo_head_hexsha": "c08e83db2863a963d586b5853b82ef9d8cf799b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-09-17T15:01:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T03:13:25.000Z", "avg_line_length": 46.8010204082, "max_line_length": 125, "alphanum_fraction": 0.6392674152, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1957}
|
clear all; close all; clc
n=100;
L=20; x=linspace(-L,L,n); y=x;
[X,Y]=meshgrid(x,y);
Xd=[];
for j=1:100
u=tanh(sqrt(X.^2+Y.^2)).*cos(angle(X+i*Y)-(sqrt(X.^2+Y.^2))+j/10);
f=exp(-0.01*(X.^2+Y.^2));
uf=u.*f;
Xd(:,j)=reshape(uf,n^2,1);
pcolor(x,y,uf), shading interp, colormap(hot), caxis([-1 1]), drawnow
end
%% SVD/POD of the snapshot matrix
[U,S,V]=svd(Xd,0);
figure(2)
subplot(4,1,3)
plot(100*diag(S)/sum(diag(S)),'ko','Linewidth',[2])
subplot(4,1,4)
semilogy(100*diag(S)/sum(diag(S)),'ko','Linewidth',[2])
subplot(2,1,1)
plot(V(:,1:4),'Linewidth',[2])
legend('mode1','mode2','mode3','mode4')
set(gca,'Fontsize',[15],'Xtick',[0 20 40 60 80 100])
subplot(4,1,3), set(gca,'Fontsize',[15],'Ylim',[0 60],'Ytick',[0 20 40 60],'Xlim',[0 40],'Xtick',[0 10 20 30 40])
subplot(4,1,4), set(gca,'Fontsize',[15],'Ylim',[10^(-20) 10^2],'Ytick',[10^(-20) 10^(-10) 10^2],'Xlim',[0 40],'Xtick',[0 10 20 30 40])
figure(3)
for j=1:4
subplot(4,4,j)
mode=reshape(U(:,j),n,n);
pcolor(X,Y,mode), shading interp,caxis([-0.03 0.03]), colormap(gray)
axis off
end
%% Effect of simple transformations on a snapshot: u, |u|, u.^5
figure(11)
u=tanh(sqrt(X.^2+Y.^2)).*cos(angle(X+i*Y)-(sqrt(X.^2+Y.^2)));
f=exp(-0.01*(X.^2+Y.^2));
uf=u.*f;
subplot(3,3,1),pcolor(x,y,uf), shading interp, caxis([-1 1]), axis off
subplot(3,3,2),pcolor(x,y,abs(uf)), shading interp, caxis([-1 1]), axis off
subplot(3,3,3),pcolor(x,y,uf.^5), shading interp, caxis([-1 1]), axis off
colormap(gray)
%% TRANSLATION
figure(5)
n=200; L=20; x=linspace(-L,L,n); y=x; % space
m=41; T=10; t=linspace(0,T,m); % time
c=3; % wave speed
X=[];
for j=1:m
X(:,j)=exp(-(x+15-c*t(j)).^2).'; % data snapshots
end
[U,S,V]=svd(X); % SVD decomposition
%% SVD of the translating wave: singular values and modes
figure(6)
subplot(2,2,1)
waterfall(x,t,X.'),colormap([0 0 0])
view(20,75)
set(gca,'Xlim',[-20 20],'Xtick',[-20 -10 0 10 20],'Ylim',[0 10], ...
'Ytick',[0 5 10],'Zlim',[0 1],'Ztick',[0 1],'Fontsize',[12])
[U2,S2,V2]=svd(X);
subplot(4,2,2)
plot(100*diag(S2)/sum(diag(S2)),'ko','Linewidth',[2])
set(gca,'Xlim',[0 40],'Xtick',0:10:40,'Ylim',[0 8],'Ytick',[0 4 8])
subplot(4,2,4)
semilogy(100*diag(S2)/sum(diag(S2)),'ko','Linewidth',[2])
grid on
set(gca,'Xlim',[0 40],'Xtick',0:10:40,'Ylim',[10^(-3) 2*10^1],'Ytick',[10^(-3) 10^(-2) 10^(-1) 10^0 10^1])
figure(8)
subplot(2,1,1)
plot(x,U2(:,1:4),'Linewidth',[2]);
legend('mode1','mode2','mode3','mode4','Location','SouthEast')
set(gca,'Fontsize',[15],'Ylim',[-0.15 0.15],'Ytick',[-0.15 0 0.15])
subplot(2,1,2)
plot(t,V2(:,1:4),'Linewidth',[2])
set(gca,'Fontsize',[15],'Ylim',[-.3 0.3],'Ytick',[-0.3 0 0.3])
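% Hedged note (illustrative): the translating Gaussian above is the textbook
% case where POD/SVD needs many modes, since translation is poorly captured
% by a fixed separable basis -- hence the slow singular-value decay plotted
% above. A quick check of how many modes carry 99% of the energy:
% r99 = find(cumsum(diag(S2).^2)/sum(diag(S2).^2) > 0.99, 1)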
|
{"author": "dynamicslab", "repo": "databook_matlab", "sha": "d390d39d18489a4804ee87a143ae8db8a1f3010b", "save_path": "github-repos/MATLAB/dynamicslab-databook_matlab", "path": "github-repos/MATLAB/dynamicslab-databook_matlab/databook_matlab-d390d39d18489a4804ee87a143ae8db8a1f3010b/CH12/old_extra/POD_invariance.m"}
|
[STATEMENT]
lemma all_irr_GIrrRep_repset :
assumes "of_nat (card G) \<noteq> (0::'f::field)"
shows "\<forall>U\<in>(GIrrRep_repset::('f,'g) aezfun set set).
IrrFinGroupRepresentation G (*) U"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>U\<in>GIrrRep_repset. IrrFinGroupRepresentation G (*) U
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U. U \<in> GIrrRep_repset \<Longrightarrow> IrrFinGroupRepresentation G (*) U
[PROOF STEP]
fix U :: "('f,'g) aezfun set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>U. U \<in> GIrrRep_repset \<Longrightarrow> IrrFinGroupRepresentation G (*) U
[PROOF STEP]
assume "U \<in> GIrrRep_repset"
[PROOF STATE]
proof (state)
this:
U \<in> GIrrRep_repset
goal (1 subgoal):
1. \<And>U. U \<in> GIrrRep_repset \<Longrightarrow> IrrFinGroupRepresentation G (*) U
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
of_nat (card G) \<noteq> (0::'f)
U \<in> GIrrRep_repset
[PROOF STEP]
show "IrrFinGroupRepresentation G (*) U"
[PROOF STATE]
proof (prove)
using this:
of_nat (card G) \<noteq> (0::'f)
U \<in> GIrrRep_repset
goal (1 subgoal):
1. IrrFinGroupRepresentation G (*) U
[PROOF STEP]
using trivial_IrrFinGroupRepresentation_in_FG GIrrRep_repset_def
set_remisodups FG_constituents_irr
[PROOF STATE]
proof (prove)
using this:
of_nat (card G) \<noteq> (0::'f)
U \<in> GIrrRep_repset
of_nat (card G) \<noteq> (0::?'f) \<Longrightarrow> IrrFinGroupRepresentation G (*) 0
GIrrRep_repset \<equiv> 0 \<union> set (remisodups FG_constituents)
set (remisodups ?Us) \<subseteq> set ?Us
of_nat (card G) \<noteq> (0::?'f) \<Longrightarrow> \<forall>U\<in>set FG_constituents. IrrFinGroupRepresentation G (*) U
goal (1 subgoal):
1. IrrFinGroupRepresentation G (*) U
[PROOF STEP]
by (cases "U = 0") auto
[PROOF STATE]
proof (state)
this:
IrrFinGroupRepresentation G (*) U
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 833, "file": "Rep_Fin_Groups_Rep_Fin_Groups", "length": 8}
|
from pymoo.util.termination.max_eval import MaximumFunctionCallTermination
try:
from scipy.optimize import minimize as scipy_minimize, NonlinearConstraint, LinearConstraint
except ImportError:
raise Exception("Please install SciPy: pip install scipy")
import warnings
import numpy as np
from pymoo.algorithms.base.local import LocalSearch
from pymoo.core.individual import Individual
from pymoo.core.population import Population
from pymoo.util.display import SingleObjectiveDisplay
from pymoo.util.termination.max_gen import MaximumGenerationTermination
from pymoo.util.termination.no_termination import NoTermination
# ---------------------------------------------------------------------------------------------------------
# Interface
# ---------------------------------------------------------------------------------------------------------
class Optimizer(LocalSearch):
def __init__(self, method, with_bounds=False, with_constr=False, require_jac=False,
use_bounds=True, use_constr=True, estm_gradients=True, disp=False, show_warnings=False, **kwargs):
super().__init__(display=SingleObjectiveDisplay(), **kwargs)
self.method, self.with_bounds, self.with_constr, self.require_jac = method, with_bounds, with_constr, require_jac
self.show_warnings = show_warnings
self.use_bounds = use_bounds
self.use_constr = use_constr
self.estm_gradients = estm_gradients
self.options = {
'maxiter': int(1e8), # because of C code interfacing this can not be inf
'disp': disp}
def _setup(self, problem, **kwargs):
if isinstance(self.termination, MaximumGenerationTermination):
self.options["maxiter"] = self.termination.n_max_gen
elif isinstance(self.termination, MaximumFunctionCallTermination):
self.options["maxfev"] = self.termination.n_max_evals
self.termination = NoTermination()
self.return_least_infeasible = True
def _advance(self, **kwargs):
problem, evaluator = self.problem, self.evaluator
# add the box constraints defined in the problem
bounds = None
if self.use_bounds:
xl, xu = self.problem.bounds()
if self.with_bounds:
bounds = np.column_stack([xl, xu])
else:
if xl is not None or xu is not None:
raise Exception(f"Error: Boundary constraints can not be handled by {self.method}")
# define the actual constraints if supported by the algorithm
constr = []
if self.use_constr:
constr = [LinearConstraint(np.eye(self.problem.n_var), xl, xu)]
if problem.has_constraints():
if self.with_constr:
def fun_constr(x):
g, cv = problem.evaluate(x, return_values_of=["G", "CV"])
return cv[0]
non_lin_constr = NonlinearConstraint(fun_constr, -float("inf"), 0)
constr.append(non_lin_constr)
else:
raise Exception(f"Error: Constraint handling is not supported by {self.method}")
# the objective function to be optimized and add gradients if available
if self.estm_gradients:
jac = None
def fun_obj(x):
f = problem.evaluate(x, return_values_of=["F"])[0]
evaluator.n_eval += 1
return f
else:
jac = True
def fun_obj(x):
f, df = problem.evaluate(x, return_values_of=["F", "dF"])
if df is None:
raise Exception("If the gradient shall not be estimate, please set out['dF'] in _evaluate. ")
evaluator.n_eval += 1
return f[0], df[0]
# the arguments to be used
kwargs = dict(args=(), method=self.method, bounds=bounds, constraints=constr, jac=jac, options=self.options)
# the starting solution found by sampling beforehand
x0 = self.opt[0].X
# actually run the optimization
if not self.show_warnings:
warnings.simplefilter("ignore")
res = scipy_minimize(fun_obj, x0, **kwargs)
opt = Population.create(Individual(X=res.x))
self.evaluator.eval(self.problem, opt, algorithm=self)
self.pop, self.off = opt, opt
self.termination.force_termination = True
if hasattr("res", "nit"):
self.n_gen = res.nit + 1
# ---------------------------------------------------------------------------------------------------------
# Object Oriented Interface
# ---------------------------------------------------------------------------------------------------------
# +++++++++++++++++++++++++++++++++++++++++
# UNCONSTRAINED
# +++++++++++++++++++++++++++++++++++++++++
class NelderMead(Optimizer):
def __init__(self, **kwargs):
super().__init__("Nelder-Mead", **kwargs)
class CG(Optimizer):
def __init__(self, **kwargs):
super().__init__("CG", require_jac=True, **kwargs)
class NewtonCG(Optimizer):
def __init__(self, **kwargs):
super().__init__("Newton-CG", require_jac=True, **kwargs)
class BFGS(Optimizer):
def __init__(self, **kwargs):
super().__init__("BFGS", **kwargs)
class Powell(Optimizer):
def __init__(self, **kwargs):
super().__init__("Powell", **kwargs)
class Dogleg(Optimizer):
def __init__(self, **kwargs):
super().__init__("dogleg", require_jac=True, **kwargs)
class TrustNCG(Optimizer):
def __init__(self, **kwargs):
super().__init__("trust-ncg", require_jac=True, **kwargs)
class TrustExact(Optimizer):
def __init__(self, **kwargs):
super().__init__("trust-exact", require_jac=True, **kwargs)
class TrustKrylov(Optimizer):
def __init__(self, **kwargs):
super().__init__("trust-krylov", require_jac=True, **kwargs)
# +++++++++++++++++++++++++++++++++++++++++
# BOX CONSTRAINTS
# +++++++++++++++++++++++++++++++++++++++++
class LBFGSB(Optimizer):
def __init__(self, **kwargs):
super().__init__("L-BFGS-B", with_bounds=True, **kwargs)
class TNC(Optimizer):
def __init__(self, **kwargs):
super().__init__("TNC", with_bounds=True, **kwargs)
# +++++++++++++++++++++++++++++++++++++++++
# NON-LINEAR CONSTRAINTS
# +++++++++++++++++++++++++++++++++++++++++
class COBYLA(Optimizer):
def __init__(self, **kwargs):
super().__init__("COBYLA", with_bounds=False, with_constr=True, **kwargs)
class SLSQP(Optimizer):
def __init__(self, **kwargs):
super().__init__("SLSQP", with_bounds=True, with_constr=True, **kwargs)
class TrustConstr(Optimizer):
def __init__(self, **kwargs):
super().__init__("trust-constr", with_bounds=True, with_constr=True, **kwargs)
|
{"hexsha": "7c23c8019a70a632abe754a6f5867488f1fc7d55", "size": 6892, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymoo/vendor/vendor_scipy.py", "max_stars_repo_name": "jarreguit/pymoo", "max_stars_repo_head_hexsha": "0496a3c6765826148d8bab21650736760517dd25", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 762, "max_stars_repo_stars_event_min_datetime": "2018-06-05T20:56:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T09:09:42.000Z", "max_issues_repo_path": "pymoo/vendor/vendor_scipy.py", "max_issues_repo_name": "jarreguit/pymoo", "max_issues_repo_head_hexsha": "0496a3c6765826148d8bab21650736760517dd25", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 176, "max_issues_repo_issues_event_min_datetime": "2018-09-05T18:37:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-14T01:18:43.000Z", "max_forks_repo_path": "pymoo/vendor/vendor_scipy.py", "max_forks_repo_name": "jarreguit/pymoo", "max_forks_repo_head_hexsha": "0496a3c6765826148d8bab21650736760517dd25", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 160, "max_forks_repo_forks_event_min_datetime": "2018-08-05T05:31:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T09:09:45.000Z", "avg_line_length": 30.2280701754, "max_line_length": 121, "alphanum_fraction": 0.5729831689, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1468}
|
using ClobberingReload
using Base.Test
cp("F1.jl", "F.jl", remove_destination=true)
push!(LOAD_PATH, dirname(Base.source_path()))
@ausing AA
@ausing DD
@ausing BB <: (AA, DD)
@test something == "happy"
@test likes == "happy banana cards"
cp("F2.jl", "F.jl", remove_destination=true)
# ... This is kinda silly, but: we're making sure the time-stamps are different.
# I've had intermittent failures without these three lines. Not sure what the
# proper way of doing it is, but it doesn't matter much anyway; it's just a test.
touch("F.jl")
sleep(0.5)
touch("F.jl")
areload()
@test something == "green"
@test likes == "green banana cards"
################################################################################
if VERSION >= v"0.6.0"
using ParametricTypeAlias
creload_strip("ParametricTypeAlias")
@test isa(Int[1,2,3], ParametricTypeAlias.MyVector{Int})
end
|
{"hexsha": "9ed8ca4630b3ddbdb026ec963a44d38c9e01c0f4", "size": 887, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/ClobberingReload.jl-0d51577d-51b9-51d5-9c9b-f56d3e616bfa", "max_stars_repo_head_hexsha": "c42b0d5462badae070bae3b22450b3d91b21adde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2016-09-20T22:29:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T21:20:48.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/ClobberingReload.jl-0d51577d-51b9-51d5-9c9b-f56d3e616bfa", "max_issues_repo_head_hexsha": "c42b0d5462badae070bae3b22450b3d91b21adde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-09-25T17:28:09.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:30:46.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/ClobberingReload.jl-0d51577d-51b9-51d5-9c9b-f56d3e616bfa", "max_forks_repo_head_hexsha": "c42b0d5462badae070bae3b22450b3d91b21adde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-10-02T07:58:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T12:14:36.000Z", "avg_line_length": 22.7435897436, "max_line_length": 80, "alphanum_fraction": 0.6392333709, "num_tokens": 229}
|
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
v w : V
hv : v ∈ M.verts
hvw : Adj M v w
⊢ toEdge h { val := v, property := hv } = { val := Quotient.mk (Sym2.Rel.setoid V) (v, w), property := hvw }
[PROOFSTEP]
simp only [IsMatching.toEdge, Subtype.mk_eq_mk]
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
v w : V
hv : v ∈ M.verts
hvw : Adj M v w
⊢ Quotient.mk (Sym2.Rel.setoid V) (v, Exists.choose (_ : ∃! w, Adj M (↑{ val := v, property := hv }) w)) =
Quotient.mk (Sym2.Rel.setoid V) (v, w)
[PROOFSTEP]
congr
[GOAL]
case e_a.e_snd
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
v w : V
hv : v ∈ M.verts
hvw : Adj M v w
⊢ Exists.choose (_ : ∃! w, Adj M (↑{ val := v, property := hv }) w) = w
[PROOFSTEP]
exact ((h (M.edge_vert hvw)).choose_spec.2 w hvw).symm
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
⊢ Function.Surjective (toEdge h)
[PROOFSTEP]
rintro ⟨e, he⟩
[GOAL]
case mk
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
e : Sym2 V
he : e ∈ edgeSet M
⊢ ∃ a, toEdge h a = { val := e, property := he }
[PROOFSTEP]
refine Sym2.ind (fun x y he => ?_) e he
[GOAL]
case mk
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
e : Sym2 V
he✝ : e ∈ edgeSet M
x y : V
he : Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ edgeSet M
⊢ ∃ a, toEdge h a = { val := Quotient.mk (Sym2.Rel.setoid V) (x, y), property := he }
[PROOFSTEP]
exact ⟨⟨x, M.edge_vert he⟩, h.toEdge_eq_of_adj _ he⟩
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
v w : V
h : IsMatching M
hv : v ∈ M.verts
hw : w ∈ M.verts
ha : Adj M v w
⊢ toEdge h { val := v, property := hv } = toEdge h { val := w, property := hw }
[PROOFSTEP]
rw [h.toEdge_eq_of_adj hv ha, h.toEdge_eq_of_adj hw (M.symm ha), Subtype.mk_eq_mk, Sym2.eq_swap]
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
⊢ support M = M.verts
[PROOFSTEP]
refine M.support_subset_verts.antisymm fun v hv => ?_
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
v : V
hv : v ∈ M.verts
⊢ v ∈ support M
[PROOFSTEP]
obtain ⟨w, hvw, -⟩ := h hv
[GOAL]
case intro.intro
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
h : IsMatching M
v : V
hv : v ∈ M.verts
w : V
hvw : Adj M v w
⊢ v ∈ support M
[PROOFSTEP]
exact ⟨_, hvw⟩
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : (v : V) → Fintype ↑(neighborSet M v)
⊢ IsMatching M ↔ ∀ (v : V), v ∈ M.verts → degree M v = 1
[PROOFSTEP]
simp only [degree_eq_one_iff_unique_adj, IsMatching]
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h : IsMatching M
⊢ Even (Finset.card (Set.toFinset M.verts))
[PROOFSTEP]
classical
rw [isMatching_iff_forall_degree] at h
use M.coe.edgeFinset.card
rw [← two_mul, ← M.coe.sum_degrees_eq_twice_card_edges]
-- Porting note: `SimpleGraph.Subgraph.coe_degree` does not trigger because it uses
-- instance arguments instead of implicit arguments for the first `Fintype` argument.
-- Using a `convert_to` to swap out the `Fintype` instance to the "right" one.
convert_to _ = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v using 3
simp [h, Finset.card_univ]
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h : IsMatching M
⊢ Even (Finset.card (Set.toFinset M.verts))
[PROOFSTEP]
rw [isMatching_iff_forall_degree] at h
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Even (Finset.card (Set.toFinset M.verts))
[PROOFSTEP]
use M.coe.edgeFinset.card
[GOAL]
case h
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) =
Finset.card (edgeFinset (Subgraph.coe M)) + Finset.card (edgeFinset (Subgraph.coe M))
[PROOFSTEP]
rw [← two_mul, ← M.coe.sum_degrees_eq_twice_card_edges]
-- Porting note: `SimpleGraph.Subgraph.coe_degree` does not trigger because it uses
-- instance arguments instead of implicit arguments for the first `Fintype` argument.
-- Using a `convert_to` to swap out the `Fintype` instance to the "right" one.
[GOAL]
case h
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v
[PROOFSTEP]
convert_to _ = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v using 3
[GOAL]
case h.convert_2
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype ↑M.verts
h✝ : IsMatching M
h : ∀ (v : V), v ∈ M.verts → degree M v = 1
⊢ Finset.card (Set.toFinset M.verts) = Finset.sum Finset.univ fun v => SimpleGraph.degree (Subgraph.coe M) v
[PROOFSTEP]
simp [h, Finset.card_univ]
[GOAL]
V : Type u
G : SimpleGraph V
M : Subgraph G
⊢ IsPerfectMatching M ↔ ∀ (v : V), ∃! w, Adj M v w
[PROOFSTEP]
refine' ⟨_, fun hm => ⟨fun v _ => hm v, fun v => _⟩⟩
[GOAL]
case refine'_1
V : Type u
G : SimpleGraph V
M : Subgraph G
⊢ IsPerfectMatching M → ∀ (v : V), ∃! w, Adj M v w
[PROOFSTEP]
rintro ⟨hm, hs⟩ v
[GOAL]
case refine'_1.intro
V : Type u
G : SimpleGraph V
M : Subgraph G
hm : IsMatching M
hs : IsSpanning M
v : V
⊢ ∃! w, Adj M v w
[PROOFSTEP]
exact hm (hs v)
[GOAL]
case refine'_2
V : Type u
G : SimpleGraph V
M : Subgraph G
hm : ∀ (v : V), ∃! w, Adj M v w
v : V
⊢ v ∈ M.verts
[PROOFSTEP]
obtain ⟨w, hw, -⟩ := hm v
[GOAL]
case refine'_2.intro.intro
V : Type u
G : SimpleGraph V
M : Subgraph G
hm : ∀ (v : V), ∃! w, Adj M v w
v w : V
hw : Adj M v w
⊢ v ∈ M.verts
[PROOFSTEP]
exact M.edge_vert hw
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : (v : V) → Fintype ↑(neighborSet M v)
⊢ IsPerfectMatching M ↔ ∀ (v : V), degree M v = 1
[PROOFSTEP]
simp [degree_eq_one_iff_unique_adj, isPerfectMatching_iff]
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype V
h : IsPerfectMatching M
⊢ Even (Fintype.card V)
[PROOFSTEP]
classical simpa only [h.2.card_verts] using IsMatching.even_card h.1
[GOAL]
V : Type u
G : SimpleGraph V
M✝ M : Subgraph G
inst✝ : Fintype V
h : IsPerfectMatching M
⊢ Even (Fintype.card V)
[PROOFSTEP]
simpa only [h.2.card_verts] using IsMatching.even_card h.1
|
{"mathlib_filename": "Mathlib.Combinatorics.SimpleGraph.Matching", "llama_tokens": 2928}
|
# coding=utf-8
# Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def log_safe(x):
"""The same as tf.math.log(x), but clamps the input to prevent NaNs."""
return tf.math.log(tf.minimum(x, tf.cast(3e37, x.dtype)))
def log1p_safe(x):
"""The same as tf.math.log1p(x), but clamps the input to prevent NaNs."""
return tf.math.log1p(tf.minimum(x, tf.cast(3e37, x.dtype)))
def exp_safe(x):
"""The same as tf.math.exp(x), but clamps the input to prevent NaNs."""
return tf.math.exp(tf.minimum(x, tf.cast(87.5, x.dtype)))
def expm1_safe(x):
"""The same as tf.math.expm1(x), but clamps the input to prevent NaNs."""
return tf.math.expm1(tf.minimum(x, tf.cast(87.5, x.dtype)))
def inv_softplus(y):
"""The inverse of tf.nn.softplus()."""
return tf.where(y > 87.5, y, tf.math.log(tf.math.expm1(y)))
def logit(y):
"""The inverse of tf.nn.sigmoid()."""
return -tf.math.log(1. / y - 1.)
def affine_sigmoid(real, lo=0, hi=1):
"""Maps reals to (lo, hi), where 0 maps to (lo+hi)/2."""
if not lo < hi:
raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
alpha = tf.sigmoid(real) * (hi - lo) + lo
return alpha
def inv_affine_sigmoid(alpha, lo=0, hi=1):
"""The inverse of affine_sigmoid(., lo, hi)."""
if not lo < hi:
raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
real = logit((alpha - lo) / (hi - lo))
return real
def affine_softplus(real, lo=0, ref=1):
"""Maps real numbers to (lo, infinity), where 0 maps to ref."""
if not lo < ref:
raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
shift = inv_softplus(tf.cast(1., real.dtype))
scale = (ref - lo) * tf.nn.softplus(real + shift) + lo
return scale
def inv_affine_softplus(scale, lo=0, ref=1):
"""The inverse of affine_softplus(., lo, ref)."""
if not lo < ref:
raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
shift = inv_softplus(tf.cast(1., scale.dtype))
real = inv_softplus((scale - lo) / (ref - lo)) - shift
return real
def students_t_nll(x, df, scale):
"""The NLL of a Generalized Student's T distribution (w/o including TFP)."""
return 0.5 * ((df + 1.) * tf.math.log1p(
(x / scale) ** 2. / df) + tf.math.log(df)) + tf.math.log(
tf.abs(scale)) + tf.math.lgamma(
0.5 * df) - tf.math.lgamma(0.5 * df + 0.5) + 0.5 * np.log(np.pi)
# A constant scale that makes tf.image.rgb_to_yuv() volume preserving.
_VOLUME_PRESERVING_YUV_SCALE = 1.580227820074
def rgb_to_syuv(rgb):
"""A volume preserving version of tf.image.rgb_to_yuv().
By "volume preserving" we mean that rgb_to_syuv() is in the "special linear
group", or equivalently, that the Jacobian determinant of the transformation
is 1.
Args:
rgb: A tensor whose last dimension corresponds to RGB channels and is of
size 3.
Returns:
A scaled YUV version of the input tensor, such that this transformation is
volume-preserving.
"""
return _VOLUME_PRESERVING_YUV_SCALE * tf.image.rgb_to_yuv(rgb)
def syuv_to_rgb(yuv):
"""A volume preserving version of tf.image.yuv_to_rgb().
By "volume preserving" we mean that rgb_to_syuv() is in the "special linear
group", or equivalently, that the Jacobian determinant of the transformation
is 1.
Args:
yuv: A tensor whose last dimension corresponds to scaled YUV channels and is
of size 3 (ie, the output of rgb_to_syuv()).
Returns:
An RGB version of the input tensor, such that this transformation is
volume-preserving.
"""
return tf.image.yuv_to_rgb(yuv / _VOLUME_PRESERVING_YUV_SCALE)
def image_dct(image):
"""Does a type-II DCT (aka "The DCT") on axes 1 and 2 of a rank-3 tensor."""
dct_y = tf.transpose(tf.spectral.dct(image, type=2, norm='ortho'), [0, 2, 1])
dct_x = tf.transpose(tf.spectral.dct(dct_y, type=2, norm='ortho'), [0, 2, 1])
return dct_x
def image_idct(dct_x):
"""Inverts image_dct(), by performing a type-III DCT."""
dct_y = tf.spectral.idct(tf.transpose(dct_x, [0, 2, 1]), type=2, norm='ortho')
image = tf.spectral.idct(tf.transpose(dct_y, [0, 2, 1]), type=2, norm='ortho')
return image
def compute_jacobian(f, x):
"""Computes the Jacobian of function `f` with respect to input `x`."""
x_ph = tf.placeholder(tf.float32, x.shape)
vec = lambda x: tf.reshape(x, [-1])
jacobian = tf.stack(
[vec(tf.gradients(vec(f(x_ph))[d], x_ph)[0]) for d in range(x.size)], 1)
with tf.Session() as sess:
jacobian = sess.run(jacobian, {x_ph: x})
return jacobian
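# Usage sketch (illustrative comments only): Jacobian of an elementwise square at a small input,
# assuming the TF 1.x graph mode that compute_jacobian() above is written for.
#
#   J = compute_jacobian(lambda t: t ** 2, np.arange(3., dtype=np.float32))
#   # J is a (3, 3) matrix that is diagonal with 2 * x on the diagonal.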
def get_resource_as_file(path):
"""A uniform interface for internal/open-source files."""
class NullContextManager(object):
def __init__(self, dummy_resource=None):
self.dummy_resource = dummy_resource
def __enter__(self):
return self.dummy_resource
def __exit__(self, *args):
pass
return NullContextManager('./' + path)
def get_resource_filename(path):
"""A uniform interface for internal/open-source filenames."""
return './' + path
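# Minimal round-trip sketch (not part of the original module): affine_sigmoid maps reals into (lo, hi)
# and inv_affine_sigmoid inverts it. Written for TF 1.x graph mode, matching the rest of this file;
# the input values are illustrative.
if __name__ == "__main__":
    real = tf.constant([-2.0, 0.0, 2.0])
    alpha = affine_sigmoid(real, lo=0., hi=2.)          # 0 maps to (lo + hi) / 2 = 1
    recovered = inv_affine_sigmoid(alpha, lo=0., hi=2.)
    with tf.Session() as sess:
        print(sess.run([alpha, recovered]))  # recovered is ~[-2, 0, 2] up to float error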
|
{"hexsha": "863130f8ae9986927406cce911625034bdce3e88", "size": 6391, "ext": "py", "lang": "Python", "max_stars_repo_path": "losses/general_adaptive_loss/utils.py", "max_stars_repo_name": "gvalvano/idas", "max_stars_repo_head_hexsha": "e1b112c8d0cd17b2b8486435dfe9de477bca2221", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-07-04T00:04:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T01:49:34.000Z", "max_issues_repo_path": "idas/losses/general_adaptive_loss/utils.py", "max_issues_repo_name": "gvalvano/unet_crf_as_rnn", "max_issues_repo_head_hexsha": "31b79741b77614764dcf3d2690fe0b0fab44934d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-31T14:41:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-21T18:16:19.000Z", "max_forks_repo_path": "losses/general_adaptive_loss/utils.py", "max_forks_repo_name": "gvalvano/idas", "max_forks_repo_head_hexsha": "e1b112c8d0cd17b2b8486435dfe9de477bca2221", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-10-21T01:02:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-14T16:52:18.000Z", "avg_line_length": 32.7743589744, "max_line_length": 82, "alphanum_fraction": 0.6623376623, "include": true, "reason": "import numpy", "num_tokens": 1797}
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The GymEnvironment base class.
"""
import gym
from gym import spaces
import numpy as np
from mindspore.ops import operations as P
from mindspore_rl.environment.environment import Environment
from mindspore_rl.environment.space import Space
class GymEnvironment(Environment):
"""
The GymEnvironment class provides the functions to interact with
different environments.
Args:
        params (dict): A dictionary containing all the parameters used to create the
            instance of GymEnvironment, such as the name of the environment.
        env_id (int): An integer used to set the seed of this environment.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> env_params = {'name': 'CartPole-v0'}
>>> environment = GymEnvironment(env_params, 0)
>>> print(environment)
GymEnvironment<>
"""
def __init__(self, params, env_id=0):
super(GymEnvironment, self).__init__()
self.params = params
self._name = params['name']
self._env = gym.make(self._name)
if 'seed' in params:
self._env.seed(params['seed'] + env_id * 1000)
self._observation_space = self._space_adapter(self._env.observation_space)
self._action_space = self._space_adapter(self._env.action_space)
self._reward_space = Space((1,), np.float32)
self._done_space = Space((1,), np.bool_, low=0, high=2)
# reset op
reset_input_type = []
reset_input_shape = []
reset_output_type = [self._observation_space.ms_dtype,]
reset_output_shape = [self._observation_space.shape,]
self._reset_op = P.PyFunc(self._reset, reset_input_type,
reset_input_shape, reset_output_type, reset_output_shape)
# step op
step_input_type = (self._action_space.ms_dtype,)
step_input_shape = (self._action_space.shape,)
step_output_type = (self.observation_space.ms_dtype, self._reward_space.ms_dtype, self._done_space.ms_dtype)
step_output_shape = (self._observation_space.shape, self._reward_space.shape, self._done_space.shape)
self._step_op = P.PyFunc(
self._step, step_input_type, step_input_shape, step_output_type, step_output_shape)
self.action_dtype = self._action_space.ms_dtype
self.cast = P.Cast()
def reset(self):
"""
        Reset the environment to its initial state. It is always called at the beginning of each
        episode, and it returns the value of the initial state.
        Returns:
            A tensor representing the initial state of the environment.
"""
return self._reset_op()[0]
def step(self, action):
r"""
Execute the environment step, which means that interact with environment once.
Args:
action (Tensor): A tensor that contains the action information.
Returns:
- state (Tensor), the environment state after performing the action.
- reward (Tensor), the reward after performing the action.
- done (mindspore.bool\_), whether the simulation finishes or not.
"""
# Add cast ops for mixed precision case. Redundant cast ops will be eliminated automatically.
action = self.cast(action, self.action_dtype)
return self._step_op(action)
@property
def observation_space(self):
"""
Get the state space of the environment.
Returns:
            The Space object describing the states of the environment.
"""
return self._observation_space
@property
def action_space(self):
"""
Get the action space of the environment.
Returns:
            The Space object describing the actions of the environment.
"""
return self._action_space
@property
def reward_space(self):
return self._reward_space
@property
def done_space(self):
return self._done_space
@property
def config(self):
return {}
def _reset(self):
"""
        The Python code (which cannot be interpreted by the MindSpore interpreter) for resetting
        the environment. It is the main body of the reset function. Because it is wrapped by
        PyFunc, the Python code has to be encapsulated in a function.
        Returns:
            A numpy array representing the initial state of the environment.
"""
s0 = self._env.reset()
        # In some gym versions, the observation space is declared to be float32, but reset and step return float64.
s0 = s0.astype(self.observation_space.np_dtype)
return s0
def _step(self, action):
"""
The python(can not be interpreted by mindspore interpreter) code of interacting with the
environment. It is the main body of step function. Due to Pyfunc, we need to
capsule python code into a function.
Args:
            action(int or float): The action computed by the policy net. It can be an integer
                or a float, depending on the environment.
Returns:
- s1 (numpy.array), the environment state after performing the action.
- r1 (numpy.array), the reward after performing the action.
- done (boolean), whether the simulation finishes or not.
"""
s, r, done, _ = self._env.step(action)
        # In some gym versions, the observation space is declared to be float32, but reset and step return float64.
s = s.astype(self.observation_space.np_dtype)
r = np.array([r]).astype(np.float32)
done = np.array([done])
return s, r, done
def _space_adapter(self, gym_space):
"""Transfer gym dtype to the dtype that is suitable for MindSpore"""
shape = gym_space.shape
gym_type = gym_space.dtype.type
# The dtype get from gym.space is np.int64, but step() accept np.int32 actually.
if gym_type == np.int64:
dtype = np.int32
# The float64 is not supported, cast to float32
elif gym_type == np.float64:
dtype = np.float32
else:
dtype = gym_type
if isinstance(gym_space, spaces.Discrete):
return Space(shape, dtype, low=0, high=gym_space.n)
return Space(shape, dtype, low=gym_space.low, high=gym_space.high)
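# Minimal rollout sketch (illustrative only, not part of the module): one reset/step interaction with
# the wrapped environment. The action value 0 assumes CartPole's discrete {0, 1} action space; graph-mode
# training would normally drive step() from inside a MindSpore Cell instead.
if __name__ == "__main__":
    from mindspore import Tensor

    env = GymEnvironment({'name': 'CartPole-v0'})
    state = env.reset()
    action = Tensor(np.array(0, dtype=np.int32))  # push the cart to the left
    state, reward, done = env.step(action)
    print(state, reward, done)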
|
{"hexsha": "f4778468f3cbe18cb715785c3066204c533469c2", "size": 7025, "ext": "py", "lang": "Python", "max_stars_repo_path": "mindspore_rl/environment/gym_environment.py", "max_stars_repo_name": "mindspore-ai/reinforcement", "max_stars_repo_head_hexsha": "6a1c02f2f8dec0773dcd82ddb660856c6582cffb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-10T01:10:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T01:29:14.000Z", "max_issues_repo_path": "mindspore_rl/environment/gym_environment.py", "max_issues_repo_name": "mindspore-ai/reinforcement", "max_issues_repo_head_hexsha": "6a1c02f2f8dec0773dcd82ddb660856c6582cffb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mindspore_rl/environment/gym_environment.py", "max_forks_repo_name": "mindspore-ai/reinforcement", "max_forks_repo_head_hexsha": "6a1c02f2f8dec0773dcd82ddb660856c6582cffb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0256410256, "max_line_length": 117, "alphanum_fraction": 0.6439857651, "include": true, "reason": "import numpy", "num_tokens": 1512}
|
[STATEMENT]
lemma ta_nf_lang_complete:
assumes linear: "\<forall> l |\<in>| R. linear_term l"
and ground: "ground (t :: ('f, 'v) term)" and sig: "funas_term t \<subseteq> fset \<F>"
and nf: "\<And>C \<sigma> l. l |\<in>| R \<Longrightarrow> C\<langle>l\<cdot>\<sigma>\<rangle> \<noteq> t"
shows "t \<in> ta_lang (fstates R) (nf_ta R \<F>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<in> ta_lang (fstates R) (nf_ta R \<F>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. t \<in> ta_lang (fstates R) (nf_ta R \<F>)
[PROOF STEP]
from nf
[PROOF STATE]
proof (chain)
picking this:
?l |\<in>| R \<Longrightarrow> ?C\<langle>?l \<cdot> ?\<sigma>\<rangle> \<noteq> t
[PROOF STEP]
have "\<And> l s. l |\<in>| R \<Longrightarrow> t \<unrhd> s \<Longrightarrow> \<not> l\<^sup>\<bottom> \<le>\<^sub>b s\<^sup>\<bottom>"
[PROOF STATE]
proof (prove)
using this:
?l |\<in>| R \<Longrightarrow> ?C\<langle>?l \<cdot> ?\<sigma>\<rangle> \<noteq> t
goal (1 subgoal):
1. \<And>l s. \<lbrakk>l |\<in>| R; t \<unrhd> s\<rbrakk> \<Longrightarrow> (l\<^sup>\<bottom>, s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}
[PROOF STEP]
using bless_eq_to_instance linear
[PROOF STATE]
proof (prove)
using this:
?l |\<in>| R \<Longrightarrow> ?C\<langle>?l \<cdot> ?\<sigma>\<rangle> \<noteq> t
\<lbrakk>?s\<^sup>\<bottom> \<le>\<^sub>b ?t\<^sup>\<bottom>; linear_term ?s\<rbrakk> \<Longrightarrow> \<exists>\<sigma>. ?s \<cdot> \<sigma> = ?t
\<forall>l|\<in>|R. linear_term l
goal (1 subgoal):
1. \<And>l s. \<lbrakk>l |\<in>| R; t \<unrhd> s\<rbrakk> \<Longrightarrow> (l\<^sup>\<bottom>, s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<lbrakk>?l |\<in>| R; t \<unrhd> ?s\<rbrakk> \<Longrightarrow> (?l\<^sup>\<bottom>, ?s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}
goal (1 subgoal):
1. t \<in> ta_lang (fstates R) (nf_ta R \<F>)
[PROOF STEP]
from ta_nf_sound2[OF linear ground sig] this
[PROOF STATE]
proof (chain)
picking this:
(\<And>l s. \<lbrakk>l |\<in>| R; t \<unrhd> s\<rbrakk> \<Longrightarrow> (l\<^sup>\<bottom>, s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}) \<Longrightarrow> \<exists>q. q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)
\<lbrakk>?l |\<in>| R; t \<unrhd> ?s\<rbrakk> \<Longrightarrow> (?l\<^sup>\<bottom>, ?s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}
[PROOF STEP]
obtain q where "q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)"
[PROOF STATE]
proof (prove)
using this:
(\<And>l s. \<lbrakk>l |\<in>| R; t \<unrhd> s\<rbrakk> \<Longrightarrow> (l\<^sup>\<bottom>, s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}) \<Longrightarrow> \<exists>q. q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)
\<lbrakk>?l |\<in>| R; t \<unrhd> ?s\<rbrakk> \<Longrightarrow> (?l\<^sup>\<bottom>, ?s\<^sup>\<bottom>) \<notin> {\<le>\<^sub>b}
goal (1 subgoal):
1. (\<And>q. q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)
goal (1 subgoal):
1. t \<in> ta_lang (fstates R) (nf_ta R \<F>)
[PROOF STEP]
from this ta_nf_tr_to_state[OF ground this] ground
[PROOF STATE]
proof (chain)
picking this:
q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)
q |\<in>| fstates R
ground t
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
q |\<in>| ta_der (nf_ta R \<F>) (adapt_vars t)
q |\<in>| fstates R
ground t
goal (1 subgoal):
1. t \<in> ta_lang (fstates R) (nf_ta R \<F>)
[PROOF STEP]
by (intro ta_langI) (auto simp add: nf_ta_def)
[PROOF STATE]
proof (state)
this:
t \<in> ta_lang (fstates R) (nf_ta R \<F>)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1609, "file": "FO_Theory_Rewriting_Primitives_NF", "length": 12}
|
#==============================================================================#
# ApplicationAutoScaling.jl
#
# This file is generated from:
# https://github.com/aws/aws-sdk-js/blob/master/apis/application-autoscaling-2016-02-06.normal.json
#==============================================================================#
__precompile__()
module ApplicationAutoScaling
using AWSCore
"""
using AWSSDK.ApplicationAutoScaling.delete_scaling_policy
delete_scaling_policy([::AWSConfig], arguments::Dict)
delete_scaling_policy([::AWSConfig]; PolicyName=, ServiceNamespace=, ResourceId=, ScalableDimension=)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DeleteScalingPolicy", arguments::Dict)
application_autoscaling([::AWSConfig], "DeleteScalingPolicy", PolicyName=, ServiceNamespace=, ResourceId=, ScalableDimension=)
# DeleteScalingPolicy Operation
Deletes the specified Application Auto Scaling scaling policy.
Deleting a policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.
To create a scaling policy or update an existing one, see [PutScalingPolicy](@ref).
# Arguments
## `PolicyName = ::String` -- *Required*
The name of the scaling policy.
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"` -- *Required*
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
# Returns
`DeleteScalingPolicyResponse`
# Exceptions
`ValidationException`, `ObjectNotFoundException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To delete a scaling policy
This example deletes a scaling policy for the Amazon ECS service called web-app, which is running in the default cluster.
Input:
```
[
"PolicyName" => "web-app-cpu-lt-25",
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs"
]
```
Output:
```
Dict(
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScalingPolicy)
"""
@inline delete_scaling_policy(aws::AWSConfig=default_aws_config(); args...) = delete_scaling_policy(aws, args)
@inline delete_scaling_policy(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DeleteScalingPolicy", args)
@inline delete_scaling_policy(args) = delete_scaling_policy(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.delete_scheduled_action
delete_scheduled_action([::AWSConfig], arguments::Dict)
delete_scheduled_action([::AWSConfig]; ServiceNamespace=, ScheduledActionName=, ResourceId=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DeleteScheduledAction", arguments::Dict)
application_autoscaling([::AWSConfig], "DeleteScheduledAction", ServiceNamespace=, ScheduledActionName=, ResourceId=, <keyword arguments>)
# DeleteScheduledAction Operation
Deletes the specified Application Auto Scaling scheduled action.
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ScheduledActionName = ::String` -- *Required*
The name of the scheduled action.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
# Returns
`DeleteScheduledActionResponse`
# Exceptions
`ValidationException`, `ObjectNotFoundException`, `ConcurrentUpdateException` or `InternalServiceException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScheduledAction)
"""
@inline delete_scheduled_action(aws::AWSConfig=default_aws_config(); args...) = delete_scheduled_action(aws, args)
@inline delete_scheduled_action(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DeleteScheduledAction", args)
@inline delete_scheduled_action(args) = delete_scheduled_action(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.deregister_scalable_target
deregister_scalable_target([::AWSConfig], arguments::Dict)
deregister_scalable_target([::AWSConfig]; ServiceNamespace=, ResourceId=, ScalableDimension=)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DeregisterScalableTarget", arguments::Dict)
application_autoscaling([::AWSConfig], "DeregisterScalableTarget", ServiceNamespace=, ResourceId=, ScalableDimension=)
# DeregisterScalableTarget Operation
Deregisters a scalable target.
Deregistering a scalable target deletes the scaling policies that are associated with it.
To create a scalable target or update an existing one, see [RegisterScalableTarget](@ref).
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"` -- *Required*
The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
# Returns
`DeregisterScalableTargetResponse`
# Exceptions
`ValidationException`, `ObjectNotFoundException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To deregister a scalable target
This example deregisters a scalable target for an Amazon ECS service called web-app that is running in the default cluster.
Input:
```
[
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs"
]
```
Output:
```
Dict(
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeregisterScalableTarget)
"""
@inline deregister_scalable_target(aws::AWSConfig=default_aws_config(); args...) = deregister_scalable_target(aws, args)
@inline deregister_scalable_target(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DeregisterScalableTarget", args)
@inline deregister_scalable_target(args) = deregister_scalable_target(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.describe_scalable_targets
describe_scalable_targets([::AWSConfig], arguments::Dict)
describe_scalable_targets([::AWSConfig]; ServiceNamespace=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DescribeScalableTargets", arguments::Dict)
application_autoscaling([::AWSConfig], "DescribeScalableTargets", ServiceNamespace=, <keyword arguments>)
# DescribeScalableTargets Operation
Gets information about the scalable targets in the specified namespace.
You can filter the results using the `ResourceIds` and `ScalableDimension` parameters.
To create a scalable target or update an existing one, see [RegisterScalableTarget](@ref). If you are no longer using a scalable target, you can deregister it using [DeregisterScalableTarget](@ref).
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceIds = [::String, ...]`
The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `MaxResults = ::Int`
The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.
If this parameter is used, the operation returns up to `MaxResults` results at a time, along with a `NextToken` value. To get the next set of results, include the `NextToken` value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a `NextToken` value, if applicable.
## `NextToken = ::String`
The token for the next set of results.
# Returns
`DescribeScalableTargetsResponse`
# Exceptions
`ValidationException`, `InvalidNextTokenException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To describe scalable targets
This example describes the scalable targets for the ecs service namespace.
Input:
```
[
"ServiceNamespace" => "ecs"
]
```
Output:
```
Dict(
"ScalableTargets" => [
Dict(
"CreationTime" => "2016-05-06T11:21:46.199Z",
"MaxCapacity" => 10,
"MinCapacity" => 1,
"ResourceId" => "service/default/web-app",
"RoleARN" => "arn:aws:iam::012345678910:role/ApplicationAutoscalingECSRole",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs"
)
]
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalableTargets)
"""
@inline describe_scalable_targets(aws::AWSConfig=default_aws_config(); args...) = describe_scalable_targets(aws, args)
@inline describe_scalable_targets(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DescribeScalableTargets", args)
@inline describe_scalable_targets(args) = describe_scalable_targets(default_aws_config(), args)
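# Pagination sketch (illustrative only, not generated from the API definition): page through all
# scalable targets by forwarding `NextToken` until the response no longer contains one. The field
# names follow the response shape documented above.
#
#     targets = []
#     resp = describe_scalable_targets(ServiceNamespace="ecs")
#     append!(targets, resp["ScalableTargets"])
#     while haskey(resp, "NextToken")
#         resp = describe_scalable_targets(ServiceNamespace="ecs", NextToken=resp["NextToken"])
#         append!(targets, resp["ScalableTargets"])
#     end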
"""
using AWSSDK.ApplicationAutoScaling.describe_scaling_activities
describe_scaling_activities([::AWSConfig], arguments::Dict)
describe_scaling_activities([::AWSConfig]; ServiceNamespace=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DescribeScalingActivities", arguments::Dict)
application_autoscaling([::AWSConfig], "DescribeScalingActivities", ServiceNamespace=, <keyword arguments>)
# DescribeScalingActivities Operation
Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.
You can filter the results using the `ResourceId` and `ScalableDimension` parameters.
Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see [DescribeScalingPolicies](@ref). To create a scaling policy or update an existing one, see [PutScalingPolicy](@ref).
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String`
The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `MaxResults = ::Int`
The maximum number of scaling activity results. This value can be between 1 and 50. The default value is 50.
If this parameter is used, the operation returns up to `MaxResults` results at a time, along with a `NextToken` value. To get the next set of results, include the `NextToken` value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a `NextToken` value, if applicable.
## `NextToken = ::String`
The token for the next set of results.
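For illustration, a minimal pagination sketch that feeds each response's `NextToken` into the next request. This is hypothetical: it assumes a default `AWSConfig` is configured and that the response behaves like a `Dict` keyed by field name, as AWSCore typically returns for JSON APIs:
```
activities = Any[]
token = nothing
while true
    args = Dict("ServiceNamespace" => "ecs", "MaxResults" => 10)
    token === nothing || (args["NextToken"] = token)
    resp = describe_scaling_activities(args)
    append!(activities, get(resp, "ScalingActivities", []))  # collect this page
    token = get(resp, "NextToken", nothing)                  # nothing => last page
    token === nothing && break
end
```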
# Returns
`DescribeScalingActivitiesResponse`
# Exceptions
`ValidationException`, `InvalidNextTokenException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To describe scaling activities for a scalable target
This example describes the scaling activities for an Amazon ECS service called web-app that is running in the default cluster.
Input:
```
[
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs"
]
```
Output:
```
Dict(
"ScalingActivities" => [
Dict(
"ActivityId" => "e6c5f7d1-dbbb-4a3f-89b2-51f33e766399",
"Cause" => "monitor alarm web-app-cpu-lt-25 in state ALARM triggered policy web-app-cpu-lt-25",
"Description" => "Setting desired count to 1.",
"EndTime" => "2016-05-06T16:04:32.111Z",
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs",
"StartTime" => "2016-05-06T16:03:58.171Z",
"StatusCode" => "Successful",
"StatusMessage" => "Successfully set desired count to 1. Change successfully fulfilled by ecs."
)
]
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingActivities)
"""
@inline describe_scaling_activities(aws::AWSConfig=default_aws_config(); args...) = describe_scaling_activities(aws, args)
@inline describe_scaling_activities(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DescribeScalingActivities", args)
@inline describe_scaling_activities(args) = describe_scaling_activities(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.describe_scaling_policies
describe_scaling_policies([::AWSConfig], arguments::Dict)
describe_scaling_policies([::AWSConfig]; ServiceNamespace=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DescribeScalingPolicies", arguments::Dict)
application_autoscaling([::AWSConfig], "DescribeScalingPolicies", ServiceNamespace=, <keyword arguments>)
# DescribeScalingPolicies Operation
Describes the scaling policies for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and `PolicyNames` parameters.
To create a scaling policy or update an existing one, see [PutScalingPolicy](@ref). If you are no longer using a scaling policy, you can delete it using [DeleteScalingPolicy](@ref).
# Arguments
## `PolicyNames = [::String, ...]`
The names of the scaling policies to describe.
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String`
The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `MaxResults = ::Int`
The maximum number of scaling policy results. This value can be between 1 and 50. The default value is 50.
If this parameter is used, the operation returns up to `MaxResults` results at a time, along with a `NextToken` value. To get the next set of results, include the `NextToken` value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a `NextToken` value, if applicable.
## `NextToken = ::String`
The token for the next set of results.
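For illustration, a hypothetical call that narrows the results to two named policies on one ECS service (the service and policy names are made up):
```
describe_scaling_policies(
    ServiceNamespace = "ecs",
    ResourceId = "service/default/web-app",
    ScalableDimension = "ecs:service:DesiredCount",
    PolicyNames = ["web-app-cpu-gt-75", "web-app-cpu-lt-25"]
)
```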
# Returns
`DescribeScalingPoliciesResponse`
# Exceptions
`ValidationException`, `FailedResourceAccessException`, `InvalidNextTokenException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To describe scaling policies
This example describes the scaling policies for the ecs service namespace.
Input:
```
[
"ServiceNamespace" => "ecs"
]
```
Output:
```
Dict(
"NextToken" => "",
"ScalingPolicies" => [
Dict(
"Alarms" => [
Dict(
"AlarmARN" => "arn:aws:cloudwatch:us-west-2:012345678910:alarm:web-app-cpu-gt-75",
"AlarmName" => "web-app-cpu-gt-75"
)
],
"CreationTime" => "2016-05-06T12:11:39.230Z",
"PolicyARN" => "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75",
"PolicyName" => "web-app-cpu-gt-75",
"PolicyType" => "StepScaling",
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs",
"StepScalingPolicyConfiguration" => Dict(
"AdjustmentType" => "PercentChangeInCapacity",
"Cooldown" => 60,
"StepAdjustments" => [
Dict(
"MetricIntervalLowerBound" => 0,
"ScalingAdjustment" => 200
)
]
)
)
]
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingPolicies)
"""
@inline describe_scaling_policies(aws::AWSConfig=default_aws_config(); args...) = describe_scaling_policies(aws, args)
@inline describe_scaling_policies(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DescribeScalingPolicies", args)
@inline describe_scaling_policies(args) = describe_scaling_policies(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.describe_scheduled_actions
describe_scheduled_actions([::AWSConfig], arguments::Dict)
describe_scheduled_actions([::AWSConfig]; ServiceNamespace=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "DescribeScheduledActions", arguments::Dict)
application_autoscaling([::AWSConfig], "DescribeScheduledActions", ServiceNamespace=, <keyword arguments>)
# DescribeScheduledActions Operation
Describes the scheduled actions for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and `ScheduledActionNames` parameters.
To create a scheduled action or update an existing one, see [PutScheduledAction](@ref). If you are no longer using a scheduled action, you can delete it using [DeleteScheduledAction](@ref).
# Arguments
## `ScheduledActionNames = [::String, ...]`
The names of the scheduled actions to describe.
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String`
The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `MaxResults = ::Int`
The maximum number of scheduled action results. This value can be between 1 and 50. The default value is 50.
If this parameter is used, the operation returns up to `MaxResults` results at a time, along with a `NextToken` value. To get the next set of results, include the `NextToken` value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a `NextToken` value, if applicable.
## `NextToken = ::String`
The token for the next set of results.
# Returns
`DescribeScheduledActionsResponse`
# Exceptions
`ValidationException`, `InvalidNextTokenException`, `ConcurrentUpdateException` or `InternalServiceException`.
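# Example: To describe scheduled actions for a DynamoDB table
An illustrative sketch (the table name is hypothetical) that lists the scheduled actions registered against a table's read capacity:
```
describe_scheduled_actions(
    ServiceNamespace = "dynamodb",
    ResourceId = "table/my-table",
    ScalableDimension = "dynamodb:table:ReadCapacityUnits"
)
```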
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScheduledActions)
"""
@inline describe_scheduled_actions(aws::AWSConfig=default_aws_config(); args...) = describe_scheduled_actions(aws, args)
@inline describe_scheduled_actions(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "DescribeScheduledActions", args)
@inline describe_scheduled_actions(args) = describe_scheduled_actions(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.put_scaling_policy
put_scaling_policy([::AWSConfig], arguments::Dict)
put_scaling_policy([::AWSConfig]; PolicyName=, ServiceNamespace=, ResourceId=, ScalableDimension=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "PutScalingPolicy", arguments::Dict)
application_autoscaling([::AWSConfig], "PutScalingPolicy", PolicyName=, ServiceNamespace=, ResourceId=, ScalableDimension=, <keyword arguments>)
# PutScalingPolicy Operation
Creates or updates a policy for an Application Auto Scaling scalable target.
Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you register the scalable target using [RegisterScalableTarget](@ref).
To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.
You can view the scaling policies for a service namespace using [DescribeScalingPolicies](@ref). If you are no longer using a scaling policy, you can delete it using [DeleteScalingPolicy](@ref).
# Arguments
## `PolicyName = ::String` -- *Required*
The name of the scaling policy.
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"` -- *Required*
The scalable dimension. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `PolicyType = "StepScaling" or "TargetTrackingScaling"`
The policy type. This parameter is required if you are creating a policy.
For DynamoDB, only `TargetTrackingScaling` is supported. For Amazon ECS, Spot Fleet, and Amazon RDS, both `StepScaling` and `TargetTrackingScaling` are supported. For any other service, only `StepScaling` is supported.
## `StepScalingPolicyConfiguration = [ ... ]`
A step scaling policy.
This parameter is required if you are creating a policy and the policy type is `StepScaling`.
```
StepScalingPolicyConfiguration = [
"AdjustmentType" => "ChangeInCapacity", "PercentChangeInCapacity" or "ExactCapacity",
"StepAdjustments" => [[
"MetricIntervalLowerBound" => double,
"MetricIntervalUpperBound" => double,
"ScalingAdjustment" => <required> ::Int
], ...],
"MinAdjustmentMagnitude" => ::Int,
"Cooldown" => ::Int,
"MetricAggregationType" => "Average", "Minimum" or "Maximum"
]
```
## `TargetTrackingScalingPolicyConfiguration = [ ... ]`
A target tracking policy.
This parameter is required if you are creating a policy and the policy type is `TargetTrackingScaling`.
```
TargetTrackingScalingPolicyConfiguration = [
"TargetValue" => <required> double,
"PredefinedMetricSpecification" => [
"PredefinedMetricType" => <required> "DynamoDBReadCapacityUtilization", "DynamoDBWriteCapacityUtilization", "ALBRequestCountPerTarget", "RDSReaderAverageCPUUtilization", "RDSReaderAverageDatabaseConnections", "EC2SpotFleetRequestAverageCPUUtilization", "EC2SpotFleetRequestAverageNetworkIn", "EC2SpotFleetRequestAverageNetworkOut", "SageMakerVariantInvocationsPerInstance", "ECSServiceAverageCPUUtilization" or "ECSServiceAverageMemoryUtilization",
"ResourceLabel" => ::String
],
"CustomizedMetricSpecification" => [
"MetricName" => <required> ::String,
"Namespace" => <required> ::String,
"Dimensions" => [[
"Name" => <required> ::String,
"Value" => <required> ::String
], ...],
"Statistic" => <required> "Average", "Minimum", "Maximum", "SampleCount" or "Sum",
"Unit" => ::String
],
"ScaleOutCooldown" => ::Int,
"ScaleInCooldown" => ::Int,
"DisableScaleIn" => ::Bool
]
```
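The worked examples below use `StepScaling`; for contrast, here is a hypothetical target-tracking policy that holds an ECS service's average CPU utilization near 75% (the policy and service names are made up):
```
put_scaling_policy(
    PolicyName = "web-app-cpu-tt-75",
    ServiceNamespace = "ecs",
    ResourceId = "service/default/web-app",
    ScalableDimension = "ecs:service:DesiredCount",
    PolicyType = "TargetTrackingScaling",
    TargetTrackingScalingPolicyConfiguration = [
        "TargetValue" => 75.0,
        "PredefinedMetricSpecification" => [
            "PredefinedMetricType" => "ECSServiceAverageCPUUtilization"
        ],
        "ScaleOutCooldown" => 60,
        "ScaleInCooldown" => 60
    ]
)
```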
# Returns
`PutScalingPolicyResponse`
# Exceptions
`ValidationException`, `LimitExceededException`, `ObjectNotFoundException`, `ConcurrentUpdateException`, `FailedResourceAccessException` or `InternalServiceException`.
# Example: To apply a scaling policy to an Amazon ECS service
This example applies a scaling policy to an Amazon ECS service called web-app in the default cluster. The policy increases the desired count of the service by 200%, with a cooldown period of 60 seconds.
Input:
```
[
"PolicyName" => "web-app-cpu-gt-75",
"PolicyType" => "StepScaling",
"ResourceId" => "service/default/web-app",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs",
"StepScalingPolicyConfiguration" => [
"AdjustmentType" => "PercentChangeInCapacity",
"Cooldown" => 60,
"StepAdjustments" => [
[
"MetricIntervalLowerBound" => 0,
"ScalingAdjustment" => 200
]
]
]
]
```
Output:
```
Dict(
"PolicyARN" => "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75"
)
```
# Example: To apply a scaling policy to an Amazon EC2 Spot fleet
This example applies a scaling policy to an Amazon EC2 Spot fleet. The policy increases the target capacity of the Spot fleet by 200%, with a cooldown period of 180 seconds.
Input:
```
[
"PolicyName" => "fleet-cpu-gt-75",
"PolicyType" => "StepScaling",
"ResourceId" => "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3",
"ScalableDimension" => "ec2:spot-fleet-request:TargetCapacity",
"ServiceNamespace" => "ec2",
"StepScalingPolicyConfiguration" => [
"AdjustmentType" => "PercentChangeInCapacity",
"Cooldown" => 180,
"StepAdjustments" => [
[
"MetricIntervalLowerBound" => 0,
"ScalingAdjustment" => 200
]
]
]
]
```
Output:
```
Dict(
"PolicyARN" => "arn:aws:autoscaling:us-east-1:012345678910:scalingPolicy:89406401-0cb7-4130-b770-d97cca0e446b:resource/ec2/spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3:policyName/fleet-cpu-gt-75"
)
```
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScalingPolicy)
"""
@inline put_scaling_policy(aws::AWSConfig=default_aws_config(); args...) = put_scaling_policy(aws, args)
@inline put_scaling_policy(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "PutScalingPolicy", args)
@inline put_scaling_policy(args) = put_scaling_policy(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.put_scheduled_action
put_scheduled_action([::AWSConfig], arguments::Dict)
put_scheduled_action([::AWSConfig]; ServiceNamespace=, ScheduledActionName=, ResourceId=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "PutScheduledAction", arguments::Dict)
application_autoscaling([::AWSConfig], "PutScheduledAction", ServiceNamespace=, ScheduledActionName=, ResourceId=, <keyword arguments>)
# PutScheduledAction Operation
Creates or updates a scheduled action for an Application Auto Scaling scalable target.
Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you register the scalable target using [RegisterScalableTarget](@ref).
To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.
You can view the scheduled actions using [DescribeScheduledActions](@ref). If you are no longer using a scheduled action, you can delete it using [DeleteScheduledAction](@ref).
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `Schedule = ::String`
The schedule for this action. The following formats are supported:
* At expressions - `at(*yyyy*-*mm*-*dd*T*hh*:*mm*:*ss*)`
* Rate expressions - `rate(*value* *unit*)`
* Cron expressions - `cron(*fields*)`
At expressions are useful for one-time schedules. Specify the time, in UTC.
For rate expressions, *value* is a positive integer and *unit* is `minute` | `minutes` | `hour` | `hours` | `day` | `days`.
For more information about cron expressions, see [Cron Expressions](http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch Events User Guide*.
## `ScheduledActionName = ::String` -- *Required*
The name of the scheduled action.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"`
The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `StartTime = timestamp`
The date and time for the scheduled action to start.
## `EndTime = timestamp`
The date and time for the scheduled action to end.
## `ScalableTargetAction = [ ... ]`
The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.
```
ScalableTargetAction = [
"MinCapacity" => ::Int,
"MaxCapacity" => ::Int
]
```
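For illustration, a hypothetical scheduled action that raises a DynamoDB table's capacity range every weekday morning at 08:00 UTC (the names and capacities are made up; the schedule uses the six-field CloudWatch Events cron syntax referenced above):
```
put_scheduled_action(
    ServiceNamespace = "dynamodb",
    ScheduledActionName = "scale-up-weekday-mornings",
    ResourceId = "table/my-table",
    ScalableDimension = "dynamodb:table:ReadCapacityUnits",
    Schedule = "cron(0 8 ? * MON-FRI *)",
    ScalableTargetAction = [
        "MinCapacity" => 20,
        "MaxCapacity" => 100
    ]
)
```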
# Returns
`PutScheduledActionResponse`
# Exceptions
`ValidationException`, `LimitExceededException`, `ObjectNotFoundException`, `ConcurrentUpdateException` or `InternalServiceException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScheduledAction)
"""
@inline put_scheduled_action(aws::AWSConfig=default_aws_config(); args...) = put_scheduled_action(aws, args)
@inline put_scheduled_action(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "PutScheduledAction", args)
@inline put_scheduled_action(args) = put_scheduled_action(default_aws_config(), args)
"""
using AWSSDK.ApplicationAutoScaling.register_scalable_target
register_scalable_target([::AWSConfig], arguments::Dict)
register_scalable_target([::AWSConfig]; ServiceNamespace=, ResourceId=, ScalableDimension=, <keyword arguments>)
using AWSCore.Services.application_autoscaling
application_autoscaling([::AWSConfig], "RegisterScalableTarget", arguments::Dict)
application_autoscaling([::AWSConfig], "RegisterScalableTarget", ServiceNamespace=, ResourceId=, ScalableDimension=, <keyword arguments>)
# RegisterScalableTarget Operation
Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out or scale in. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for its scalable dimension.
After you register a scalable target, you can create and apply scaling policies using [PutScalingPolicy](@ref). You can view the scaling policies for a service namespace using [DescribeScalableTargets](@ref). If you no longer need a scalable target, you can deregister it using [DeregisterScalableTarget](@ref).
# Arguments
## `ServiceNamespace = "ecs", "elasticmapreduce", "ec2", "appstream", "dynamodb", "rds", "sagemaker" or "custom-resource"` -- *Required*
The namespace of the AWS service that provides the resource or `custom-resource` for a resource provided by your own application or service. For more information, see [AWS Service Namespaces](http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) in the *Amazon Web Services General Reference*.
## `ResourceId = ::String` -- *Required*
The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.
* ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp`.
* Spot fleet request - The resource type is `spot-fleet-request` and the unique identifier is the Spot fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
* EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
* AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet`.
* DynamoDB table - The resource type is `table` and the unique identifier is the resource ID. Example: `table/my-table`.
* DynamoDB global secondary index - The resource type is `index` and the unique identifier is the resource ID. Example: `table/my-table/index/my-table-index`.
* Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster`.
* Amazon SageMaker endpoint variants - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering`.
* Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider.
## `ScalableDimension = "ecs:service:DesiredCount", "ec2:spot-fleet-request:TargetCapacity", "elasticmapreduce:instancegroup:InstanceCount", "appstream:fleet:DesiredCapacity", "dynamodb:table:ReadCapacityUnits", "dynamodb:table:WriteCapacityUnits", "dynamodb:index:ReadCapacityUnits", "dynamodb:index:WriteCapacityUnits", "rds:cluster:ReadReplicaCount", "sagemaker:variant:DesiredInstanceCount" or "custom-resource:ResourceType:Property"` -- *Required*
The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.
* `ecs:service:DesiredCount` - The desired task count of an ECS service.
* `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot fleet request.
* `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.
* `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.
* `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.
* `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.
* `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.
* `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.
* `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition.
* `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for an Amazon SageMaker model endpoint variant.
* `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.
## `MinCapacity = ::Int`
The minimum value to scale to in response to a scale in event. This parameter is required if you are registering a scalable target.
## `MaxCapacity = ::Int`
The maximum value to scale to in response to a scale out event. This parameter is required if you are registering a scalable target.
## `RoleARN = ::String`
Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see [Service-Linked Roles for Application Auto Scaling](http://docs.aws.amazon.com/autoscaling/application/userguide/application-autoscaling-service-linked-roles.html).
For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.
# Returns
`RegisterScalableTargetResponse`
# Exceptions
`ValidationException`, `LimitExceededException`, `ConcurrentUpdateException` or `InternalServiceException`.
# Example: To register an ECS service as a scalable target
This example registers a scalable target from an Amazon ECS service called web-app that is running on the default cluster, with a minimum desired count of 1 task and a maximum desired count of 10 tasks.
Input:
```
[
"MaxCapacity" => 10,
"MinCapacity" => 1,
"ResourceId" => "service/default/web-app",
"RoleARN" => "arn:aws:iam::012345678910:role/ApplicationAutoscalingECSRole",
"ScalableDimension" => "ecs:service:DesiredCount",
"ServiceNamespace" => "ecs"
]
```
# Example: To register an EC2 Spot fleet as a scalable target
This example registers a scalable target from an Amazon EC2 Spot fleet with a minimum target capacity of 1 and a maximum of 10.
Input:
```
[
"MaxCapacity" => 10,
"MinCapacity" => 1,
"ResourceId" => "spot-fleet-request/sfr-45e69d8a-be48-4539-bbf3-3464e99c50c3",
"RoleARN" => "arn:aws:iam::012345678910:role/ApplicationAutoscalingSpotRole",
"ScalableDimension" => "ec2:spot-fleet-request:TargetCapacity",
"ServiceNamespace" => "ec2"
]
```
Output:
```
Dict(
)
```
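For illustration, a hypothetical registration of a DynamoDB table's write capacity (the table name and limits are made up); `RoleARN` is omitted here on the assumption that the resource supports a service-linked role, per the `RoleARN` note above:
```
register_scalable_target(
    ServiceNamespace = "dynamodb",
    ResourceId = "table/my-table",
    ScalableDimension = "dynamodb:table:WriteCapacityUnits",
    MinCapacity = 5,
    MaxCapacity = 500
)
```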
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/RegisterScalableTarget)
"""
@inline register_scalable_target(aws::AWSConfig=default_aws_config(); args...) = register_scalable_target(aws, args)
@inline register_scalable_target(aws::AWSConfig, args) = AWSCore.Services.application_autoscaling(aws, "RegisterScalableTarget", args)
@inline register_scalable_target(args) = register_scalable_target(default_aws_config(), args)
end # module ApplicationAutoScaling
#==============================================================================#
# End of file
#==============================================================================#
|
{"hexsha": "77e2d0d66df14862c83a29a3e72ccc9dc8da652f", "size": 70702, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ApplicationAutoScaling.jl", "max_stars_repo_name": "UnofficialJuliaMirror/AWSSDK.jl-0d499d91-6ae5-5d63-9313-12987b87d5ad", "max_stars_repo_head_hexsha": "85d61d0e02c66917795cc0f539ee7a8c76e2d1fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2017-08-14T01:35:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T12:27:35.000Z", "max_issues_repo_path": "src/ApplicationAutoScaling.jl", "max_issues_repo_name": "UnofficialJuliaMirror/AWSSDK.jl-0d499d91-6ae5-5d63-9313-12987b87d5ad", "max_issues_repo_head_hexsha": "85d61d0e02c66917795cc0f539ee7a8c76e2d1fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2017-08-23T20:07:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T16:24:26.000Z", "max_forks_repo_path": "src/ApplicationAutoScaling.jl", "max_forks_repo_name": "UnofficialJuliaMirror/AWSSDK.jl-0d499d91-6ae5-5d63-9313-12987b87d5ad", "max_forks_repo_head_hexsha": "85d61d0e02c66917795cc0f539ee7a8c76e2d1fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-08-27T13:38:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-07T19:30:07.000Z", "avg_line_length": 53.0, "max_line_length": 460, "alphanum_fraction": 0.7597239116, "num_tokens": 16720}
|
#!python3
"""
Various utilities for working with Python and Matplotlib
"""
import os
from math import ceil, sqrt

import matplotlib.pyplot as plt
import numpy as np
from skimage.io import imread


def show_images(images, titles=None):
    """Display a list of images side by side."""
    n_ims = len(images)
    if titles is None:
        titles = ['(%d)' % i for i in range(1, n_ims + 1)]
    fig = plt.figure()
    n = 1
    for image, title in zip(images, titles):
        a = fig.add_subplot(1, n_ims, n)  # Make subplot
        if image.ndim == 2:  # Is image grayscale?
            plt.gray()  # Only place in this blog you can't replace 'gray' with 'grey'
        plt.imshow(image)
        a.set_title(title)
        n += 1
    fig.set_size_inches(np.array(fig.get_size_inches()) * n_ims)
    plt.show()


def tile_images(lst, size=None):
    """
    Given a list of images (or paths to images), display them in a tiled format.
    """
    num_plots = len(lst)
    if size is None:
        cols = ceil(sqrt(num_plots))
        rows = ceil(num_plots / cols)
    else:
        rows, cols = size
    # Set up the figure; flatten the axes so a single index works whether
    # `subplots` returns a scalar Axes, a 1-D array, or a 2-D grid.
    fig, axes = plt.subplots(rows, cols)
    axes = np.atleast_1d(axes).ravel()
    # Plot the images using `Axes.imshow`
    for i, x in enumerate(lst):
        ax = axes[i]
        # If the element is a string, assume that it's a path
        if isinstance(x, str):
            try:
                img = imread(os.path.abspath(x))
                ax.imshow(img)
            except IOError:
                print("Unable to load image: %s" % x)
            except Exception as e:
                print(e)
        # Otherwise, attempt to load it as a numpy array
        else:
            try:
                img = np.array(x)
                ax.imshow(img)
            except Exception as e:
                print(e)
    # Hide any unused axes in the grid
    for ax in axes[num_plots:]:
        ax.set_axis_off()
    # Show the images
    plt.show()
    # Return the fig and (flattened) axes, in case the caller wants them
    return fig, axes
|
{"hexsha": "f14cf78130b63c45fa8968becbb0cd4d962abf9c", "size": 1892, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python-Programming/utils_pyplot.py", "max_stars_repo_name": "clickok/Code-Snippets", "max_stars_repo_head_hexsha": "403796256a2ec1bd37b2deb4ce5052671a39048f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python-Programming/utils_pyplot.py", "max_issues_repo_name": "clickok/Code-Snippets", "max_issues_repo_head_hexsha": "403796256a2ec1bd37b2deb4ce5052671a39048f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python-Programming/utils_pyplot.py", "max_forks_repo_name": "clickok/Code-Snippets", "max_forks_repo_head_hexsha": "403796256a2ec1bd37b2deb4ce5052671a39048f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8235294118, "max_line_length": 85, "alphanum_fraction": 0.5713530655, "include": true, "reason": "import numpy", "num_tokens": 457}
|
[STATEMENT]
lemma eadd_gfp_partial_function_mono [partial_function_mono]:
"\<lbrakk> monotone (fun_ord (\<ge>)) (\<ge>) f; monotone (fun_ord (\<ge>)) (\<ge>) g \<rbrakk>
\<Longrightarrow> monotone (fun_ord (\<ge>)) (\<ge>) (\<lambda>x. f x + g x :: enat)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>monotone (fun_ord (\<lambda>x y. y \<le> x)) (\<lambda>x y. y \<le> x) f; monotone (fun_ord (\<lambda>x y. y \<le> x)) (\<lambda>x y. y \<le> x) g\<rbrakk> \<Longrightarrow> monotone (fun_ord (\<lambda>x y. y \<le> x)) (\<lambda>x y. y \<le> x) (\<lambda>x. f x + g x)
[PROOF STEP]
by(rule mono2mono_gfp_eadd)
|
{"llama_tokens": 265, "file": null, "length": 1}
|