export Storage, update_storage
# Create a structure that will hold evaluation of the basis functions,
# as well as their derivative and second derivative
"""
$(TYPEDEF)
An immutable structure to hold the evaluation of basis functions
## Fields
$(TYPEDFIELDS)
"""
struct Storage
m::Int64
Nψ::Int64
Nx::Int64
# Expanded function
f::ExpandedFunction
# Off-diagonal basis evaluation
ψoff::Array{Float64,2}
# Off-diagonal basis evaluation ⊗ Diagonal basis evaluation
ψoffψd::Array{Float64,2}
# Off-diagonal basis evaluation ⊗ Diagonal basis evaluation at x = 0
ψoffψd0::Array{Float64,2}
# Off-diagonal basis evaluation ⊗ ∂_xd ψ(x_1,...,x_d)
ψoffdψxd::Array{Float64,2}
# Store the norm of each column of ψoffψd
ψnorm::Array{Float64,1}
    # Cache for ∂_c ∂_xd(f(x_{1:d-1},t))
cache_dcψxdt::Array{Float64,2}
cache_gradxd::Array{Float64,2}
    # Cache for ∂_xd(f(x_{1:d-1},t))
cache_dψxd::Array{Float64,1}
# Cache integration for J and dJ
cache_integral::Array{Float64,1}
# Cache for g(∂k(f(x_{1:k})))
cache_g::Array{Float64,1}
end
function Storage(f::ExpandedFunction, X)#, hess::Bool = false)
m = f.m
Nψ = f.Nψ
Nx = f.Nx
NxX, Ne = size(X)
@assert NxX == Nx
ψoff = evaluate_offdiagbasis(f, X)
ψoffψd = evaluate_diagbasis(f, X)
ψoffψd0 = repeated_evaluate_basis(f, zeros(Ne))
ψoffdψxd = repeated_grad_xk_basis(f, X[Nx,:])
@avx for j=1:Nψ
for i=1:Ne
ψoffij = ψoff[i,j]
ψoffψd[i,j] *= ψoffij
ψoffψd0[i,j] *= ψoffij
ψoffdψxd[i,j] *= ψoffij
end
end
ψnorm = zeros(Nψ)
@inbounds for i=1:Nψ
ψnorm[i] = norm(view(ψoffψd,:,i))
end
rmul!(ψnorm, 1/sqrt(Ne))
# Cache variable
cache_dcψxdt = zero(ψoff)
cache_gradxd = zeros(Ne, maximum(f.idx[:,end])+1)
cache_dψxd = zeros(Ne)
cache_integral = zeros(Ne + Ne*Nψ)
cache_g = zeros(Ne)
return Storage(m, Nψ, Nx, f, ψoff, ψoffψd, ψoffψd0, ψoffdψxd, ψnorm, cache_dcψxdt, cache_gradxd, cache_dψxd, cache_integral, cache_g)
end
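# Usage sketch (hedged illustration, not part of the package API): given an
# `ExpandedFunction` f and an Nx × Ne sample matrix X,
#   S = Storage(f, X)
# precomputes the basis evaluations once, so repeated objective/gradient
# evaluations can reuse S.ψoff, S.ψoffψd and the cache fields.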
# function update_storage(S::Storage{m, Nψ, k}, X::Array{Float64,2}, addedidx::Array{Int64,2}) where {m, Nψ, k}
"""
$(TYPEDSIGNATURES)
Updates the `Storage` `S` with the new set of features `addedidx`.
"""
function update_storage(S::Storage, X, addedidx::Array{Int64,2})
NxX, Ne = size(X)
Nψ = S.Nψ
@assert NxX == S.Nx "Wrong dimension of the sample X"
addedNψ = size(addedidx,1)
newNψ = addedNψ + Nψ
fnew = ExpandedFunction(S.f.B, vcat(S.f.idx, addedidx), vcat(S.f.coeff, zeros(addedNψ)))
oldmaxj = maximum(S.f.idx[:,end])
newmaxj = maximum(fnew.idx[:,end])
@assert newmaxj >= oldmaxj "Error in the adaptive procedure, the set is not downward closed"
# Update off-diagonal component
addedψoff = evaluate_offdiagbasis(fnew, X, addedidx)
# Update ψd
addedψoffψd = evaluate_diagbasis(fnew, X, addedidx)
# Update ψd0
addedψoffψd0 = repeated_evaluate_basis(fnew, zeros(Ne), addedidx)
# Update dψxd
addedψoffdψxd = repeated_grad_xk_basis(fnew, X[S.Nx,:], addedidx)
@avx for j=1:addedNψ
for i=1:Ne
addedψoffij = addedψoff[i,j]
addedψoffψd[i,j] *= addedψoffij
addedψoffψd0[i,j] *= addedψoffij
addedψoffdψxd[i,j] *= addedψoffij
end
end
addedψnorm = zeros(addedNψ)
for i=1:addedNψ
addedψnorm[i] = norm(view(addedψoffψd,:,i))
end
rmul!(addedψnorm, 1/sqrt(Ne))
return Storage(S.m, newNψ, S.Nx, fnew, hcat(S.ψoff, addedψoff),
hcat(S.ψoffψd, addedψoffψd),
hcat(S.ψoffψd0, addedψoffψd0),
hcat(S.ψoffdψxd, addedψoffdψxd),
vcat(S.ψnorm, addedψnorm),
hcat(S.cache_dcψxdt, zeros(Ne, addedNψ)),
hcat(S.cache_gradxd, zeros(Ne,newmaxj-oldmaxj)),
S.cache_dψxd,
vcat(S.cache_integral, zeros(Ne*addedNψ)),
S.cache_g)
end
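# Usage sketch (hedged illustration): after an adaptive step proposes new
# features, e.g. a hypothetical 1 x Nx multi-index row addedidx = [0 0 2],
#   S = update_storage(S, X, addedidx)
# grows the cached evaluations instead of recomputing everything.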
[file: src/hermitemap/storage.jl | Julia | repo: mleprovost/TransportBasedInference.jl | MIT]
import os
from timeit import default_timer as timer
import fire
import h5py
import numpy as np
import torch
from torch.utils.data import Subset
from cnn_gp import save_K
from plotting.createStartPlot import loadDataset
from utils import load_kern, constructSymmetricMatrix, deleteValues, loadNormalizedModel
def computeKxxPert(inpath, outpath, fraction):
"""
Takes a given matrix and sets randomly a fraction of all matrix values to nan.
@param inpath: Matrix given in h5 file where values are set to nan
@param outpath: Permuted matrix is stored in that h5 file
@param fraction: determines how many values are set to nan from the overall matrix elements
@return:
"""
frac = float(fraction)
with h5py.File(inpath, 'r') as f:
Kxx_symm = np.array(f.get('Kxx'))
f.close()
Kxx_pert = deleteValues(Kxx_symm, frac)
with h5py.File(outpath, 'w') as f:
f.create_dataset('Kxx_pert', shape=(Kxx_symm.shape[0], Kxx_symm.shape[1]), data=Kxx_pert)
f.close()
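# Hedged sketch of the behaviour expected from `deleteValues` (imported from
# utils), per the docstring above; an illustration, not the actual
# implementation:
#
#     def delete_values_sketch(matrix, fraction, rng=np.random.default_rng()):
#         out = matrix.astype(float).copy()
#         mask = rng.random(out.shape) < fraction  # ~fraction of entries
#         out[mask] = np.nan
#         return out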
def computeValidationAndTestKernel(path):
"""
Computes and stores the matrices Kxtx and Kxvx for computing the predictions the test and validation predictions
@param path: File where the matrices are stored (Data set names are Kxvx and Kxtx)
@return:
"""
model = loadNormalizedModel()
train = loadDataset()
val = loadDataset(mode='val')
test = loadDataset(mode='test')
kwargs = dict(worker_rank=0, n_workers=1,
batch_size=200, print_interval=2.)
def kern(x, x2, **args):
with torch.no_grad():
return model(x.cuda(), x2.cuda(), **args).detach().cpu().numpy()
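    # `kern` wraps the normalized CNN-GP model so that save_K can evaluate
    # kernel blocks batch by batch on the GPU without tracking gradients.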
with h5py.File(path, 'w') as f:
save_K(f, kern, 'Kxvx', val, train, diag=False, **kwargs)
save_K(f, kern, 'Kxtx', test, train, diag=False, **kwargs)
f.close()
def computeValidationKernel(path, name, mode='val'):
"""
Computes the validation matrix and stores it in a file with a given name
@param path: specifies file to store Kxvx matrix
@param name: dataset name within the file
@return:
"""
model = loadNormalizedModel()
train = loadDataset(mode='train')
val = loadDataset(mode=mode)
kwargs = dict(worker_rank=0, n_workers=1,
batch_size=200, print_interval=2.)
def kern(x, x2, **args):
with torch.no_grad():
return model(x.cuda(), x2.cuda(), **args).detach().cpu().numpy()
with h5py.File(path, 'w') as f:
save_K(f, kern, name, val, train, diag=False, **kwargs)
def computeKxxMatrix(path, name, fraction=1.0):
"""
Stores the kernel matrix Kxx between the training points
@param fraction: determines which fraction of the dataset is used
@param path: file path containing the kernel matrix
@param name: name of the dataset of the kernel matrix
@return:
"""
fraction = float(fraction)
model = loadNormalizedModel()
dataset = loadDataset()
kwargs = dict(worker_rank=0, n_workers=1,
batch_size=200, print_interval=2.)
def kern(x, x2, **args):
with torch.no_grad():
return model(x.cuda(), x2.cuda(), **args).detach().cpu().numpy()
if fraction == 1.0:
with h5py.File(path, "w") as f:
save_K(f, kern, name, X=dataset, X2=None, diag=False, **kwargs)
f.close()
else:
new_length = int(fraction * len(dataset))
subset = Subset(dataset, range(new_length))
start = timer()
with h5py.File(path, "w") as f:
save_K(f, kern, name, X=subset, X2=None, diag=False, **kwargs)
f.close()
end = timer()
diff = (end - start) / 60
os.system(f"python -m plotting.computeKernel loadMatrixFromDiskAndMirror {path} {name}")
        # Embed the computed sub-matrix into a full-size matrix padded with NaNs
with h5py.File(path, 'a') as f:
time = np.array(f.get('time'))
diff = diff + time
del f['time']
sub_Matrix = np.array(f.get(name))
newMatrix = np.empty((len(dataset), len(dataset)))
newMatrix.fill(np.nan)
newMatrix[:new_length, :new_length] = sub_Matrix[:, :]
del f[name]
f.close()
with h5py.File(path, 'w') as f:
f.create_dataset(name, shape=(len(dataset), len(dataset)), data=newMatrix)
f.create_dataset(name='time', data=np.array(diff))
f.close()
def computeNystroem(path, components):
model = loadNormalizedModel()
dataset = loadDataset()
subset = Subset(dataset, range(components))
def kern(x, x2, **args):
with torch.no_grad():
return model(x.cuda(), x2.cuda(), **args).detach().cpu().numpy()
dataset_low = Subset(dataset, range(len(subset), len(dataset)))
with h5py.File(path, 'w') as f:
kwargs = dict(worker_rank=0, n_workers=1,
batch_size=200, print_interval=2.)
# Compute squared part first
save_K(f, kern, 'W', X=subset, X2=None, diag=False, **kwargs)
# Lower part
save_K(f, kern, "C_down", X=dataset_low, X2=subset, diag=False, **kwargs)
def loadMatrixFromDiskAndMirror(path, name):
"""
Loads a matrix from a file and mirrors it if desired before returning
@param path: File path containing the matrix
@param name: Name of the dataset in the file
"""
with h5py.File(path, "a") as f:
matrix = load_kern(f[name], 0)
start = timer()
sym_Matrix = constructSymmetricMatrix(matrix)
stop = timer()
diff = (stop - start) / 60
del f[name]
with h5py.File(path, 'w') as f:
f.create_dataset(name=name, shape=(matrix.shape[0], matrix.shape[1]), data=sym_Matrix)
f.create_dataset(name='time', data=np.array(diff))
if __name__ == "__main__":
fire.Fire()
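# fire.Fire() exposes every top-level function above as a CLI command, e.g.
# (hypothetical paths):
#   python -m plotting.computeKernel computeKxxMatrix kxx.h5 Kxx 0.5
#   python -m plotting.computeKernel loadMatrixFromDiskAndMirror kxx.h5 Kxx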
[file: plotting/computeKernel.py | Python | repo: meinma/master-thesis | BSD-2-Clause]
import pandas
import numpy as np
import click
from bitstring import BitArray
from base58 import b58encode_int, b58decode_int
class Clusterer:
def __init__(self):
pass
def cluster(self, n, state_processor, pca = False, model_type = 'kmeans', z_score_exclude = 0.0, seed = None, quiet = False):
from sklearn.cluster import FeatureAgglomeration, KMeans, SpectralClustering
from scipy import stats
model_types = {
'feature-agglomeration': FeatureAgglomeration,
'kmeans': KMeans,
'spectral': SpectralClustering,
}
states = state_processor.states(two_d = pca)
excluded_states, labels = [], []
if z_score_exclude > 0:
if not model_type == 'kmeans':
raise click.UsageError("--z-score-exclude can only be used when --model-type is 'kmeans'")
states_2d = state_processor.states(two_d = True)
            excluded_states = states[~(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]  # ~ negates the boolean mask
states = states[(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]
seed = seed or np.random.randint(0, 10 ** 6)
np.random.seed(seed)
if not quiet:
click.echo("Clustering with seed %d..." % seed)
self.model = model_types[model_type](n_clusters = n)
self.data = states.as_matrix()
self.model.fit(self.data)
labels = self.model.labels_
self.results = pandas.DataFrame([states.index, self.model.labels_]).T.sort_values(by=0)
if any(excluded_states):
excluded_results = pandas.DataFrame([excluded_states.index, self.model.predict(excluded_states)]).T
self.results = pandas.DataFrame(np.concatenate([self.results, excluded_results]))
def cluster_ids(self):
labels = self.results[1]
sorted_labels = sorted(labels.unique())
ids = map(lambda l: b58encode_int(BitArray((labels == l).astype(int).tolist()).uint), sorted_labels)
return zip(sorted_labels, ids)
def cluster_id_to_states(self, cluster_id):
states = np.array(['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY'])
return states[list(BitArray(uint = b58decode_int(cluster_id), length = 50))]
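    # Hedged round-trip sketch of the encoding above: a cluster id is a 50-bit
    # state-membership mask whose unsigned integer value is base58-encoded.
    #   cid = b58encode_int(BitArray([1, 0, 1] + [0] * 47).uint)
    #   Clusterer().cluster_id_to_states(cid)   # -> array(['AL', 'AZ'], ...)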
def evaluate(self, metric, distance = None):
from sklearn.metrics import silhouette_score, calinski_harabaz_score
if metric == 'silhouette':
return silhouette_score(self.data, self.model.labels_, metric = distance)
if metric == 'calinski_harabaz':
return calinski_harabaz_score(self.data, self.model.labels_)
if metric == 'inertia':
return self.model.inertia_
def results_dict(self):
return self.results.set_index(0)[1].to_dict()
[file: wethepeopletoolkit/clusterer.py | Python | repo: alexpeattie/wethepeopletoolkit | MIT]
"""Set up the environment for doctests
This file is automatically evaluated by py.test. It ensures that we can write
doctests without distracting import statements in the doctest.
"""
import inspect
from collections import OrderedDict
import numpy
import pytest
import krotov
@pytest.fixture(autouse=True)
def set_doctest_env(doctest_namespace):
doctest_namespace['numpy'] = numpy
doctest_namespace['krotov'] = krotov
doctest_namespace['inspect'] = inspect
doctest_namespace['OrderedDict'] = OrderedDict
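# With this fixture in place, a doctest elsewhere in the package can use the
# injected names directly without importing them, e.g.:
#   >>> numpy.linspace(0, 1, 3)
#   array([0. , 0.5, 1. ])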
[file: Mixed/Linear/src/conftest.py | Python | repo: mcditoos/krotov | BSD-3-Clause]
from panda3d.core import PNMImage, TextNode
from direct.gui.DirectGui import DirectFrame, DirectButton, DirectLabel, DirectEntry, DGG, DirectOptionMenu
from direct.showbase.ShowBase import ShowBase
from direct.showbase.DirectObject import DirectObject
import numpy as np
from typing import Tuple, Union, List, Any, Dict, Callable, Type
import traceback
from direct.gui.OnscreenText import OnscreenText
from structures import Point, WHITE, TRANSPARENT
from simulator.services.services import Services
from simulator.services.debug import DebugLevel
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reset_event import ResetEvent
from simulator.services.event_manager.events.toggle_simulator_config_event import ToggleSimulatorConfigEvent
from simulator.views.gui.common import WINDOW_BG_COLOUR, WIDGET_BG_COLOUR
from simulator.views.gui.window import Window
from simulator.views.gui.simulator_config_state import SimulatorConfigState
from algorithms.configuration.configuration import Configuration
from algorithms.algorithm_manager import AlgorithmManager
from maps.map_manager import MapManager
from algorithms.configuration.maps.map import Map
class SimulatorConfig(DirectObject):
__services: Services
__base: ShowBase
__window: Window
__animations = {
"None": (0, 0),
"Normal": (np.finfo(float).eps, 0),
"Slow": (0.5, 0),
"Fast": (np.finfo(float).eps, 20)
}
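    # Each entry maps to a (simulator_key_frame_speed, simulator_key_frame_skip)
    # pair applied on update; eps is the smallest positive speed, and "Fast"
    # additionally skips 20 key frames.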
def __init__(self, services: Services, mouse1_press_callbacks: List[Callable[[], None]]):
self.__services = services
self.__services.ev_manager.register_listener(self)
self.__base = self.__services.graphics.window
self.__text = " t - find the path between the agent and goal\n\n" \
" mouse click - moves agent to mouse location \n\n mouse right click - moves goal to" \
" mouse location\n\n arrow keys, PgUp, PgDn - move agent / goal (Alt down)\n\n x - toggle trace" \
" animation (animations required)\n\n m - toggle map between Sparse and Dense\n\n o - take a " \
"default screenshot of the map\n\n p - take a custom screenshot of the scene\n\n w, a, s, d " \
"- orbit around the map\n\n q - top view of the map\n\n c, v - toggle Simulator Configuration /" \
" View Editor\n\n i - toggle Debug Overlay"
self.__algorithms = self.__services.settings.algorithms
self.__maps = self.__services.settings.maps
self.__map_keys = list(self.__maps.keys())
self.__algorithm_keys = list(self.__algorithms.keys())
self.__animation_keys = list(self.__animations.keys())
self.__window = Window(self.__base, "simulator_config", mouse1_press_callbacks,
borderWidth=(0.0, 0.0),
frameColor=WINDOW_BG_COLOUR,
pos=(-1, 0.5, 0.5),
frameSize=(-1.7, 1.3, -5.68, 0.85)
)
# spacer #
DirectFrame(parent=self.__window.frame,
borderWidth=(.0, .0),
frameColor=WIDGET_BG_COLOUR,
frameSize=(-1.4, 1.4, -0.011, 0.011),
pos=(-0.2, 0.0, 0.4))
DirectFrame(parent=self.__window.frame,
borderWidth=(.0, .0),
frameColor=WIDGET_BG_COLOUR,
frameSize=(-1.4, 1.4, -0.01, 0.01),
pos=(-0.2, 0.0, -2.96))
self.sim_config = DirectLabel(parent=self.__window.frame,
text="Simulator Configuration",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
borderWidth=(.0, .0),
pos=(-0.53, 0.0, 0.56),
scale=(0.2, 3, 0.2))
# Zoom buttons
self.btn_zoom_out = DirectButton(
text="-",
text_fg=WHITE,
pressEffect=1,
command=self.__window.zoom_out,
pos=(0.71, 0., 0.55),
parent=self.__window.frame,
scale=(0.3, 4.15, 0.35),
frameColor=TRANSPARENT)
self.btn_zoom_in = DirectButton(
text="+",
text_fg=WHITE,
pressEffect=1,
command=self.__window.zoom_in,
pos=(0.92, 0., 0.56),
parent=self.__window.frame,
scale=(0.3, 4.15, 0.35),
frameColor=TRANSPARENT)
# Quit button
self.btn = DirectButton(text='x',
text_fg=WHITE,
command=self.__window.toggle_visible,
pos=(1.12, 0., 0.576),
parent=self.__window.frame,
scale=(0.3, 2.9, 0.2),
pressEffect=1,
frameColor=TRANSPARENT)
self.user_information = DirectLabel(parent=self.__window.frame,
text=self.__text,
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
borderWidth=(.0, .0),
pos=(-1.55, 0.0, -3.2),
scale=(0.11, 1.1, 0.11))
self.map_label = DirectLabel(parent=self.__window.frame,
text="Map:",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
frameColor=WINDOW_BG_COLOUR,
borderWidth=(.0, .0),
pos=(-1.52, 0.4, 0.),
scale=(0.17, 1.09, 0.13))
self.algo_label = DirectLabel(parent=self.__window.frame,
text="Algorithm:",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
borderWidth=(.0, .0),
pos=(-1.52, 0.4, -0.5),
scale=(0.17, 1.09, 0.13))
self.animation_label = DirectLabel(parent=self.__window.frame,
text="Animation:",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
borderWidth=(.0, .0),
pos=(-1.52, 0.4, -1),
scale=(0.17, 1.09, 0.13))
self.agent_label = DirectLabel(parent=self.__window.frame,
text="Agent:",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
borderWidth=(.0, .0),
pos=(-1.52, 0.4, -1.5),
scale=(0.17, 1.09, 0.13))
self.goal_label = DirectLabel(parent=self.__window.frame,
text="Goal:",
text_fg=WHITE,
text_bg=WINDOW_BG_COLOUR,
frameColor=WINDOW_BG_COLOUR,
text_align=TextNode.ALeft,
borderWidth=(.0, .0),
pos=(-1.52, 0.4, -2),
scale=(0.17, 1.09, 0.13))
# Creating goal and agent's entry fields
self.__entries = []
self.__entry_hovered = False
mouse1_press_callbacks.append(self.__entry_mouse_click_callback)
for i in range(0, 6):
e = DirectEntry(parent=self.__window.frame,
scale=0.12,
pos=(-0.24 + (i % 3) * 0.57, 0.4, -1.5 - 0.5 * (i // 3)),
numLines=1,
width=3,
suppressKeys=True,
text_align=TextNode.ACenter,
focusInCommand=self.clear_text,
focusInExtraArgs=[i])
self.__entries.append(e)
e.bind(DGG.EXIT, self.__entry_exit_callback)
e.bind(DGG.ENTER, self.__entry_enter_callback)
e.bind(DGG.B1PRESS, self.__entry_mouse_click_callback)
self.accept("mouse1", self.__entry_mouse_click_callback)
self.__agent_disable_overlay = DirectButton(parent=self.__window.frame,
frameColor=TRANSPARENT,
borderWidth=(0.0, 0.0),
frameSize=(-0.6, 1.4, -0.2, 0.2),
pos=(-0.24, 0.4, -1.5),
suppressMouse=True)
self.__agent_disable_overlay.hide()
self.__maps_option = DirectOptionMenu(text="options",
scale=0.14,
parent=self.__window.frame,
initialitem=self.__map_keys.index("Labyrinth") if "Labyrinth" in self.__map_keys else 0,
items=self.__map_keys,
pos=(-0.65, 0.4, 0.),
highlightColor=(0.65, 0.65, 0.65, 1),
textMayChange=1,
command=self.__use_default_map_positions)
self.__algorithms_option = DirectOptionMenu(text="options",
scale=0.14,
parent=self.__window.frame,
initialitem=self.__algorithm_keys.index("A*") if "A*" in self.__algorithm_keys else 0,
items=self.__algorithm_keys,
pos=(-0.46, 0.4, -0.5),
highlightColor=(0.65, 0.65, 0.65, 1),
textMayChange=1)
self.__animations_option = DirectOptionMenu(text="options",
scale=0.14,
parent=self.__window.frame,
initialitem=self.__animation_keys.index("Fast"),
items=self.__animation_keys,
pos=(-0.45, 0.4, -1),
highlightColor=(0.65, 0.65, 0.65, 1),
textMayChange=1)
self._update_frame = DirectFrame(parent=self.__window.frame,
frameColor=WHITE,
pos=(-1, 0.4, -2.6),
borderWidth=(0.25, 0.15),
frameSize=(-0.5, 0.95, -0.54, 0.54),
scale=(0.50, 3.1, 0.25))
self._reset_frame = DirectFrame(parent=self.__window.frame,
frameColor=WHITE,
pos=(0.412, 0.4, -2.6),
borderWidth=(0.25, 0.15),
frameSize=(-0.5, 0.92, -0.54, 0.54),
scale=(0.50, 3.1, 0.25))
self.btn_update = DirectButton(
text="Update",
text_fg=(0.3, 0.3, 0.3, 1.0),
pressEffect=1,
command=self.__update_simulator_callback,
pos=(-0.9, 0.4, -2.65),
parent=self.__window.frame,
scale=(0.20, 2.1, 0.15),
frameColor=TRANSPARENT)
self.btn_reset = DirectButton(
text="Reset",
text_fg=(0.4, 0.3, 0.3, 1.0),
pressEffect=1,
command=self.__reset_simulator_callback,
pos=(0.51, 0.4, -2.65),
parent=self.__window.frame,
scale=(0.20, 2.1, 0.15),
frameColor=TRANSPARENT)
# setup state & use saved state if possible
self.__state = None
for so in self.__services.state.objects:
if isinstance(so, SimulatorConfigState):
self.__state = so
cmd = self.__maps_option['command']
try:
self.__maps_option.set(self.__map_keys.index(so.mp))
self.__algorithms_option.set(self.__algorithm_keys.index(so.algo))
self.__animations_option.set(self.__animation_keys.index(so.ani))
self.__update_position_entries()
except:
msg = "Failed to load Simulator Config state:\n{}".format(traceback.format_exc())
self.__services.debug.write(msg, DebugLevel.NONE)
break
finally:
self.__maps_option['command'] = cmd
return
new_state = self.__state is None
if new_state:
self.__state = SimulatorConfigState(self.__services.state)
self.__state.mp = self.__maps_option.get()
self.__state.algo = self.__algorithms_option.get()
self.__state.ani = self.__animations_option.get()
self.__use_default_map_positions()
if new_state:
self.__services.state.add(self.__state)
else:
self.__services.state.save()
def __entry_exit_callback(self, *discard) -> None:
self.__entry_hovered = False
def __entry_enter_callback(self, *discard) -> None:
self.__entry_hovered = True
def __entry_mouse_click_callback(self, *discard) -> None:
if self.__entry_hovered:
self.__window.focus()
else:
for e in self.__entries:
e['focus'] = False
def __get_map(self) -> Map:
name = self.__maps_option.get()
data = self.__maps[name]
if isinstance(data, str):
data = self.__services.resources.maps_dir.load(data)
self.__maps[name] = data
assert isinstance(data, Map), "Map failed to load"
return data
def __update_simulator_callback(self) -> None:
agent_mutable = (not self.__services.settings.get_agent_position)
mp = self.__get_map()
algo = self.__algorithms[self.__algorithms_option.get()]
ani = self.__animations[self.__animations_option.get()]
# update state
self.__state.mp = self.__maps_option.get()
self.__state.algo = self.__algorithms_option.get()
self.__state.ani = self.__animations_option.get()
def deduce_pos(default, entries) -> Point:
nonlocal mp
vs = []
for i in range(default.n_dim):
try:
vs.append(int(entries[i].get()))
except:
vs.append(default[i])
p = Point(*vs)
return p if mp.is_agent_valid_pos(p) else default
if agent_mutable:
self.__state.agent = deduce_pos(mp.agent.position, self.__entries[:3])
self.__state.goal = deduce_pos(mp.goal.position, self.__entries[3:])
self.__update_position_entries() # update if user-provided point was invalid
# save state
self.__services.state.save()
# launch simulation
config = self.__services.settings
config.algorithm_name = self.__algorithms_option.get()
old_map_name = config.map_name
config.map_name = self.__maps_option.get()
refresh_map = (old_map_name != config.map_name) or \
(mp != config.simulator_initial_map) or \
(agent_mutable and self.__state.agent != mp.agent.position) or \
(self.__state.goal != mp.goal.position)
if refresh_map:
if agent_mutable:
mp.move(mp.agent, self.__state.agent, True)
mp.move(mp.goal, self.__state.goal, True)
config.simulator_initial_map = mp
config.simulator_algorithm_type, config.simulator_testing_type, config.simulator_algorithm_parameters = algo
config.simulator_key_frame_speed, config.simulator_key_frame_skip = ani
self.__services.reinit(refresh_map=refresh_map)
def __reset_simulator_callback(self) -> None:
self.__maps_option.set(self.__map_keys.index(self.__state.mp))
self.__algorithms_option.set(self.__algorithm_keys.index(self.__state.algo))
self.__animations_option.set(self.__animation_keys.index(self.__state.ani))
self.__services.ev_manager.post(ResetEvent())
def __use_default_map_positions(self, *discard) -> None:
m = self.__get_map()
self.__state.agent = m.agent.position
self.__state.goal = m.goal.position
self.__update_position_entries()
def __update_position_entries(self) -> None:
def update_entries(entries, pos, mutable=True):
dim = pos.n_dim
if not mutable:
pos = ['-' for _ in range(dim)]
entries[0].enterText(str(pos[0]))
entries[1].enterText(str(pos[1]))
if dim == 3:
entries[2].enterText(str(pos[2]))
entries[2].show()
else:
entries[2].hide()
# agent may be externally set
agent_mutable = (not self.__services.settings.get_agent_position)
update_entries(self.__entries[:3], self.__state.agent, mutable=agent_mutable)
update_entries(self.__entries[3:], self.__state.goal)
if agent_mutable:
self.__agent_disable_overlay.hide()
else:
self.__agent_disable_overlay.show()
# user has performed an action such as pressing a
# button, therefore all entries should lose focus
for e in self.__entries:
e['focus'] = False
def notify(self, event: Event) -> None:
if isinstance(event, ToggleSimulatorConfigEvent):
self.__window.toggle_visible()
def clear_text(self, i):
self.__entries[i].enterText('')
[file: src/simulator/views/gui/simulator_config.py | Python | repo: ed741/PathBench | BSD-3-Clause]
[STATEMENT]
lemma CondLowCompositionality:
assumes "nonInterference \<Gamma> c1" and "nonInterference \<Gamma> c2" and "\<Gamma> \<turnstile> b : Low"
shows "nonInterference \<Gamma> (if (b) c1 else c2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nonInterference \<Gamma> (if (b) c1 else c2)
[PROOF STEP]
proof(rule nonInterferenceI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s1 s2 s1' s2'. \<lbrakk>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2; \<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>; \<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
fix s1 s2 s1' s2'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s1 s2 s1' s2'. \<lbrakk>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2; \<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>; \<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
assume "\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2" and "\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>"
and "\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>"
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<And>s1 s2 s1' s2'. \<lbrakk>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2; \<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>; \<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<Gamma> \<turnstile> b : Low\<close> \<open>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<Gamma> \<turnstile> b : Low
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
[PROOF STEP]
have "\<lbrakk>b\<rbrakk> s1 = \<lbrakk>b\<rbrakk> s2"
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile> b : Low
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
[PROOF STEP]
by(auto intro:interpretLow2)
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
goal (1 subgoal):
1. \<And>s1 s2 s1' s2'. \<lbrakk>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2; \<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>; \<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
[PROOF STEP]
have "\<lbrakk>b\<rbrakk> s1 = Some true \<or> \<lbrakk>b\<rbrakk> s1 = Some false"
[PROOF STATE]
proof (prove)
using this:
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<or> \<lbrakk>b\<rbrakk>s1 = Some false
[PROOF STEP]
by(auto dest:Cond_True_or_False)
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s1 = Some true \<or> \<lbrakk>b\<rbrakk>s1 = Some false
goal (1 subgoal):
1. \<And>s1 s2 s1' s2'. \<lbrakk>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2; \<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>; \<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
thus "\<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s1 = Some true \<or> \<lbrakk>b\<rbrakk>s1 = Some false
goal (1 subgoal):
1. \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
2. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
assume "\<lbrakk>b\<rbrakk> s1 = Some true"
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s1 = Some true
goal (2 subgoals):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
2. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
with \<open>\<lbrakk>b\<rbrakk> s1 = \<lbrakk>b\<rbrakk> s2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
\<lbrakk>b\<rbrakk>s1 = Some true
[PROOF STEP]
have "\<lbrakk>b\<rbrakk> s2 = Some true"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
\<lbrakk>b\<rbrakk>s1 = Some true
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s2 = Some true
[PROOF STEP]
by(auto intro:CondTrue_reds)
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s2 = Some true
goal (2 subgoals):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
2. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<lbrakk>b\<rbrakk> s1 = Some true\<close> \<open>\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s1 = Some true
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
[PROOF STEP]
have "\<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s1 = Some true
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
goal (1 subgoal):
1. \<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
[PROOF STEP]
by(auto intro:CondTrue_reds)
[PROOF STATE]
proof (state)
this:
\<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
goal (2 subgoals):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
2. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<lbrakk>b\<rbrakk> s2 = Some true\<close> \<open>\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s2 = Some true
\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
have "\<langle>c1,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s2 = Some true
\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<langle>c1,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
by(auto intro:CondTrue_reds)
[PROOF STATE]
proof (state)
this:
\<langle>c1,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (2 subgoals):
1. \<lbrakk>b\<rbrakk>s1 = Some true \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
2. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
with \<open>\<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>\<close> \<open>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2\<close> \<open>nonInterference \<Gamma> c1\<close>
[PROOF STATE]
proof (chain)
picking this:
\<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
nonInterference \<Gamma> c1
\<langle>c1,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<langle>c1,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
nonInterference \<Gamma> c1
\<langle>c1,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
by(auto simp:nonInterference_def)
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
assume "\<lbrakk>b\<rbrakk> s1 = Some false"
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s1 = Some false
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
with \<open>\<lbrakk>b\<rbrakk> s1 = \<lbrakk>b\<rbrakk> s2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
\<lbrakk>b\<rbrakk>s1 = Some false
[PROOF STEP]
have "\<lbrakk>b\<rbrakk> s2 = Some false"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s1 = \<lbrakk>b\<rbrakk>s2
\<lbrakk>b\<rbrakk>s1 = Some false
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s2 = Some false
[PROOF STEP]
by(auto intro:CondTrue_reds)
[PROOF STATE]
proof (state)
this:
\<lbrakk>b\<rbrakk>s2 = Some false
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<lbrakk>b\<rbrakk> s1 = Some false\<close> \<open>\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s1 = Some false
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
[PROOF STEP]
have "\<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s1 = Some false
\<langle>if (b) c1 else c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
goal (1 subgoal):
1. \<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
[PROOF STEP]
by(auto intro:CondFalse_reds)
[PROOF STATE]
proof (state)
this:
\<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
from \<open>\<lbrakk>b\<rbrakk> s2 = Some false\<close> \<open>\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>b\<rbrakk>s2 = Some false
\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
have "\<langle>c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>b\<rbrakk>s2 = Some false
\<langle>if (b) c1 else c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<langle>c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
by(auto intro:CondFalse_reds)
[PROOF STATE]
proof (state)
this:
\<langle>c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<lbrakk>b\<rbrakk>s1 = Some false \<Longrightarrow> \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
with \<open>\<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>\<close> \<open>\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2\<close> \<open>nonInterference \<Gamma> c2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
nonInterference \<Gamma> c2
\<langle>c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<langle>c2,s1\<rangle> \<rightarrow>* \<langle>Skip,s1'\<rangle>
\<Gamma> \<turnstile> s1 \<approx>\<^sub>L s2
nonInterference \<Gamma> c2
\<langle>c2,s2\<rangle> \<rightarrow>* \<langle>Skip,s2'\<rangle>
goal (1 subgoal):
1. \<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
[PROOF STEP]
by(auto simp:nonInterference_def)
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile> s1' \<approx>\<^sub>L s2'
goal:
No subgoals!
[PROOF STEP]
qed
[file: VolpanoSmith_secTypes | Isabelle proof transcript]
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
try:
from osgeo import ogr
except ImportError:
ogr = None
import numpy as np
from threedigrid.admin import constants
from threedigrid.geo_utils import raise_import_exception
def as_numpy_array(array):
if hasattr(array, 'value'):
return array.value
return array[()]
class PrepareBreaches(object):
"""
prepares breaches for visualization
"""
def __init__(self):
if ogr is None:
raise_import_exception('ogr')
@staticmethod
def get_coordinates(levees, line_coords, levl):
breaches_x = np.zeros(levl.shape, dtype='f8')
breaches_y = np.zeros(levl.shape, dtype='f8')
for i, line_id in enumerate(levl):
if i == 0:
continue
line = ogr.Geometry(ogr.wkbLineString)
line.AddPoint(
line_coords[0][line_id],
line_coords[1][line_id])
line.AddPoint(
line_coords[2][line_id],
line_coords[3][line_id])
for levee_geom in levees.geoms:
if not levee_geom.Intersect(line):
continue
intersection = levee_geom.Intersection(line)
breaches_x[i] = intersection.GetX()
breaches_y[i] = intersection.GetY()
break
return np.array([breaches_x, breaches_y])
@classmethod
def prepare_datasource(cls, datasource, kcu, id_mapper,
levees, line_coords):
"""
        :param datasource: datasource for breaches, e.g. an HDF5 group
:param kcu: array of kcu values for datasource
:param id_mapper: threedigrid.admin.idmapper.IdMapper instance
:param levees: threedigrid.admin.levees.model.Levees instance
:param line_coords: coordinates from Lines instance
:return:
"""
# TODO: Check values below
if 'id' not in list(datasource.keys()):
datasource.set(
'id', np.arange(0, datasource['levl'].size))
if 'seq_ids' not in list(datasource.keys()):
datasource.set(
'seq_ids', np.arange(0, datasource['levl'].size))
if 'content_pk' not in list(datasource.keys()):
content_pk = np.zeros(datasource['levl'].shape, dtype='i4')
type_code = constants.TYPE_CODE_MAP['v2_breach']
id_mapping = id_mapper.id_mapping
src = id_mapper.obj_slices[type_code]
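            # Map each seq_id from the id mapper to its row in the datasource:
            # sort `seq_ids` once, then binary-search with searchsorted.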
sort_by = as_numpy_array(datasource['seq_ids'])
sort_idx = np.argsort(sort_by)
idx = sort_idx[np.searchsorted(
sort_by,
id_mapping[src]['seq_id'],
sorter=sort_idx)]
content_pk[idx] = id_mapping[src]['pk']
datasource.set('content_pk', content_pk)
levl = as_numpy_array(datasource['levl'])
if 'kcu' not in list(datasource.keys()):
datasource.set('kcu', as_numpy_array(kcu)[levl])
if 'coordinates' not in list(datasource.keys()) and levees:
datasource.set(
'coordinates',
cls.get_coordinates(levees, as_numpy_array(line_coords), levl))
[file: threedigrid/admin/breaches/prepare.py | Python | repo: nens/threedigrid | BSD-3-Clause]
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
12pt,
]{book}
\usepackage{amsmath,amssymb}
\usepackage{lmodern}
\usepackage{iftex}
\ifPDFTeX
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={How to fit an animal model},
pdfauthor={Julien Martin},
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{longtable,booktabs,array}
\usepackage{calc} % for calculating minipage widths
% Correct order of tables after \paragraph or \subparagraph
\usepackage{etoolbox}
\makeatletter
\patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{}
\makeatother
% Allow footnotes in longtable head/foot
\IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}}
\makesavenoteenv{longtable}
\usepackage{graphicx}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
%\usepackage{booktabs}
\usepackage{ctable}
\usepackage{fancyhdr}
\usepackage{float}
\usepackage[margin=2cm]{geometry}
\floatplacement{figure}{H}
%\usepackage[sf,bf]{titlesec}
\hypersetup{colorlinks=true, urlcolor=blue}
\renewcommand{\chaptername}{Chapter}
\renewcommand{\contentsname}{Contents}
\renewcommand{\partname}{Part}
\usepackage{framed,color}
\definecolor{incolor}{RGB}{240,240,240}
\definecolor{outcolor}{RGB}{248,248,248}
\renewcommand{\textfraction}{0.05}
\renewcommand{\topfraction}{0.8}
\renewcommand{\bottomfraction}{0.8}
\renewcommand{\floatpagefraction}{0.75}
%\renewenvironment{quote}{\begin{VF}}{\end{VF}}
\ifxetex
\usepackage{letltxmacro}
\setlength{\XeTeXLinkMargin}{1pt}
\LetLtxMacro\SavedIncludeGraphics\includegraphics
\def\includegraphics#1#{% #1 catches optional stuff (star/opt. arg.)
\IncludeGraphicsAux{#1}%
}%
\newcommand*{\IncludeGraphicsAux}[2]{%
\XeTeXLinkBox{%
\SavedIncludeGraphics#1{#2}%
}%
}%
\fi
\makeatletter
\newenvironment{kframe}{%
\medskip{}
\setlength{\fboxsep}{.8em}
\def\at@end@of@kframe{}%
\ifinner\ifhmode%
\def\at@end@of@kframe{\end{minipage}}%
\begin{minipage}{\columnwidth}%
\fi\fi%
\def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep
\colorbox{incolor}{##1}\hskip-\fboxsep
% There is no \\@totalrightmargin, so:
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}%
\MakeFramed {\advance\hsize-\width
\@totalleftmargin\z@ \linewidth\hsize
\@setminipage}}%
{\par\unskip\endMakeFramed%
\at@end@of@kframe}
\makeatother
\makeatletter
\@ifundefined{Shaded}{
}{\renewenvironment{Shaded}{\begin{kframe}}{\end{kframe}}}
\makeatother
% \let\oldverbatim\verbatim
% \renewenvironment{Shaded}{\vspace{0.2cm}\begin{kframe}}{\end{kframe}}
% \renewenvironment{verbatim}{\begin{shaded}\begin{oldverbatim}}{\end{oldverbatim}\end{shaded}}
\newenvironment{rmdblock}[1]
{
\begin{itemize}
\renewcommand{\labelitemi}{
\raisebox{-.7\height}[0pt][0pt]{
{\setkeys{Gin}{width=3em,keepaspectratio}\includegraphics{images/#1}}
}
}
\begin{kframe}
\setlength{\fboxsep}{1em}
\item
}
{
\end{kframe}
\end{itemize}
}
\newenvironment{rmdnote}
{\begin{rmdblock}{note}}
{\end{rmdblock}}
\newenvironment{rmdcaution}
{\begin{rmdblock}{caution}}
{\end{rmdblock}}
\newenvironment{rmdimportant}
{\begin{rmdblock}{important}}
{\end{rmdblock}}
\newenvironment{rmdtip}
{\begin{rmdblock}{tip}}
{\end{rmdblock}}
\newenvironment{rmdwarning}
{\begin{rmdblock}{warning}}
{\end{rmdblock}}
\newenvironment{rmdcode}
{\begin{rmdblock}{screen}}
{\end{rmdblock}}
\usepackage{makeidx}
\makeindex
\urlstyle{tt}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
% \frontmatter
\ifLuaTeX
\usepackage{selnolig} % disable illegal ligatures
\fi
\usepackage[]{natbib}
\bibliographystyle{apalike}
\title{How to fit an animal model}
\usepackage{etoolbox}
\makeatletter
\providecommand{\subtitle}[1]{% add subtitle to \maketitle
\apptocmd{\@title}{\par {\large #1 \par}}{}{}
}
\makeatother
\subtitle{An ecologist guide}
\author{Julien Martin}
\date{21-05-2021}
\begin{document}
\maketitle
%\cleardoublepage\newpage\thispagestyle{empty}\null
%\cleardoublepage\newpage\thispagestyle{empty}\null
%\cleardoublepage\newpage
%\thispagestyle{empty}
%\begin{center}
%\includegraphics{images/missing.png}
%\end{center}
%\setlength{\abovedisplayskip}{-5pt}
%\setlength{\abovedisplayshortskip}{-5pt}
{
\setcounter{tocdepth}{1}
\tableofcontents
}
\hypertarget{preface}{%
\chapter*{Preface}\label{preface}}
\addcontentsline{toc}{chapter}{Preface}
This book is a collection of tutorials based on the excellent paper by \citet{wilson2010}.
Instead of simply copy-pasting the tutorials into a bookdown format, they have been updated to work with the newest versions of the software and extended to cover additional software.
\textbf{However, this is still a work in progress.}
\begin{rmdwarning}
Do not take anything in this manual as gospel.
\end{rmdwarning}
\hypertarget{contributors}{%
\subsection*{Contributors}\label{contributors}}
\addcontentsline{toc}{subsection}{Contributors}
List of people who contributed to update and extend tutorials:
\begin{itemize}
\tightlist
\item
Eric Postma
\item
Julien Martin
\item
Mathieu Videlier
\end{itemize}
\hypertarget{intro}{%
\chapter{Introduction}\label{intro}}
The book provides a series of tutorials (and accompanying data files) for fitting animal models in \texttt{R} using different packages (\texttt{ASReml-R}, \texttt{gremlin}, \texttt{MCMCglmm} and \texttt{brms}/\texttt{stan}).
You will need to follow the instructions below carefully to first download the data files and then install the R packages.
Before beginning the tutorial, we assume the reader has successfully installed the chosen R package on their computer and has saved the required data files to an appropriate directory from which they will be read.
Full instructions for how to do this are provided with software distributions.
To work through the different tutorials, I recommend creating a folder where you will save your \texttt{R} scripts for the tutorials.
\hypertarget{data}{%
\section{Data}\label{data}}
\hypertarget{data-files}{%
\subsection{Data files}\label{data-files}}
You will need to download 3 data files for the tutorial in \texttt{R}:
\begin{itemize}
\tightlist
\item
gryphon.csv: data on gryphon birth weight
\item
gryphonRM.csv: repeated-measures data on gryphons
\item
gryphonped.csv: pedigree of the gryphon population
\end{itemize}
In addition, some models presented in the tutorials can take a while to run (sometimes \textgreater{} 1 hour), so we also provide the model outputs to allow you to continue the tutorials without waiting for the models to run.
The files are available \href{https://github.com/JulienGAMartin/wam_tuto/tree/master/data}{here}.
I recommend saving the data and Rdata files in a \texttt{data} subfolder of the folder you will use as your R working directory and where you will save your R scripts. Note that the tutorials use this structure to read and save data.
\hypertarget{notes-on-data-and-pedigree}{%
\subsection{Notes on data and pedigree}\label{notes-on-data-and-pedigree}}
It is always important to take time to think carefully about the strengths and potential limitations of your pedigree information before embarking on quantitative genetic analyses. Pedigree Viewer, written by Brian Kinghorn, is an extremely useful application for visualizing pedigrees, and can be downloaded from \url{http://www-personal.une.edu.au/~bkinghor/pedigree.htm}. \texttt{Pedantics}, an R package written by Michael Morrissey and distributed through CRAN (\url{http://cran.r-project.org/}), can also be used for this and offers some nice additional features for visualizing pedigree structures and generating associated statistics. Before you begin running through the tutorials, we advise taking a moment to look at the pedigree files provided with them using Pedigree Viewer or Pedantics.
\hypertarget{r}{%
\section{R}\label{r}}
You should check that you have the most current version of R and of the R packages; you can check the current version numbers on CRAN. If you need to update (or install) R packages, use \texttt{install.packages()} and follow the prompted instructions.
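For example, a minimal sketch of installing the freely available packages used in these tutorials (\texttt{asreml} is commercial and is installed separately, as described below):
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: install the freely available packages used in these tutorials
# (asreml is commercial and must be installed separately)
install.packages(c("gremlin", "MCMCglmm", "brms"))
\end{Highlighting}
\end{Shaded}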
\hypertarget{r-packages}{%
\subsection{R packages}\label{r-packages}}
\hypertarget{asreml-r}{%
\subsubsection{asreml-r}\label{asreml-r}}
ASReml-R is commercial software published by VSN International (\url{http://www.vsni.co.uk/software/asreml/}). This package is not free and requires an access key.
Additional information and guidance can be found in the ASReml-R reference manual (\url{https://asreml.kb.vsni.co.uk/wp-content/uploads/sites/3/2018/02/ASReml-R-Reference-Manual-4.pdf}).
\hypertarget{gremlin}{%
\subsubsection{gremlin}\label{gremlin}}
\texttt{gremlin} is a little monster appearing if you feed a mogwai after midnight. It is also a great and promising package for fitting mixed models using a frequentist approach.
\hypertarget{mcmcglmm}{%
\subsubsection{MCMCglmm}\label{mcmcglmm}}
\texttt{MCMCglmm} is an R package for Bayesian mixed model analysis written by Jarrod Hadfield. It is free software distributed through CRAN (\url{http://cran.r-project.org/}). Information and guidance can be found in the user manual and vignettes (\url{http://cran.r-project.org/web/packages/MCMCglmm/index.html}).
Reference: \citet{MCMCglmm2010}; see also \citet{R-MCMCglmm}.
This tutorial provides some information that applies to MCMCglmm-based analyses in general but that will not be repeated in the other tutorials.
Most importantly, this applies to some of the simplest ways of assessing the performance of a run using MCMCglmm, i.e., verification of the validity of the posterior distribution.
This tutorial is not a substitute for working through the MCMCglmm course notes, which are available from CRAN (the Comprehensive R Archive Network, \url{http://cran.r-project.org/}) or can be accessed in R using the command vignette(``CourseNotes'', ``MCMCglmm'').
These tutorials do not introduce one of the main advantages of using MCMCglmm for analyses of data from natural populations: the ability to properly model non-normal responses.
These capabilities are introduced in the documentation that is distributed with MCMCglmm, and available from CRAN.
\hypertarget{brms}{%
\subsubsection{brms}\label{brms}}
\texttt{brms} provides an interface to fit Bayesian generalized multivariate (non-)linear multilevel models using \texttt{Stan}, which is a C++ package for obtaining full Bayesian inference (see \url{https://mc-stan.org/}).
The formula syntax is an extended version of the syntax applied in the `lme4' package to provide a familiar and simple interface for performing regression analyses.
It should be noted that, while \texttt{brms} is able to fit animal models, the parametrization used is not the most efficient, and models can take considerably longer to run than with a different parametrization coded directly in \texttt{stan}.
\hypertarget{univariate-animal-model}{%
\chapter{Univariate animal model}\label{univariate-animal-model}}
This tutorial will demonstrate how to run a univariate animal model to estimate genetic variance in birth weight in the mighty gryphons.
\hypertarget{scenario-and-data}{%
\section{Scenario and data}\label{scenario-and-data}}
\hypertarget{scenario}{%
\subsection{Scenario}\label{scenario}}
In a population of gryphons there is strong positive selection on birth weight, with heavier-born individuals having, on average, higher fitness. To find out whether increased birth weight will evolve in response to the selection, and if so how quickly, we want to estimate the heritability of birth weight.
\hypertarget{data-files-1}{%
\subsection{Data files}\label{data-files-1}}
Open \texttt{gryphonped.csv} and \texttt{gryphon.csv} in your text editor. The structure and contents of these files are fairly self-explanatory. The pedigree file \texttt{gryphonped.csv} contains three columns containing unique IDs that correspond to each animal, its father, and its mother. Note that this is a multigenerational pedigree, with the earliest generation (for which parentage information is necessarily missing) at the beginning of the file. For later-born individuals maternal identities are all known, but paternity information is incomplete (a common situation in real-world applications).
The phenotype data, as well as additional factors and covariates that we may wish to include in our model are contained in \texttt{gryphon.csv}. Columns correspond to individual identity (\texttt{animal}), maternal identity (\texttt{mother}), year of birth (\texttt{byear}), sex (\texttt{sex}, where \texttt{1} is female and \texttt{2} is male), birth weight (\texttt{bwt}), and tarsus length (\texttt{tarsus}). Each row of the data file contains a record for a different offspring individual. Note that all individuals included in the data file must be included as offspring in the pedigree file.
We can read the data file using \texttt{read.csv()}, which by default treats \texttt{NA} as the symbol for missing values and assumes that the first line of the file contains the column headers.
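As a minimal sketch, assuming the file is saved in the \texttt{data} subfolder recommended above:
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: read the phenotypic data from the data subfolder
gryphon <- read.csv("data/gryphon.csv")
\end{Highlighting}
\end{Shaded}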
It is a good idea to make sure that all variables are correctly assigned as numeric or factors:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{animal }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{animal)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{mother }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{mother)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{byear }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{byear)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{sex }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{sex)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{bwt }\OtherTok{\textless{}{-}} \FunctionTok{as.numeric}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{bwt)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{tarsus }\OtherTok{\textless{}{-}} \FunctionTok{as.numeric}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{tarsus)}
\end{Highlighting}
\end{Shaded}
Similarly, we can read in the pedigree file using \texttt{read.csv()}, with the same defaults for missing values and column headers.
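A sketch of the import step (again assuming the \texttt{data} subfolder); the \texttt{str()} output below shows the structure of the resulting data frame:
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: read the pedigree and inspect its structure
gryphonped <- read.csv("data/gryphonped.csv")
str(gryphonped)
\end{Highlighting}
\end{Shaded}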
\begin{verbatim}
## 'data.frame': 1309 obs. of 3 variables:
## $ id : int 1306 1304 1298 1293 1290 1288 1284 1283 1282 1278 ...
## $ father: int NA NA NA NA NA NA NA NA NA NA ...
## $ mother: int NA NA NA NA NA NA NA NA NA NA ...
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{id }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{id)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{father }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{father)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{mother }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{mother)}
\end{Highlighting}
\end{Shaded}
Now that we have imported the data and the pedigree file, we are ready to fit an animal model.
\hypertarget{asreml-r-1}{%
\section{Asreml-R}\label{asreml-r-1}}
\hypertarget{running-the-model}{%
\subsection{Running the model}\label{running-the-model}}
First we need to load the \texttt{asreml} library:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(asreml)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Loading required package: Matrix
\end{verbatim}
\begin{verbatim}
## Online License checked out Fri May 21 10:23:14 2021
\end{verbatim}
To be able to fit an animal model, ASReml-R needs the inverse of the relationship matrix, which we obtain with the \texttt{ainverse()} function:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{ainv }\OtherTok{\textless{}{-}} \FunctionTok{ainverse}\NormalTok{(gryphonped)}
\end{Highlighting}
\end{Shaded}
We are now ready to specify our first model:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model1 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{, }\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Online License checked out Fri May 21 10:23:15 2021
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:15 2021
## LogLik Sigma2 DF wall cpu
## 1 -4128.454 1.0 853 10:23:15 0.0
## 2 -3284.272 1.0 853 10:23:15 0.0
## 3 -2354.992 1.0 853 10:23:15 0.0
## 4 -1710.357 1.0 853 10:23:15 0.0
## 5 -1363.555 1.0 853 10:23:15 0.0
## 6 -1263.516 1.0 853 10:23:15 0.0
## 7 -1247.854 1.0 853 10:23:15 0.0
## 8 -1247.185 1.0 853 10:23:15 0.0
## 9 -1247.183 1.0 853 10:23:15 0.0
\end{verbatim}
In this model, \texttt{bwt} is the response variable and the only fixed effect is the mean (the intercept, denoted as \texttt{1}). The only random effect we have fitted is \texttt{animal}, which will provide an estimate of \(V_A\). Our random \texttt{animal} effect is connected to the inverse relatedness matrix \texttt{ainv}, which integrates the pedigree information.\\
\texttt{data=} specifies the name of the data frame that contains our variables. Finally, we tell \texttt{asreml()} what to do when it encounters \texttt{NA}s in either the dependent or predictor variables (in this case we choose to remove the records). If you use the argument ``include'' instead of ``omit'', the model will keep the \texttt{NA}s. With x = ``include'', the model will replace each \texttt{NA} with 0; be careful to center the variable concerned so that its mean equals 0, otherwise estimates (including covariances in multivariate models) can be strongly biased by the missing values. y = ``include'' will replace each \texttt{NA} with a factor level labeled \texttt{mv}, which will be included in the sparse equations. For more details see the ASReml-R manual.
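As a hedged sketch of that centering step (using \texttt{tarsus} as a hypothetical example covariate; \texttt{scale()} with \texttt{scale\ =\ FALSE} centers a variable on zero):
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: center a covariate so that the zeros substituted
# for NA by x = "include" correspond to its mean
gryphon$tarsus_c <- as.numeric(scale(gryphon$tarsus, scale = FALSE))
\end{Highlighting}
\end{Shaded}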
A note on the specification of the residual structure: this simple univariate model will run fine without \texttt{residual=\textasciitilde{}idv(units)}. However, if you are going to use \texttt{vpredict()} to calculate the heritability (see below), not specifying the residuals in this way will result in a standard error for the heritability that is incorrect.
Any model comes with assumptions that need to be checked. The model can be plotted, which helps visualize the distribution of the model residuals and check the different assumptions.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model1)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-10-1.pdf}
To see the estimates for the variance components, we run:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model1)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## vm(animal, ainv) 3.395398 0.6349915 5.347154 P 0
## units!units 3.828602 0.5185919 7.382687 P 0
## units!R 1.000000 NA NA F 0
\end{verbatim}
We fitted a single random effect, so we have partitioned the phenotypic variance into two components. The \texttt{vm(animal,\ ainv)} variance component is \(V_A\) and is estimated as 3.4. Given that the ratio of \(V_A\) to its standard error (\texttt{z.ratio}) is considerably larger than 2 (\emph{i.e.}, the parameter estimate is more than 2 SEs from zero), this looks likely to be significant. The \texttt{units!units} component refers to the residual variance \(V_R\), and \texttt{units!R} should be ignored. If you don't include \texttt{residual\ =\ \textasciitilde{}idv(units)} in your model specification, \texttt{units!R} will provide you with the residual variance.
\hypertarget{estimating-heritability}{%
\subsection{Estimating heritability}\label{estimating-heritability}}
We can calculate the \(h^2\) of birth weight from the components above since \(h^2 = V_A/V_P = V_A/(V_A+V_R)\). Thus according to this model, \(h^2\) = 3.4 / (3.4 + 3.83) = 0.47.
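As a sketch, the same calculation can be done directly from the variance component table shown above (rows 1 and 2 hold \(V_A\) and \(V_R\)):
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: manual h2 from the varcomp table of model1
vc <- summary(model1)$varcomp
vc[1, "component"] / (vc[1, "component"] + vc[2, "component"])
\end{Highlighting}
\end{Shaded}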
Alternatively, we can use the \texttt{vpredict()} function to calculate \(h^2\) and its standard error. The \texttt{vpredict()} function takes two arguments: the fitted model (here \texttt{model1}) and a formula giving the name of the estimate and its associated equation. The equation refers to the different variance components as \texttt{V1}, \texttt{V2}, \ldots, numbered according to the order of the random and residual effects in the model.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model1, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.4700163 0.07650881
\end{verbatim}
\hypertarget{adding-fixed-effects}{%
\subsection{Adding fixed effects}\label{adding-fixed-effects}}
To add fixed effects to a univariate model, we simply modify the model statement. For example, we might know (or suspect) that birth weight is a sexually dimorphic trait, and therefore fit \texttt{sex} as a fixed effect in the model.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model2 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:16 2021
## LogLik Sigma2 DF wall cpu
## 1 -3364.126 1.0 852 10:23:16 0.0
## 2 -2702.117 1.0 852 10:23:16 0.0
## 3 -1978.916 1.0 852 10:23:16 0.0
## 4 -1487.834 1.0 852 10:23:16 0.0
## 5 -1236.350 1.0 852 10:23:16 0.0
## 6 -1172.771 1.0 852 10:23:16 0.0
## 7 -1165.270 1.0 852 10:23:16 0.0
## 8 -1165.093 1.0 852 10:23:16 0.0
## 9 -1165.093 1.0 852 10:23:16 0.0
\end{verbatim}
Now we can look at the fixed effects parameters and assess their significance with a conditional Wald F-test:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model2, }\AttributeTok{coef =} \ConstantTok{TRUE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{coef.fixed}
\FunctionTok{wald.asreml}\NormalTok{(model2, }\AttributeTok{ssType =} \StringTok{"conditional"}\NormalTok{, }\AttributeTok{denDF =} \StringTok{"numeric"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## solution std error z.ratio
## sex_1 0.000000 NA NA
## sex_2 2.206996 0.1619974 13.62365
## (Intercept) 6.058669 0.1718244 35.26082
\end{verbatim}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:16 2021
## LogLik Sigma2 DF wall cpu
## 1 -1165.093 1.0 852 10:23:16 0.0
## 2 -1165.093 1.0 852 10:23:16 0.0
## Calculating denominator DF
\end{verbatim}
The very small probability (\texttt{Pr}) in the Wald test above shows that \texttt{sex} is a highly significant fixed effect, and from the parameter estimates (\texttt{summary(model2,coef=T)\$coef.fixed}) we can see that the average male (sex 2) is 2.2 kg (\(\pm\) 0.16 SE) heavier than the average female (sex 1). However, when we look at the variance components in the model including \texttt{sex} as a fixed effect, we see that they have changed slightly from the previous model:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model2)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## vm(animal, ainv) 3.060441 0.5243571 5.836558 P 0
## units!units 2.938412 0.4161473 7.060991 P 0
## units!R 1.000000 NA NA F 0
\end{verbatim}
In fact, since \texttt{sex} effects were previously contributing to the residual variance of the model, our estimate of \(V_R\) (denoted \texttt{units!units} in the output) is now slightly lower than before. This has an important consequence for estimating heritability: if we calculate \(V_P\) as \(V_A + V_R\), then as we include fixed effects we will soak up more residual variance, driving \(V_P\) down. Assuming that \(V_A\) is more or less unaffected by the fixed effects fitted, then as \(V_P\) goes down we expect our estimate of \(h^2\) to go up:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model2, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.510171 0.07432388
\end{verbatim}
For comparison, the estimate from \texttt{model1} was:
\begin{verbatim}
##         Estimate         SE
## h2.bwt 0.4700163 0.07650881
\end{verbatim}
Here \(h^2\) has increased slightly from 0.47 to 0.51. Which is the better estimate? It depends on what your question is. The first is an estimate of the proportion of variance in birth weight explained by additive effects, the latter is an estimate of the proportion of variance in birth weight \emph{after conditioning on sex} that is explained by additive effects.
An important piece of advice: be consistent in how you name your estimates, and always state clearly whether the estimates you report are conditional on fixed effects or not (to avoid any confusion).
\hypertarget{adding-random-effects}{%
\subsection{Adding random effects}\label{adding-random-effects}}
This is done by simply modifying the model statement in the same way. For instance, we can fit a birth year (\texttt{byear}) effect:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model3 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}\NormalTok{ byear,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:16 2021
## LogLik Sigma2 DF wall cpu
## 1 -2742.658 1.0 852 10:23:16 0.0
## 2 -2237.268 1.0 852 10:23:16 0.0
## 3 -1690.453 1.0 852 10:23:16 0.0
## 4 -1328.910 1.0 852 10:23:16 0.0
## 5 -1154.597 1.0 852 10:23:16 0.0
## 6 -1116.992 1.0 852 10:23:16 0.0
## 7 -1113.809 1.0 852 10:23:16 0.0
## 8 -1113.772 1.0 852 10:23:16 0.0
## 9 -1113.772 1.0 852 10:23:16 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model3)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## byear 0.8862604 0.2695918 3.287416 P 0
## vm(animal, ainv) 2.7068665 0.4422140 6.121169 P 0
## units!units 2.3092415 0.3451025 6.691466 P 0
## units!R 1.0000000 NA NA F 0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{(h2}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{vpredict}\NormalTok{(model3, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V3)))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.1501533 0.03960814
\end{verbatim}
Here the variance in \texttt{bwt} explained by \texttt{byear} is 0.886 and, based on the \texttt{z.ratio}, appears to be significant (\textgreater{}2). Thus we would conclude that year-to-year variation (\emph{e.g.}, in weather, resource abundance) contributes to \(V_P\). Note that although \(V_A\) has changed somewhat, most of what is now partitioned as a birth year effect was previously partitioned as \(V_R\). Thus what we have really done here is to partition environmental effects into those arising from year-to-year differences versus everything else, and we do not really expect much change in \(h^2\) (since now \(h^2 = V_A/(V_A+V_{BY}+V_R)\)).
However, we get a somewhat different result if we also add a random effect of \texttt{mother} to test for maternal effects:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model4 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:16 2021
## LogLik Sigma2 DF wall cpu
## 1 -2033.178 1.0 852 10:23:17 0.0
## 2 -1723.734 1.0 852 10:23:17 0.0
## 3 -1396.354 1.0 852 10:23:17 0.0
## 4 -1193.012 1.0 852 10:23:17 0.0
## 5 -1107.946 1.0 852 10:23:17 0.0
## 6 -1095.327 1.0 852 10:23:17 0.0
## 7 -1094.816 1.0 852 10:23:17 0.0
## 8 -1094.815 1.0 852 10:23:17 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model4)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## byear 0.8820313 0.2632455 3.350604 P 0
## mother 1.1184698 0.2386239 4.687167 P 0
## vm(animal, ainv) 2.2985320 0.4962496 4.631806 P 0
## units!units 1.6290034 0.3714154 4.385934 P 0
## units!R 1.0000000 NA NA F 0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{(h2}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{vpredict}\NormalTok{(model4, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V3 }\SpecialCharTok{+}\NormalTok{ V4)))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.1487898 0.03861552
\end{verbatim}
Here partitioning of significant maternal variance has resulted in a further decrease in \(V_R\) but also a decrease in \(V_A\). The latter is because maternal effects of the sort we simulated (fixed differences between mothers) will have the consequence of increasing similarity among maternal siblings. Consequently they can look very much like additive genetic effects and if present, but unmodelled, represent a type of ``common environment effect'' that can - and will - cause upward bias in \(V_A\) and so \(h^2\).
The ``common environment'' can be conceived as the indissociable sum of the maternal additive genetic effect (such as loci affecting milk production) and the maternal environment or permanent environment (such as litter or nest environment created or modified by the mother).
\hypertarget{testing-significance-of-random-effects}{%
\subsection{Testing significance of random effects}\label{testing-significance-of-random-effects}}
An important point to note in this tutorial is that while the \texttt{z.ratio} (\texttt{component}/\texttt{std.error}) reported is a good indicator of likely statistical significance (\textgreater1.96?), the standard errors are approximate and are not recommended for formal hypothesis testing. A better approach is to use likelihood-ratio tests (LRT).
For example, to test the significance of maternal effects we could compare models with and without the inclusion of maternal identity as a random effect and compare the final log-likelihoods of these models.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model4}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -1094.815
\end{verbatim}
shows that the model including maternal identity has a log-likelihood of -1094.815, and
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model3}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -1113.772
\end{verbatim}
shows that the model excluding maternal identity has a log-likelihood of -1113.772.
A test statistic equal to twice the absolute difference in these log-likelihoods is assumed to be distributed as chi-square with one degree of freedom (the two models differ by a single term). In this case we would conclude that the maternal effects are highly significant, since
\(2 \times (-1094.81 - (-1113.77)) = 37.91\), and the p-value that comes with this is:
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \SpecialCharTok{{-}} \FunctionTok{pchisq}\NormalTok{(}\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (model4}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ model3}\SpecialCharTok{$}\NormalTok{loglik), }\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 7.390738e-10
\end{verbatim}
As P \textless{} 0.0001, we would conclude that the addition of maternal identity as a random effect significantly improves the fit of the model, given an increase in log-likelihood of approximately 19.
\hypertarget{further-partitioning-the-variance}{%
\subsection{Further partitioning the variance}\label{further-partitioning-the-variance}}
A population can be further divided into different groups or categories (such as females and males, juveniles and adults, or treated and untreated individuals). Some scientific questions require this further, deeper partitioning of the variance.
To avoid fitting multiple models (one per group), we can directly partition the variance between groups within a single model. In addition, by doing so, we can also test whether the variances differ between groups.
As an example, we take \texttt{model4} and partition its additive genetic variance and residual variance by sex. It is also possible to further partition the other variances, but making an animal model more complex requires sufficient sample size.\\
First, the dataset needs to be ordered by group (here sex).
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphon }\OtherTok{\textless{}{-}}\NormalTok{ gryphon[}\FunctionTok{order}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{sex), ]}
\NormalTok{model\_SEX }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{at}\NormalTok{(sex)}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{dsum}\NormalTok{(}\SpecialCharTok{\textasciitilde{}}\NormalTok{ units }\SpecialCharTok{|}\NormalTok{ sex),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Multi-section model using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:17 2021
## LogLik Sigma2 DF wall cpu
## 1 -1142.164 1.0 852 10:23:17 0.0
## 2 -1126.308 1.0 852 10:23:17 0.0
## 3 -1111.536 1.0 852 10:23:17 0.0
## 4 -1105.383 1.0 852 10:23:17 0.0
## 5 -1104.375 1.0 852 10:23:17 0.0
## 6 -1104.364 1.0 852 10:23:17 0.0
\end{verbatim}
To partition the variance, two distinct functions are required: \texttt{at()} at the random level and \texttt{dsum()} at the residual level. Both specify the grouping structure.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model\_SEX)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## byear 0.9001595 0.2690012 3.346303 P 0.0
## mother 1.3396184 0.2663118 5.030263 P 0.0
## at(sex, 1):vm(animal, ainv) 1.4372390 0.6514306 2.206281 P 0.1
## at(sex, 2):vm(animal, ainv) 1.9861434 0.9974302 1.991261 P 0.3
## sex_1!R 2.1706213 0.5542492 3.916327 P 0.0
## sex_2!R 1.7112948 0.8246188 2.075256 P 0.3
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{(h2.F }\OtherTok{\textless{}{-}} \FunctionTok{vpredict}\NormalTok{(model\_SEX, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V3 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V3 }\SpecialCharTok{+}\NormalTok{ V5)))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.2457811 0.1070794
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{(h2.M }\OtherTok{\textless{}{-}} \FunctionTok{vpredict}\NormalTok{(model\_SEX, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V4 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V4 }\SpecialCharTok{+}\NormalTok{ V6)))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.3345244 0.1619218
\end{verbatim}
By partitioning the additive genetic variance and the residual variance, the model estimates \(V_A\) and \(V_R\) for each group (sex). In doing so, we can calculate the \(h^2\) for each group (sex).
To test whether the variances differ between groups, we can compare the partitioned model \texttt{model\_SEX} with the previous unpartitioned model \texttt{model4} in a likelihood ratio test (LRT) with 2 degrees of freedom, since the models differ by two variance components.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model\_SEX}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -1104.364
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model4}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -1094.815
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \SpecialCharTok{{-}} \FunctionTok{pchisq}\NormalTok{(}\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (model\_SEX}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ model4}\SpecialCharTok{$}\NormalTok{loglik), }\DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 1
\end{verbatim}
Here, we can see that the point estimates of \(h^2\) seem to differ between sexes (0.25 and 0.33), but their SEs overlap.
The LRT gives more information: it shows that partitioning the additive genetic and residual variances between groups (sexes) did not improve the fit of the model, so the variances are not significantly different.
\hypertarget{modification-of-model-parameter}{%
\subsection{Modification of model parameter}\label{modification-of-model-parameter}}
A variance describes the spread of a distribution and is expected to be positive.
Due to a lack of power, a structural problem in the dataset, or a very low variance, ASReml-R often fixes a variance at a boundary (\texttt{B}) instead of estimating a positive value (\texttt{P}). When this happens, it is generally a good idea to examine it.
To examine the boundary effect, we can fit an alternative model that allows an unstructured parameter for the variance of interest, or for the entire variance matrix. For this example, we allow the model to estimate any values (so allowing possibly negative estimates) for the random and residual matrices.
First, we create a temporary model \texttt{model.temp} with the exact structure to modify.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model.temp }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{),}
\AttributeTok{start.values =}\NormalTok{ T}
\NormalTok{)}
\NormalTok{G.temp }\OtherTok{\textless{}{-}}\NormalTok{ model.temp}\SpecialCharTok{$}\NormalTok{vparameters[(}\DecValTok{1}\SpecialCharTok{:}\DecValTok{3}\NormalTok{), ]}
\NormalTok{G.temp}\SpecialCharTok{$}\NormalTok{Constraint }\OtherTok{\textless{}{-}} \StringTok{"U"}
\NormalTok{R.temp }\OtherTok{\textless{}{-}}\NormalTok{ model.temp}\SpecialCharTok{$}\NormalTok{vparameters[}\SpecialCharTok{{-}}\NormalTok{(}\DecValTok{1}\SpecialCharTok{:}\DecValTok{3}\NormalTok{), ]}
\NormalTok{R.temp}\SpecialCharTok{$}\NormalTok{Constraint[}\DecValTok{2}\NormalTok{] }\OtherTok{\textless{}{-}} \StringTok{"U"}
\end{Highlighting}
\end{Shaded}
The argument \texttt{start.values\ =\ T} makes \texttt{model.temp} return its variance-parameter table rather than fitting the model, so we can modify the parameter constraints. We extract the two matrices and specify which parameters should be modified: for this example we set all variances in the G and R matrices to be unstructured (\texttt{U}). It is important to note that, for the R matrix, the line \texttt{units!R} has to stay fixed at 1, so it is never changed.
The objects \texttt{G.temp} and \texttt{R.temp} can then be supplied to the following model as new parameter settings using the arguments \texttt{R.param} and \texttt{G.param}.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model5 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{),}
\AttributeTok{R.param =}\NormalTok{ R.temp, }\AttributeTok{G.param =}\NormalTok{ G.temp}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:17 2021
## LogLik Sigma2 DF wall cpu
## 1 -2033.178 1.0 852 10:23:17 0.0
## 2 -1723.734 1.0 852 10:23:17 0.0
## 3 -1396.354 1.0 852 10:23:17 0.0
## 4 -1193.012 1.0 852 10:23:17 0.0
## 5 -1107.946 1.0 852 10:23:17 0.0
## 6 -1095.327 1.0 852 10:23:17 0.0
## 7 -1094.816 1.0 852 10:23:17 0.0
## 8 -1094.815 1.0 852 10:23:17 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model5)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## byear 0.8820313 0.2632455 3.350604 U 0
## mother 1.1184698 0.2386239 4.687167 U 0
## vm(animal, ainv) 2.2985320 0.4962496 4.631806 U 0
## units!units 1.6290034 0.3714154 4.385934 U 0
## units!R 1.0000000 NA NA F 0
\end{verbatim}
Since \texttt{model4} did not hit any boundary, \texttt{model5} is very similar.
\hypertarget{covariance-between-two-random-effects}{%
\subsection{Covariance between two random effects}\label{covariance-between-two-random-effects}}
Sometimes, by changing the parameters of the model and allowing unstructured variances instead of boundaries, significant negative variances can appear.
A negative variance is counter-intuitive because, statistically, observations within the random effect level are less similar than expected by chance. However, a possible biological reason can be hypothesized, such as sibling competition within the nest creating a negative among-individual covariance within the nest.
To test such hypotheses, we need to estimate the covariance between two random effects in a univariate model.
For example, we can estimate whether the covariance between the additive genetic effect and the mother effect is negative using the argument \texttt{str()}. This argument has two components: first the equation term \texttt{\textasciitilde{}vm(animal,Ainv)+mother}, and second the structural term \texttt{\textasciitilde{}us(2):id(4500)}. Within the structural term, we fit a 2x2 unstructured matrix \texttt{us(2)}, which estimates the variances of, and the covariance between, the random effects in the equation term. To work successfully, the structural term also requires the number of levels to be given within \texttt{id()}.
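As a hedged sketch of what such a model could look like, assembling the equation and structural terms given above (using the \texttt{ainv} object computed earlier; the number of levels inside \texttt{id()} must match your data):
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: estimate the variances of, and covariance between,
# the animal and mother effects with a 2x2 unstructured matrix
model_cov <- asreml(
  fixed = bwt ~ 1 + sex,
  random = ~ str(~ vm(animal, ainv) + mother, ~ us(2):id(4500)),
  residual = ~ idv(units),
  data = gryphon,
  na.action = na.method(x = "omit", y = "omit")
)
\end{Highlighting}
\end{Shaded}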
\hypertarget{gremlin-1}{%
\section{gremlin}\label{gremlin-1}}
TODO (maybe just bother Matthew to do it)
Meanwhile
\begin{figure}
\includegraphics[width=1\linewidth]{images/Gizmo} \caption{Keep it dry and do not feed after midnight.}\label{fig:unnamed-chunk-30}
\end{figure}
\hypertarget{mcmcglmm-1}{%
\section{MCMCglmm}\label{mcmcglmm-1}}
\hypertarget{running-the-model-1}{%
\subsection{Running the model}\label{running-the-model-1}}
First load MCMCglmm:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(MCMCglmm)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Loading required package: coda
\end{verbatim}
\begin{verbatim}
## Loading required package: ape
\end{verbatim}
The first model we will fit is a simple animal model with no fixed effects, and only an `animal' random effect relating individuals to their additive genetic values through the pedigree.
First we are going to define priors. In a way we might want to avoid using priors, because we would like all of the information in our analysis to come from our data.
By default MCMCglmm uses improper priors, but this can cause inferential and numerical problems. We will specify priors for the animal effect and the residual variance using the following code:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior1}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The prior specification mirrors the variance structure of the model. With the single random effect \texttt{animal}, we partition the phenotypic variance into two distinct variance matrices: \texttt{G} (additive genetic) and \texttt{R} (residual).
This prior specification is a simple one, often used because it was believed to be relatively uninformative; it is equivalent to an inverse-gamma prior with shape and scale equal to 0.001. In many cases it is relatively uninformative, but when the posterior distribution for the variances has support close to zero it can behave poorly. Parameter-expanded priors (see Chapter 8 of the MCMCglmm CourseNotes, available from CRAN) are gaining in popularity due to their better behaviour, but for the purposes of this tutorial we will stick with the inverse-gamma prior.
We have told MCMCglmm to pay little heed to our prior expectation (\texttt{V}) by specifying a small degree of belief parameter (\texttt{nu}) of 0.002. Since this is a univariate analysis, the priors are matrices of order 1, and thus \texttt{nu} \textgreater{} 0 is the smallest degree of belief that provides what is known as a `proper' prior, avoiding numerical problems. In fact, there is a lot of information in the data regarding the marginal distributions of the parameters, and MCMCglmm will run most of the models that we suggest in these tutorials without priors. However, that is poor practice, so we will use these simple priors throughout these tutorials. We can now fit an animal model. The model to decompose variation in birth weight into genetic and residual effects is specified below.
The lower-case ``animal'' can be a \textbf{special} word for MCMCglmm. If a \texttt{pedigree} argument is provided, then \texttt{MCMCglmm} will recognize the term \texttt{animal} as the term used to estimate additive genetic variance. When the \texttt{pedigree} argument is not provided, the word \texttt{animal} is no different from any other variable. However, instead of providing a \texttt{pedigree} argument in the call to the MCMCglmm function, it is much more flexible to use the \texttt{ginv} argument to specify the random effect that must be linked to the pedigree (through the inverse relatedness matrix). We thus first compute the inverse relatedness matrix using \texttt{inverseA()} and then fit the animal model.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{Ainv }\OtherTok{\textless{}{-}} \FunctionTok{inverseA}\NormalTok{(gryphonped)}\SpecialCharTok{$}\NormalTok{Ainv}
\NormalTok{model1}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{ animal, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{prior =}\NormalTok{ prior1}\FloatTok{.1}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## MCMC iteration = 0
##
## MCMC iteration = 1000
##
## MCMC iteration = 2000
##
## MCMC iteration = 3000
##
## MCMC iteration = 4000
##
## MCMC iteration = 5000
##
## MCMC iteration = 6000
##
## MCMC iteration = 7000
##
## MCMC iteration = 8000
##
## MCMC iteration = 9000
##
## MCMC iteration = 10000
##
## MCMC iteration = 11000
##
## MCMC iteration = 12000
##
## MCMC iteration = 13000
\end{verbatim}
After typing this code, MCMCglmm will run, taking about 20 seconds on a modern desktop computer. The progress of the run will be printed to the screen. Also, note that a warning message will be printed at the end of the run. This is normal: in order for the MCMC algorithm to work, MCMCglmm must keep track of effects associated with unmeasured individuals appearing in the pedigree. This will not affect the answers, but when many unmeasured individuals exist, it can hinder the ability of the algorithm to explore the parameter space (more on this, and a solution, later). Let's have a look at the MCMCglmm outputs. First we will evaluate how confident we can be that MCMCglmm found good answers. By entering
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{Sol)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
\centering
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-34-1.pdf}
\caption{\label{fig:unnamed-chunk-34}The posterior distribution of the fixed effect (the intercept, or mean) in model 1.1}
\end{figure}
in the console, we get Figure 1 (p.~5). The plot on the left shows a time series of the values of 1000 samples of the posterior distribution of the model intercept (mean birth weight). The plot on the right shows the same data as a distribution. Complicated statistical methods for estimating population means are of course of little interest; rather, we are examining these outputs to check that MCMCglmm's algorithms worked well for our data and for this model. The important point here is that a consistent amount of variation around a largely unchanging mean value of the intercept was obtained (which gives this fluctuating trace concentrated around the mean), and the posterior distribution of the intercept appears to be valid. More rigorous means of evaluating the independence of the samples in the posterior distribution (evaluating autocorrelation) are discussed in the MCMCglmm CourseNotes, available from CRAN. Note that your output for \texttt{model\ 1.1} may not be identical to this due to Monte Carlo (random number) error; every time you run the model, you will get similar but slightly different results.
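As a quick sketch of such a check, \texttt{autocorr.diag()} from the \texttt{coda} package (loaded with MCMCglmm) can be applied to the fixed effect samples:
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: autocorrelation among successive samples of the intercept;
# values near zero at lags > 0 indicate good mixing
autocorr.diag(model1.1$Sol)
\end{Highlighting}
\end{Shaded}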
The posterior distributions of the variance components are generally of more interest to animal model users. We can view plots of the posterior distributions of the variance components for model 1.1 by
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
\centering
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-35-1.pdf}
\caption{\label{fig:unnamed-chunk-35}The posterior distributions of the variance components of model 1.1, based on an analysis with the default values for nitt, burnin, and thin in MCMCglmm}
\end{figure}
which generates Figure 2 (p.~6). Here we see distributions of the estimates of the additive genetic (\texttt{animal}) and residual (\texttt{units}) effects. These samples contain some autocorrelation, i.e., trends are apparent in the left-hand plots. We can deal with this easily.
\hypertarget{change-in-iteration-and-sampling}{%
\subsection{Change in iteration and sampling}\label{change-in-iteration-and-sampling}}
We will simply re-run the model for a larger number of iterations and sample the chain less frequently. So far we have been running MCMCglmm with its default values. These defaults are a total run length of 13000 iterations, the first 3000 of which are discarded as a `burn-in' period to make sure that the chain converges to the part of the parameter space where the maximum likelihood exists. The remaining 10000 iterations are sampled (estimates retained) every 10 iterations (the thinning interval). Because the values in the left-hand plots in Figure 2 appear to differ at the beginning of the run, we might suspect that a longer burn-in period is required. We can reduce the autocorrelation by lengthening the rest of the run and sampling the chain less frequently. The following code runs the same model 1.1, but is likely to produce better samples of the posterior distributions. This model should take about two minutes to run.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model1}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior1}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Note that we have now included the argument \texttt{verbose\ =\ FALSE} in the MCMCglmm call. We will continue this throughout the tutorial so that more complete screen outputs can be included in this document without using too much space. Now produce the plots of the samples of the fixed and random effects (they have not been included in this document), and note that the autocorrelation is much reduced. A more compact way to evaluate the validity of the posterior distributions is to calculate the autocorrelation among samples, as follows:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{autocorr.diag}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## Lag 0 1.000000000 1.000000000
## Lag 50 0.206132870 0.152649149
## Lag 250 -0.003396339 -0.001107505
## Lag 500 0.011277620 0.014167911
## Lag 2500 0.046030492 0.037726623
\end{verbatim}
We will consider these levels of autocorrelation acceptable, at least for the purposes of this tutorial. Ideally, all samples of the posterior distribution should be independent, and the autocorrelation for all lag values greater than zero should be near zero. However, in practice this will not strictly be achievable for all analytic scenarios. Certainly the much higher levels of autocorrelation observed in the first run should not be tolerated in any formal analysis.
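Another compact diagnostic is the effective sample size; as a sketch, using \texttt{effectiveSize()} from the \texttt{coda} package:
\begin{Shaded}
\begin{Highlighting}[]
# a sketch: effective number of independent samples per component;
# ideally close to the 1000 samples retained by nitt/burnin/thin
effectiveSize(model1.1$VCV)
\end{Highlighting}
\end{Shaded}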
Note that the validity of posterior distributions of any analysis should always be checked; however, for brevity we will not continue to be so consistently diligent throughout the rest of these tutorials. We can now proceed with confidence to recover some more information from these samples. We can obtain estimates of the additive genetic and residual variance by calculating the modes of the posterior distributions:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 3.211534 3.739673
\end{verbatim}
We can obtain the Bayesian equivalent of confidence intervals by calculating the values of the estimates that bound 95\% (or any other proportion) of the posterior distributions:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## animal 2.153336 4.655585
## units 2.930350 4.888197
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
\hypertarget{change-priors-parameters}{%
\subsection{Change priors parameters}\label{change-priors-parameters}}
We specified weak priors in these analyses. Now we will check whether or not the priors influenced the results that we obtained. The simplest way to do this is to rerun the model with different priors. Here we construct priors with a larger degree of belief parameter (\texttt{nu}), and we will specify our prior expectation that only a small proportion (5\%) of the variation is under genetic control (\texttt{V}):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{p.var }\OtherTok{\textless{}{-}} \FunctionTok{var}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{bwt, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{prior1.}\FloatTok{1.2} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{matrix}\NormalTok{(p.var }\SpecialCharTok{*} \FloatTok{0.05}\NormalTok{), }\AttributeTok{nu =} \DecValTok{1}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{matrix}\NormalTok{(p.var }\SpecialCharTok{*} \FloatTok{0.95}\NormalTok{), }\AttributeTok{nu =} \DecValTok{1}\NormalTok{)}
\NormalTok{)}
\NormalTok{model1.}\FloatTok{1.2} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{prior =}\NormalTok{ prior1.}\FloatTok{1.2}\NormalTok{, }\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{,}
\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 3.211534 3.739673
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model1.}\FloatTok{1.2}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 2.901751 3.972179
\end{verbatim}
and we can therefore conclude that the difference in the priors has little effect on the outcome of the analysis. This is typical for an analysis where lots of data are available relative to the complexity of the model, but it is often not the case. In all cases, it is important to check the effect of priors on the conclusions drawn from a model. In addition, you can also specify the prior using previous knowledge of, or expectations for, the variance.
\hypertarget{estimating-heritability-1}{%
\subsection{Estimating heritability}\label{estimating-heritability-1}}
A useful property of Bayesian posterior distributions is that we can apply almost any transformation to these distributions and they will remain valid. This applies to the calculation of heritabilities. We can obtain an estimate of the heritability by applying the basic formula \(h^2 = V_A/V_P\) to each sample of the posterior distribution:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{posterior.heritability1}\FloatTok{.1} \OtherTok{\textless{}{-}}\NormalTok{ model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{])}
\FunctionTok{HPDinterval}\NormalTok{(posterior.heritability1}\FloatTok{.1}\NormalTok{, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 0.3031621 0.5995078
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4546461
\end{verbatim}
Generate a plot of the posterior distribution of this heritability estimate:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(posterior.heritability1}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
\centering
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-42-1.pdf}
\caption{\label{fig:unnamed-chunk-42}The posterior distribution of the heritability from model 1.1}
\end{figure}
\hypertarget{adding-fixed-effects-1}{%
\subsection{Adding fixed effects}\label{adding-fixed-effects-1}}
To add effects to a univariate model, we simply modify the fixed effect part of the model specification:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model1}\FloatTok{.2} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{prior =}\NormalTok{ prior1}\FloatTok{.1}\NormalTok{,}
\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
We can assess the significance of \texttt{sex} as a fixed effect by examining its posterior distribution.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol[, }\StringTok{"sex2"}\NormalTok{])}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 2.220736
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol[, }\StringTok{"sex2"}\NormalTok{], }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 1.867391 2.508573
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
The posterior distribution of the \texttt{sex2} term does not overlap zero. Thus, we can infer that sex has an effect on birth weight (\emph{i.e.}, there is sexual dimorphism) and is, for most purposes, a useful addition to the model. It is also worth noting that the variance components have changed slightly:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 3.096504 2.787671
\end{verbatim}
In fact, since sex effects were previously contributing to the residual variance of the model, our estimate of \(V_R\) (denoted `units' in the output) is now slightly lower than before. This has an important consequence for estimating heritability: if we calculate \(V_P\) as \(V_A + V_R\), then as we include fixed effects we will soak up more residual variance, driving \(V_P\) down. Assuming that \(V_A\) is more or less unaffected by the fixed effects fitted, then as \(V_P\) goes down we expect our estimate of \(h^2\) to go up.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{posterior.heritability1}\FloatTok{.2} \OtherTok{\textless{}{-}}\NormalTok{ model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.5179268
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(posterior.heritability1}\FloatTok{.2}\NormalTok{, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 0.3686665 0.6418853
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
Here \(h^2\) has increased slightly, from 0.455 to 0.518 (as always, your values may differ slightly due to Monte Carlo error). Which is the better estimate?
It depends on what your question is. The first is an estimate of the proportion of variance in birth weight explained by additive effects, the latter is an estimate of the proportion of variance in birth weight after conditioning on sex that is explained by additive effects.
An important piece of advice: each researcher should be consistent in how they name their estimates, and should always state clearly whether the estimates they report are conditional on fixed effects or not, to avoid any confusion.
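If the unconditioned estimate is wanted from a model that includes fixed effects, one hedged sketch of a common approach is to add the variance explained by the fixed effects back into the denominator, computed from the fixed-effect predictions for each posterior sample (this assumes the fitted object stores the fixed-effect design matrix in \texttt{model1.2\$X}; \texttt{h2.uncond} is our own name):
\begin{verbatim}
# Hedged sketch: put the fixed-effect variance back into V_P.
# Assumes model1.2$X is the fixed-effect design matrix stored by
# MCMCglmm; h2.uncond is an illustrative name, not tutorial code.
vf <- sapply(1:nrow(model1.2$Sol), function(i) {
  var(as.vector(model1.2$X %*% model1.2$Sol[i, ]))
})
h2.uncond <- model1.2$VCV[, "animal"] /
  (model1.2$VCV[, "animal"] + model1.2$VCV[, "units"] + vf)
posterior.mode(as.mcmc(h2.uncond))
\end{verbatim}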
\hypertarget{adding-random-effects-1}{%
\subsection{Adding random effects}\label{adding-random-effects-1}}
This is done by simply modifying the model statement in the same way, but requires addition of a prior for the new random effect. For instance, we can fit an effect of birth year:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior1}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{), }\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{))}
\NormalTok{model1}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{ animal }\SpecialCharTok{+}\NormalTok{ byear, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior1}\FloatTok{.3}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal byear units
## 2.6462756 0.8360178 2.2879627
\end{verbatim}
Here the variance in birth weight explained by birth year is 0.836. Note that although \(V_A\) has changed somewhat, most of what is now partitioned as a \texttt{birth\ year} effect was previously partitioned as \(V_R\). Thus what we have really done here is partition the environmental effects into those arising from year-to-year differences versus everything else, and we do not really expect much change in \(h^2\) (since now \(h^2 = V_A /(V_A + V_{BY} + V_R)\)). However, we get a somewhat different result if we also add a random effect of \texttt{mother} to test for maternal effects:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{p.var }\OtherTok{\textless{}{-}} \FunctionTok{var}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{bwt, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{prior1}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{),}
\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{),}
\AttributeTok{G3 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)}
\NormalTok{)}
\NormalTok{model1}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{ animal }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior1}\FloatTok{.4}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal byear mother units
## 2.4964379 0.8993490 0.9651266 1.3814133
\end{verbatim}
Here partitioning of significant maternal variance has resulted in a further decrease in \(V_R\) but also a decrease in \(V_A\). The latter is because maternal effects of the sort we simulated (fixed differences between mothers) will have the consequence of increasing similarity among maternal siblings. Consequently they can look very much like additive genetic effects and, if present but unmodelled, represent a type of `common environment effect' that can, and will, cause upward bias in \(V_A\) and so in \(h^2\). Let's compare the estimates of heritability from each of models 1.2, 1.3 and 1.4:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{posterior.heritability1}\FloatTok{.3} \OtherTok{\textless{}{-}}\NormalTok{ model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"byear"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{])}
\NormalTok{posterior.heritability1}\FloatTok{.4} \OtherTok{\textless{}{-}}\NormalTok{ model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"byear"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"mother"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.5179268
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1}\FloatTok{.3}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4389851
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1}\FloatTok{.4}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4409629
\end{verbatim}
\hypertarget{testing-significance-of-variance-components}{%
\subsection{Testing significance of variance components}\label{testing-significance-of-variance-components}}
While testing the significance of fixed effects by evaluating whether or not their posterior distributions overlap zero was simple and valid, this approach does not work for variance components.
Variance components are bounded to be positive (given a proper prior), and thus even when a random effect is not meaningful, its posterior distribution will never overlap zero. Model comparisons can instead be performed using the deviance information criterion (\texttt{DIC}), although it should be noted that the properties of \texttt{DIC} are not well understood and that it may be focused at the wrong level for many people's intended inference, particularly with non-Gaussian responses. The implementation of \texttt{DIC} in MCMCglmm is further described in the reference manual. \texttt{DIC} values are calculated by MCMCglmm by default. Briefly, \texttt{DIC}, like other information criteria, balances model fit against model complexity, and smaller values of \texttt{DIC} are preferred. We can compare \texttt{models\ 1.4} and \texttt{1.3}, \emph{i.e.}, models with and without the \texttt{mother} term:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model1}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{DIC}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 3549.694
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model1}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{DIC}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 3316.696
\end{verbatim}
\texttt{model\ 1.4} has a much lower DIC value. Since the maternal effect term is the only difference between the models, we can consider the inclusion of this term statistically justifiable. We should note however that DIC has a large sampling variance and should probably only be calculated based on much longer MCMC runs.
\hypertarget{further-partitioning-variance}{%
\subsection{Further partitioning variance}\label{further-partitioning-variance}}
A population can be further divided into groups or categories (such as females and males, juveniles and adults, or treated and untreated individuals), and some scientific questions require this finer partitioning of the variance.
To avoid fitting multiple models (one per group), we can directly partition the variance between groups within a single model. In addition, by doing so, we can test whether the variances differ between groups.
As an example, we can partition the additive genetic variance and the residual variance by sex. Further partitioning the other variances is possible in principle, but making an animal model more complex requires a sufficient sample size.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior1.}\FloatTok{4.}\NormalTok{SEX }\OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{), }\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{), }\AttributeTok{G3 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{)}
\NormalTok{model1.}\FloatTok{4.}\NormalTok{SEX }\OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idh}\NormalTok{(sex)}\SpecialCharTok{:}\NormalTok{animal }\SpecialCharTok{+}\NormalTok{ byear }\SpecialCharTok{+}\NormalTok{ mother,}
\AttributeTok{rcov =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idh}\NormalTok{(sex)}\SpecialCharTok{:}\NormalTok{units,}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{nitt =} \DecValTok{65000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{50}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{15000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior1.}\FloatTok{4.}\NormalTok{SEX, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## sex1.animal sex2.animal byear mother sex1.units sex2.units
## 0.8704608 1.3035599 0.7596410 1.2952954 2.2853356 1.5491127
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{posterior.heritability1.}\FloatTok{4.}\NormalTok{FEM }\OtherTok{\textless{}{-}}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex1.animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex1.animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"byear"}\NormalTok{] }\SpecialCharTok{+}
\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"mother"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex1.units"}\NormalTok{])}
\NormalTok{posterior.heritability1.}\FloatTok{4.}\NormalTok{MAL }\OtherTok{\textless{}{-}}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex2.animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex2.animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"byear"}\NormalTok{] }\SpecialCharTok{+}
\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"mother"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model1.}\FloatTok{4.}\NormalTok{SEX}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"sex2.units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1.}\FloatTok{4.}\NormalTok{FEM)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.1399386
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(posterior.heritability1.}\FloatTok{4.}\NormalTok{FEM, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 0.04297738 0.4022488
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(posterior.heritability1.}\FloatTok{4.}\NormalTok{MAL)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.221888
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(posterior.heritability1.}\FloatTok{4.}\NormalTok{MAL, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 0.03165791 0.5510488
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
Here, we estimate the heritability separately for each sex. Neither posterior distribution overlaps zero, so we can conclude that the heritability is significant in both sexes. However, because the credible intervals overlap each other, we cannot conclude that the heritability differs significantly between the sexes.
An important quote to remember is: ``A difference in significance is not a significant difference.''
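To make that point concrete, a short sketch of a direct comparison is to compute the posterior distribution of the difference between the two sex-specific heritabilities (\texttt{h2.diff} is our own name); if its credible interval spans zero, there is no evidence for a difference:
\begin{verbatim}
# Sketch: posterior distribution of the female minus male difference
h2.diff <- posterior.heritability1.4.FEM - posterior.heritability1.4.MAL
posterior.mode(h2.diff)
HPDinterval(h2.diff, 0.95)
\end{verbatim}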
\hypertarget{modification-of-model-parameter-1}{%
\subsection{Modification of model parameter}\label{modification-of-model-parameter-1}}
\hypertarget{covariance-between-two-random-effects-1}{%
\subsection{Covariance between two random effects}\label{covariance-between-two-random-effects-1}}
\hypertarget{brms-1}{%
\section{brms}\label{brms-1}}
\hypertarget{running-the-model-2}{%
\subsection{Running the model}\label{running-the-model-2}}
First we need to load the \texttt{brms} library:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(brms)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Loading required package: Rcpp
\end{verbatim}
\begin{verbatim}
## Loading 'brms' package (version 2.15.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
\end{verbatim}
\begin{verbatim}
##
## Attaching package: 'brms'
\end{verbatim}
\begin{verbatim}
## The following object is masked from 'package:MCMCglmm':
##
## me
\end{verbatim}
\begin{verbatim}
## The following object is masked from 'package:stats':
##
## ar
\end{verbatim}
To be able to fit an animal model, brms needs the relationship matrix itself (and not its inverse, as in other software packages).
This can be computed using the \texttt{nadiv} package:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{Amat }\OtherTok{\textless{}{-}} \FunctionTok{as.matrix}\NormalTok{(nadiv}\SpecialCharTok{::}\FunctionTok{makeA}\NormalTok{(gryphonped))}
\end{Highlighting}
\end{Shaded}
We are now ready to specify our first model:
The structure of a brms model is similar to \texttt{lme4}: the animal random effect is added to the model with the term \texttt{(1\ \textbar{}\ gr(animal,\ cov\ =\ Amat))}.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{brms\_m1}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{data2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{Amat =}\NormalTok{ Amat),}
\AttributeTok{family =} \FunctionTok{gaussian}\NormalTok{(),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(brms\_m1}\FloatTok{.1}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/brms\_m1\_1.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
The result of this long model run is saved in a separate file, \texttt{brms\_m1\_1.rda}.
After reloading it, we can examine the variance estimates and their posterior distributions (or work with the model object directly).
It is also possible to calculate the heritability from the posterior samples of the variance components, extracted with \texttt{VarCorr}:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\StringTok{"data/brms\_m1\_1.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: bwt ~ 1 + (1 | gr(animal, cov = Amat))
## Data: gryphon (Number of observations: 854)
## Samples: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup samples = 1000
##
## Group-Level Effects:
## ~animal (Number of levels: 854)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.88 0.17 1.54 2.23 1.03 74 99
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 7.60 0.14 7.33 7.86 1.01 428 727
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.93 0.13 1.66 2.18 1.04 71 112
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(brms\_m1}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-55-1.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{v\_animal }\OtherTok{\textless{}{-}}\NormalTok{ (}\FunctionTok{VarCorr}\NormalTok{(brms\_m1}\FloatTok{.1}\NormalTok{, }\AttributeTok{summary =} \ConstantTok{FALSE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{animal}\SpecialCharTok{$}\NormalTok{sd)}\SpecialCharTok{\^{}}\DecValTok{2}
\NormalTok{v\_r }\OtherTok{\textless{}{-}}\NormalTok{ (}\FunctionTok{VarCorr}\NormalTok{(brms\_m1}\FloatTok{.1}\NormalTok{, }\AttributeTok{summary =} \ConstantTok{FALSE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{residual}\SpecialCharTok{$}\NormalTok{sd)}\SpecialCharTok{\^{}}\DecValTok{2}
\NormalTok{h.bwt }\OtherTok{\textless{}{-}} \FunctionTok{as.mcmc}\NormalTok{(v\_animal }\SpecialCharTok{/}\NormalTok{ (v\_animal }\SpecialCharTok{+}\NormalTok{ v\_r))}
\FunctionTok{summary}\NormalTok{(h.bwt)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Iterations = 1:1000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 1000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## 0.484221 0.074533 0.002357 0.009275
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## 0.3433 0.4338 0.4841 0.5350 0.6369
\end{verbatim}
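For comparability with the MCMCglmm estimates above, a short sketch reporting the posterior mode and 95\% HPD interval of this heritability (using the coda/MCMCglmm helpers already loaded) is:
\begin{verbatim}
posterior.mode(h.bwt)
HPDinterval(h.bwt, 0.95)
\end{verbatim}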
\hypertarget{adding-fixed-effects-2}{%
\subsection{Adding fixed effects}\label{adding-fixed-effects-2}}
To add effects to a univariate model, we simply modify the fixed effect portion of the model specification:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{brms\_m1}\FloatTok{.2} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{data2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{Amat =}\NormalTok{ Amat),}
\AttributeTok{family =} \FunctionTok{gaussian}\NormalTok{(),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(brms\_m1}\FloatTok{.2}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/brms\_m1\_2.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\StringTok{"data/brms\_m1\_2.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: bwt ~ 1 + sex + (1 | gr(animal, cov = Amat))
## Data: gryphon (Number of observations: 854)
## Samples: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup samples = 1000
##
## Group-Level Effects:
## ~animal (Number of levels: 854)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.75 0.16 1.42 2.05 1.13 13 113
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 6.06 0.17 5.74 6.41 1.00 357 574
## sex2 2.21 0.16 1.90 2.52 1.00 722 657
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.71 0.13 1.47 1.98 1.12 14 97
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(brms\_m1}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-57-1.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.2}\NormalTok{)}\SpecialCharTok{$}\NormalTok{fixed}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 6.064853 0.1726459 5.735170 6.410117 1.003015 357 574
## sex2 2.210675 0.1574542 1.898645 2.520026 1.002318 722 657
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.2}\NormalTok{)}\SpecialCharTok{$}\NormalTok{random}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## $animal
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.747083 0.1632548 1.419377 2.050884 1.126556 13 113
\end{verbatim}
\hypertarget{adding-random-effects-2}{%
\subsection{Adding random effects}\label{adding-random-effects-2}}
This is done by simply modifying the model formula in the same way (here we rely on \texttt{brms}'s default priors for the new random effects). For instance, we can fit effects of birth year and mother:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{brms\_m1}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)) }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ byear) }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ mother),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{data2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{Amat =}\NormalTok{ Amat),}
\AttributeTok{family =} \FunctionTok{gaussian}\NormalTok{(),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(brms\_m1}\FloatTok{.3}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/brms\_m1\_3.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\StringTok{"data/brms\_m1\_3.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.3}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: bwt ~ 1 + sex + (1 | gr(animal, cov = Amat)) + (1 | byear) + (1 | mother)
## Data: gryphon (Number of observations: 854)
## Samples: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup samples = 1000
##
## Group-Level Effects:
## ~animal (Number of levels: 854)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.50 0.17 1.11 1.78 1.16 9 51
##
## ~byear (Number of levels: 34)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.96 0.14 0.71 1.28 1.00 400 642
##
## ~mother (Number of levels: 394)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.05 0.11 0.83 1.26 1.01 193 379
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 6.39 0.23 5.93 6.87 1.02 251 504
## sex2 1.97 0.15 1.67 2.28 1.00 644 785
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.30 0.14 1.03 1.56 1.17 9 55
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(brms\_m1}\FloatTok{.3}\NormalTok{, }\AttributeTok{ask =} \ConstantTok{FALSE}\NormalTok{, }\AttributeTok{N =} \DecValTok{3}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-59-1.pdf} \includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-59-2.pdf}
\hypertarget{testing-significance-of-variance-components-1}{%
\subsection{Testing significance of variance components}\label{testing-significance-of-variance-components-1}}
\hypertarget{further-partitioning-of-the-variance}{%
\subsection{Further partitioning of the variance}\label{further-partitioning-of-the-variance}}
Depending on the research question and the presence of distinct groups within the dataset, the variance can be partitioned at different levels, as we did with MCMCglmm. For example, we can partition the additive genetic and residual variances between the sexes (males and females) to estimate sex-specific heritabilities.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{brms\_m1}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ sex }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)) }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ byear) }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ mother),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{data2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{Amat =}\NormalTok{ Amat),}
\AttributeTok{family =} \FunctionTok{gaussian}\NormalTok{(),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(brms\_m1}\FloatTok{.4}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/brms\_m1\_4.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\StringTok{"data/brms\_m1\_4.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(brms\_m1}\FloatTok{.4}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: bwt ~ 1 + sex + (1 | gr(animal, cov = Amat)) + (1 | byear) + (1 | mother)
## Data: gryphon (Number of observations: 854)
## Samples: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup samples = 1000
##
## Group-Level Effects:
## ~animal (Number of levels: 854)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.52 0.16 1.21 1.82 1.09 22 103
##
## ~byear (Number of levels: 34)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.98 0.15 0.73 1.32 1.00 538 677
##
## ~mother (Number of levels: 394)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.05 0.11 0.83 1.28 1.00 219 428
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 6.38 0.24 5.91 6.86 1.00 516 579
## sex2 1.97 0.15 1.68 2.28 1.00 1044 766
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.27 0.13 1.01 1.50 1.11 16 48
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(brms\_m1}\FloatTok{.4}\NormalTok{, }\AttributeTok{ask =} \ConstantTok{FALSE}\NormalTok{, }\AttributeTok{N =} \DecValTok{3}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-61-1.pdf} \includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-61-2.pdf}
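As a hedged sketch of how such a sex-specific partition might actually be written in \texttt{brms} (our own illustration, relying on the random-slope syntax \texttt{(0\ +\ sex\ \textbar{}\ ...)} for group-level variances and the distributional syntax \texttt{sigma\ \textasciitilde{}\ sex} for the residual, not code from this tutorial):
\begin{verbatim}
# Hypothetical sketch (not run): sex-specific additive genetic
# variances via a (0 + sex | ...) term, and sex-specific residual
# variances via brms's distributional syntax sigma ~ sex
brms_m1.4.SEX <- brm(
  bf(
    bwt ~ 1 + sex + (0 + sex | gr(animal, cov = Amat)) +
      (1 | byear) + (1 | mother),
    sigma ~ sex
  ),
  data = gryphon,
  data2 = list(Amat = Amat),
  family = gaussian(),
  chains = 2, cores = 2, iter = 1000
)
\end{verbatim}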
\hypertarget{modification-of-model-parameter-2}{%
\subsection{Modification of model parameter}\label{modification-of-model-parameter-2}}
\hypertarget{covariance-between-two-random-effects-2}{%
\subsection{Covariance between two random effects}\label{covariance-between-two-random-effects-2}}
\hypertarget{stan}{%
\section{stan}\label{stan}}
to do
\hypertarget{multivariate-animal-model}{%
\chapter{Multivariate animal model}\label{multivariate-animal-model}}
This tutorial will demonstrate how to run a multivariate animal model looking at birth weight and tarsus length of the phenomenal gryphons.
\hypertarget{scenario-and-data-1}{%
\section{Scenario and data}\label{scenario-and-data-1}}
\hypertarget{scenario-1}{%
\subsection{Scenario}\label{scenario-1}}
Since natural selection rarely acts on single traits, to understand how birth weight might evolve in our population of gryphons, we may also want to think about possible covariance with other traits. If tarsus length at fledging is also under positive selection, what implications does it have for birth weight and vice versa? If the two traits are positively genetically correlated then this will facilitate evolution of larger size (since response of one trait will induce a positively correlated response in the other). If there is negative genetic covariance then this could act as an evolutionary constraint.
Using multivariate models allows the estimation of parameters relating to each trait alone (\emph{i.e.} \(V_A\), \(h^2\), etc), but also yields estimates of covariance components between traits. These include the (additive) genetic covariance \(COV_A\) which is often rescaled to give the additive genetic correlation \(r_A\). However, covariance can also arise through other random effects (\emph{e.g.} maternal covariance) and these sources can also be explicitly modelled in a bivariate analysis.
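Concretely, the rescaling that converts the genetic covariance into the genetic correlation is
\[
r_A = \frac{COV_A}{\sqrt{V_{A,1} \, V_{A,2}}},
\]
so the sign of \(COV_A\) determines whether the correlated response facilitates or constrains evolution.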
\hypertarget{gryphon-files}{%
\subsection{gryphon files}\label{gryphon-files}}
The pedigree and phenotypic data files are the same as those used in tutorial 1 (\emph{i.e.}, \texttt{gryphonped.csv} and \texttt{gryphon.csv}, respectively).
Reading the data
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphon }\OtherTok{\textless{}{-}} \FunctionTok{read.csv}\NormalTok{(}\StringTok{"data/gryphon.csv"}\NormalTok{)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{animal }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{animal)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{mother }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{mother)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{byear }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{byear)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{sex }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{sex)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{bwt }\OtherTok{\textless{}{-}} \FunctionTok{as.numeric}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{bwt)}
\NormalTok{gryphon}\SpecialCharTok{$}\NormalTok{tarsus }\OtherTok{\textless{}{-}} \FunctionTok{as.numeric}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{tarsus)}
\end{Highlighting}
\end{Shaded}
Reading the pedigree
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphonped }\OtherTok{\textless{}{-}} \FunctionTok{read.csv}\NormalTok{(}\StringTok{"data/gryphonped.csv"}\NormalTok{)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{id }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{id)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{father }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{father)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{mother }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{mother)}
\end{Highlighting}
\end{Shaded}
\hypertarget{asreml-biv}{%
\section{Asreml-R}\label{asreml-biv}}
\hypertarget{running-the-model-3}{%
\subsection{Running the model}\label{running-the-model-3}}
First we need to load the \texttt{asreml} library:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(asreml)}
\end{Highlighting}
\end{Shaded}
For running multivariate analyses in ASReml-R, the code is slightly more complex than for the univariate case. This is because ASReml-R allows us to make different assumptions about the way in which traits might be related, so we need to explicitly code the (co)variance structure we want to fit by specifying some starting values. These can be very approximate \emph{guestimates}, but having reasonable starting values can aid convergence. Finally, we have increased the default maximum number of iterations (\texttt{maxit}), which can help to achieve convergence for more complicated models. Another way to increase the number of iterations is to use the \texttt{update} function. Note that if the \texttt{LogLik} has not stabilized after several iterations, it is a good indication that the model requires more iterations.
It is also possible to run the model without specifying any starting values, but univariate models will usually provide some \emph{guestimates} for the additive genetic variances.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{ainv }\OtherTok{\textless{}{-}} \FunctionTok{ainverse}\NormalTok{(gryphonped)}
\NormalTok{modela }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv), }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:31 2021
## LogLik Sigma2 DF wall cpu
## 1 -5118.122 1.0 1535 10:23:31 0.0
## 2 -4358.769 1.0 1535 10:23:31 0.0
## 3 -3540.792 1.0 1535 10:23:31 0.0
## 4 -3004.970 1.0 1535 10:23:31 0.0
## 5 -2747.831 1.0 1535 10:23:31 0.0
## 6 -2687.807 1.0 1535 10:23:31 0.0
## 7 -2680.057 1.0 1535 10:23:31 0.0
## 8 -2679.743 1.0 1535 10:23:31 0.0
## 9 -2679.741 1.0 1535 10:23:31 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modela }\OtherTok{\textless{}{-}} \FunctionTok{update}\NormalTok{(modela)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:31 2021
## LogLik Sigma2 DF wall cpu
## 1 -2679.741 1.0 1535 10:23:31 0.0
## 2 -2679.741 1.0 1535 10:23:31 0.0
\end{verbatim}
\texttt{modela} has fitted a bivariate model of \texttt{bwt} and \texttt{tarsus}, with the mean for each of the traits as a fixed effect (\texttt{trait}). The additive genetic variance-covariance matrix (\(\textbf{G}\)) is unstructured (\texttt{us}; \emph{i.e.} all elements are free to vary) and the starting values for \(V_A\) for \texttt{bwt}, \(COV_A\) between \texttt{bwt} and \texttt{tarsus}, and \(V_A\) for \texttt{tarsus} are set to 1, 0.1 and 1, respectively. Similarly, the residual matrix is unstructured and uses the same starting values.
Note that the argument \texttt{na.action\ =\ na.method(x\ =\ "include",\ y\ =\ "include")} can be added to the model. In a bivariate model, it allows the covariance between two traits to be estimated even when the missing values (\texttt{NA}) differ between traits, accommodating imbalanced phenotyping and preserving sample size. However, it is then important to scale the two traits (mean = 0, variance = 1) for the model to be adjusted correctly (see the ASReml-R manual for more information).
In this example, there are no missing values in the dataset.
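Where missing values are present, a minimal sketch of such pre-scaling (the \texttt{\_s} column names are our own) would be:
\begin{verbatim}
# Centre and scale each trait (scale() ignores NAs when centring
# and scaling); the _s names are illustrative
gryphon$bwt_s <- as.numeric(scale(gryphon$bwt))
gryphon$tarsus_s <- as.numeric(scale(gryphon$tarsus))
\end{verbatim}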
Let's have a look at the variance components, and notice that there are now seven (co)variance components reported in the table:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modela)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound
## trait:vm(animal, ainv)!trait_bwt:bwt 3.368405 0.6348356 5.305948 P
## trait:vm(animal, ainv)!trait_tarsus:bwt 2.459827 1.0732809 2.291876 P
## trait:vm(animal, ainv)!trait_tarsus:tarsus 12.345849 3.0744787 4.015591 P
## units:trait!R 1.000000 NA NA F
## units:trait!trait_bwt:bwt 3.849910 0.5200095 7.403539 P
## units:trait!trait_tarsus:bwt 3.313269 0.9129222 3.629300 P
## units:trait!trait_tarsus:tarsus 17.646386 2.6670308 6.616491 P
## %ch
## trait:vm(animal, ainv)!trait_bwt:bwt 0
## trait:vm(animal, ainv)!trait_tarsus:bwt 0
## trait:vm(animal, ainv)!trait_tarsus:tarsus 0
## units:trait!R 0
## units:trait!trait_bwt:bwt 0
## units:trait!trait_tarsus:bwt 0
## units:trait!trait_tarsus:tarsus 0
\end{verbatim}
The first three terms relate to the genetic matrix and are, in order, \(V_{A,bwt}\), \(COV_A\) and \(V_{A,tarsus}\). Below them is again a line where the \texttt{units:trait!R} component equals 1, which can again be ignored. The final three terms relate to the residual matrix and correspond to \(V_{R,bwt}\), \(COV_R\) and \(V_{R,tarsus}\). Based on our quick and dirty check (is \texttt{z.ratio} \textgreater{} 1.96?), all components appear to be statistically significant.
We can calculate the genetic correlation as \(COV_A / \sqrt{V_{A,bwt} \cdot V_{A,tarsus}}\). Thus this model gives an estimate of \(r_A\) = 0.38. The residual correlation can be estimated in the same way, giving \(r_{res}\) = 0.40.
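As a quick by-hand sketch, using the ordering of components in the table above:
\begin{verbatim}
# By hand, from the component ordering in summary(modela)$varcomp
vc <- summary(modela)$varcomp[["component"]]
vc[2] / sqrt(vc[1] * vc[3])  # r_A,   approx. 0.38
vc[6] / sqrt(vc[5] * vc[7])  # r_res, approx. 0.40
\end{verbatim}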
Although we can calculate this by hand, we can also use \texttt{vpredict()}, which also provides an (approximate) standard error:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modela, r\_A }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V2 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V1 }\SpecialCharTok{*}\NormalTok{ V3))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_A 0.381445 0.1299765
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modela, r\_res }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V6 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V5 }\SpecialCharTok{*}\NormalTok{ V7))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_res 0.4019791 0.08607119
\end{verbatim}
Of course we can also calculate the heritability of \texttt{bwt} and \texttt{tarsus} from this model:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modela, h2.bwt }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V5))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt 0.4666469 0.07671563
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modela, h2.tarsus }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V3 }\SpecialCharTok{/}\NormalTok{ (V3 }\SpecialCharTok{+}\NormalTok{ V7))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.tarsus 0.4116348 0.09305947
\end{verbatim}
\hypertarget{adding-fixed-and-random-effects}{%
\subsection{Adding fixed and random effects}\label{adding-fixed-and-random-effects}}
Fixed and random effects can be added just as for the univariate case. Given that our full model of bwt from tutorial 1 had sex as a fixed effect as well as birth year and mother as random effects, we could specify a bivariate formulation with the same complexity:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelb }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:31 2021
## LogLik Sigma2 DF wall cpu
## 1 -4672.301 1.0 1533 10:23:32 0.0
## 2 -4005.615 1.0 1533 10:23:32 0.0
## 3 -3271.483 1.0 1533 10:23:32 0.0 (1 restrained)
## 4 -2761.414 1.0 1533 10:23:32 0.0 (1 restrained)
## 5 -2481.357 1.0 1533 10:23:32 0.0
## 6 -2395.858 1.0 1533 10:23:32 0.0
## 7 -2381.050 1.0 1533 10:23:32 0.0
## 8 -2380.251 1.0 1533 10:23:32 0.0
## 9 -2380.246 1.0 1533 10:23:32 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelb }\OtherTok{\textless{}{-}} \FunctionTok{update}\NormalTok{(modelb)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:32 2021
## LogLik Sigma2 DF wall cpu
## 1 -2380.246 1.0 1533 10:23:32 0.0
## 2 -2380.246 1.0 1533 10:23:32 0.0
\end{verbatim}
Note that we have specified a covariance structure for each random effect and an estimate of the effect of sex on both birth weight and tarsus length.
There will now be thirteen (co)variance components reported after running the code:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelb)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio
## trait:byear!trait_bwt:bwt 0.9746385 0.2825727 3.4491602
## trait:byear!trait_tarsus:bwt 0.1624076 0.4185079 0.3880635
## trait:byear!trait_tarsus:tarsus 3.7383721 1.2065992 3.0982716
## trait:mother!trait_bwt:bwt 1.1445184 0.2302182 4.9714512
## trait:mother!trait_tarsus:bwt -1.5567306 0.4051848 -3.8420260
## trait:mother!trait_tarsus:tarsus 4.8206132 1.3201300 3.6516202
## trait:vm(animal, ainv)!trait_bwt:bwt 1.9893546 0.4410246 4.5107569
## trait:vm(animal, ainv)!trait_tarsus:bwt 3.3170404 0.9032323 3.6724110
## trait:vm(animal, ainv)!trait_tarsus:tarsus 10.2294887 2.8077066 3.6433610
## units:trait!R 1.0000000 NA NA
## units:trait!trait_bwt:bwt 1.8443110 0.3443178 5.3564203
## units:trait!trait_tarsus:bwt 4.0142841 0.7412540 5.4155308
## units:trait!trait_tarsus:tarsus 12.4845955 2.2893363 5.4533690
## bound %ch
## trait:byear!trait_bwt:bwt P 0
## trait:byear!trait_tarsus:bwt P 0
## trait:byear!trait_tarsus:tarsus P 0
## trait:mother!trait_bwt:bwt P 0
## trait:mother!trait_tarsus:bwt P 0
## trait:mother!trait_tarsus:tarsus P 0
## trait:vm(animal, ainv)!trait_bwt:bwt P 0
## trait:vm(animal, ainv)!trait_tarsus:bwt P 0
## trait:vm(animal, ainv)!trait_tarsus:tarsus P 0
## units:trait!R F 0
## units:trait!trait_bwt:bwt P 0
## units:trait!trait_tarsus:bwt P 0
## units:trait!trait_tarsus:tarsus P 0
\end{verbatim}
We can estimate the different correlations using \texttt{vpredict}:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modelb, r\_byear }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V2 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V1 }\SpecialCharTok{*}\NormalTok{ V3))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_byear 0.08508312 0.2134209
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modelb, r\_M }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V5 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V4 }\SpecialCharTok{*}\NormalTok{ V6))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_M -0.6627518 0.2487963
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modelb, r\_A }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V8 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V7 }\SpecialCharTok{*}\NormalTok{ V9))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_A 0.7353053 0.1094747
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modelb, r\_res }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V12 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V11 }\SpecialCharTok{*}\NormalTok{ V13))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_res 0.8365729 0.07366762
\end{verbatim}
Now we can look at the fixed effects parameters and assess their significance with a conditional Wald F-test:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelb, }\AttributeTok{coef =} \ConstantTok{TRUE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{coef.fi}
\FunctionTok{wald.asreml}\NormalTok{(modelb, }\AttributeTok{denDF =} \StringTok{"default"}\NormalTok{, }\AttributeTok{ssType =} \StringTok{"conditional"}\NormalTok{)}\SpecialCharTok{$}\NormalTok{Wald}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## solution std error z.ratio
## at(trait, tarsus):sex_1 0.0000000 NA NA
## at(trait, tarsus):sex_2 -0.0684413 0.3823448 -0.1790041
## at(trait, bwt):sex_1 0.0000000 NA NA
## at(trait, bwt):sex_2 1.9502053 0.1480467 13.1729086
## trait_bwt 6.3844483 0.2328210 27.4221324
## trait_tarsus 20.5936436 0.5098944 40.3880569
\end{verbatim}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:32 2021
## LogLik Sigma2 DF wall cpu
## 1 -2380.246 1.0 1533 10:23:32 0.0
## 2 -2380.246 1.0 1533 10:23:32 0.0
## Calculating denominator DF
\end{verbatim}
\begin{verbatim}
##
## Df denDF F.inc F.con Margin Pr
## trait 2 52.6 1396.00 1396.00 0.00000
## at(trait, bwt):sex 1 812.8 298.40 173.50 B 0.00000
## at(trait, tarsus):sex 1 747.9 0.03 0.03 B 0.85798
\end{verbatim}
Note that it is possible to restrict a fixed effect to a specific trait by giving the position of that trait within \texttt{cbind} as the second argument of \texttt{at(trait,\ x)}. For example, here we apply the fixed effect \texttt{sex} only to the response variable \texttt{tarsus}.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelb\_2 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait, }\DecValTok{2}\NormalTok{)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:32 2021
## LogLik Sigma2 DF wall cpu
## 1 -4810.563 1.0 1534 10:23:33 0.0
## 2 -4129.799 1.0 1534 10:23:33 0.0
## 3 -3382.529 1.0 1534 10:23:33 0.0 (1 restrained)
## 4 -2864.076 1.0 1534 10:23:33 0.0
## 5 -2574.891 1.0 1534 10:23:33 0.0
## 6 -2478.879 1.0 1534 10:23:33 0.0
## 7 -2458.305 1.0 1534 10:23:33 0.0
## 8 -2456.425 1.0 1534 10:23:33 0.0
## 9 -2456.377 1.0 1534 10:23:33 0.0
## 10 -2456.376 1.0 1534 10:23:33 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelb\_2, }\AttributeTok{coef =} \ConstantTok{TRUE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{coef.fi}
\FunctionTok{wald.asreml}\NormalTok{(modelb\_2, }\AttributeTok{denDF =} \StringTok{"default"}\NormalTok{, }\AttributeTok{ssType =} \StringTok{"conditional"}\NormalTok{)}\SpecialCharTok{$}\NormalTok{Wald}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## solution std error z.ratio
## at(trait, tarsus):sex_1 0.000000 NA NA
## at(trait, tarsus):sex_2 -3.267042 0.2953279 -11.06242
## trait_bwt 7.636226 0.2389515 31.95722
## trait_tarsus 22.703658 0.4827348 47.03133
\end{verbatim}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:33 2021
## LogLik Sigma2 DF wall cpu
## 1 -2456.376 1.0 1534 10:23:33 0.0
## 2 -2456.376 1.0 1534 10:23:33 0.0
## Calculating denominator DF
\end{verbatim}
\begin{verbatim}
##
## Df denDF F.inc F.con Margin Pr
## trait 2 50.7 1233.0 1233.0 0.00000e+00
## at(trait, tarsus):sex 1 522.9 122.4 122.4 B 1.02886e-25
\end{verbatim}
\hypertarget{significance-testing}{%
\subsection{Significance testing}\label{significance-testing}}
Under the model above, \(r_M\) is estimated as -0.66 and the \texttt{z.ratio} associated with the corresponding covariance (\(COV_M\)) is \textgreater2 in absolute terms. We might therefore infer that there is evidence for a strong negative correlation between the traits with respect to the mother: while maternal identity explains variance in both traits, mothers that tend to produce heavier offspring actually tend to produce offspring with shorter tarsus lengths.
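As a reminder, this maternal correlation can be extracted directly with \texttt{vpredict}, assuming (as in the \texttt{vpredict} calls above) that the \texttt{mother} (co)variance components occupy positions \texttt{V4}--\texttt{V6} in \texttt{summary(modelb)\$varcomp}:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modelb, r\_M }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V5 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V4 }\SpecialCharTok{*}\NormalTok{ V6))}
\end{Highlighting}
\end{Shaded}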
To formally test if \(COV_M\) is significantly different from zero, we can compare the log-likelihood for this model:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelb}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -2380.246
\end{verbatim}
to a model in which we specify that \(COV_M = 0\). Since this constraint reduces the number of parameters to be estimated by one, we can use a likelihood ratio test (LRT) with one degree of freedom. To run the constrained model, we change the G structure for the \texttt{mother} random effect from unstructured to diagonal (\texttt{diag}), which means we estimate only the variances (the diagonal of the matrix) while the covariance is fixed to 0:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelc }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{diag}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:33 2021
## LogLik Sigma2 DF wall cpu
## 1 -4677.820 1.0 1533 10:23:33 0.0
## 2 -4010.442 1.0 1533 10:23:33 0.0
## 3 -3275.409 1.0 1533 10:23:33 0.0
## 4 -2763.519 1.0 1533 10:23:34 0.0
## 5 -2483.732 1.0 1533 10:23:34 0.0
## 6 -2400.242 1.0 1533 10:23:34 0.0
## 7 -2386.663 1.0 1533 10:23:34 0.0
## 8 -2386.049 1.0 1533 10:23:34 0.0
## 9 -2386.045 1.0 1533 10:23:34 0.0
\end{verbatim}
You can run \texttt{summary(modelc)\$varcomp} to confirm this worked. We can now obtain the log-likelihood of this model and compare this to that of \texttt{modelb} using a likelihood ratio test:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelc}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -2386.045
\end{verbatim}
We can see that the model log-likelihood is now -2386.05.
And comparing the models using a likelihood ratio test:
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modelb}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modelc}\SpecialCharTok{$}\NormalTok{loglik)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 11.59835
\end{verbatim}
So our chi-square test statistic is \(\chi^2_1\)= 11.6.
The p-value that goes with this is obtained by:
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \SpecialCharTok{{-}} \FunctionTok{pchisq}\NormalTok{(}\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modelb}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modelc}\SpecialCharTok{$}\NormalTok{loglik), }\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.0006601037
\end{verbatim}
We would therefore conclude that the maternal covariance is significantly different from zero.
We could apply the same procedure to show that the residual (environmental) covariance and the genetic covariance estimates are significantly greater than zero (\emph{i.e.}, heavier individuals tend to have longer tarsus lengths). In contrast, we should find that the byear covariance between the two traits is non-significant, as the following model shows; a sketch for testing the genetic covariance is given just after it.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modeld }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{diag}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:34 2021
## LogLik Sigma2 DF wall cpu
## 1 -4672.708 1.0 1533 10:23:34 0.0
## 2 -4005.953 1.0 1533 10:23:34 0.0
## 3 -3271.737 1.0 1533 10:23:34 0.0 (1 restrained)
## 4 -2761.626 1.0 1533 10:23:34 0.0 (1 restrained)
## 5 -2481.649 1.0 1533 10:23:34 0.0
## 6 -2395.992 1.0 1533 10:23:34 0.0
## 7 -2381.136 1.0 1533 10:23:34 0.0
## 8 -2380.331 1.0 1533 10:23:34 0.0
## 9 -2380.326 1.0 1533 10:23:34 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modelb}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modeld}\SpecialCharTok{$}\NormalTok{loglik)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.1600641
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \SpecialCharTok{{-}} \FunctionTok{pchisq}\NormalTok{(}\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modelb}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modeld}\SpecialCharTok{$}\NormalTok{loglik), }\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.6890975
\end{verbatim}
\hypertarget{estimate-directly-the-genetic-correlation-within-the-model}{%
\subsection{Estimate directly the genetic correlation within the model}\label{estimate-directly-the-genetic-correlation-within-the-model}}
Within ASReml-R, different matrix structures can be specified, such as \texttt{us}, \texttt{corg}, \texttt{diag}, etc. (see the ASReml-R guide). Instead of fitting an unstructured matrix with \texttt{us}, or a reduced model with no covariance via \texttt{diag}, we can also directly estimate the genetic correlation between \texttt{bwt} and \texttt{tarsus} with \texttt{corgh}.
Here, we estimate the additive genetic correlation directly.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modele }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{corgh}\NormalTok{(trait)}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:34 2021
## LogLik Sigma2 DF wall cpu
## 1 -3544.356 1.0 1533 10:23:34 0.0
## 2 -3177.908 1.0 1533 10:23:34 0.0 (1 restrained)
## 3 -2784.992 1.0 1533 10:23:34 0.0
## 4 -2527.352 1.0 1533 10:23:34 0.0
## 5 -2406.422 1.0 1533 10:23:34 0.0
## 6 -2382.062 1.0 1533 10:23:34 0.0
## 7 -2380.263 1.0 1533 10:23:34 0.0
## 8 -2380.246 1.0 1533 10:23:35 0.0
## 9 -2380.246 1.0 1533 10:23:35 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modele }\OtherTok{\textless{}{-}} \FunctionTok{update}\NormalTok{(modele)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:35 2021
## LogLik Sigma2 DF wall cpu
## 1 -2380.246 1.0 1533 10:23:35 0.0
## 2 -2380.246 1.0 1533 10:23:35 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modele)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error
## trait:byear!trait_bwt:bwt 0.9746376 0.2825690
## trait:byear!trait_tarsus:bwt 0.1624052 0.4184989
## trait:byear!trait_tarsus:tarsus 3.7383599 1.2065684
## trait:mother!trait_bwt:bwt 1.1445196 0.2302189
## trait:mother!trait_tarsus:bwt -1.5567326 0.4051847
## trait:mother!trait_tarsus:tarsus 4.8206100 1.3201239
## trait:vm(animal, ainv)!trait!tarsus:!trait!bwt.cor 0.7353034 0.1094744
## trait:vm(animal, ainv)!trait_bwt 1.9893544 0.4410242
## trait:vm(animal, ainv)!trait_tarsus 10.2296667 2.8078190
## units:trait!R 1.0000000 NA
## units:trait!trait_bwt:bwt 1.8443104 0.3443173
## units:trait!trait_tarsus:bwt 4.0142731 0.7412587
## units:trait!trait_tarsus:tarsus 12.4844745 2.2893621
## z.ratio bound %ch
## trait:byear!trait_bwt:bwt 3.449203 P 0
## trait:byear!trait_tarsus:bwt 0.388066 P 0
## trait:byear!trait_tarsus:tarsus 3.098341 P 0
## trait:mother!trait_bwt:bwt 4.971441 P 0
## trait:mother!trait_tarsus:bwt -3.842032 P 0
## trait:mother!trait_tarsus:tarsus 3.651634 P 0
## trait:vm(animal, ainv)!trait!tarsus:!trait!bwt.cor 6.716668 U 0
## trait:vm(animal, ainv)!trait_bwt 4.510760 P 0
## trait:vm(animal, ainv)!trait_tarsus 3.643279 P 0
## units:trait!R NA F 0
## units:trait!trait_bwt:bwt 5.356427 P 0
## units:trait!trait_tarsus:bwt 5.415482 P 0
## units:trait!trait_tarsus:tarsus 5.453255 P 0
\end{verbatim}
It is important to note that using \texttt{corgh} changes the order of the estimated (co)variance components, so all downstream calculations (such as \texttt{vpredict} formulas) need to be adjusted accordingly.
It is also worth comparing the models fitted with \texttt{us} and \texttt{corgh} to make sure no mistakes were made; they parameterize the same model, so their log-likelihoods should match.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelb)}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -2380.246
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modele)}\SpecialCharTok{$}\NormalTok{loglik}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -2380.246
\end{verbatim}
There are two main advantages to using \texttt{corgh}: first, estimating the correlation directly within the G matrix avoids mistakes in the \texttt{vpredict} calculation; second, it makes it possible to test whether the correlation differs significantly not only from 0 (giving a similar result to the LRT on the covariance) but also from -1 and 1, which are the correlation boundaries.
The following code shows how to create a reduced model with the correlation fixed close to 1, and compares it to the initial model.
Since we compare the correlation to its boundary, the test is one-tailed and the degrees of freedom are halved (\texttt{df = 0.5}).
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{MODEL\_MODIF }\OtherTok{\textless{}{-}} \FunctionTok{update.asreml}\NormalTok{(modele, }\AttributeTok{start.values =}\NormalTok{ T)}
\NormalTok{G\_MOD }\OtherTok{\textless{}{-}}\NormalTok{ MODEL\_MODIF}\SpecialCharTok{$}\NormalTok{vparameters.table[(}\DecValTok{1}\SpecialCharTok{:}\DecValTok{9}\NormalTok{), ]}
\NormalTok{G\_MOD[}\DecValTok{1}\NormalTok{, }\DecValTok{2}\NormalTok{] }\OtherTok{\textless{}{-}} \FloatTok{0.99999}
\NormalTok{G\_MOD[}\DecValTok{1}\NormalTok{, }\DecValTok{3}\NormalTok{] }\OtherTok{\textless{}{-}} \StringTok{"F"}
\NormalTok{modele.red }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{corgh}\NormalTok{(trait)}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{)),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}\NormalTok{,}
\AttributeTok{G.param =}\NormalTok{ G\_MOD}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:35 2021
## LogLik Sigma2 DF wall cpu
## 1 -2545.227 1.0 1533 10:23:35 0.1
## 2 -2483.880 1.0 1533 10:23:35 0.0
## 3 -2423.502 1.0 1533 10:23:35 0.0
## 4 -2392.508 1.0 1533 10:23:35 0.0
## 5 -2383.661 1.0 1533 10:23:35 0.0
## 6 -2383.084 1.0 1533 10:23:35 0.0
## 7 -2383.033 1.0 1533 10:23:35 0.0
## 8 -2383.022 1.0 1533 10:23:35 0.0
## 9 -2383.019 1.0 1533 10:23:35 0.0
## 10 -2383.019 1.0 1533 10:23:35 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modele}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modele.red}\SpecialCharTok{$}\NormalTok{loglik)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 5.544679
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \SpecialCharTok{{-}} \FunctionTok{pchisq}\NormalTok{(}\DecValTok{2} \SpecialCharTok{*}\NormalTok{ (modele}\SpecialCharTok{$}\NormalTok{loglik }\SpecialCharTok{{-}}\NormalTok{ modele.red}\SpecialCharTok{$}\NormalTok{loglik), }\AttributeTok{df =} \FloatTok{0.5}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.006598675
\end{verbatim}
\hypertarget{partitionning-covariance-between-groups}{%
\subsection{Partitionning (co)variance between groups}\label{partitionning-covariance-between-groups}}
As with the univariate model, it is possible to partition the variances, and also the covariances, between different groups within the dataset. Here, we estimate sex-specific genetic correlations.
Note that partitioning a correlation requires a large sample size within each group. For this example, we simplify the model!
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphon }\OtherTok{\textless{}{-}}\NormalTok{ gryphon[}\FunctionTok{order}\NormalTok{(gryphon}\SpecialCharTok{$}\NormalTok{sex), ]}
\NormalTok{model\_sex }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =} \FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{+} \FunctionTok{at}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{at}\NormalTok{(sex)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+}
\FunctionTok{us}\NormalTok{(trait, }\AttributeTok{init =} \FunctionTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{, }\FloatTok{0.1}\NormalTok{, }\DecValTok{1}\NormalTok{))}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{dsum}\NormalTok{(}\SpecialCharTok{\textasciitilde{}} \FunctionTok{id}\NormalTok{(units)}\SpecialCharTok{:}\FunctionTok{us}\NormalTok{(trait) }\SpecialCharTok{|}\NormalTok{ sex),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"include"}\NormalTok{, }\AttributeTok{y =} \StringTok{"include"}\NormalTok{),}
\AttributeTok{maxit =} \DecValTok{20}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Multi-section model using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:35 2021
## LogLik Sigma2 DF wall cpu
## 1 -2495.853 1.0 1807 10:23:36 0.1 (1 restrained)
## 2 -2444.497 1.0 1807 10:23:36 0.1
## 3 -2401.367 1.0 1807 10:23:36 0.1
## 4 -2390.943 1.0 1807 10:23:36 0.1
## 5 -2388.819 1.0 1807 10:23:36 0.1
## 6 -2388.738 1.0 1807 10:23:36 0.1
## 7 -2388.736 1.0 1807 10:23:36 0.1
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model\_sex }\OtherTok{\textless{}{-}} \FunctionTok{update}\NormalTok{(model\_sex)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Multi-section model using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:36 2021
## LogLik Sigma2 DF wall cpu
## 1 -2388.736 1.0 1807 10:23:36 0.1
## 2 -2388.736 1.0 1807 10:23:36 0.1
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model\_sex)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error
## trait:byear!trait_bwt:bwt 0.9858502 0.2863930
## trait:byear!trait_tarsus:bwt 0.1525073 0.4334219
## trait:byear!trait_tarsus:tarsus 3.9981750 1.2798231
## trait:mother!trait_bwt:bwt 1.3312802 0.2484496
## trait:mother!trait_tarsus:bwt -1.6173911 0.4283985
## trait:mother!trait_tarsus:tarsus 4.7543015 1.3546795
## at(sex, 1):trait:vm(animal, ainv)!trait_bwt:bwt 1.3402726 0.5670807
## at(sex, 1):trait:vm(animal, ainv)!trait_tarsus:bwt 2.3607838 1.1348534
## at(sex, 1):trait:vm(animal, ainv)!trait_tarsus:tarsus 6.0624925 3.1304679
## at(sex, 2):trait:vm(animal, ainv)!trait_bwt:bwt 1.8645331 0.8887843
## at(sex, 2):trait:vm(animal, ainv)!trait_tarsus:bwt 5.0952433 2.0683128
## at(sex, 2):trait:vm(animal, ainv)!trait_tarsus:tarsus 14.9762227 6.4472652
## sex_1!R 1.0000000 NA
## sex_1!trait_bwt:bwt 2.3079924 0.5015700
## sex_1!trait_tarsus:bwt 4.4288323 1.0376540
## sex_1!trait_tarsus:tarsus 13.4858721 2.9285483
## sex_2!R 1.0000000 NA
## sex_2!trait_bwt:bwt 1.7957128 0.7549623
## sex_2!trait_tarsus:bwt 2.6342177 1.7685005
## sex_2!trait_tarsus:tarsus 9.6101885 5.4914087
## z.ratio bound %ch
## trait:byear!trait_bwt:bwt 3.4422982 P 0.0
## trait:byear!trait_tarsus:bwt 0.3518681 P 0.0
## trait:byear!trait_tarsus:tarsus 3.1240060 P 0.0
## trait:mother!trait_bwt:bwt 5.3583516 P 0.0
## trait:mother!trait_tarsus:bwt -3.7754356 P 0.0
## trait:mother!trait_tarsus:tarsus 3.5095396 P 0.0
## at(sex, 1):trait:vm(animal, ainv)!trait_bwt:bwt 2.3634603 P 0.0
## at(sex, 1):trait:vm(animal, ainv)!trait_tarsus:bwt 2.0802544 P 0.0
## at(sex, 1):trait:vm(animal, ainv)!trait_tarsus:tarsus 1.9366091 P 0.0
## at(sex, 2):trait:vm(animal, ainv)!trait_bwt:bwt 2.0978466 P 0.0
## at(sex, 2):trait:vm(animal, ainv)!trait_tarsus:bwt 2.4634781 P 0.0
## at(sex, 2):trait:vm(animal, ainv)!trait_tarsus:tarsus 2.3228799 P 0.0
## sex_1!R NA F 0.0
## sex_1!trait_bwt:bwt 4.6015360 P 0.0
## sex_1!trait_tarsus:bwt 4.2681204 P 0.0
## sex_1!trait_tarsus:tarsus 4.6049683 P 0.0
## sex_2!R NA F 0.0
## sex_2!trait_bwt:bwt 2.3785463 P 0.0
## sex_2!trait_tarsus:bwt 1.4895205 P 0.1
## sex_2!trait_tarsus:tarsus 1.7500407 P 0.1
\end{verbatim}
We can estimate the different correlations using \texttt{vpredict}:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_byear }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V2 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V1 }\SpecialCharTok{*}\NormalTok{ V3))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_byear 0.07681647 0.213139
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_M }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V5 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V4 }\SpecialCharTok{*}\NormalTok{ V6))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_M -0.6428904 0.2489498
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_A}\FloatTok{.1} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V8 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V7 }\SpecialCharTok{*}\NormalTok{ V9))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_A.1 0.8281977 0.1723661
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_A}\FloatTok{.2} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V11 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V10 }\SpecialCharTok{*}\NormalTok{ V12))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_A.2 0.9642258 0.1241699
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_res}\FloatTok{.1} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V15 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V14 }\SpecialCharTok{*}\NormalTok{ V16))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_res.1 0.7938392 0.0789263
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, r\_res}\FloatTok{.2} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V19 }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(V18 }\SpecialCharTok{*}\NormalTok{ V20))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## r_res.2 0.6341139 0.1894661
\end{verbatim}
and the sex-specific heritabilities too:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, h2.bwt}\FloatTok{.1} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V7 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V4 }\SpecialCharTok{+}\NormalTok{ V7 }\SpecialCharTok{+}\NormalTok{ V14))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt.1 0.2246746 0.09176899
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, h2.bwt}\FloatTok{.2} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V10 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V4 }\SpecialCharTok{+}\NormalTok{ V10 }\SpecialCharTok{+}\NormalTok{ V18))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.bwt.2 0.3119317 0.1442501
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, h2.tarsus}\FloatTok{.1} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V9 }\SpecialCharTok{/}\NormalTok{ (V3 }\SpecialCharTok{+}\NormalTok{ V6 }\SpecialCharTok{+}\NormalTok{ V9 }\SpecialCharTok{+}\NormalTok{ V16))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.tarsus.1 0.214216 0.1070477
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(model\_sex, h2.tarsus}\FloatTok{.2} \SpecialCharTok{\textasciitilde{}}\NormalTok{ V12 }\SpecialCharTok{/}\NormalTok{ (V3 }\SpecialCharTok{+}\NormalTok{ V6 }\SpecialCharTok{+}\NormalTok{ V12 }\SpecialCharTok{+}\NormalTok{ V20))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2.tarsus.2 0.4492118 0.1833703
\end{verbatim}
Now we can look at the fixed effects parameters and assess their significance with a conditional Wald F-test:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(model\_sex, }\AttributeTok{coef =} \ConstantTok{TRUE}\NormalTok{)}\SpecialCharTok{$}\NormalTok{coef.fi}
\FunctionTok{wald.asreml}\NormalTok{(model\_sex, }\AttributeTok{denDF =} \StringTok{"default"}\NormalTok{, }\AttributeTok{ssType =} \StringTok{"conditional"}\NormalTok{)}\SpecialCharTok{$}\NormalTok{Wald}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## solution std error z.ratio
## at(trait, tarsus):sex_1 0.00000000 NA NA
## at(trait, tarsus):sex_2 -0.05549717 0.4758546 -0.1166263
## at(trait, bwt):sex_1 0.00000000 NA NA
## at(trait, bwt):sex_2 1.93936200 0.1903213 10.1899393
## trait_bwt 6.37791872 0.2311775 27.5888427
## trait_tarsus 20.58389296 0.4942600 41.6458836
\end{verbatim}
\begin{verbatim}
## Multi-section model using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:37 2021
## LogLik Sigma2 DF wall cpu
## 1 -2388.736 1.0 1807 10:23:37 0.1
## 2 -2388.736 1.0 1807 10:23:37 0.1
## Calculating denominator DF
\end{verbatim}
\begin{verbatim}
##
## Df denDF F.inc F.con Margin Pr
## trait 2 44.8 1522.00 1522.00 0.00000
## at(trait, bwt):sex 1 137.5 220.90 103.80 B 0.00000
## at(trait, tarsus):sex 1 138.6 0.01 0.01 B 0.90737
\end{verbatim}
\hypertarget{gremlin-2}{%
\section{gremlin}\label{gremlin-2}}
Might not be available yet.
Meanwhile:
\begin{figure}
\includegraphics[width=1\linewidth]{images/Gizmo} \caption{Keep it dry and do not feed after midnight.}\label{fig:unnamed-chunk-91}
\end{figure}
\hypertarget{mcmcglmm-2}{%
\section{MCMCglmm}\label{mcmcglmm-2}}
All other software automatically removes rows with missing data from the dataset used to fit the model.
\texttt{MCMCglmm}, however, does not: it will instead fit the model using latent variables for the missing data.
For comparison, we will remove the missing values from the data before fitting the model.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphon2 }\OtherTok{\textless{}{-}} \FunctionTok{subset}\NormalTok{(gryphon, }\SpecialCharTok{!}\FunctionTok{is.na}\NormalTok{(bwt) }\SpecialCharTok{\&} \SpecialCharTok{!}\FunctionTok{is.na}\NormalTok{(tarsus))}
\end{Highlighting}
\end{Shaded}
First load MCMCglmm:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(MCMCglmm)}
\NormalTok{Ainv }\OtherTok{\textless{}{-}} \FunctionTok{inverseA}\NormalTok{(gryphonped)}\SpecialCharTok{$}\NormalTok{Ainv}
\end{Highlighting}
\end{Shaded}
\hypertarget{fitting-the-model}{%
\subsection{Fitting the model}\label{fitting-the-model}}
Fitting a multivariate model in MCMCglmm involves several new considerations beyond those for fitting univariate models. First, we have to specify multivariate priors; second, we have to specify the ways in which effects on different traits may covary, including the nature of residual (co)variation; and third, we have to be a little more specific when telling MCMCglmm what type of distribution we assume each of our traits is drawn from. Our most basic model can be specified as:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior2}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{)}
\NormalTok{model2}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(}\FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{{-}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{animal,}
\AttributeTok{rcov =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{units,}
\AttributeTok{family =} \FunctionTok{c}\NormalTok{(}\StringTok{"gaussian"}\NormalTok{, }\StringTok{"gaussian"}\NormalTok{),}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon, }\AttributeTok{prior =}\NormalTok{ prior2}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{summary}\NormalTok{(model2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Iterations = 3001:12991
## Thinning interval = 10
## Sample size = 1000
##
## DIC: 7914.014
##
## G-structure: ~us(trait):animal
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.animal 3.459 2.3201 4.892 150.32
## traittarsus:traitbwt.animal 2.649 0.4966 5.014 82.60
## traitbwt:traittarsus.animal 2.649 0.4966 5.014 82.60
## traittarsus:traittarsus.animal 12.015 5.4475 18.350 49.79
##
## R-structure: ~us(trait):units
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.units 3.829 2.791 4.850 169.08
## traittarsus:traitbwt.units 3.211 1.269 5.185 113.39
## traitbwt:traittarsus.units 3.211 1.269 5.185 113.39
## traittarsus:traittarsus.units 18.133 12.153 23.967 64.55
##
## Location effects: cbind(bwt, tarsus) ~ trait - 1
##
## post.mean l-95% CI u-95% CI eff.samp pMCMC
## traitbwt 7.595 7.291 7.874 839.2 <0.001 ***
## traittarsus 20.543 19.916 21.126 702.3 <0.001 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{])}
\end{Highlighting}
\end{Shaded}
\begin{figure}
\centering
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-94-1.pdf}
\caption{\label{fig:unnamed-chunk-94}The posterior distribution of the additive genetic effect for tarsus length in a MCMCglmm run with default values}
\end{figure}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{autocorr.diag}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{][}\DecValTok{2}\NormalTok{]}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Lag 10
## 0.8466367
\end{verbatim}
We have constructed the prior similarly to those in the univariate models in tutorial 1, except that we are specifying a 2x2 covariance matrix rather than a single variance. In order to provide proper priors, we have set the degree of belief parameter to greater than 1 (1.002). Those priors are not necessarily weak or uninformative in all circumstances; we will nonetheless consider them adequate for this tutorial. Please see the vignette of the MCMCglmm package \citep{R-MCMCglmm} for more information on priors. In tutorial 1, we used full autocorrelation tables to evaluate the validity of the posterior distribution. Note that we have not done this here.
For a bivariate model this table can become very complex. It is nonetheless worth evaluating; it is simply too large to include here, but it can be viewed in the console as shown below. Here we have displayed only the autocorrelation for the estimate of the additive genetic effect on tarsus length at a lag of one sample (10 iterations, given this MCMCglmm run with default values). This autocorrelation of 0.8466367 is clearly unacceptable. The posterior distribution of the additive genetic effect on tarsus length is shown in the figure above; note the autocorrelation evident in the left-hand trace plot.
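If needed, the full autocorrelation table can be printed in the console (output omitted here because of its size):
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{autocorr.diag}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}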
We will opt to run the analysis for longer. This longer run can be performed using the following code (including a line to save the output):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model2}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(}\FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{{-}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{animal,}
\AttributeTok{rcov =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{units,}
\AttributeTok{family =} \FunctionTok{c}\NormalTok{(}\StringTok{"gaussian"}\NormalTok{, }\StringTok{"gaussian"}\NormalTok{),}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{nitt =} \DecValTok{130000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{100}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{30000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior2}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(model2}\FloatTok{.1}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_1\_LongRun.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
However, this run might take as long as an hour. For the purpose of this tutorial we have provided an output for such a run. It can be obtained and manipulated as follows, assuming that the file \texttt{MCMCglmm\_model2\_1\_LongRun.rda} is available at the specified location:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_1\_LongRun.rda"}\NormalTok{)}
\FunctionTok{autocorr.diag}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{][}\DecValTok{2}\NormalTok{]}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Lag 100
## 0.2608752
\end{verbatim}
This level of autocorrelation is more acceptable, at least for the purpose of demonstration in this tutorial.
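Effective sample sizes offer a complementary check on mixing; a quick sketch using \texttt{effectiveSize} from the \texttt{coda} package (loaded with MCMCglmm):
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{effectiveSize}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}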
We can recover variance components, heritabilities, and genetic correlations from the posterior distribution of this model:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## traitbwt:traitbwt.animal traittarsus:traitbwt.animal
## 3.370616 2.581839
## traitbwt:traittarsus.animal traittarsus:traittarsus.animal
## 2.581839 12.463915
## traitbwt:traitbwt.units traittarsus:traitbwt.units
## 3.761401 2.982413
## traitbwt:traittarsus.units traittarsus:traittarsus.units
## 2.982413 19.556443
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{heritability.bwt2}\FloatTok{.1} \OtherTok{\textless{}{-}}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.animal"}\NormalTok{] }\SpecialCharTok{/}\NormalTok{ (model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(heritability.bwt2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4999336
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{heritability.tarsus2}\FloatTok{.1} \OtherTok{\textless{}{-}}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{] }\SpecialCharTok{/}\NormalTok{ (model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(heritability.tarsus2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4038754
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{genetic.correlation2}\FloatTok{.1} \OtherTok{\textless{}{-}}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traittarsus.animal"}\NormalTok{] }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.animal"}\NormalTok{] }\SpecialCharTok{*}\NormalTok{ model2}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(genetic.correlation2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.3691503
\end{verbatim}
\hypertarget{adding-fixed-and-random-effects-1}{%
\subsection{Adding fixed and random effects}\label{adding-fixed-and-random-effects-1}}
Fixed and random effects can be added just as for the univariate case.
Given that our full model of bwt from tutorial 1 had sex as a fixed effect as well as random effects of byear and mother, we could specify a bivariate formulation of this using the following code (including a line to save the output):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior2}\FloatTok{.2} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{G3 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{ ),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{)}
\NormalTok{model2}\FloatTok{.2} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(}\FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{{-}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ trait}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{animal }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{rcov =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{units,}
\AttributeTok{family =} \FunctionTok{c}\NormalTok{(}\StringTok{"gaussian"}\NormalTok{, }\StringTok{"gaussian"}\NormalTok{),}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{nitt =} \DecValTok{130000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{100}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{30000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior2}\FloatTok{.2}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(model2}\FloatTok{.2}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_2\_LongRun.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Again we have provided the data from one such run. It can be accessed using the code:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_2\_LongRun.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(model2}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Iterations = 30001:529501
## Thinning interval = 500
## Sample size = 1000
##
## DIC: 6222.288
##
## G-structure: ~us(trait):animal
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.animal 1.237 0.010802 2.183 71.22
## traittarsus:traitbwt.animal 2.138 -0.067106 4.268 49.85
## traitbwt:traittarsus.animal 2.138 -0.067106 4.268 49.85
## traittarsus:traittarsus.animal 5.265 0.005733 11.751 49.24
##
## ~us(trait):byear
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.byear 0.85157 0.3924 1.3839 1000
## traittarsus:traitbwt.byear -0.01322 -0.7624 0.8352 1000
## traitbwt:traittarsus.byear -0.01322 -0.7624 0.8352 1000
## traittarsus:traittarsus.byear 3.30411 1.2574 5.6918 1000
##
## ~us(trait):mother
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.mother 1.213 0.7414 1.688 164.7
## traittarsus:traitbwt.mother -1.969 -2.3860 -1.533 422.4
## traitbwt:traittarsus.mother -1.969 -2.3860 -1.533 422.4
## traittarsus:traittarsus.mother 3.484 1.7016 5.451 215.8
##
## R-structure: ~us(trait):units
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.units 2.531 1.618 3.485 133.27
## traittarsus:traitbwt.units 5.360 3.508 7.703 76.07
## traitbwt:traittarsus.units 5.360 3.508 7.703 76.07
## traittarsus:traittarsus.units 17.744 11.386 23.397 61.97
##
## Location effects: cbind(bwt, tarsus) ~ trait - 1 + trait:sex
##
## post.mean l-95% CI u-95% CI eff.samp pMCMC
## traitbwt 6.2643 5.8046 6.7063 1027 <0.001 ***
## traittarsus 20.3901 19.4377 21.2534 1000 <0.001 ***
## traitbwt:sex2 2.0315 1.7096 2.3452 1000 <0.001 ***
## traittarsus:sex2 0.1141 -0.6095 0.8959 1000 0.746
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{autocorr}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV)[, , }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{][}\DecValTok{3}\NormalTok{, }\DecValTok{4}\NormalTok{]}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.5231926
\end{verbatim}
We can evaluate the fixed effects and their credible intervals (CIs) to assess their significance.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## traitbwt traittarsus traitbwt:sex2 traittarsus:sex2
## 6.37708530 20.33434582 1.97881662 0.00709564
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## traitbwt 5.8045704 6.706339
## traittarsus 19.4377228 21.253397
## traitbwt:sex2 1.7095714 2.345200
## traittarsus:sex2 -0.6094733 0.895885
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
\centering
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-100-1.pdf}
\caption{\label{fig:unnamed-chunk-100}Posterior trace and distribution for the fixed effects in model 2.2}
\end{figure}
As before we can obtain the raw variance component estimates and genetic correlations for the random effects:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## traitbwt:traitbwt.animal traittarsus:traitbwt.animal
## 1.32761860 1.98076467
## traitbwt:traittarsus.animal traittarsus:traittarsus.animal
## 1.98076467 0.07827994
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## 0.83300490 -0.14691430
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## -0.14691430 2.79242863
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## 1.29299116 -1.97967150
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## -1.97967150 3.71594577
## traitbwt:traitbwt.units traittarsus:traitbwt.units
## 2.53632072 5.22764927
## traitbwt:traittarsus.units traittarsus:traittarsus.units
## 5.22764927 16.57931045
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{genetic.correlation2}\FloatTok{.2} \OtherTok{\textless{}{-}}\NormalTok{ model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traittarsus.animal"}\NormalTok{] }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.animal"}\NormalTok{] }\SpecialCharTok{*}\NormalTok{ model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.animal"}\NormalTok{])}
\NormalTok{maternal.correlation2}\FloatTok{.2} \OtherTok{\textless{}{-}}\NormalTok{ model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traittarsus.mother"}\NormalTok{] }\SpecialCharTok{/} \FunctionTok{sqrt}\NormalTok{(model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traitbwt:traitbwt.mother"}\NormalTok{] }\SpecialCharTok{*}\NormalTok{ model2}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"traittarsus:traittarsus.mother"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(genetic.correlation2}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.9942065
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(maternal.correlation2}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## -0.9920879
\end{verbatim}
Evaluation of the statistical support for these genetic and maternal correlations is straightforward. Because we imposed no constraint on their estimation, we can evaluate the extent to which the posterior distributions overlap zero:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(genetic.correlation2}\FloatTok{.2}\NormalTok{, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 0.3935369 0.9990187
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(maternal.correlation2}\FloatTok{.2}\NormalTok{, }\FloatTok{0.95}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## lower upper
## var1 -0.9980476 -0.9443838
## attr(,"Probability")
## [1] 0.95
\end{verbatim}
Neither of these posterior distributions overlaps zero, so we can consider both correlations statistically supported.
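An alternative, rough summary of support is the proportion of posterior samples falling on the opposite side of zero (a sketch; values near 0 indicate strong support):
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{mean}\NormalTok{(genetic.correlation2}\FloatTok{.2} \SpecialCharTok{\textless{}} \DecValTok{0}\NormalTok{)}
\FunctionTok{mean}\NormalTok{(maternal.correlation2}\FloatTok{.2} \SpecialCharTok{\textgreater{}} \DecValTok{0}\NormalTok{)}
\end{Highlighting}
\end{Shaded}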
\hypertarget{partitioning-covariances}{%
\subsection{Partitioning (co)variances}\label{partitioning-covariances}}
As in tutorial 1, it is possible to partition the variance-covariance matrices between groups (here, sex):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prior2}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{G3 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{G4 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{ ),}
\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{V1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{),}
\AttributeTok{V2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \FunctionTok{diag}\NormalTok{(}\DecValTok{2}\NormalTok{), }\AttributeTok{nu =} \FloatTok{1.002}\NormalTok{)}
\NormalTok{ )}
\NormalTok{)}
\NormalTok{model2}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(}\FunctionTok{cbind}\NormalTok{(bwt, tarsus) }\SpecialCharTok{\textasciitilde{}}\NormalTok{ trait }\SpecialCharTok{{-}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ trait}\SpecialCharTok{:}\NormalTok{sex,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(}\FunctionTok{at.level}\NormalTok{(sex, }\StringTok{"1"}\NormalTok{)}\SpecialCharTok{:}\NormalTok{trait)}\SpecialCharTok{:}\NormalTok{animal }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(}\FunctionTok{at.level}\NormalTok{(sex, }\StringTok{"2"}\NormalTok{)}\SpecialCharTok{:}\NormalTok{trait)}\SpecialCharTok{:}\NormalTok{animal }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{byear }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(trait)}\SpecialCharTok{:}\NormalTok{mother,}
\AttributeTok{rcov =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{us}\NormalTok{(}\FunctionTok{at.level}\NormalTok{(sex, }\StringTok{"1"}\NormalTok{)}\SpecialCharTok{:}\NormalTok{trait)}\SpecialCharTok{:}\NormalTok{units }\SpecialCharTok{+} \FunctionTok{us}\NormalTok{(}\FunctionTok{at.level}\NormalTok{(sex, }\StringTok{"2"}\NormalTok{)}\SpecialCharTok{:}\NormalTok{trait)}\SpecialCharTok{:}\NormalTok{units,}
\AttributeTok{family =} \FunctionTok{c}\NormalTok{(}\StringTok{"gaussian"}\NormalTok{, }\StringTok{"gaussian"}\NormalTok{),}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{nitt =} \DecValTok{130000}\NormalTok{, }\AttributeTok{thin =} \DecValTok{100}\NormalTok{, }\AttributeTok{burnin =} \DecValTok{30000}\NormalTok{,}
\AttributeTok{prior =}\NormalTok{ prior2}\FloatTok{.3}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(model2}\FloatTok{.3}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_3\_LongRun.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Again we have provided the data from one such run. It can be accessed using the code:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\AttributeTok{file =} \StringTok{"data/MCMCglmm\_model2\_3\_LongRun.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(model2}\FloatTok{.3}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Iterations = 30001:129901
## Thinning interval = 100
## Sample size = 1000
##
## DIC: 5641.752
##
## G-structure: ~us(at.level(sex, "1"):trait):animal
##
## post.mean
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal 1.290
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal 1.695
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal 1.695
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal 4.764
## l-95% CI
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal 0.2723
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal -0.3272
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal -0.3272
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal 0.2145
## u-95% CI
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal 2.293
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal 3.876
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal 3.876
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal 11.179
## eff.samp
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal 194.6
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal 126.2
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal 126.2
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal 119.8
##
## ~us(at.level(sex, "2"):trait):animal
##
## post.mean
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal 2.631
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal 6.797
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal 6.797
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal 21.582
## l-95% CI
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal 1.054
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal 2.275
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal 2.275
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal 9.301
## u-95% CI
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal 3.935
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal 9.555
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal 9.555
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal 29.862
## eff.samp
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal 119.98
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal 90.60
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal 90.60
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal 89.82
##
## ~us(trait):byear
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.byear 1.0808 0.533 1.704 1000.0
## traittarsus:traitbwt.byear 0.1822 -0.678 1.181 692.8
## traitbwt:traittarsus.byear 0.1822 -0.678 1.181 692.8
## traittarsus:traittarsus.byear 4.3624 1.916 7.713 1256.3
##
## ~us(trait):mother
##
## post.mean l-95% CI u-95% CI eff.samp
## traitbwt:traitbwt.mother 1.351 0.9377 1.8114 737.8
## traittarsus:traitbwt.mother -1.583 -2.1887 -0.8234 359.6
## traitbwt:traittarsus.mother -1.583 -2.1887 -0.8234 359.6
## traittarsus:traittarsus.mother 4.488 2.4913 6.9268 526.6
##
## R-structure: ~us(at.level(sex, "1"):trait):units
##
## post.mean
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units 2.425
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units 5.105
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units 5.105
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units 15.083
## l-95% CI
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units 1.499
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units 3.200
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units 3.200
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units 8.819
## u-95% CI
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units 3.385
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units 7.244
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units 7.244
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units 20.945
## eff.samp
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units 262.0
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units 204.8
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units 204.8
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units 208.9
##
## ~us(at.level(sex, "2"):trait):units
##
## post.mean
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units 1.174
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units 1.202
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units 1.202
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units 4.300
## l-95% CI
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units 0.1719
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units -0.7302
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units -0.7302
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units 0.1394
## u-95% CI
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units 2.342
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units 4.447
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units 4.447
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units 14.498
## eff.samp
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units 118.85
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units 92.25
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units 92.25
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units 73.00
##
## Location effects: cbind(bwt, tarsus) ~ trait - 1 + trait:sex
##
## post.mean l-95% CI u-95% CI eff.samp pMCMC
## traitbwt 6.37718 5.88327 6.81113 1000 <0.001 ***
## traittarsus 20.57067 19.54250 21.46986 1000 <0.001 ***
## traitbwt:sex2 1.95075 1.56879 2.34728 1000 <0.001 ***
## traittarsus:sex2 -0.04345 -0.93488 0.97908 1000 0.904
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
\end{verbatim}
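Note the effective sample sizes (\texttt{eff.samp}) in the summary above: for several of the sex-specific additive genetic and residual (co)variances they fall well short of the nominal 1000 stored samples, which points to lingering autocorrelation. Before wading through the full cross-correlation array returned by \texttt{autocorr()}, a more compact check is available from the \texttt{coda} package (on which \texttt{MCMCglmm} relies); the sketch below assumes only the fitted \texttt{model2.3} object:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# lag autocorrelations of each (co)variance component, one row per lag}
\FunctionTok{autocorr.diag}\NormalTok{(model2}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV)}
\CommentTok{\# effective number of independent samples for each component}
\FunctionTok{effectiveSize}\NormalTok{(model2}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}

Because the chain was thinned every 100 iterations, the rows returned by \texttt{autocorr.diag()} correspond to lags of 0, 100, 500, 1000 and 5000 iterations, the same lags that appear in the full \texttt{autocorr()} output below: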
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{autocorr}\NormalTok{(model2}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## , , at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.55397334
## Lag 500 0.17022926
## Lag 1000 0.06337109
## Lag 5000 -0.02509190
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.78900626
## Lag 100 0.48327050
## Lag 500 0.19390026
## Lag 1000 0.06793138
## Lag 5000 -0.05286532
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.78900626
## Lag 100 0.48327050
## Lag 500 0.19390026
## Lag 1000 0.06793138
## Lag 5000 -0.05286532
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.50907405
## Lag 100 0.33334457
## Lag 500 0.12791541
## Lag 1000 0.03663239
## Lag 5000 -0.06916089
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.069605265
## Lag 100 0.094995154
## Lag 500 0.058573181
## Lag 1000 -0.003320081
## Lag 5000 -0.037248927
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.07767707
## Lag 100 0.08733352
## Lag 500 0.03149552
## Lag 1000 0.01343395
## Lag 5000 -0.04572885
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.07767707
## Lag 100 0.08733352
## Lag 500 0.03149552
## Lag 1000 0.01343395
## Lag 5000 -0.04572885
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.09797335
## Lag 100 0.10223818
## Lag 500 0.05247065
## Lag 1000 0.02907861
## Lag 5000 -0.04267019
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.024065323 -0.020803022
## Lag 100 0.033328550 -0.020819486
## Lag 500 0.031430166 -0.003372663
## Lag 1000 -0.001833421 -0.049049620
## Lag 5000 -0.024994994 -0.003451790
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.020803022 -0.0624222388
## Lag 100 -0.020819486 -0.0161384292
## Lag 500 -0.003372663 0.0005029672
## Lag 1000 -0.049049620 0.0061716049
## Lag 5000 -0.003451790 -0.0670355294
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.205702950 -0.17616227
## Lag 100 -0.133488137 -0.15047963
## Lag 500 -0.084123203 -0.09271333
## Lag 1000 -0.018266043 -0.01423351
## Lag 5000 0.009790713 -0.04239881
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.17616227 -0.07667827
## Lag 100 -0.15047963 -0.11462894
## Lag 500 -0.09271333 -0.04945437
## Lag 1000 -0.01423351 0.01279152
## Lag 5000 -0.04239881 0.01063678
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.80199270
## Lag 100 -0.48489854
## Lag 500 -0.16445312
## Lag 1000 -0.02870564
## Lag 5000 0.04004574
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.65709502
## Lag 100 -0.42587753
## Lag 500 -0.17449911
## Lag 1000 -0.01887227
## Lag 5000 0.05356917
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.65709502
## Lag 100 -0.42587753
## Lag 500 -0.17449911
## Lag 1000 -0.01887227
## Lag 5000 0.05356917
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.450870623
## Lag 100 -0.302137845
## Lag 500 -0.111142577
## Lag 1000 0.006721917
## Lag 5000 0.072337816
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.04704752
## Lag 100 -0.07487166
## Lag 500 -0.03174021
## Lag 1000 0.01461737
## Lag 5000 0.03438299
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.05664210
## Lag 100 -0.06795349
## Lag 500 -0.01045678
## Lag 1000 -0.00544654
## Lag 5000 0.04865394
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.05664210
## Lag 100 -0.06795349
## Lag 500 -0.01045678
## Lag 1000 -0.00544654
## Lag 5000 0.04865394
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.07483207
## Lag 100 -0.08579319
## Lag 500 -0.03255096
## Lag 1000 -0.03116366
## Lag 5000 0.04444304
##
## , , at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.78900626
## Lag 100 0.48940534
## Lag 500 0.20162861
## Lag 1000 0.08548021
## Lag 5000 -0.03210448
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.67035954
## Lag 500 0.29214704
## Lag 1000 0.09634396
## Lag 5000 -0.04654686
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 1.00000000
## Lag 100 0.67035954
## Lag 500 0.29214704
## Lag 1000 0.09634396
## Lag 5000 -0.04654686
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.85973347
## Lag 100 0.59313625
## Lag 500 0.25474467
## Lag 1000 0.07370052
## Lag 5000 -0.06238511
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.03891144
## Lag 100 0.07541352
## Lag 500 0.08719705
## Lag 1000 0.02631996
## Lag 5000 0.03707974
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04885100
## Lag 100 0.06901723
## Lag 500 0.05108866
## Lag 1000 0.03771859
## Lag 5000 0.04393141
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.04885100
## Lag 100 0.06901723
## Lag 500 0.05108866
## Lag 1000 0.03771859
## Lag 5000 0.04393141
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.06916387
## Lag 100 0.07775943
## Lag 500 0.05195032
## Lag 1000 0.03373879
## Lag 5000 0.04725035
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.0004695241 -0.03155374
## Lag 100 0.0148795624 -0.01235856
## Lag 500 -0.0002511817 -0.03654693
## Lag 1000 -0.0242418521 -0.06974536
## Lag 5000 -0.0233571299 0.02136226
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.03155374 -0.038505768
## Lag 100 -0.01235856 -0.015447641
## Lag 500 -0.03654693 0.001018242
## Lag 1000 -0.06974536 -0.038893511
## Lag 5000 0.02136226 -0.048660419
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.15824356 -0.21569129
## Lag 100 -0.12733460 -0.17124038
## Lag 500 -0.09270524 -0.13286219
## Lag 1000 -0.09260369 -0.03180877
## Lag 5000 -0.01221180 -0.01805377
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.21569129 -0.11748729
## Lag 100 -0.17124038 -0.13156978
## Lag 500 -0.13286219 -0.10579913
## Lag 1000 -0.03180877 -0.02884682
## Lag 5000 -0.01805377 -0.02931723
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.63791397
## Lag 100 -0.43588539
## Lag 500 -0.16629095
## Lag 1000 -0.05012058
## Lag 5000 0.05620977
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.81241841
## Lag 100 -0.58511641
## Lag 500 -0.23385312
## Lag 1000 -0.04261848
## Lag 5000 0.05109275
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.81241841
## Lag 100 -0.58511641
## Lag 500 -0.23385312
## Lag 1000 -0.04261848
## Lag 5000 0.05109275
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.72502529
## Lag 100 -0.52942420
## Lag 500 -0.20277459
## Lag 1000 -0.01995455
## Lag 5000 0.06003143
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.006983857
## Lag 100 -0.045096027
## Lag 500 -0.058550089
## Lag 1000 0.011350413
## Lag 5000 -0.044368431
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.008328493
## Lag 100 -0.030043649
## Lag 500 -0.021340607
## Lag 1000 -0.014582657
## Lag 5000 -0.042776639
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.008328493
## Lag 100 -0.030043649
## Lag 500 -0.021340607
## Lag 1000 -0.014582657
## Lag 5000 -0.042776639
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.03036295
## Lag 100 -0.04347196
## Lag 500 -0.01636133
## Lag 1000 -0.02347230
## Lag 5000 -0.04789352
##
## , , at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.78900626
## Lag 100 0.48940534
## Lag 500 0.20162861
## Lag 1000 0.08548021
## Lag 5000 -0.03210448
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.67035954
## Lag 500 0.29214704
## Lag 1000 0.09634396
## Lag 5000 -0.04654686
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 1.00000000
## Lag 100 0.67035954
## Lag 500 0.29214704
## Lag 1000 0.09634396
## Lag 5000 -0.04654686
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.85973347
## Lag 100 0.59313625
## Lag 500 0.25474467
## Lag 1000 0.07370052
## Lag 5000 -0.06238511
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.03891144
## Lag 100 0.07541352
## Lag 500 0.08719705
## Lag 1000 0.02631996
## Lag 5000 0.03707974
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04885100
## Lag 100 0.06901723
## Lag 500 0.05108866
## Lag 1000 0.03771859
## Lag 5000 0.04393141
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.04885100
## Lag 100 0.06901723
## Lag 500 0.05108866
## Lag 1000 0.03771859
## Lag 5000 0.04393141
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.06916387
## Lag 100 0.07775943
## Lag 500 0.05195032
## Lag 1000 0.03373879
## Lag 5000 0.04725035
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.0004695241 -0.03155374
## Lag 100 0.0148795624 -0.01235856
## Lag 500 -0.0002511817 -0.03654693
## Lag 1000 -0.0242418521 -0.06974536
## Lag 5000 -0.0233571299 0.02136226
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.03155374 -0.038505768
## Lag 100 -0.01235856 -0.015447641
## Lag 500 -0.03654693 0.001018242
## Lag 1000 -0.06974536 -0.038893511
## Lag 5000 0.02136226 -0.048660419
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.15824356 -0.21569129
## Lag 100 -0.12733460 -0.17124038
## Lag 500 -0.09270524 -0.13286219
## Lag 1000 -0.09260369 -0.03180877
## Lag 5000 -0.01221180 -0.01805377
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.21569129 -0.11748729
## Lag 100 -0.17124038 -0.13156978
## Lag 500 -0.13286219 -0.10579913
## Lag 1000 -0.03180877 -0.02884682
## Lag 5000 -0.01805377 -0.02931723
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.63791397
## Lag 100 -0.43588539
## Lag 500 -0.16629095
## Lag 1000 -0.05012058
## Lag 5000 0.05620977
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.81241841
## Lag 100 -0.58511641
## Lag 500 -0.23385312
## Lag 1000 -0.04261848
## Lag 5000 0.05109275
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.81241841
## Lag 100 -0.58511641
## Lag 500 -0.23385312
## Lag 1000 -0.04261848
## Lag 5000 0.05109275
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.72502529
## Lag 100 -0.52942420
## Lag 500 -0.20277459
## Lag 1000 -0.01995455
## Lag 5000 0.06003143
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.006983857
## Lag 100 -0.045096027
## Lag 500 -0.058550089
## Lag 1000 0.011350413
## Lag 5000 -0.044368431
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.008328493
## Lag 100 -0.030043649
## Lag 500 -0.021340607
## Lag 1000 -0.014582657
## Lag 5000 -0.042776639
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.008328493
## Lag 100 -0.030043649
## Lag 500 -0.021340607
## Lag 1000 -0.014582657
## Lag 5000 -0.042776639
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.03036295
## Lag 100 -0.04347196
## Lag 500 -0.01636133
## Lag 1000 -0.02347230
## Lag 5000 -0.04789352
##
## , , at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.50907405
## Lag 100 0.33371522
## Lag 500 0.16786851
## Lag 1000 0.09508980
## Lag 5000 -0.01160465
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.85973347
## Lag 100 0.60647833
## Lag 500 0.30364438
## Lag 1000 0.11809885
## Lag 5000 -0.01621713
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.85973347
## Lag 100 0.60647833
## Lag 500 0.30364438
## Lag 1000 0.11809885
## Lag 5000 -0.01621713
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 1.00000000
## Lag 100 0.70856137
## Lag 500 0.32062198
## Lag 1000 0.10775691
## Lag 5000 -0.03875951
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04172858
## Lag 100 0.08374547
## Lag 500 0.12694344
## Lag 1000 0.07590539
## Lag 5000 0.06760669
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04446314
## Lag 100 0.07584401
## Lag 500 0.09237150
## Lag 1000 0.08060798
## Lag 5000 0.08871292
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.04446314
## Lag 100 0.07584401
## Lag 500 0.09237150
## Lag 1000 0.08060798
## Lag 5000 0.08871292
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.05047344
## Lag 100 0.07516131
## Lag 500 0.07469952
## Lag 1000 0.05866640
## Lag 5000 0.10135060
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.0017614677 -0.02521301
## Lag 100 -0.0001726206 -0.01064701
## Lag 500 -0.0088363980 -0.03224542
## Lag 1000 -0.0305569374 -0.07434829
## Lag 5000 0.0112524131 0.04361589
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.02521301 -0.017701712
## Lag 100 -0.01064701 -0.012377796
## Lag 500 -0.03224542 -0.007633601
## Lag 1000 -0.07434829 -0.048134861
## Lag 5000 0.04361589 -0.034160121
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.08643244 -0.16525481
## Lag 100 -0.09394116 -0.12630103
## Lag 500 -0.07022343 -0.14871687
## Lag 1000 -0.13080760 -0.05280664
## Lag 5000 -0.02656739 -0.01705584
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.16525481 -0.16381629
## Lag 100 -0.12630103 -0.14936492
## Lag 500 -0.14871687 -0.14524964
## Lag 1000 -0.05280664 -0.05284469
## Lag 5000 -0.01705584 -0.03496960
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.40641260
## Lag 100 -0.30186975
## Lag 500 -0.13039802
## Lag 1000 -0.05915868
## Lag 5000 0.02386413
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.68658404
## Lag 100 -0.52807612
## Lag 500 -0.22374206
## Lag 1000 -0.06443430
## Lag 5000 0.01601359
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.68658404
## Lag 100 -0.52807612
## Lag 500 -0.22374206
## Lag 1000 -0.06443430
## Lag 5000 0.01601359
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.80604743
## Lag 100 -0.61183580
## Lag 500 -0.23564929
## Lag 1000 -0.05302725
## Lag 5000 0.02795988
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.01115803
## Lag 100 -0.05709045
## Lag 500 -0.10102388
## Lag 1000 -0.03082573
## Lag 5000 -0.07768457
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.00224780
## Lag 100 -0.03128327
## Lag 500 -0.06483536
## Lag 1000 -0.05641989
## Lag 5000 -0.08909507
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.00224780
## Lag 100 -0.03128327
## Lag 500 -0.06483536
## Lag 1000 -0.05641989
## Lag 5000 -0.08909507
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.007247428
## Lag 100 -0.030170592
## Lag 500 -0.038356457
## Lag 1000 -0.048353101
## Lag 5000 -0.102268690
##
## , , at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.069605265
## Lag 100 0.072256284
## Lag 500 0.053797194
## Lag 1000 -0.000442817
## Lag 5000 0.004334379
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.038911436
## Lag 100 0.046490324
## Lag 500 0.023982452
## Lag 1000 -0.022082138
## Lag 5000 0.002024369
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.038911436
## Lag 100 0.046490324
## Lag 500 0.023982452
## Lag 1000 -0.022082138
## Lag 5000 0.002024369
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.041728579
## Lag 100 0.047590439
## Lag 500 0.020121185
## Lag 1000 -0.026853161
## Lag 5000 -0.002675927
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.72687483
## Lag 500 0.29712402
## Lag 1000 0.09345869
## Lag 5000 -0.02661430
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.88935579
## Lag 100 0.68152951
## Lag 500 0.34189066
## Lag 1000 0.14877079
## Lag 5000 -0.05267066
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.88935579
## Lag 100 0.68152951
## Lag 500 0.34189066
## Lag 1000 0.14877079
## Lag 5000 -0.05267066
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.62542960
## Lag 100 0.49857858
## Lag 500 0.31564247
## Lag 1000 0.16832415
## Lag 5000 -0.06728721
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.0003564631 0.042945762
## Lag 100 0.0479576117 0.034889169
## Lag 500 0.0089261353 0.012415428
## Lag 1000 0.0094071446 -0.002266020
## Lag 5000 -0.0216588825 0.009395412
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.042945762 0.065195559
## Lag 100 0.034889169 0.007541516
## Lag 500 0.012415428 0.020598395
## Lag 1000 -0.002266020 0.019062778
## Lag 5000 0.009395412 -0.006893616
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.173818163 -0.20624430
## Lag 100 -0.100444862 -0.13620845
## Lag 500 -0.017814798 -0.09821410
## Lag 1000 -0.006221606 -0.03969443
## Lag 5000 0.008111714 0.03462103
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.20624430 -0.038671695
## Lag 100 -0.13620845 -0.032842556
## Lag 500 -0.09821410 -0.038030546
## Lag 1000 -0.03969443 -0.014755552
## Lag 5000 0.03462103 0.009608549
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.076827822
## Lag 100 -0.063673683
## Lag 500 -0.036162213
## Lag 1000 0.007324372
## Lag 5000 -0.035913144
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.032836717
## Lag 100 -0.017837859
## Lag 500 -0.003168012
## Lag 1000 0.029353832
## Lag 5000 -0.033006249
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.032836717
## Lag 100 -0.017837859
## Lag 500 -0.003168012
## Lag 1000 0.029353832
## Lag 5000 -0.033006249
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.037630508
## Lag 100 -0.020505377
## Lag 500 0.005193169
## Lag 1000 0.029491756
## Lag 5000 -0.021169345
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.91555116
## Lag 100 -0.74121973
## Lag 500 -0.31676022
## Lag 1000 -0.12984095
## Lag 5000 0.03999793
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.82737809
## Lag 100 -0.70500699
## Lag 500 -0.35753289
## Lag 1000 -0.19747432
## Lag 5000 0.07246841
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.82737809
## Lag 100 -0.70500699
## Lag 500 -0.35753289
## Lag 1000 -0.19747432
## Lag 5000 0.07246841
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.60748687
## Lag 100 -0.53785439
## Lag 500 -0.34457193
## Lag 1000 -0.21652071
## Lag 5000 0.07748732
##
## , , at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.077677066
## Lag 100 0.089976565
## Lag 500 0.094802795
## Lag 1000 0.054458884
## Lag 5000 -0.005097372
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.048851002
## Lag 100 0.056231551
## Lag 500 0.039570907
## Lag 1000 0.016174999
## Lag 5000 -0.007734496
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.048851002
## Lag 100 0.056231551
## Lag 500 0.039570907
## Lag 1000 0.016174999
## Lag 5000 -0.007734496
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.0444631415
## Lag 100 0.0430237642
## Lag 500 0.0192487930
## Lag 1000 0.0009231893
## Lag 5000 -0.0131000513
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.88935579
## Lag 100 0.67781643
## Lag 500 0.28648920
## Lag 1000 0.09666293
## Lag 5000 -0.01204019
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.77771494
## Lag 500 0.39167762
## Lag 1000 0.17883082
## Lag 5000 -0.07775854
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 1.00000000
## Lag 100 0.77771494
## Lag 500 0.39167762
## Lag 1000 0.17883082
## Lag 5000 -0.07775854
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.8819391
## Lag 100 0.6973170
## Lag 500 0.4160358
## Lag 1000 0.2237797
## Lag 5000 -0.1237974
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.018892747 0.029328800
## Lag 100 0.043643815 0.038845063
## Lag 500 0.024299898 0.018874626
## Lag 1000 0.003628414 -0.012224002
## Lag 5000 -0.023241285 -0.003721765
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.029328800 0.04789443
## Lag 100 0.038845063 0.01525774
## Lag 500 0.018874626 0.02992531
## Lag 1000 -0.012224002 0.01323436
## Lag 5000 -0.003721765 -0.01456627
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.120118096 -0.25319625
## Lag 100 -0.087148924 -0.19154513
## Lag 500 -0.015094892 -0.12182897
## Lag 1000 -0.058102510 -0.10907393
## Lag 5000 0.004092938 0.05892935
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.25319625 -0.12765635
## Lag 100 -0.19154513 -0.09362701
## Lag 500 -0.12182897 -0.07507670
## Lag 1000 -0.10907393 -0.03080525
## Lag 5000 0.05892935 0.02983728
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.07683343
## Lag 100 -0.06951957
## Lag 500 -0.06593593
## Lag 1000 -0.03761794
## Lag 5000 -0.03669061
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.030749722
## Lag 100 -0.016217311
## Lag 500 -0.020696084
## Lag 1000 0.004301752
## Lag 5000 -0.032243808
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.030749722
## Lag 100 -0.016217311
## Lag 500 -0.020696084
## Lag 1000 0.004301752
## Lag 5000 -0.032243808
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.0200083239
## Lag 100 -0.0021801995
## Lag 500 -0.0001738805
## Lag 1000 0.0168307815
## Lag 5000 -0.0185350646
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.82656679
## Lag 100 -0.69378831
## Lag 500 -0.30703065
## Lag 1000 -0.12287954
## Lag 5000 0.02286923
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.92799199
## Lag 100 -0.79777083
## Lag 500 -0.40858358
## Lag 1000 -0.21574629
## Lag 5000 0.09114505
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.92799199
## Lag 100 -0.79777083
## Lag 500 -0.40858358
## Lag 1000 -0.21574629
## Lag 5000 0.09114505
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.8330298
## Lag 100 -0.7312807
## Lag 500 -0.4397168
## Lag 1000 -0.2621632
## Lag 5000 0.1220174
##
## , , at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.077677066
## Lag 100 0.089976565
## Lag 500 0.094802795
## Lag 1000 0.054458884
## Lag 5000 -0.005097372
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.048851002
## Lag 100 0.056231551
## Lag 500 0.039570907
## Lag 1000 0.016174999
## Lag 5000 -0.007734496
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.048851002
## Lag 100 0.056231551
## Lag 500 0.039570907
## Lag 1000 0.016174999
## Lag 5000 -0.007734496
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.0444631415
## Lag 100 0.0430237642
## Lag 500 0.0192487930
## Lag 1000 0.0009231893
## Lag 5000 -0.0131000513
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.88935579
## Lag 100 0.67781643
## Lag 500 0.28648920
## Lag 1000 0.09666293
## Lag 5000 -0.01204019
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 1.00000000
## Lag 100 0.77771494
## Lag 500 0.39167762
## Lag 1000 0.17883082
## Lag 5000 -0.07775854
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 1.00000000
## Lag 100 0.77771494
## Lag 500 0.39167762
## Lag 1000 0.17883082
## Lag 5000 -0.07775854
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.8819391
## Lag 100 0.6973170
## Lag 500 0.4160358
## Lag 1000 0.2237797
## Lag 5000 -0.1237974
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.018892747 0.029328800
## Lag 100 0.043643815 0.038845063
## Lag 500 0.024299898 0.018874626
## Lag 1000 0.003628414 -0.012224002
## Lag 5000 -0.023241285 -0.003721765
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.029328800 0.04789443
## Lag 100 0.038845063 0.01525774
## Lag 500 0.018874626 0.02992531
## Lag 1000 -0.012224002 0.01323436
## Lag 5000 -0.003721765 -0.01456627
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.120118096 -0.25319625
## Lag 100 -0.087148924 -0.19154513
## Lag 500 -0.015094892 -0.12182897
## Lag 1000 -0.058102510 -0.10907393
## Lag 5000 0.004092938 0.05892935
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.25319625 -0.12765635
## Lag 100 -0.19154513 -0.09362701
## Lag 500 -0.12182897 -0.07507670
## Lag 1000 -0.10907393 -0.03080525
## Lag 5000 0.05892935 0.02983728
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.07683343
## Lag 100 -0.06951957
## Lag 500 -0.06593593
## Lag 1000 -0.03761794
## Lag 5000 -0.03669061
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.030749722
## Lag 100 -0.016217311
## Lag 500 -0.020696084
## Lag 1000 0.004301752
## Lag 5000 -0.032243808
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.030749722
## Lag 100 -0.016217311
## Lag 500 -0.020696084
## Lag 1000 0.004301752
## Lag 5000 -0.032243808
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.0200083239
## Lag 100 -0.0021801995
## Lag 500 -0.0001738805
## Lag 1000 0.0168307815
## Lag 5000 -0.0185350646
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.82656679
## Lag 100 -0.69378831
## Lag 500 -0.30703065
## Lag 1000 -0.12287954
## Lag 5000 0.02286923
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.92799199
## Lag 100 -0.79777083
## Lag 500 -0.40858358
## Lag 1000 -0.21574629
## Lag 5000 0.09114505
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.92799199
## Lag 100 -0.79777083
## Lag 500 -0.40858358
## Lag 1000 -0.21574629
## Lag 5000 0.09114505
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.8330298
## Lag 100 -0.7312807
## Lag 500 -0.4397168
## Lag 1000 -0.2621632
## Lag 5000 0.1220174
##
## , , at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.09797335
## Lag 100 0.10351817
## Lag 500 0.13155871
## Lag 1000 0.10405306
## Lag 5000 -0.01867047
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.06916387
## Lag 100 0.06723792
## Lag 500 0.05661327
## Lag 1000 0.04949334
## Lag 5000 -0.02266138
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.06916387
## Lag 100 0.06723792
## Lag 500 0.05661327
## Lag 1000 0.04949334
## Lag 5000 -0.02266138
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.05047344
## Lag 100 0.03874584
## Lag 500 0.01496384
## Lag 1000 0.02169455
## Lag 5000 -0.03388536
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.62542960
## Lag 100 0.50127738
## Lag 500 0.22112585
## Lag 1000 0.08374181
## Lag 5000 0.01908858
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.88193914
## Lag 100 0.69427920
## Lag 500 0.35645051
## Lag 1000 0.16527850
## Lag 5000 -0.06295365
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.88193914
## Lag 100 0.69427920
## Lag 500 0.35645051
## Lag 1000 0.16527850
## Lag 5000 -0.06295365
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 1.0000000
## Lag 100 0.7692569
## Lag 500 0.4269991
## Lag 1000 0.2177448
## Lag 5000 -0.1294063
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.025382864 0.01689248
## Lag 100 0.026827275 0.04383334
## Lag 500 0.043938718 0.02339834
## Lag 1000 -0.004971564 -0.01928577
## Lag 5000 -0.003009890 0.00705730
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.01689248 0.026867038
## Lag 100 0.04383334 0.026505180
## Lag 500 0.02339834 0.020550082
## Lag 1000 -0.01928577 0.002852313
## Lag 5000 0.00705730 -0.006893570
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.067211266 -0.23724742
## Lag 100 -0.061081374 -0.18234569
## Lag 500 -0.019373790 -0.12701461
## Lag 1000 -0.070821037 -0.12744137
## Lag 5000 -0.001186618 0.06105046
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.23724742 -0.20886061
## Lag 100 -0.18234569 -0.13591598
## Lag 500 -0.12701461 -0.09687975
## Lag 1000 -0.12744137 -0.02449457
## Lag 5000 0.06105046 0.03190234
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.09673689
## Lag 100 -0.08548368
## Lag 500 -0.09673988
## Lag 1000 -0.09077151
## Lag 5000 -0.03453247
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.05109208
## Lag 100 -0.03188022
## Lag 500 -0.04005001
## Lag 1000 -0.02780074
## Lag 5000 -0.01852278
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.05109208
## Lag 100 -0.03188022
## Lag 500 -0.04005001
## Lag 1000 -0.02780074
## Lag 5000 -0.01852278
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -1.794858e-02
## Lag 100 2.349637e-03
## Lag 500 -4.693412e-04
## Lag 1000 3.189101e-05
## Lag 5000 5.745305e-03
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.59303753
## Lag 100 -0.51940561
## Lag 500 -0.24130575
## Lag 1000 -0.10573107
## Lag 5000 -0.01338206
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.81645230
## Lag 100 -0.71433129
## Lag 500 -0.37519197
## Lag 1000 -0.19591092
## Lag 5000 0.06790739
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.81645230
## Lag 100 -0.71433129
## Lag 500 -0.37519197
## Lag 1000 -0.19591092
## Lag 5000 0.06790739
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.9155485
## Lag 100 -0.7933145
## Lag 500 -0.4457890
## Lag 1000 -0.2536897
## Lag 5000 0.1143214
##
## , , traitbwt:traitbwt.byear
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.024065323
## Lag 100 -0.007228001
## Lag 500 -0.030438771
## Lag 1000 -0.037319806
## Lag 5000 0.024016255
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 0.0004695241
## Lag 100 0.0041014557
## Lag 500 -0.0373017266
## Lag 1000 -0.0290826060
## Lag 5000 0.0367761623
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.0004695241
## Lag 100 0.0041014557
## Lag 500 -0.0373017266
## Lag 1000 -0.0290826060
## Lag 5000 0.0367761623
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 0.001761468
## Lag 100 0.006293899
## Lag 500 -0.029212492
## Lag 1000 -0.044499342
## Lag 5000 0.039200834
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.0003564631
## Lag 100 0.0041477479
## Lag 500 0.0001227158
## Lag 1000 -0.0280572485
## Lag 5000 -0.0238953239
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.018892747
## Lag 100 0.022147308
## Lag 500 0.008227267
## Lag 1000 -0.018154492
## Lag 5000 -0.028508237
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.018892747
## Lag 100 0.022147308
## Lag 500 0.008227267
## Lag 1000 -0.018154492
## Lag 5000 -0.028508237
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.02538286
## Lag 100 0.02892916
## Lag 500 0.01888945
## Lag 1000 -0.01231105
## Lag 5000 -0.03119621
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 1.00000000 0.2416288100
## Lag 100 -0.00944118 0.0408026921
## Lag 500 -0.01192083 -0.0005245788
## Lag 1000 0.02533682 0.0234016930
## Lag 5000 0.00537850 -0.0205987850
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.2416288100 -0.004095612
## Lag 100 0.0408026921 0.040395311
## Lag 500 -0.0005245788 -0.013673594
## Lag 1000 0.0234016930 0.007103496
## Lag 5000 -0.0205987850 -0.024779270
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.04165719 -0.010865029
## Lag 100 0.02095592 -0.047153228
## Lag 500 0.03944625 -0.020680451
## Lag 1000 -0.01004724 -0.001456283
## Lag 5000 0.06987871 0.037222916
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.010865029 -0.004075442
## Lag 100 -0.047153228 0.027366151
## Lag 500 -0.020680451 -0.012918036
## Lag 1000 -0.001456283 0.002873670
## Lag 5000 0.037222916 0.004175319
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.017912666
## Lag 100 0.009367393
## Lag 500 0.034201275
## Lag 1000 0.016198289
## Lag 5000 -0.032390006
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.02448148
## Lag 100 -0.02348874
## Lag 500 0.06170381
## Lag 1000 0.03904823
## Lag 5000 -0.06013763
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.02448148
## Lag 100 -0.02348874
## Lag 500 0.06170381
## Lag 1000 0.03904823
## Lag 5000 -0.06013763
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.02835591
## Lag 100 -0.02632404
## Lag 500 0.06503529
## Lag 1000 0.05159186
## Lag 5000 -0.05336637
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.01327914
## Lag 100 -0.02432572
## Lag 500 -0.01282050
## Lag 1000 0.03378831
## Lag 5000 0.03616682
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.03186040
## Lag 100 -0.03568005
## Lag 500 -0.01732292
## Lag 1000 0.01976704
## Lag 5000 0.04292710
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.03186040
## Lag 100 -0.03568005
## Lag 500 -0.01732292
## Lag 1000 0.01976704
## Lag 5000 0.04292710
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.032835601
## Lag 100 -0.049995020
## Lag 500 -0.029651563
## Lag 1000 0.003674526
## Lag 5000 0.038069439
##
## , , traittarsus:traitbwt.byear
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.020803022
## Lag 100 -0.016047256
## Lag 500 0.008481995
## Lag 1000 -0.086888857
## Lag 5000 0.006010889
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.03155374
## Lag 100 -0.02090225
## Lag 500 -0.01282744
## Lag 1000 -0.07269148
## Lag 5000 0.00906733
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.03155374
## Lag 100 -0.02090225
## Lag 500 -0.01282744
## Lag 1000 -0.07269148
## Lag 5000 0.00906733
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.02521301
## Lag 100 -0.02097611
## Lag 500 -0.02043834
## Lag 1000 -0.04578630
## Lag 5000 0.01827006
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04294576
## Lag 100 0.01731935
## Lag 500 0.03962076
## Lag 1000 0.02125431
## Lag 5000 0.03200298
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.029328800
## Lag 100 0.018729883
## Lag 500 0.031732275
## Lag 1000 0.027724247
## Lag 5000 0.005040522
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.029328800
## Lag 100 0.018729883
## Lag 500 0.031732275
## Lag 1000 0.027724247
## Lag 5000 0.005040522
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.016892478
## Lag 100 0.029513644
## Lag 500 0.019652539
## Lag 1000 0.022697027
## Lag 5000 -0.002977248
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.241628810 1.0000000000
## Lag 100 0.032174416 0.0390208931
## Lag 500 0.082255469 -0.0100929579
## Lag 1000 -0.008048824 -0.0076623070
## Lag 5000 0.022293709 -0.0005155025
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 1.0000000000 0.321896680
## Lag 100 0.0390208931 -0.001880506
## Lag 500 -0.0100929579 -0.008239287
## Lag 1000 -0.0076623070 -0.014062762
## Lag 5000 -0.0005155025 0.016071497
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.02648439 0.034213379
## Lag 100 -0.01160983 -0.029104077
## Lag 500 0.01386490 0.006755323
## Lag 1000 -0.01625583 -0.033438010
## Lag 5000 0.01138216 0.050537485
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.034213379 0.016340654
## Lag 100 -0.029104077 0.009692208
## Lag 500 0.006755323 0.003429620
## Lag 1000 -0.033438010 -0.022193805
## Lag 5000 0.050537485 -0.027062081
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.004458633
## Lag 100 0.030024975
## Lag 500 0.019487442
## Lag 1000 0.074642127
## Lag 5000 -0.027998599
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.001255739
## Lag 100 0.043187160
## Lag 500 0.048070249
## Lag 1000 0.063998723
## Lag 5000 -0.032351394
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.001255739
## Lag 100 0.043187160
## Lag 500 0.048070249
## Lag 1000 0.063998723
## Lag 5000 -0.032351394
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.009814135
## Lag 100 0.034259656
## Lag 500 0.043054461
## Lag 1000 0.042960162
## Lag 5000 -0.028194653
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.04867322
## Lag 100 -0.04918743
## Lag 500 -0.06575193
## Lag 1000 -0.03077705
## Lag 5000 -0.03431783
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.04503037
## Lag 100 -0.03923249
## Lag 500 -0.05136636
## Lag 1000 -0.03533135
## Lag 5000 -0.01007200
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.04503037
## Lag 100 -0.03923249
## Lag 500 -0.05136636
## Lag 1000 -0.03533135
## Lag 5000 -0.01007200
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.023526738
## Lag 100 -0.045826492
## Lag 500 -0.039709806
## Lag 1000 -0.020278982
## Lag 5000 -0.001153391
##
## , , traitbwt:traittarsus.byear
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.020803022
## Lag 100 -0.016047256
## Lag 500 0.008481995
## Lag 1000 -0.086888857
## Lag 5000 0.006010889
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.03155374
## Lag 100 -0.02090225
## Lag 500 -0.01282744
## Lag 1000 -0.07269148
## Lag 5000 0.00906733
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.03155374
## Lag 100 -0.02090225
## Lag 500 -0.01282744
## Lag 1000 -0.07269148
## Lag 5000 0.00906733
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.02521301
## Lag 100 -0.02097611
## Lag 500 -0.02043834
## Lag 1000 -0.04578630
## Lag 5000 0.01827006
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04294576
## Lag 100 0.01731935
## Lag 500 0.03962076
## Lag 1000 0.02125431
## Lag 5000 0.03200298
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.029328800
## Lag 100 0.018729883
## Lag 500 0.031732275
## Lag 1000 0.027724247
## Lag 5000 0.005040522
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.029328800
## Lag 100 0.018729883
## Lag 500 0.031732275
## Lag 1000 0.027724247
## Lag 5000 0.005040522
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.016892478
## Lag 100 0.029513644
## Lag 500 0.019652539
## Lag 1000 0.022697027
## Lag 5000 -0.002977248
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.241628810 1.0000000000
## Lag 100 0.032174416 0.0390208931
## Lag 500 0.082255469 -0.0100929579
## Lag 1000 -0.008048824 -0.0076623070
## Lag 5000 0.022293709 -0.0005155025
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 1.0000000000 0.321896680
## Lag 100 0.0390208931 -0.001880506
## Lag 500 -0.0100929579 -0.008239287
## Lag 1000 -0.0076623070 -0.014062762
## Lag 5000 -0.0005155025 0.016071497
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.02648439 0.034213379
## Lag 100 -0.01160983 -0.029104077
## Lag 500 0.01386490 0.006755323
## Lag 1000 -0.01625583 -0.033438010
## Lag 5000 0.01138216 0.050537485
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.034213379 0.016340654
## Lag 100 -0.029104077 0.009692208
## Lag 500 0.006755323 0.003429620
## Lag 1000 -0.033438010 -0.022193805
## Lag 5000 0.050537485 -0.027062081
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.004458633
## Lag 100 0.030024975
## Lag 500 0.019487442
## Lag 1000 0.074642127
## Lag 5000 -0.027998599
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.001255739
## Lag 100 0.043187160
## Lag 500 0.048070249
## Lag 1000 0.063998723
## Lag 5000 -0.032351394
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.001255739
## Lag 100 0.043187160
## Lag 500 0.048070249
## Lag 1000 0.063998723
## Lag 5000 -0.032351394
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.009814135
## Lag 100 0.034259656
## Lag 500 0.043054461
## Lag 1000 0.042960162
## Lag 5000 -0.028194653
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.04867322
## Lag 100 -0.04918743
## Lag 500 -0.06575193
## Lag 1000 -0.03077705
## Lag 5000 -0.03431783
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.04503037
## Lag 100 -0.03923249
## Lag 500 -0.05136636
## Lag 1000 -0.03533135
## Lag 5000 -0.01007200
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.04503037
## Lag 100 -0.03923249
## Lag 500 -0.05136636
## Lag 1000 -0.03533135
## Lag 5000 -0.01007200
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.023526738
## Lag 100 -0.045826492
## Lag 500 -0.039709806
## Lag 1000 -0.020278982
## Lag 5000 -0.001153391
##
## , , traittarsus:traittarsus.byear
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.062422239
## Lag 100 0.011583874
## Lag 500 0.037441803
## Lag 1000 -0.009627398
## Lag 5000 -0.022238184
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.038505768
## Lag 100 0.001804516
## Lag 500 0.005261718
## Lag 1000 0.005983226
## Lag 5000 0.032748208
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.038505768
## Lag 100 0.001804516
## Lag 500 0.005261718
## Lag 1000 0.005983226
## Lag 5000 0.032748208
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.01770171
## Lag 100 -0.01989808
## Lag 500 -0.01086652
## Lag 1000 0.01084234
## Lag 5000 0.03557845
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.06519556
## Lag 100 0.02663892
## Lag 500 0.04808592
## Lag 1000 0.02428387
## Lag 5000 -0.02466630
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 0.04789443
## Lag 100 0.03493449
## Lag 500 0.05614210
## Lag 1000 0.04153017
## Lag 5000 -0.01844607
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.04789443
## Lag 100 0.03493449
## Lag 500 0.05614210
## Lag 1000 0.04153017
## Lag 5000 -0.01844607
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 0.026867038
## Lag 100 0.036612609
## Lag 500 0.037393390
## Lag 1000 0.044426728
## Lag 5000 0.001484634
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.004095612 0.321896680
## Lag 100 -0.002165733 -0.025763034
## Lag 500 -0.015834268 0.007271524
## Lag 1000 -0.014352739 -0.010372408
## Lag 5000 -0.014353177 0.008050074
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.321896680 1.00000000
## Lag 100 -0.025763034 -0.05471135
## Lag 500 0.007271524 0.01487689
## Lag 1000 -0.010372408 -0.01690300
## Lag 5000 0.008050074 -0.01108507
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.024243359 -0.005316487
## Lag 100 0.006840642 0.028749422
## Lag 500 0.013350099 -0.009938382
## Lag 1000 -0.055215183 -0.036685793
## Lag 5000 -0.020213641 0.022105087
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 -0.005316487 0.09836651
## Lag 100 0.028749422 0.02150474
## Lag 500 -0.009938382 -0.03193224
## Lag 1000 -0.036685793 -0.05678049
## Lag 5000 0.022105087 -0.01297283
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.009413426
## Lag 100 -0.009797256
## Lag 500 -0.005303499
## Lag 1000 0.021519876
## Lag 5000 0.015430014
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.009668697
## Lag 100 0.020499050
## Lag 500 0.017314825
## Lag 1000 -0.004472068
## Lag 5000 -0.035467618
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.009668697
## Lag 100 0.020499050
## Lag 500 0.017314825
## Lag 1000 -0.004472068
## Lag 5000 -0.035467618
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.02461835
## Lag 100 0.03735960
## Lag 500 0.03282282
## Lag 1000 -0.01328773
## Lag 5000 -0.03920844
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.0715338265
## Lag 100 -0.0471815918
## Lag 500 -0.0565789706
## Lag 1000 -0.0181400233
## Lag 5000 0.0004730541
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.062503872
## Lag 100 -0.053534956
## Lag 500 -0.056343269
## Lag 1000 -0.036210432
## Lag 5000 0.001141946
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.062503872
## Lag 100 -0.053534956
## Lag 500 -0.056343269
## Lag 1000 -0.036210432
## Lag 5000 0.001141946
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.04778303
## Lag 100 -0.04769587
## Lag 500 -0.03442670
## Lag 1000 -0.03534380
## Lag 5000 -0.01293680
##
## , , traitbwt:traitbwt.mother
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.20570295
## Lag 100 -0.16090204
## Lag 500 -0.04215661
## Lag 1000 -0.08030891
## Lag 5000 -0.03677952
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.15824356
## Lag 100 -0.14929872
## Lag 500 -0.08184054
## Lag 1000 -0.05556932
## Lag 5000 -0.04664653
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.15824356
## Lag 100 -0.14929872
## Lag 500 -0.08184054
## Lag 1000 -0.05556932
## Lag 5000 -0.04664653
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.08643244
## Lag 100 -0.08098396
## Lag 500 -0.06412749
## Lag 1000 -0.01601502
## Lag 5000 -0.03973564
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.17381816
## Lag 100 -0.08982420
## Lag 500 -0.05302974
## Lag 1000 0.07059613
## Lag 5000 0.01444299
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.12011810
## Lag 100 -0.05615154
## Lag 500 -0.04397641
## Lag 1000 0.04529630
## Lag 5000 0.01188481
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.12011810
## Lag 100 -0.05615154
## Lag 500 -0.04397641
## Lag 1000 0.04529630
## Lag 5000 0.01188481
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.067211266
## Lag 100 -0.032172212
## Lag 500 -0.034191393
## Lag 1000 0.007209103
## Lag 5000 0.013456441
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.041657190 -0.0264843891
## Lag 100 -0.039772659 0.0007355686
## Lag 500 0.046775095 0.0725614701
## Lag 1000 0.079575034 0.0716302875
## Lag 5000 0.002225709 0.0218336784
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.0264843891 -0.02424336
## Lag 100 0.0007355686 0.03089389
## Lag 500 0.0725614701 0.02650238
## Lag 1000 0.0716302875 0.01216398
## Lag 5000 0.0218336784 0.04609217
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 1.00000000 0.16959453
## Lag 100 0.08087113 0.06604294
## Lag 500 0.03061843 0.03325611
## Lag 1000 0.01129805 -0.02725858
## Lag 5000 -0.05179910 0.02287838
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.16959453 -0.15600691
## Lag 100 0.06604294 0.04752473
## Lag 500 0.03325611 -0.02095386
## Lag 1000 -0.02725858 0.01549741
## Lag 5000 0.02287838 0.04374839
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.11538180
## Lag 100 0.11931532
## Lag 500 0.03492918
## Lag 1000 0.03149579
## Lag 5000 -0.00252576
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.11597642
## Lag 100 0.10292257
## Lag 500 0.07497668
## Lag 1000 0.02331658
## Lag 5000 0.01885065
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.11597642
## Lag 100 0.10292257
## Lag 500 0.07497668
## Lag 1000 0.02331658
## Lag 5000 0.01885065
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.081696637
## Lag 100 0.070844651
## Lag 500 0.058492410
## Lag 1000 0.006484949
## Lag 5000 0.049095289
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.07873715
## Lag 100 0.07663687
## Lag 500 0.03807065
## Lag 1000 -0.07032803
## Lag 5000 -0.02922942
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.07662601
## Lag 100 0.05392363
## Lag 500 0.02480332
## Lag 1000 -0.04252999
## Lag 5000 -0.01921695
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.07662601
## Lag 100 0.05392363
## Lag 500 0.02480332
## Lag 1000 -0.04252999
## Lag 5000 -0.01921695
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.05874280
## Lag 100 0.02850150
## Lag 500 0.02266354
## Lag 1000 -0.01102313
## Lag 5000 -0.01522535
##
## , , traittarsus:traitbwt.mother
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.176162265
## Lag 100 -0.201942319
## Lag 500 -0.098827162
## Lag 1000 -0.107446398
## Lag 5000 0.009469247
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.215691293
## Lag 100 -0.222444593
## Lag 500 -0.090633732
## Lag 1000 -0.081329922
## Lag 5000 -0.000908406
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.215691293
## Lag 100 -0.222444593
## Lag 500 -0.090633732
## Lag 1000 -0.081329922
## Lag 5000 -0.000908406
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.16525481
## Lag 100 -0.16401330
## Lag 500 -0.05984801
## Lag 1000 -0.04662364
## Lag 5000 -0.01558248
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.206244301
## Lag 100 -0.130239112
## Lag 500 -0.073358914
## Lag 1000 -0.066630548
## Lag 5000 0.003242124
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.25319625
## Lag 100 -0.18667698
## Lag 500 -0.09190098
## Lag 1000 -0.08300273
## Lag 5000 0.02361537
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.25319625
## Lag 100 -0.18667698
## Lag 500 -0.09190098
## Lag 1000 -0.08300273
## Lag 5000 0.02361537
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.23724742
## Lag 100 -0.19117881
## Lag 500 -0.12740061
## Lag 1000 -0.09652863
## Lag 5000 0.03403616
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.010865029 0.03421338
## Lag 100 -0.049606444 -0.02147513
## Lag 500 -0.016588119 -0.01764091
## Lag 1000 0.040827431 0.05625276
## Lag 5000 0.009256267 -0.01776929
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.03421338 -0.005316487
## Lag 100 -0.02147513 -0.029822879
## Lag 500 -0.01764091 -0.046522246
## Lag 1000 0.05625276 0.015773963
## Lag 5000 -0.01776929 0.010397787
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.16959453 1.00000000
## Lag 100 0.10685964 0.23184930
## Lag 500 0.07436480 0.10358065
## Lag 1000 0.04376736 0.04438740
## Lag 5000 0.01918606 -0.03968853
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 1.00000000 0.16721552
## Lag 100 0.23184930 0.15522894
## Lag 500 0.10358065 0.05303655
## Lag 1000 0.04438740 0.04025560
## Lag 5000 -0.03968853 -0.01089828
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.07423440
## Lag 100 0.16492678
## Lag 500 0.07479791
## Lag 1000 0.08628623
## Lag 5000 -0.01352148
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.088466946
## Lag 100 0.172871353
## Lag 500 0.064930722
## Lag 1000 0.044248231
## Lag 5000 0.002955207
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.088466946
## Lag 100 0.172871353
## Lag 500 0.064930722
## Lag 1000 0.044248231
## Lag 5000 0.002955207
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.086888233
## Lag 100 0.143747033
## Lag 500 0.027155801
## Lag 1000 0.015896135
## Lag 5000 0.009780426
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.146008840
## Lag 100 0.080717973
## Lag 500 0.080314209
## Lag 1000 0.079015021
## Lag 5000 -0.006204512
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.16532024
## Lag 100 0.13361759
## Lag 500 0.09526141
## Lag 1000 0.09645763
## Lag 5000 -0.02604643
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.16532024
## Lag 100 0.13361759
## Lag 500 0.09526141
## Lag 1000 0.09645763
## Lag 5000 -0.02604643
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.17116901
## Lag 100 0.15482689
## Lag 500 0.11790922
## Lag 1000 0.11604903
## Lag 5000 -0.04042034
##
## , , traitbwt:traittarsus.mother
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.176162265
## Lag 100 -0.201942319
## Lag 500 -0.098827162
## Lag 1000 -0.107446398
## Lag 5000 0.009469247
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.215691293
## Lag 100 -0.222444593
## Lag 500 -0.090633732
## Lag 1000 -0.081329922
## Lag 5000 -0.000908406
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.215691293
## Lag 100 -0.222444593
## Lag 500 -0.090633732
## Lag 1000 -0.081329922
## Lag 5000 -0.000908406
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.16525481
## Lag 100 -0.16401330
## Lag 500 -0.05984801
## Lag 1000 -0.04662364
## Lag 5000 -0.01558248
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.206244301
## Lag 100 -0.130239112
## Lag 500 -0.073358914
## Lag 1000 -0.066630548
## Lag 5000 0.003242124
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.25319625
## Lag 100 -0.18667698
## Lag 500 -0.09190098
## Lag 1000 -0.08300273
## Lag 5000 0.02361537
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.25319625
## Lag 100 -0.18667698
## Lag 500 -0.09190098
## Lag 1000 -0.08300273
## Lag 5000 0.02361537
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.23724742
## Lag 100 -0.19117881
## Lag 500 -0.12740061
## Lag 1000 -0.09652863
## Lag 5000 0.03403616
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.010865029 0.03421338
## Lag 100 -0.049606444 -0.02147513
## Lag 500 -0.016588119 -0.01764091
## Lag 1000 0.040827431 0.05625276
## Lag 5000 0.009256267 -0.01776929
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.03421338 -0.005316487
## Lag 100 -0.02147513 -0.029822879
## Lag 500 -0.01764091 -0.046522246
## Lag 1000 0.05625276 0.015773963
## Lag 5000 -0.01776929 0.010397787
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.16959453 1.00000000
## Lag 100 0.10685964 0.23184930
## Lag 500 0.07436480 0.10358065
## Lag 1000 0.04376736 0.04438740
## Lag 5000 0.01918606 -0.03968853
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 1.00000000 0.16721552
## Lag 100 0.23184930 0.15522894
## Lag 500 0.10358065 0.05303655
## Lag 1000 0.04438740 0.04025560
## Lag 5000 -0.03968853 -0.01089828
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.07423440
## Lag 100 0.16492678
## Lag 500 0.07479791
## Lag 1000 0.08628623
## Lag 5000 -0.01352148
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.088466946
## Lag 100 0.172871353
## Lag 500 0.064930722
## Lag 1000 0.044248231
## Lag 5000 0.002955207
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.088466946
## Lag 100 0.172871353
## Lag 500 0.064930722
## Lag 1000 0.044248231
## Lag 5000 0.002955207
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.086888233
## Lag 100 0.143747033
## Lag 500 0.027155801
## Lag 1000 0.015896135
## Lag 5000 0.009780426
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.146008840
## Lag 100 0.080717973
## Lag 500 0.080314209
## Lag 1000 0.079015021
## Lag 5000 -0.006204512
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.16532024
## Lag 100 0.13361759
## Lag 500 0.09526141
## Lag 1000 0.09645763
## Lag 5000 -0.02604643
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.16532024
## Lag 100 0.13361759
## Lag 500 0.09526141
## Lag 1000 0.09645763
## Lag 5000 -0.02604643
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.17116901
## Lag 100 0.15482689
## Lag 500 0.11790922
## Lag 1000 0.11604903
## Lag 5000 -0.04042034
##
## , , traittarsus:traittarsus.mother
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.07667827
## Lag 100 -0.06226975
## Lag 500 -0.08505831
## Lag 1000 -0.02905388
## Lag 5000 0.01421665
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.117487289
## Lag 100 -0.106945179
## Lag 500 -0.080885806
## Lag 1000 -0.005649094
## Lag 5000 0.022493960
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.117487289
## Lag 100 -0.106945179
## Lag 500 -0.080885806
## Lag 1000 -0.005649094
## Lag 5000 0.022493960
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.163816288
## Lag 100 -0.163053925
## Lag 500 -0.075596807
## Lag 1000 -0.005579791
## Lag 5000 0.003557140
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.03867169
## Lag 100 -0.03628672
## Lag 500 -0.05826822
## Lag 1000 -0.09013252
## Lag 5000 0.02007633
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.12765635
## Lag 100 -0.10093720
## Lag 500 -0.07484634
## Lag 1000 -0.07105350
## Lag 5000 0.04241702
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.12765635
## Lag 100 -0.10093720
## Lag 500 -0.07484634
## Lag 1000 -0.07105350
## Lag 5000 0.04241702
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.20886061
## Lag 100 -0.15197429
## Lag 500 -0.09136479
## Lag 1000 -0.05503676
## Lag 5000 0.06974589
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.004075442 0.016340654
## Lag 100 0.038987292 0.004193689
## Lag 500 -0.011550226 0.003520238
## Lag 1000 0.013388477 0.036042597
## Lag 5000 -0.028970574 -0.026499253
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.016340654 0.0983665076
## Lag 100 0.004193689 -0.0421616973
## Lag 500 0.003520238 -0.0001802966
## Lag 1000 0.036042597 0.0818775372
## Lag 5000 -0.026499253 0.0062163478
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 -0.15600691 0.167215517
## Lag 100 0.03453838 0.159524784
## Lag 500 0.06787904 0.050768333
## Lag 1000 0.07261694 -0.006204653
## Lag 5000 0.05254556 -0.044732793
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.167215517 1.00000000
## Lag 100 0.159524784 0.20522469
## Lag 500 0.050768333 0.06686362
## Lag 1000 -0.006204653 0.02863809
## Lag 5000 -0.044732793 -0.03434318
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.015997390
## Lag 100 0.056278499
## Lag 500 0.063438707
## Lag 1000 0.047760693
## Lag 5000 -0.006960881
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.0243583650
## Lag 100 0.0878499058
## Lag 500 0.0669412439
## Lag 1000 0.0159489282
## Lag 5000 0.0006521949
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.0243583650
## Lag 100 0.0878499058
## Lag 500 0.0669412439
## Lag 1000 0.0159489282
## Lag 5000 0.0006521949
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.017638053
## Lag 100 0.126552081
## Lag 500 0.066208536
## Lag 1000 0.001173142
## Lag 5000 0.012470178
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.01708099
## Lag 100 0.02260996
## Lag 500 0.04166800
## Lag 1000 0.08719186
## Lag 5000 -0.03439141
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.07362567
## Lag 100 0.06893873
## Lag 500 0.06870851
## Lag 1000 0.08644843
## Lag 5000 -0.05929514
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.07362567
## Lag 100 0.06893873
## Lag 500 0.06870851
## Lag 1000 0.08644843
## Lag 5000 -0.05929514
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.12535823
## Lag 100 0.11402352
## Lag 500 0.08541868
## Lag 1000 0.07704387
## Lag 5000 -0.08580699
##
## , , at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.801992699
## Lag 100 -0.466443587
## Lag 500 -0.150327965
## Lag 1000 -0.006651214
## Lag 5000 0.021076965
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.63791397
## Lag 100 -0.39655482
## Lag 500 -0.14069619
## Lag 1000 -0.01005484
## Lag 5000 0.05652797
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.63791397
## Lag 100 -0.39655482
## Lag 500 -0.14069619
## Lag 1000 -0.01005484
## Lag 5000 0.05652797
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.40641260
## Lag 100 -0.26767324
## Lag 500 -0.07118713
## Lag 1000 0.01646522
## Lag 5000 0.07103045
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.07682782
## Lag 100 -0.08897986
## Lag 500 -0.02976621
## Lag 1000 0.01569476
## Lag 5000 0.05852136
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.076833431
## Lag 100 -0.075735828
## Lag 500 -0.001251349
## Lag 1000 0.001383869
## Lag 5000 0.054098508
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.076833431
## Lag 100 -0.075735828
## Lag 500 -0.001251349
## Lag 1000 0.001383869
## Lag 5000 0.054098508
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.096736893
## Lag 100 -0.082618341
## Lag 500 -0.013800167
## Lag 1000 -0.006945006
## Lag 5000 0.034562939
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.01791267 -0.004458633
## Lag 100 -0.04187419 -0.024525504
## Lag 500 -0.03508600 -0.042750470
## Lag 1000 -0.01219283 0.033296719
## Lag 5000 0.01354685 -0.012060362
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.004458633 0.009413426
## Lag 100 -0.024525504 -0.010905096
## Lag 500 -0.042750470 -0.015667578
## Lag 1000 0.033296719 -0.049382985
## Lag 5000 -0.012060362 0.062594734
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.11538180 0.07423440
## Lag 100 0.05980366 0.10449021
## Lag 500 0.05921057 0.03133611
## Lag 1000 0.02698496 -0.02604784
## Lag 5000 -0.03650822 0.03083495
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.07423440 0.01599739
## Lag 100 0.10449021 0.07877892
## Lag 500 0.03133611 0.03430764
## Lag 1000 -0.02604784 -0.05261433
## Lag 5000 0.03083495 0.02086879
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 1.00000000
## Lag 100 0.41493954
## Lag 500 0.13192830
## Lag 1000 -0.01775382
## Lag 5000 -0.02966363
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.81033812
## Lag 100 0.35818951
## Lag 500 0.13916697
## Lag 1000 -0.03014218
## Lag 5000 -0.04609000
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.81033812
## Lag 100 0.35818951
## Lag 500 0.13916697
## Lag 1000 -0.03014218
## Lag 5000 -0.04609000
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.52480351
## Lag 100 0.24664417
## Lag 500 0.07429736
## Lag 1000 -0.04847098
## Lag 5000 -0.06352022
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.04586363
## Lag 100 0.08104246
## Lag 500 0.01149950
## Lag 1000 -0.02427903
## Lag 5000 -0.04131701
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.056053162
## Lag 100 0.069215391
## Lag 500 -0.003748895
## Lag 1000 -0.004420742
## Lag 5000 -0.044430568
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.056053162
## Lag 100 0.069215391
## Lag 500 -0.003748895
## Lag 1000 -0.004420742
## Lag 5000 -0.044430568
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.075687494
## Lag 100 0.075315174
## Lag 500 0.008328905
## Lag 1000 0.007410047
## Lag 5000 -0.029042021
##
## , , at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.65709502
## Lag 100 -0.40847504
## Lag 500 -0.16003963
## Lag 1000 -0.03935069
## Lag 5000 0.02830878
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.81241841
## Lag 100 -0.54669565
## Lag 500 -0.21812611
## Lag 1000 -0.05053514
## Lag 5000 0.04040407
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.81241841
## Lag 100 -0.54669565
## Lag 500 -0.21812611
## Lag 1000 -0.05053514
## Lag 5000 0.04040407
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.68658404
## Lag 100 -0.47347056
## Lag 500 -0.18207397
## Lag 1000 -0.03170447
## Lag 5000 0.05682175
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.032836717
## Lag 100 -0.052172861
## Lag 500 -0.065393159
## Lag 1000 0.002569735
## Lag 5000 -0.012092021
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.030749722
## Lag 100 -0.037358966
## Lag 500 -0.023624561
## Lag 1000 -0.006129874
## Lag 5000 -0.025591759
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.030749722
## Lag 100 -0.037358966
## Lag 500 -0.023624561
## Lag 1000 -0.006129874
## Lag 5000 -0.025591759
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.051092084
## Lag 100 -0.041228235
## Lag 500 -0.007009306
## Lag 1000 -0.002970648
## Lag 5000 -0.043902013
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.02448148 -0.001255739
## Lag 100 -0.02416855 -0.003961776
## Lag 500 -0.02450656 -0.021931503
## Lag 1000 0.02605736 0.057231093
## Lag 5000 0.02546182 -0.043285219
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.001255739 -0.009668697
## Lag 100 -0.003961776 0.005789834
## Lag 500 -0.021931503 -0.011394547
## Lag 1000 0.057231093 0.005297096
## Lag 5000 -0.043285219 0.040669279
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.11597642 0.08846695
## Lag 100 0.07877164 0.11938945
## Lag 500 0.07500059 0.05973425
## Lag 1000 0.07809152 -0.01347518
## Lag 5000 -0.00851789 0.01673535
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.08846695 0.02435836
## Lag 100 0.11938945 0.09460292
## Lag 500 0.05973425 0.07354709
## Lag 1000 -0.01347518 -0.01282799
## Lag 5000 0.01673535 0.03529550
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.810338117
## Lag 100 0.358071789
## Lag 500 0.123817221
## Lag 1000 0.005141641
## Lag 5000 -0.036699607
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 1.000000000
## Lag 100 0.476755183
## Lag 500 0.179577504
## Lag 1000 -0.003352382
## Lag 5000 -0.024575546
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 1.000000000
## Lag 100 0.476755183
## Lag 500 0.179577504
## Lag 1000 -0.003352382
## Lag 5000 -0.024575546
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.86214847
## Lag 100 0.42290678
## Lag 500 0.14579411
## Lag 1000 -0.01087899
## Lag 5000 -0.03361878
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.009637802
## Lag 100 0.035505918
## Lag 500 0.039910212
## Lag 1000 -0.028636833
## Lag 5000 0.033100911
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.013857108
## Lag 100 0.014173844
## Lag 500 0.003568133
## Lag 1000 -0.007944374
## Lag 5000 0.039786826
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.013857108
## Lag 100 0.014173844
## Lag 500 0.003568133
## Lag 1000 -0.007944374
## Lag 5000 0.039786826
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.007675143
## Lag 100 0.020430102
## Lag 500 -0.017179153
## Lag 1000 -0.004490036
## Lag 5000 0.055145587
##
## , , at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.65709502
## Lag 100 -0.40847504
## Lag 500 -0.16003963
## Lag 1000 -0.03935069
## Lag 5000 0.02830878
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.81241841
## Lag 100 -0.54669565
## Lag 500 -0.21812611
## Lag 1000 -0.05053514
## Lag 5000 0.04040407
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.81241841
## Lag 100 -0.54669565
## Lag 500 -0.21812611
## Lag 1000 -0.05053514
## Lag 5000 0.04040407
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.68658404
## Lag 100 -0.47347056
## Lag 500 -0.18207397
## Lag 1000 -0.03170447
## Lag 5000 0.05682175
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.032836717
## Lag 100 -0.052172861
## Lag 500 -0.065393159
## Lag 1000 0.002569735
## Lag 5000 -0.012092021
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.030749722
## Lag 100 -0.037358966
## Lag 500 -0.023624561
## Lag 1000 -0.006129874
## Lag 5000 -0.025591759
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.030749722
## Lag 100 -0.037358966
## Lag 500 -0.023624561
## Lag 1000 -0.006129874
## Lag 5000 -0.025591759
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.051092084
## Lag 100 -0.041228235
## Lag 500 -0.007009306
## Lag 1000 -0.002970648
## Lag 5000 -0.043902013
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.02448148 -0.001255739
## Lag 100 -0.02416855 -0.003961776
## Lag 500 -0.02450656 -0.021931503
## Lag 1000 0.02605736 0.057231093
## Lag 5000 0.02546182 -0.043285219
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.001255739 -0.009668697
## Lag 100 -0.003961776 0.005789834
## Lag 500 -0.021931503 -0.011394547
## Lag 1000 0.057231093 0.005297096
## Lag 5000 -0.043285219 0.040669279
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.11597642 0.08846695
## Lag 100 0.07877164 0.11938945
## Lag 500 0.07500059 0.05973425
## Lag 1000 0.07809152 -0.01347518
## Lag 5000 -0.00851789 0.01673535
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.08846695 0.02435836
## Lag 100 0.11938945 0.09460292
## Lag 500 0.05973425 0.07354709
## Lag 1000 -0.01347518 -0.01282799
## Lag 5000 0.01673535 0.03529550
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.810338117
## Lag 100 0.358071789
## Lag 500 0.123817221
## Lag 1000 0.005141641
## Lag 5000 -0.036699607
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 1.000000000
## Lag 100 0.476755183
## Lag 500 0.179577504
## Lag 1000 -0.003352382
## Lag 5000 -0.024575546
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 1.000000000
## Lag 100 0.476755183
## Lag 500 0.179577504
## Lag 1000 -0.003352382
## Lag 5000 -0.024575546
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 0.86214847
## Lag 100 0.42290678
## Lag 500 0.14579411
## Lag 1000 -0.01087899
## Lag 5000 -0.03361878
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.009637802
## Lag 100 0.035505918
## Lag 500 0.039910212
## Lag 1000 -0.028636833
## Lag 5000 0.033100911
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.013857108
## Lag 100 0.014173844
## Lag 500 0.003568133
## Lag 1000 -0.007944374
## Lag 5000 0.039786826
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.013857108
## Lag 100 0.014173844
## Lag 500 0.003568133
## Lag 1000 -0.007944374
## Lag 5000 0.039786826
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.007675143
## Lag 100 0.020430102
## Lag 500 -0.017179153
## Lag 1000 -0.004490036
## Lag 5000 0.055145587
##
## , , at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.45087062
## Lag 100 -0.28739699
## Lag 500 -0.13251881
## Lag 1000 -0.07316564
## Lag 5000 0.02041486
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.72502529
## Lag 100 -0.50401217
## Lag 500 -0.23102776
## Lag 1000 -0.09178023
## Lag 5000 0.02295265
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.72502529
## Lag 100 -0.50401217
## Lag 500 -0.23102776
## Lag 1000 -0.09178023
## Lag 5000 0.02295265
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.80604743
## Lag 100 -0.56710392
## Lag 500 -0.24509310
## Lag 1000 -0.08099421
## Lag 5000 0.04598436
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.03763051
## Lag 100 -0.05789881
## Lag 500 -0.10643108
## Lag 1000 -0.02645220
## Lag 5000 -0.05174777
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.02000832
## Lag 100 -0.03719279
## Lag 500 -0.06000913
## Lag 1000 -0.03409990
## Lag 5000 -0.07553958
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.02000832
## Lag 100 -0.03719279
## Lag 500 -0.06000913
## Lag 1000 -0.03409990
## Lag 5000 -0.07553958
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.01794858
## Lag 100 -0.02709659
## Lag 500 -0.02554672
## Lag 1000 -0.02556049
## Lag 5000 -0.10300608
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 0.02835591 0.0098141346
## Lag 100 -0.01675738 0.0006238442
## Lag 500 -0.02958551 -0.0029616561
## Lag 1000 0.04529217 0.0609602246
## Lag 5000 0.01376647 -0.0551569939
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 0.0098141346 -0.02461835
## Lag 100 0.0006238442 0.01494464
## Lag 500 -0.0029616561 0.01031961
## Lag 1000 0.0609602246 0.02249221
## Lag 5000 -0.0551569939 0.02659077
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.08169664 0.08688823
## Lag 100 0.07148052 0.07764612
## Lag 500 0.07347463 0.08186580
## Lag 1000 0.10413747 0.01735513
## Lag 5000 0.01169024 0.01560827
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.08688823 0.01763805
## Lag 100 0.07764612 0.09506980
## Lag 500 0.08186580 0.10621181
## Lag 1000 0.01735513 0.02274044
## Lag 5000 0.01560827 0.02807370
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.52480351
## Lag 100 0.25056226
## Lag 500 0.09551214
## Lag 1000 0.03062863
## Lag 5000 -0.01678266
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.862148469
## Lag 100 0.435184658
## Lag 500 0.171023540
## Lag 1000 0.031505725
## Lag 5000 0.002557116
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.862148469
## Lag 100 0.435184658
## Lag 500 0.171023540
## Lag 1000 0.031505725
## Lag 5000 0.002557116
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 1.00000000
## Lag 100 0.49007488
## Lag 500 0.18034430
## Lag 1000 0.03288922
## Lag 5000 -0.01114696
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.00202713
## Lag 100 0.04482181
## Lag 500 0.07846680
## Lag 1000 -0.00584738
## Lag 5000 0.07083952
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 -0.026565121
## Lag 100 0.008433569
## Lag 500 0.034277377
## Lag 1000 0.017663601
## Lag 5000 0.086095462
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.026565121
## Lag 100 0.008433569
## Lag 500 0.034277377
## Lag 1000 0.017663601
## Lag 5000 0.086095462
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 -0.032600926
## Lag 100 -0.003735265
## Lag 500 -0.006518046
## Lag 1000 0.019798996
## Lag 5000 0.111861825
##
## , , at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.04704752
## Lag 100 -0.03379234
## Lag 500 -0.03949223
## Lag 1000 0.01961744
## Lag 5000 -0.02397025
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.006983857
## Lag 100 -0.004436391
## Lag 500 -0.013243079
## Lag 1000 0.032487992
## Lag 5000 -0.005502160
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.006983857
## Lag 100 -0.004436391
## Lag 500 -0.013243079
## Lag 1000 0.032487992
## Lag 5000 -0.005502160
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.01115803
## Lag 100 -0.01462379
## Lag 500 -0.01419143
## Lag 1000 0.02730378
## Lag 5000 0.01717430
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.9155511556
## Lag 100 -0.7376972476
## Lag 500 -0.2894597816
## Lag 1000 -0.0869605591
## Lag 5000 -0.0004460346
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.82656679
## Lag 100 -0.69942848
## Lag 500 -0.34348383
## Lag 1000 -0.15386769
## Lag 5000 0.03083674
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.82656679
## Lag 100 -0.69942848
## Lag 500 -0.34348383
## Lag 1000 -0.15386769
## Lag 5000 0.03083674
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.59303753
## Lag 100 -0.52183597
## Lag 500 -0.31378397
## Lag 1000 -0.18108537
## Lag 5000 0.05354292
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.01327914 -0.0486732184
## Lag 100 -0.04083554 -0.0422553353
## Lag 500 -0.02462994 -0.0068642332
## Lag 1000 -0.01300135 -0.0075606736
## Lag 5000 0.03640609 0.0008585885
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.0486732184 -0.071533826
## Lag 100 -0.0422553353 -0.016020360
## Lag 500 -0.0068642332 0.001483358
## Lag 1000 -0.0075606736 -0.017481658
## Lag 5000 0.0008585885 0.025539143
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.078737155 0.14600884
## Lag 100 0.060454058 0.13497831
## Lag 500 0.027674464 0.06425591
## Lag 1000 -0.012343901 0.03845873
## Lag 5000 -0.007546837 -0.03386923
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.14600884 0.0170809947
## Lag 100 0.13497831 -0.0037653532
## Lag 500 0.06425591 -0.0003468626
## Lag 1000 0.03845873 0.0273309637
## Lag 5000 -0.03386923 -0.0135559407
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.04586363
## Lag 100 0.04312396
## Lag 500 0.02474044
## Lag 1000 -0.02246748
## Lag 5000 0.04051100
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.0096378021
## Lag 100 -0.0125008943
## Lag 500 -0.0009416032
## Lag 1000 -0.0279523801
## Lag 5000 0.0306544784
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.0096378021
## Lag 100 -0.0125008943
## Lag 500 -0.0009416032
## Lag 1000 -0.0279523801
## Lag 5000 0.0306544784
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.002027130
## Lag 100 -0.010997227
## Lag 500 0.002190049
## Lag 1000 -0.018910704
## Lag 5000 0.001803533
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 1.00000000
## Lag 100 0.75428810
## Lag 500 0.31054236
## Lag 1000 0.12371798
## Lag 5000 -0.01140046
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.8861603
## Lag 100 0.7245493
## Lag 500 0.3612800
## Lag 1000 0.1966026
## Lag 5000 -0.0473486
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.8861603
## Lag 100 0.7245493
## Lag 500 0.3612800
## Lag 1000 0.1966026
## Lag 5000 -0.0473486
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.64958299
## Lag 100 0.56329308
## Lag 500 0.34683230
## Lag 1000 0.22412105
## Lag 5000 -0.06170875
##
## , , at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.05664210
## Lag 100 -0.06189130
## Lag 500 -0.08037954
## Lag 1000 -0.03811311
## Lag 5000 -0.02556470
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.008328493
## Lag 100 -0.013077572
## Lag 500 -0.023186615
## Lag 1000 -0.003441329
## Lag 5000 -0.009315017
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.008328493
## Lag 100 -0.013077572
## Lag 500 -0.023186615
## Lag 1000 -0.003441329
## Lag 5000 -0.009315017
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.002247800
## Lag 100 -0.002414942
## Lag 500 -0.005501438
## Lag 1000 0.003638247
## Lag 5000 0.012285311
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.827378091
## Lag 100 -0.698680660
## Lag 500 -0.293915297
## Lag 1000 -0.075432320
## Lag 5000 -0.006566931
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.92799199
## Lag 100 -0.79641710
## Lag 500 -0.40507130
## Lag 1000 -0.17373081
## Lag 5000 0.05921217
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.92799199
## Lag 100 -0.79641710
## Lag 500 -0.40507130
## Lag 1000 -0.17373081
## Lag 5000 0.05921217
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.8164523
## Lag 100 -0.7148697
## Lag 500 -0.4233260
## Lag 1000 -0.2322959
## Lag 5000 0.1088463
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.031860402 -0.0450303697
## Lag 100 -0.042640252 -0.0353347165
## Lag 500 -0.044714928 -0.0139632410
## Lag 1000 -0.006691448 0.0016932093
## Lag 5000 0.033951411 -0.0002862914
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.0450303697 -0.06250387
## Lag 100 -0.0353347165 -0.01569968
## Lag 500 -0.0139632410 -0.02419114
## Lag 1000 0.0016932093 -0.01187690
## Lag 5000 -0.0002862914 0.02038815
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.076626007 0.16532024
## Lag 100 0.062502660 0.16966030
## Lag 500 0.015121122 0.09309132
## Lag 1000 0.021606506 0.09915393
## Lag 5000 0.002410372 -0.05035693
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.16532024 0.07362567
## Lag 100 0.16966030 0.05538368
## Lag 500 0.09309132 0.03584330
## Lag 1000 0.09915393 0.05064644
## Lag 5000 -0.05035693 -0.02384110
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.05605316
## Lag 100 0.06061711
## Lag 500 0.06053849
## Lag 1000 0.01942958
## Lag 5000 0.05005479
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.013857108
## Lag 100 -0.007575433
## Lag 500 0.013108517
## Lag 1000 -0.007812007
## Lag 5000 0.036032541
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.013857108
## Lag 100 -0.007575433
## Lag 500 0.013108517
## Lag 1000 -0.007812007
## Lag 5000 0.036032541
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.0265651214
## Lag 100 -0.0290120498
## Lag 500 -0.0005810146
## Lag 1000 -0.0103869383
## Lag 5000 0.0070159721
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.886160329
## Lag 100 0.717772991
## Lag 500 0.321665315
## Lag 1000 0.103897403
## Lag 5000 -0.007716877
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 1.00000000
## Lag 100 0.82518740
## Lag 500 0.42756493
## Lag 1000 0.20452354
## Lag 5000 -0.07400879
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 1.00000000
## Lag 100 0.82518740
## Lag 500 0.42756493
## Lag 1000 0.20452354
## Lag 5000 -0.07400879
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.8952943
## Lag 100 0.7599734
## Lag 500 0.4540322
## Lag 1000 0.2628428
## Lag 5000 -0.1084533
##
## , , at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.05664210
## Lag 100 -0.06189130
## Lag 500 -0.08037954
## Lag 1000 -0.03811311
## Lag 5000 -0.02556470
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.008328493
## Lag 100 -0.013077572
## Lag 500 -0.023186615
## Lag 1000 -0.003441329
## Lag 5000 -0.009315017
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.008328493
## Lag 100 -0.013077572
## Lag 500 -0.023186615
## Lag 1000 -0.003441329
## Lag 5000 -0.009315017
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.002247800
## Lag 100 -0.002414942
## Lag 500 -0.005501438
## Lag 1000 0.003638247
## Lag 5000 0.012285311
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.827378091
## Lag 100 -0.698680660
## Lag 500 -0.293915297
## Lag 1000 -0.075432320
## Lag 5000 -0.006566931
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.92799199
## Lag 100 -0.79641710
## Lag 500 -0.40507130
## Lag 1000 -0.17373081
## Lag 5000 0.05921217
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.92799199
## Lag 100 -0.79641710
## Lag 500 -0.40507130
## Lag 1000 -0.17373081
## Lag 5000 0.05921217
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.8164523
## Lag 100 -0.7148697
## Lag 500 -0.4233260
## Lag 1000 -0.2322959
## Lag 5000 0.1088463
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.031860402 -0.0450303697
## Lag 100 -0.042640252 -0.0353347165
## Lag 500 -0.044714928 -0.0139632410
## Lag 1000 -0.006691448 0.0016932093
## Lag 5000 0.033951411 -0.0002862914
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.0450303697 -0.06250387
## Lag 100 -0.0353347165 -0.01569968
## Lag 500 -0.0139632410 -0.02419114
## Lag 1000 0.0016932093 -0.01187690
## Lag 5000 -0.0002862914 0.02038815
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.076626007 0.16532024
## Lag 100 0.062502660 0.16966030
## Lag 500 0.015121122 0.09309132
## Lag 1000 0.021606506 0.09915393
## Lag 5000 0.002410372 -0.05035693
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.16532024 0.07362567
## Lag 100 0.16966030 0.05538368
## Lag 500 0.09309132 0.03584330
## Lag 1000 0.09915393 0.05064644
## Lag 5000 -0.05035693 -0.02384110
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.05605316
## Lag 100 0.06061711
## Lag 500 0.06053849
## Lag 1000 0.01942958
## Lag 5000 0.05005479
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 -0.013857108
## Lag 100 -0.007575433
## Lag 500 0.013108517
## Lag 1000 -0.007812007
## Lag 5000 0.036032541
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.013857108
## Lag 100 -0.007575433
## Lag 500 0.013108517
## Lag 1000 -0.007812007
## Lag 5000 0.036032541
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.0265651214
## Lag 100 -0.0290120498
## Lag 500 -0.0005810146
## Lag 1000 -0.0103869383
## Lag 5000 0.0070159721
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.886160329
## Lag 100 0.717772991
## Lag 500 0.321665315
## Lag 1000 0.103897403
## Lag 5000 -0.007716877
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 1.00000000
## Lag 100 0.82518740
## Lag 500 0.42756493
## Lag 1000 0.20452354
## Lag 5000 -0.07400879
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 1.00000000
## Lag 100 0.82518740
## Lag 500 0.42756493
## Lag 1000 0.20452354
## Lag 5000 -0.07400879
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 0.8952943
## Lag 100 0.7599734
## Lag 500 0.4540322
## Lag 1000 0.2628428
## Lag 5000 -0.1084533
##
## , , at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
##
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.07483207
## Lag 100 -0.08775348
## Lag 500 -0.11285363
## Lag 1000 -0.08634561
## Lag 5000 -0.01371202
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal
## Lag 0 -0.030362949
## Lag 100 -0.039754967
## Lag 500 -0.033011864
## Lag 1000 -0.029514426
## Lag 5000 -0.001985781
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.030362949
## Lag 100 -0.039754967
## Lag 500 -0.033011864
## Lag 1000 -0.029514426
## Lag 5000 -0.001985781
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal
## Lag 0 -0.007247428
## Lag 100 -0.006535742
## Lag 500 0.002198411
## Lag 1000 -0.008798406
## Lag 5000 0.027857103
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.60748687
## Lag 100 -0.53691093
## Lag 500 -0.23923956
## Lag 1000 -0.05612519
## Lag 5000 -0.02437865
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal
## Lag 0 -0.83302980
## Lag 100 -0.72884123
## Lag 500 -0.38274139
## Lag 1000 -0.15431830
## Lag 5000 0.05605301
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.83302980
## Lag 100 -0.72884123
## Lag 500 -0.38274139
## Lag 1000 -0.15431830
## Lag 5000 0.05605301
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal
## Lag 0 -0.9155485
## Lag 100 -0.7951014
## Lag 500 -0.4466991
## Lag 1000 -0.2211369
## Lag 5000 0.1205559
## traitbwt:traitbwt.byear traittarsus:traitbwt.byear
## Lag 0 -0.03283560 -0.02352674
## Lag 100 -0.03437166 -0.03785607
## Lag 500 -0.06320365 -0.02280501
## Lag 1000 0.01146433 0.01610589
## Lag 5000 0.02158539 -0.01180713
## traitbwt:traittarsus.byear traittarsus:traittarsus.byear
## Lag 0 -0.02352674 -0.047783029
## Lag 100 -0.03785607 -0.024774047
## Lag 500 -0.02280501 -0.022139916
## Lag 1000 0.01610589 -0.010756876
## Lag 5000 -0.01180713 0.008288347
## traitbwt:traitbwt.mother traittarsus:traitbwt.mother
## Lag 0 0.058742804 0.17116901
## Lag 100 0.037828928 0.16463544
## Lag 500 0.008847859 0.10397439
## Lag 1000 0.033392605 0.11174287
## Lag 5000 0.009361929 -0.05008362
## traitbwt:traittarsus.mother traittarsus:traittarsus.mother
## Lag 0 0.17116901 0.12535823
## Lag 100 0.16463544 0.11202094
## Lag 500 0.10397439 0.06571377
## Lag 1000 0.11174287 0.03808944
## Lag 5000 -0.05008362 -0.02117192
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.units
## Lag 0 0.07568749
## Lag 100 0.08048639
## Lag 500 0.08624431
## Lag 1000 0.06311937
## Lag 5000 0.05152981
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.units
## Lag 0 0.007675143
## Lag 100 0.016491158
## Lag 500 0.025505457
## Lag 1000 0.010905754
## Lag 5000 0.026867691
## at.level(sex, "1"):traitbwt:at.level(sex, "1"):traittarsus.units
## Lag 0 0.007675143
## Lag 100 0.016491158
## Lag 500 0.025505457
## Lag 1000 0.010905754
## Lag 5000 0.026867691
## at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.units
## Lag 0 -0.032600926
## Lag 100 -0.028707200
## Lag 500 -0.005860625
## Lag 1000 -0.005761967
## Lag 5000 -0.014063341
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.units
## Lag 0 0.649582994
## Lag 100 0.557866006
## Lag 500 0.268804888
## Lag 1000 0.081158620
## Lag 5000 0.009810318
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.units
## Lag 0 0.89529429
## Lag 100 0.75772487
## Lag 500 0.40719270
## Lag 1000 0.18139226
## Lag 5000 -0.06899758
## at.level(sex, "2"):traitbwt:at.level(sex, "2"):traittarsus.units
## Lag 0 0.89529429
## Lag 100 0.75772487
## Lag 500 0.40719270
## Lag 1000 0.18139226
## Lag 5000 -0.06899758
## at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.units
## Lag 0 1.0000000
## Lag 100 0.8350320
## Lag 500 0.4726187
## Lag 1000 0.2497905
## Lag 5000 -0.1132751
\end{verbatim}
Evaluation of the statistical support for these sex-specific correlations is straightforward. Because we imposed no constraint on their estimation, we can evaluate the extent to which the posterior distributions overlap zero, or overlap each other:
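A minimal sketch of one way to do this, assuming the fitted \texttt{MCMCglmm} object is called \texttt{model\_sex} (the object name is hypothetical; the \texttt{VCV} column names are those shown in the output above):

\begin{verbatim}
library(MCMCglmm)

# posterior of the female genetic correlation between bwt and tarsus
cor_f <- model_sex$VCV[, 'at.level(sex, "1"):traittarsus:at.level(sex, "1"):traitbwt.animal'] /
  sqrt(model_sex$VCV[, 'at.level(sex, "1"):traitbwt:at.level(sex, "1"):traitbwt.animal'] *
       model_sex$VCV[, 'at.level(sex, "1"):traittarsus:at.level(sex, "1"):traittarsus.animal'])

# posterior of the male genetic correlation between bwt and tarsus
cor_m <- model_sex$VCV[, 'at.level(sex, "2"):traittarsus:at.level(sex, "2"):traitbwt.animal'] /
  sqrt(model_sex$VCV[, 'at.level(sex, "2"):traitbwt:at.level(sex, "2"):traitbwt.animal'] *
       model_sex$VCV[, 'at.level(sex, "2"):traittarsus:at.level(sex, "2"):traittarsus.animal'])

# credible intervals for each correlation and for their difference
HPDinterval(as.mcmc(cor_f))
HPDinterval(as.mcmc(cor_m))
HPDinterval(as.mcmc(cor_f - cor_m))
\end{verbatim}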
Neither of these posterior distributions overlaps zero, so we can consider them both statistically supported.
\hypertarget{brms-2}{%
\section{brms}\label{brms-2}}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(brms)}
\NormalTok{Amat }\OtherTok{\textless{}{-}} \FunctionTok{as.matrix}\NormalTok{(nadiv}\SpecialCharTok{::}\FunctionTok{makeA}\NormalTok{(gryphonped))}
\NormalTok{bf\_bwt }\OtherTok{\textless{}{-}} \FunctionTok{bf}\NormalTok{(bwt }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ p }\SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)))}
\NormalTok{bf\_tarsus }\OtherTok{\textless{}{-}} \FunctionTok{bf}\NormalTok{(tarsus }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ p }\SpecialCharTok{|} \FunctionTok{gr}\NormalTok{(animal, }\AttributeTok{cov =}\NormalTok{ Amat)))}
\NormalTok{brms\_m2}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ bf\_bwt }\SpecialCharTok{+}\NormalTok{ bf\_tarsus }\SpecialCharTok{+} \FunctionTok{set\_rescor}\NormalTok{(}\ConstantTok{TRUE}\NormalTok{),}
\AttributeTok{data =}\NormalTok{ gryphon,}
\AttributeTok{data2 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{Amat =}\NormalTok{ Amat),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{save}\NormalTok{(brms\_m2}\FloatTok{.1}\NormalTok{, }\AttributeTok{file =} \StringTok{"data/brms\_m2\_1.rda"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{load}\NormalTok{(}\StringTok{"data/brms\_m2\_1.rda"}\NormalTok{)}
\FunctionTok{summary}\NormalTok{(brms\_m2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Warning: Parts of the model have not converged (some Rhats are > 1.05). Be
## careful when analysing the results! We recommend running more iterations and/or
## setting stronger priors.
\end{verbatim}
\begin{verbatim}
## Family: MV(gaussian, gaussian)
## Links: mu = identity; sigma = identity
## mu = identity; sigma = identity
## Formula: bwt ~ 1 + (1 | p | gr(animal, cov = Amat))
## tarsus ~ 1 + (1 | p | gr(animal, cov = Amat))
## Data: gryphon (Number of observations: 683)
## Samples: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup samples = 1000
##
## Group-Level Effects:
## ~animal (Number of levels: 683)
## Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(bwt_Intercept) 1.81 0.21 1.41 2.20 1.06
## sd(tarsus_Intercept) 3.44 0.43 2.49 4.25 1.05
## cor(bwt_Intercept,tarsus_Intercept) 0.38 0.14 0.08 0.62 1.02
## Bulk_ESS Tail_ESS
## sd(bwt_Intercept) 32 192
## sd(tarsus_Intercept) 61 173
## cor(bwt_Intercept,tarsus_Intercept) 101 232
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## bwt_Intercept 7.49 0.16 7.20 7.79 1.00 607 839
## tarsus_Intercept 20.47 0.30 19.92 21.03 1.00 868 803
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma_bwt 1.97 0.16 1.66 2.28 1.06 27 172
## sigma_tarsus 4.24 0.30 3.63 4.82 1.04 71 162
##
## Residual Correlations:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## rescor(bwt,tarsus) 0.39 0.09 0.21 0.55 1.02 95 179
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
\end{verbatim}
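Note the convergence warning above: with only 2 chains of 1000 iterations, some Rhat values exceed 1.05 and the effective sample sizes are small. In a real analysis one would rerun the sampler with longer chains before interpreting the estimates. A minimal sketch (the settings and the object name \texttt{brms\_m2.1\_long} are illustrative only):

\begin{verbatim}
# rerun the same model with more chains and longer warmup/sampling
brms_m2.1_long <- update(brms_m2.1, chains = 4, cores = 4,
                         iter = 5000, warmup = 2500)
\end{verbatim}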
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(brms\_m2}\FloatTok{.1}\NormalTok{, }\AttributeTok{ask =} \ConstantTok{FALSE}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-108-1.pdf} \includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-108-2.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{VarCorr}\NormalTok{(brms\_m2}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## $animal
## $animal$sd
## Estimate Est.Error Q2.5 Q97.5
## bwt_Intercept 1.808171 0.2050233 1.412824 2.204805
## tarsus_Intercept 3.438368 0.4283612 2.491218 4.245264
##
## $animal$cor
## , , bwt_Intercept
##
## Estimate Est.Error Q2.5 Q97.5
## bwt_Intercept 1.0000000 0.0000000 1.00000000 1.0000000
## tarsus_Intercept 0.3814062 0.1380014 0.07581464 0.6209038
##
## , , tarsus_Intercept
##
## Estimate Est.Error Q2.5 Q97.5
## bwt_Intercept 0.3814062 0.1380014 0.07581464 0.6209038
## tarsus_Intercept 1.0000000 0.0000000 1.00000000 1.0000000
##
##
## $animal$cov
## , , bwt_Intercept
##
## Estimate Est.Error Q2.5 Q97.5
## bwt_Intercept 3.311473 0.7430185 1.9960721 4.861167
## tarsus_Intercept 2.440166 1.0901689 0.3870783 4.668720
##
## , , tarsus_Intercept
##
## Estimate Est.Error Q2.5 Q97.5
## bwt_Intercept 2.440166 1.090169 0.3870783 4.66872
## tarsus_Intercept 12.005688 2.918741 6.2061701 18.02226
##
##
##
## $residual__
## $residual__$sd
## Estimate Est.Error Q2.5 Q97.5
## bwt 1.970532 0.1597581 1.658782 2.276074
## tarsus 4.244704 0.2984518 3.632824 4.820109
##
## $residual__$cor
## , , bwt
##
## Estimate Est.Error Q2.5 Q97.5
## bwt 1.0000000 0.00000000 1.0000000 1.0000000
## tarsus 0.3888754 0.08510488 0.2127907 0.5526631
##
## , , tarsus
##
## Estimate Est.Error Q2.5 Q97.5
## bwt 0.3888754 0.08510488 0.2127907 0.5526631
## tarsus 1.0000000 0.00000000 1.0000000 1.0000000
##
##
## $residual__$cov
## , , bwt
##
## Estimate Est.Error Q2.5 Q97.5
## bwt 3.908493 0.6282892 2.751557 5.180511
## tarsus 3.289995 0.9305960 1.572647 5.147133
##
## , , tarsus
##
## Estimate Est.Error Q2.5 Q97.5
## bwt 3.289995 0.930596 1.572647 5.147133
## tarsus 18.106495 2.530138 13.197409 23.233452
\end{verbatim}
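From the same fit, derived quantities such as heritabilities can be computed directly from the posterior draws. A minimal sketch for \(h^2\) of \texttt{bwt}, assuming the default \texttt{brms} parameter names (these can vary between \texttt{brms} versions, so check the column names of the draws first):

\begin{verbatim}
post <- as.data.frame(brms_m2.1)

# h^2 for bwt: V_A / (V_A + V_R), ignoring fixed-effect variance
h2_bwt <- post$sd_animal__bwt_Intercept^2 /
  (post$sd_animal__bwt_Intercept^2 + post$sigma_bwt^2)
quantile(h2_bwt, probs = c(0.025, 0.5, 0.975))
\end{verbatim}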
\hypertarget{stan-1}{%
\section{stan}\label{stan-1}}
To do.
\hypertarget{rep_measures}{%
\chapter{A repeated measures animal model}\label{rep_measures}}
This tutorial will demonstrate how to run a univariate animal model for a trait with repeated observations, using several R packages and the example data files provided.
\hypertarget{scenario-and-data-2}{%
\section{Scenario and data}\label{scenario-and-data-2}}
\hypertarget{scenario-2}{%
\subsection{scenario}\label{scenario-2}}
Since gryphons are iteroparous, multiple observations of reproductive traits are available for some individuals. Here we have repeated measures of lay date (measured in days after January 1) for individual females varying in age from 2 (the age of maturation) up to age 6. Not all females lay every year, so the number of observations per female is variable. We want to know how repeatable the trait is, and (assuming it is repeatable) how heritable it is.
\hypertarget{data-files-2}{%
\subsection{Data files}\label{data-files-2}}
The pedigree file \texttt{gryphonped.csv} is that used in the preceding tutorials, but we now use a new data file, \texttt{gryphonRM.csv}. Columns correspond to individual identity (\texttt{animal}), birth year (\texttt{byear}), age in years (\texttt{age}), year of measurement (\texttt{year}) and lay date (\texttt{laydate}). Each row of the data file corresponds to a single phenotypic observation. Here the data are sorted by identity and then age, so that the repeated observations on individuals are readily apparent. However, this is not a requirement for analysis: the data could equally be sorted by some other variable (\emph{e.g.}, measurement year) or be in a random order.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{str}\NormalTok{(gryphonRM)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## 'data.frame': 1607 obs. of 5 variables:
## $ animal : Factor w/ 469 levels "1","2","3","8",..: 1 1 1 1 1 2 2 2 3 3 ...
## $ byear : Factor w/ 34 levels "968","970","971",..: 22 22 22 22 22 22 22 22 22 22 ...
## $ age : Factor w/ 5 levels "2","3","4","5",..: 1 2 3 4 5 1 2 3 1 2 ...
## $ year : Factor w/ 39 levels "970","971","972",..: 23 24 25 26 27 23 24 25 23 24 ...
## $ laydate: num 19 23 24 23 29 21 17 21 20 20 ...
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{head}\NormalTok{(gryphonRM)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal byear age year laydate
## 1 1 990 2 992 19
## 2 1 990 3 993 23
## 3 1 990 4 994 24
## 4 1 990 5 995 23
## 5 1 990 6 996 29
## 6 2 990 2 992 21
\end{verbatim}
\begin{verbatim}
## 'data.frame': 1309 obs. of 3 variables:
## $ id : int 1306 1304 1298 1293 1290 1288 1284 1283 1282 1278 ...
## $ father: int NA NA NA NA NA NA NA NA NA NA ...
## $ mother: int NA NA NA NA NA NA NA NA NA NA ...
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{id }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{id)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{father }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{father)}
\NormalTok{gryphonped}\SpecialCharTok{$}\NormalTok{mother }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(gryphonped}\SpecialCharTok{$}\NormalTok{mother)}
\end{Highlighting}
\end{Shaded}
\hypertarget{asreml-r-2}{%
\section{Asreml-R}\label{asreml-r-2}}
\hypertarget{estimating-repeatability}{%
\subsection{Estimating repeatability}\label{estimating-repeatability}}
With repeated measures on individuals, it is often of interest, prior to fitting a genetic model, to see how repeatable a trait is. We can estimate the repeatability of a trait as the proportion of phenotypic variance explained by individual identity.
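That is, writing \(V_{\mathrm{ind}}\) for the between-individual variance and \(V_R\) for the residual (within-individual) variance, the repeatability is
\[
R = \frac{V_{\mathrm{ind}}}{V_{\mathrm{ind}} + V_R}.
\]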
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelv }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:42 2021
## LogLik Sigma2 DF wall cpu
## 1 -10182.83 1.0 1606 10:23:42 0.0
## 2 -8266.10 1.0 1606 10:23:42 0.0
## 3 -6145.01 1.0 1606 10:23:42 0.0
## 4 -4651.57 1.0 1606 10:23:42 0.0
## 5 -3819.31 1.0 1606 10:23:42 0.0
## 6 -3554.22 1.0 1606 10:23:42 0.0
## 7 -3501.56 1.0 1606 10:23:42 0.0
## 8 -3497.58 1.0 1606 10:23:42 0.0
## 9 -3497.54 1.0 1606 10:23:42 0.0
## 10 -3497.54 1.0 1606 10:23:42 0.0
\end{verbatim}
Note that since we want to estimate the amount of variance explained by individual identity (rather than by additive genetic effects), we fit \texttt{animal} as an ordinary random effect and do not associate it with the pedigree.
This model partitions the phenotypic variance in \texttt{laydate} as follows:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelv)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## animal 11.08634 1.1794319 9.399728 P 0
## units!units 21.29643 0.8896196 23.938798 P 0
## units!R 1.00000 NA NA F 0
\end{verbatim}
Between-individual variance is given by the \texttt{animal} component, while the residual component (\texttt{units!units}) represents within-individual variance. Here, then, the repeatability of the trait can be determined by hand as 0.34 (\emph{i.e.}, as 11.086/(11.086 + 21.296)).
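Rather than computing this ratio by hand, \texttt{vpredict()} returns the same estimate together with an approximate standard error. A sketch, where \texttt{V1} and \texttt{V2} refer to the variance components in the order they appear in \texttt{summary(modelv)\$varcomp}:

\begin{verbatim}
# repeatability = V_animal / (V_animal + V_residual)
vpredict(modelv, repeatability ~ V1 / (V1 + V2))
\end{verbatim}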
Mean lay date might change with age, so we could ask what the repeatability of lay date is after conditioning on age. This would be done by adding \texttt{age} into the model as a fixed effect.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelw }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:42 2021
## LogLik Sigma2 DF wall cpu
## 1 -8402.968 1.0 1602 10:23:42 0.0
## 2 -6912.361 1.0 1602 10:23:42 0.0
## 3 -5274.379 1.0 1602 10:23:42 0.0
## 4 -4143.634 1.0 1602 10:23:42 0.0
## 5 -3541.895 1.0 1602 10:23:42 0.0
## 6 -3372.909 1.0 1602 10:23:42 0.0
## 7 -3347.670 1.0 1602 10:23:42 0.0
## 8 -3346.655 1.0 1602 10:23:42 0.0
## 9 -3346.652 1.0 1602 10:23:42 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelw)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## animal 12.28982 1.156115 10.63027 P 0
## units!units 16.37989 0.686619 23.85586 P 0
## units!R 1.00000 NA NA F 0
\end{verbatim}
The repeatability of lay date, after accounting for age effects, is now estimated as 0.43 (\emph{i.e.}, as 12.29/(12.29 + 16.38)). So, just as we saw when estimating \(h^2\) in Tutorial 1, the inclusion of fixed effects will alter the estimated effect size if we determine total phenotypic variance as the sum of the variance components. Thus, proper interpretation is vital.
Here age is modelled as a 5-level factor (specified using the function \texttt{as.factor()} at the beginning of the analysis). We could equally have fitted it as a continuous variable, in which case, given the potential for a late-life decline, we would probably also include a quadratic term, as sketched below.
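A sketch of that continuous-age alternative (the variables \texttt{age\_c} and \texttt{age\_c2} are introduced here purely for illustration):

\begin{verbatim}
# convert the age factor back to a numeric covariate
gryphonRM$age_c <- as.numeric(as.character(gryphonRM$age))
gryphonRM$age_c2 <- gryphonRM$age_c^2

modelw2 <- asreml(
  fixed = laydate ~ age_c + age_c2,
  random = ~animal,
  residual = ~ idv(units),
  data = gryphonRM,
  na.action = na.method(x = "omit", y = "omit")
)
\end{verbatim}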
\hypertarget{partitioning-additive-and-permanent-environment-effects}{%
\subsection{Partitioning additive and permanent environment effects}\label{partitioning-additive-and-permanent-environment-effects}}
Generally we expect the repeatability to set an upper limit for the heritability since, in addition to additive genetic effects, other types of effects also generate among-individual variation. Non-additive contributions to consistent among-individual differences are normally referred to as \emph{permanent environment effects}. If a trait has repeated measures, then it is necessary to model permanent environment effects in an animal model to prevent upward bias in \(V_A\).
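In other words, with repeated measures the among-individual variance decomposes as \(V_{\mathrm{ind}} = V_A + V_{PE}\), so that
\[
h^2 = \frac{V_A}{V_A + V_{PE} + V_R} \leq R = \frac{V_A + V_{PE}}{V_A + V_{PE} + V_R},
\]
which is why the repeatability bounds the heritability from above.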
To illustrate this, first fit an animal model that ignores the permanent environment effect:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelx }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:42 2021
## LogLik Sigma2 DF wall cpu
## 1 -8751.390 1.0 1602 10:23:42 0.0
## 2 -7169.205 1.0 1602 10:23:42 0.0
## 3 -5427.604 1.0 1602 10:23:42 0.0
## 4 -4219.598 1.0 1602 10:23:42 0.0
## 5 -3569.815 1.0 1602 10:23:42 0.0
## 6 -3382.341 1.0 1602 10:23:42 0.0
## 7 -3352.867 1.0 1602 10:23:43 0.0
## 8 -3351.565 1.0 1602 10:23:43 0.0
## 9 -3351.560 1.0 1602 10:23:43 0.0
\end{verbatim}
Variance components are almost unchanged:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary.asreml}\NormalTok{(modelx)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## vm(animal, ainv) 13.91784 1.443968 9.638607 P 0
## units!units 16.84008 0.707365 23.806768 P 0
## units!R 1.00000 NA NA F 0
\end{verbatim}
This suggests that all of the among-individual variance is -- rightly or wrongly -- being partitioned as \(V_A\) here. To instead obtain an unbiased estimate of \(V_A\) we need to allow for both additive genetic \emph{and} non-genetic sources of individual variation. We do this by fitting \texttt{animal} twice, once with a pedigree, and once without a pedigree (using \texttt{ide()}).
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modely }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+} \FunctionTok{ide}\NormalTok{(animal),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:43 2021
## LogLik Sigma2 DF wall cpu
## 1 -7731.394 1.0 1602 10:23:43 0.0
## 2 -6426.548 1.0 1602 10:23:43 0.0
## 3 -4997.252 1.0 1602 10:23:43 0.0
## 4 -4018.486 1.0 1602 10:23:43 0.0
## 5 -3504.988 1.0 1602 10:23:43 0.0
## 6 -3363.160 1.0 1602 10:23:43 0.0
## 7 -3341.611 1.0 1602 10:23:43 0.0
## 8 -3340.682 1.0 1602 10:23:43 0.0
## 9 -3340.679 1.0 1602 10:23:43 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modely)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## vm(animal, ainv) 4.876101 1.8087709 2.695809 P 0
## ide(animal) 7.400983 1.7280113 4.282948 P 0
## units!units 16.380188 0.6866189 23.856300 P 0
## units!R 1.000000 NA NA F 0
\end{verbatim}
The estimate of \(V_A\) is now much lower since the additive and permanent environment effects are being properly separated. We can estimate \(h^2\) and the repeatability from this model:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modely, h2 }\SpecialCharTok{\textasciitilde{}}\NormalTok{ V1 }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V3))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## h2 0.1701523 0.06073974
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{vpredict}\NormalTok{(modely, repeatability }\SpecialCharTok{\textasciitilde{}}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2) }\SpecialCharTok{/}\NormalTok{ (V1 }\SpecialCharTok{+}\NormalTok{ V2 }\SpecialCharTok{+}\NormalTok{ V3))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Estimate SE
## repeatability 0.4284108 0.02741602
\end{verbatim}
\hypertarget{adding-additional-effects-and-testing-significance}{%
\subsection{Adding additional effects and testing significance}\label{adding-additional-effects-and-testing-significance}}
Models of repeated measures can be extended to include other fixed or random effects. For example try including year of measurement (\texttt{year}) and birth year (\texttt{byear}) as random effects.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelz }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+} \FunctionTok{ide}\NormalTok{(animal) }\SpecialCharTok{+}
\NormalTok{ year }\SpecialCharTok{+}\NormalTok{ byear,}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Model fitted using the sigma parameterization.
## ASReml 4.1.0 Fri May 21 10:23:43 2021
## LogLik Sigma2 DF wall cpu
## 1 -4650.748 1.0 1602 10:23:43 0.0
## 2 -4088.264 1.0 1602 10:23:43 0.0
## 3 -3494.147 1.0 1602 10:23:43 0.0
## 4 -3127.161 1.0 1602 10:23:43 0.0 (1 restrained)
## 5 -2976.449 1.0 1602 10:23:43 0.0 (1 restrained)
## 6 -2955.785 1.0 1602 10:23:43 0.0 (1 restrained)
## 7 -2955.097 1.0 1602 10:23:43 0.0 (1 restrained)
## 8 -2955.095 1.0 1602 10:23:43 0.0 (1 restrained)
## 9 -2955.095 1.0 1602 10:23:43 0.0
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{summary}\NormalTok{(modelz)}\SpecialCharTok{$}\NormalTok{varcomp}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## component std.error z.ratio bound %ch
## byear 1.650876e-07 NA NA B 0
## year 7.938576e+00 1.9344619 4.103765 P 0
## vm(animal, ainv) 4.815136e+00 1.6682351 2.886365 P 0
## ide(animal) 8.433325e+00 1.5495778 5.442337 P 0
## units!units 7.795560e+00 0.3324411 23.449443 P 0
## units!R 1.000000e+00 NA NA F 0
\end{verbatim}
This model will return additional variance components corresponding to variation in lay dates between years of measurement and between birth cohorts of females. \(V_{byear}\) is very low, and if you compare this model to a reduced model with \texttt{byear} excluded, the log-likelihood remains unchanged.
\texttt{year} effects could alternatively be included as fixed effects (try this!). This will reduce \(V_R\) and increase the estimates of heritability and repeatability, which must now be interpreted as proportions of phenotypic variance after conditioning on both age and year of measurement effects.
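As a sketch of that exercise, with \texttt{year} moved to the fixed effects and the uninformative \texttt{byear} term dropped (the object name \texttt{modelz2} is ours):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{modelz2 }\OtherTok{\textless{}{-}} \FunctionTok{asreml}\NormalTok{(}
\AttributeTok{fixed =}\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age }\SpecialCharTok{+}\NormalTok{ year,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{vm}\NormalTok{(animal, ainv) }\SpecialCharTok{+} \FunctionTok{ide}\NormalTok{(animal),}
\AttributeTok{residual =} \SpecialCharTok{\textasciitilde{}} \FunctionTok{idv}\NormalTok{(units),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{na.action =} \FunctionTok{na.method}\NormalTok{(}\AttributeTok{x =} \StringTok{"omit"}\NormalTok{, }\AttributeTok{y =} \StringTok{"omit"}\NormalTok{)}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}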
\hypertarget{gremlin-3}{%
\section{gremlin}\label{gremlin-3}}
TODO (maybe just bother Matthew to do it)
Meanwhile:
\begin{figure}
\includegraphics[width=1\linewidth]{images/Gizmo} \caption{Keep it dry and do not feed after midnight.}\label{fig:unnamed-chunk-121}
\end{figure}
\hypertarget{mcmcglmm-3}{%
\section{MCMCglmm}\label{mcmcglmm-3}}
\hypertarget{estimating-repeatability-1}{%
\subsection{Estimating repeatability}\label{estimating-repeatability-1}}
With repeated measures on individuals it is often of interest, prior to fitting a genetic model, to see how repeatable a trait is. We can estimate the repeatability of a trait as the proportion of phenotypic variance explained by individual identity using the commands below:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{p.var }\OtherTok{\textless{}{-}} \FunctionTok{var}\NormalTok{(gryphonRM}\SpecialCharTok{$}\NormalTok{laydate, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{prior3}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{)), }\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{V =} \DecValTok{1}\NormalTok{,}
\AttributeTok{nu =} \FloatTok{0.002}
\NormalTok{))}
\NormalTok{model3}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1}\NormalTok{,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{prior =}\NormalTok{ prior3}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.1}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 11.00957 20.97417
\end{verbatim}
Note the use of the term \texttt{animal}: since neither the \texttt{pedigree} nor the \texttt{ginv} argument is provided here, it simply indexes individual identity and so captures the between-individual variance, while the residual component (\texttt{units}) represents within-individual variance. Here then the repeatability of the trait can be determined as 0.34 (\emph{i.e.}, as 11.010/(11.010 + 20.974)). Given that we set up the simulation such that mean lay date changes with age (initially increasing to age 5 before a late-life decline), we might ask what the repeatability of lay date is after conditioning on age effects. This would be done by adding \texttt{age} into the model as a fixed effect.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model3}\FloatTok{.2} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(laydate }\SpecialCharTok{\textasciitilde{}}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{prior =}\NormalTok{ prior3}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{plot}\NormalTok{(model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{Sol)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-123-1.pdf} \includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-123-2.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{plot}\NormalTok{(model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\includegraphics{wam_tuto_files/figure-latex/unnamed-chunk-123-3.pdf}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 12.85222 16.24052
\end{verbatim}
Note that the random effect structure has remained unchanged, so we have not modified the prior between models 3.1 and 3.2. The repeatability of \texttt{laydate}, after accounting for age effects, is now estimated as 0.44 (\emph{i.e.}, as 12.85/(12.85 + 16.24)). So, just as we saw when estimating \(h^2\) in Tutorial 1, the inclusion of fixed effects will alter the estimated effect size if we determine total phenotypic variance as the sum of the variance components. Thus, proper interpretation is vital.
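As a sketch, the full posterior distribution of this conditional repeatability can be obtained directly from the MCMC samples, rather than from a ratio of posterior modes:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{rpt3}\FloatTok{.2} \OtherTok{\textless{}{-}}\NormalTok{ model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}
\NormalTok{ (model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model3}\FloatTok{.2}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{])}
\FunctionTok{posterior.mode}\NormalTok{(rpt3}\FloatTok{.2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}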
Here age is modelled as a 5-level factor (see the conversion of age to a factor in section 3.2). We could equally have fitted it as a continuous variable, in which case, given the late-life decline, we would probably also include a quadratic term.
\hypertarget{partitioning-additive-and-permanent-environment-effects-1}{%
\subsection{Partitioning additive and permanent environment effects}\label{partitioning-additive-and-permanent-environment-effects-1}}
Generally we expect that the repeatability will set the upper limit for heritability since, while additive genetic effects will cause among-individual variation, so will other types of effects. Non-additive contributions to fixed among-individual differences are normally referred to as `permanent environment effects', although `non-heritable effects' that are consistent within individuals may be a better way to think of modelling this effect. If a trait has repeated measures then it is necessary to model permanent environment effects in an animal model to prevent upward bias in \(V_A\). To illustrate this, fit the animal model:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{Ainv }\OtherTok{\textless{}{-}} \FunctionTok{inverseA}\NormalTok{(gryphonped)}\SpecialCharTok{$}\NormalTok{Ainv}
\NormalTok{model3}\FloatTok{.3} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{animal, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv),}
\AttributeTok{data =}\NormalTok{ gryphonRM, }\AttributeTok{prior =}\NormalTok{ prior3}\FloatTok{.1}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.3}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal units
## 13.87540 17.00617
\end{verbatim}
This suggests that all of the among-individual variance is -- rightly or wrongly -- being partitioned as \(V_A\) here. In fact, here the partition is wrong, since the simulation included both additive genetic effects and additional fixed heterogeneity that was not associated with the pedigree structure (\emph{i.e.}, permanent environment effects).
In order to fit both permanent environment and additive genetic effects, we need to fit the individual identity twice in the model: once linked to the pedigree (the genetic effect) and once not linked to the pedigree (the permanent environment effect).
To do so, we need to duplicate the variable containing the individual identity and give it a new name.
A more appropriate estimate of \(V_A\) is given by the model:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{gryphonRM}\SpecialCharTok{$}\NormalTok{animal\_pe }\OtherTok{\textless{}{-}}\NormalTok{ gryphonRM}\SpecialCharTok{$}\NormalTok{animal}
\NormalTok{p.var }\OtherTok{\textless{}{-}} \FunctionTok{var}\NormalTok{(gryphonRM}\SpecialCharTok{$}\NormalTok{laydate, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{prior3}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{), }\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{V =} \DecValTok{1}\NormalTok{,}
\AttributeTok{nu =} \FloatTok{0.002}
\NormalTok{)), }\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{))}
\NormalTok{model3}\FloatTok{.4} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{ animal }\SpecialCharTok{+}\NormalTok{ animal\_pe,}
\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphonRM, }\AttributeTok{prior =}\NormalTok{ prior3}\FloatTok{.4}\NormalTok{, }\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal animal_pe units
## 4.499468 7.407864 16.472613
\end{verbatim}
The estimate of \(V_A\) is now much lower (reduced from 13.88 to 4.50) since the additive and permanent environment effects are being properly separated. We could obtain estimates of \(h^2\) and of the repeatability from this model using the following commands:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model3.}\FloatTok{4.}\NormalTok{VP }\OtherTok{\textless{}{-}}\NormalTok{ model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal\_pe"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"units"}\NormalTok{]}
\NormalTok{model3.}\FloatTok{4.}\NormalTok{PE\_VA }\OtherTok{\textless{}{-}}\NormalTok{ model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{+}\NormalTok{ model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal\_pe"}\NormalTok{]}
\FunctionTok{posterior.mode}\NormalTok{(model3.}\FloatTok{4.}\NormalTok{PE\_VA }\SpecialCharTok{/}\NormalTok{ model3.}\FloatTok{4.}\NormalTok{VP)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.4236285
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.4}\SpecialCharTok{$}\NormalTok{VCV[, }\StringTok{"animal"}\NormalTok{] }\SpecialCharTok{/}\NormalTok{ model3.}\FloatTok{4.}\NormalTok{VP)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## var1
## 0.1585904
\end{verbatim}
\hypertarget{adding-additional-effects-and-testing-significance-1}{%
\subsection{Adding additional effects and testing significance}\label{adding-additional-effects-and-testing-significance-1}}
Models of repeated measures can be extended to include other fixed or random effects.
For example try including year of measurement (year).
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{p.var }\OtherTok{\textless{}{-}} \FunctionTok{var}\NormalTok{(gryphonRM}\SpecialCharTok{$}\NormalTok{laydate, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{prior3}\FloatTok{.5} \OtherTok{\textless{}{-}} \FunctionTok{list}\NormalTok{(}\AttributeTok{G =} \FunctionTok{list}\NormalTok{(}\AttributeTok{G1 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{), }\AttributeTok{G2 =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{V =} \DecValTok{1}\NormalTok{,}
\AttributeTok{nu =} \FloatTok{0.002}
\NormalTok{), }\AttributeTok{G3 =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{), }\AttributeTok{G4 =} \FunctionTok{list}\NormalTok{(}
\AttributeTok{V =} \DecValTok{1}\NormalTok{,}
\AttributeTok{nu =} \FloatTok{0.002}
\NormalTok{)), }\AttributeTok{R =} \FunctionTok{list}\NormalTok{(}\AttributeTok{V =} \DecValTok{1}\NormalTok{, }\AttributeTok{nu =} \FloatTok{0.002}\NormalTok{))}
\NormalTok{model3}\FloatTok{.5} \OtherTok{\textless{}{-}} \FunctionTok{MCMCglmm}\NormalTok{(laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ age,}
\AttributeTok{random =} \SpecialCharTok{\textasciitilde{}}\NormalTok{ animal }\SpecialCharTok{+}\NormalTok{ animal\_pe }\SpecialCharTok{+}
\NormalTok{ year }\SpecialCharTok{+}\NormalTok{ byear, }\AttributeTok{ginv =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Ainv), }\AttributeTok{data =}\NormalTok{ gryphonRM, }\AttributeTok{prior =}\NormalTok{ prior3}\FloatTok{.5}\NormalTok{,}
\AttributeTok{verbose =} \ConstantTok{FALSE}
\NormalTok{)}
\FunctionTok{posterior.mode}\NormalTok{(model3}\FloatTok{.5}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## animal animal_pe year byear units
## 4.831458287 7.851537615 7.925240216 0.001934676 7.751678678
\end{verbatim}
This model will return additional variance components corresponding to year of measurement effects and birth year (of the female) effects. The latter were not simulated, as should be apparent from the parameter estimate (and from the support interval derivable from the posterior distribution, and from a DIC-based comparison of \texttt{model3.5} with a model from which the birth year term has been eliminated; see Tutorial 1). However, year effects were simulated, as should be apparent from the modal estimate and from the support interval (try this yourself using \texttt{HPDinterval()}), and this could be formally confirmed by comparison of DIC. \texttt{year} effects could alternatively be included as fixed effects (try this; you should be able to handle the new prior specification at this point). Since we simulated large year of measurement effects, this treatment will reduce \(V_R\) and increase the estimates of heritability and repeatability, which must now be interpreted as proportions of phenotypic variance after conditioning on both age and year of measurement effects.
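A sketch of the suggested check on the posterior support intervals:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{HPDinterval}\NormalTok{(model3}\FloatTok{.5}\SpecialCharTok{$}\NormalTok{VCV)}
\end{Highlighting}
\end{Shaded}
The interval for \texttt{byear} should be concentrated near zero, while the interval for \texttt{year} should clearly exclude zero.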
\hypertarget{brms-3}{%
\section{brms}\label{brms-3}}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(brms)}
\NormalTok{Amat }\OtherTok{\textless{}{-}}\NormalTok{ nadiv}\SpecialCharTok{::}\FunctionTok{makeA}\NormalTok{(gryphonped)}
\NormalTok{model\_simple1}\FloatTok{.1} \OtherTok{\textless{}{-}} \FunctionTok{brm}\NormalTok{(}
\NormalTok{ laydate }\SpecialCharTok{\textasciitilde{}} \DecValTok{1} \SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ animal) }\SpecialCharTok{+}\NormalTok{ (}\DecValTok{1} \SpecialCharTok{|}\NormalTok{ animal\_pe),}
\AttributeTok{data =}\NormalTok{ gryphonRM,}
\AttributeTok{family =} \FunctionTok{gaussian}\NormalTok{(), }\AttributeTok{cov\_ranef =} \FunctionTok{list}\NormalTok{(}\AttributeTok{animal =}\NormalTok{ Amat),}
\AttributeTok{chains =} \DecValTok{2}\NormalTok{, }\AttributeTok{cores =} \DecValTok{2}\NormalTok{, }\AttributeTok{iter =} \DecValTok{1000}
\NormalTok{)}
\FunctionTok{summary}\NormalTok{(model\_simple1}\FloatTok{.1}\NormalTok{)}
\FunctionTok{plot}\NormalTok{(model\_simple1}\FloatTok{.1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\hypertarget{stan-2}{%
\section{stan}\label{stan-2}}
to do
\hypertarget{quick-comparison-of-codes}{%
\chapter{Quick comparison of codes}\label{quick-comparison-of-codes}}
\hypertarget{univariate-model-with-repeated-measures}{%
\section{Univariate model with repeated measures}\label{univariate-model-with-repeated-measures}}
\hypertarget{asreml-r-3}{%
\subsection{Asreml-R}\label{asreml-r-3}}
\hypertarget{gremlin-4}{%
\subsection{gremlin}\label{gremlin-4}}
\hypertarget{mcmcglmm-4}{%
\subsection{MCMCglmm}\label{mcmcglmm-4}}
\hypertarget{brms-4}{%
\subsection{brms}\label{brms-4}}
\hypertarget{bivariate-model}{%
\section{bivariate model}\label{bivariate-model}}
\hypertarget{asreml-r-4}{%
\subsection{Asreml-R}\label{asreml-r-4}}
\hypertarget{gremlin-5}{%
\subsection{gremlin}\label{gremlin-5}}
\hypertarget{mcmcglmm-5}{%
\subsection{MCMCglmm}\label{mcmcglmm-5}}
\hypertarget{brms-5}{%
\subsection{brms}\label{brms-5}}
\bibliography{book.bib,packages.bib}
\printindex
\end{document}
|
{"hexsha": "ecd9b4852978972ed0146563386369668e44f907", "size": 457700, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/wam_tuto.tex", "max_stars_repo_name": "wamwiki/wam_tuto", "max_stars_repo_head_hexsha": "c8e275d0c91421a09602293e31168a5534705979", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-08T15:11:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T16:22:02.000Z", "max_issues_repo_path": "docs/wam_tuto.tex", "max_issues_repo_name": "MathieuVID/wam_tuto", "max_issues_repo_head_hexsha": "4922bdd0ea12bcc56d1c63d42e403d19ba905012", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-29T12:14:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T21:50:41.000Z", "max_forks_repo_path": "docs/wam_tuto.tex", "max_forks_repo_name": "MathieuVID/wam_tuto", "max_forks_repo_head_hexsha": "4922bdd0ea12bcc56d1c63d42e403d19ba905012", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-03-08T15:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-09T09:19:52.000Z", "avg_line_length": 63.1746031746, "max_line_length": 1130, "alphanum_fraction": 0.5066419052, "num_tokens": 124243}
|
# maintained by rajivak@utexas.edu
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
def format_numsel(numsel):
ss = ''
for i,j in enumerate(numsel):
ss = ss + " %d:%d " %(i,j)
return ss
def get_train_testindices(n, ntest, seed):
    """Randomly splits range(n) into train and test index arrays (ntest test)."""
    np.random.seed(seed)
    testindices = np.random.choice(n, ntest, replace=False)
    trainindices = np.setdiff1d(range(n), testindices)
    return trainindices, testindices
def exit(msg):
    """Prints the message and terminates the process."""
    print(msg)
    # sys.exit is used here; calling exit(1) would recurse into this function.
    sys.exit(1)
def dir_exists(filename):
"""Creates the directory of a file if the directory does not exist.
Raises:
IOError: If the directory could not be created (and the directory does not
    exist). This may be due to, for instance, permission issues or a race
    condition in which the directory is created right before makedirs runs.
"""
dir = os.path.dirname(filename)
if not os.path.exists(dir):
os.makedirs(dir)
|
{"hexsha": "4a77a0ffdc93c5e262c47320e055e138b37f2dbd", "size": 1019, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/mmd/MMD-critic/Helper.py", "max_stars_repo_name": "sthagen/christophM-interpretable-ml-book", "max_stars_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4164, "max_stars_repo_stars_event_min_datetime": "2017-12-03T19:28:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:31:51.000Z", "max_issues_repo_path": "scripts/mmd/MMD-critic/Helper.py", "max_issues_repo_name": "sthagen/christophM-interpretable-ml-book", "max_issues_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 257, "max_issues_repo_issues_event_min_datetime": "2017-12-04T07:19:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T06:00:07.000Z", "max_forks_repo_path": "scripts/mmd/MMD-critic/Helper.py", "max_forks_repo_name": "sthagen/christophM-interpretable-ml-book", "max_forks_repo_head_hexsha": "d8b82b8e6ab82c78d95de784a601e71025621ab2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 915, "max_forks_repo_forks_event_min_datetime": "2017-12-03T16:54:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T06:52:34.000Z", "avg_line_length": 27.5405405405, "max_line_length": 80, "alphanum_fraction": 0.6908734053, "include": true, "reason": "import numpy", "num_tokens": 245}
|
[STATEMENT]
lemma benv_in_eval:
assumes "\<forall>\<beta>'\<in>benv_in_ve ve. Q \<beta>'"
and "Q \<beta>"
shows "\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
proof(cases v)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>x1. v = L x1 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x21 x22. v = R x21 x22 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
3. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
4. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case (R _ var)
[PROOF STATE]
proof (state)
this:
v = R x21_ var
goal (4 subgoals):
1. \<And>x1. v = L x1 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x21 x22. v = R x21 x22 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
3. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
4. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
v = R x21_ var
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
proof (cases "\<beta> (fst var)")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>v = R x21_ var; \<beta> (fst var) = None\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>a. \<lbrakk>v = R x21_ var; \<beta> (fst var) = Some a\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
\<beta> (fst var) = None
goal (2 subgoals):
1. \<lbrakk>v = R x21_ var; \<beta> (fst var) = None\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>a. \<lbrakk>v = R x21_ var; \<beta> (fst var) = Some a\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
with R
[PROOF STATE]
proof (chain)
picking this:
v = R x21_ var
\<beta> (fst var) = None
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
v = R x21_ var
\<beta> (fst var) = None
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal (1 subgoal):
1. \<And>a. \<lbrakk>v = R x21_ var; \<beta> (fst var) = Some a\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. \<lbrakk>v = R x21_ var; \<beta> (fst var) = Some a\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case (Some cnt)
[PROOF STATE]
proof (state)
this:
\<beta> (fst var) = Some cnt
goal (1 subgoal):
1. \<And>a. \<lbrakk>v = R x21_ var; \<beta> (fst var) = Some a\<rbrakk> \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
proof (cases "ve (var,cnt)")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ve (var, cnt) = None \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
ve (var, cnt) = None
goal (2 subgoals):
1. ve (var, cnt) = None \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
with Some R
[PROOF STATE]
proof (chain)
picking this:
\<beta> (fst var) = Some cnt
v = R x21_ var
ve (var, cnt) = None
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<beta> (fst var) = Some cnt
v = R x21_ var
ve (var, cnt) = None
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal (1 subgoal):
1. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case (Some d)
[PROOF STATE]
proof (state)
this:
ve (var, cnt) = Some d
goal (1 subgoal):
1. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
hence "d \<in> ran ve"
[PROOF STATE]
proof (prove)
using this:
ve (var, cnt) = Some d
goal (1 subgoal):
1. d \<in> ran ve
[PROOF STEP]
unfolding ran_def
[PROOF STATE]
proof (prove)
using this:
ve (var, cnt) = Some d
goal (1 subgoal):
1. d \<in> {b. \<exists>a. ve a = Some b}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
d \<in> ran ve
goal (1 subgoal):
1. \<And>a. ve (var, cnt) = Some a \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
d \<in> ran ve
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
using Some \<open>\<beta> (fst var) = Some cnt\<close> R assms(1)
[PROOF STATE]
proof (prove)
using this:
d \<in> ran ve
ve (var, cnt) = Some d
\<beta> (fst var) = Some cnt
v = R x21_ var
\<forall>\<beta>'\<in>benv_in_ve ve. Q \<beta>'
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
unfolding benv_in_ve_def
[PROOF STATE]
proof (prove)
using this:
d \<in> ran ve
ve (var, cnt) = Some d
\<beta> (fst var) = Some cnt
v = R x21_ var
\<forall>\<beta>'\<in>\<Union> {benv_in_d d |d. d \<in> ran ve}. Q \<beta>'
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal (3 subgoals):
1. \<And>x1. v = L x1 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
3. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x1. v = L x1 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
3. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case (L l)
[PROOF STATE]
proof (state)
this:
v = L l
goal (3 subgoals):
1. \<And>x1. v = L x1 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
3. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
v = L l
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
v = L l
Q \<beta>
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal (2 subgoals):
1. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case C
[PROOF STATE]
proof (state)
this:
v = C x31_ x32_
goal (2 subgoals):
1. \<And>x31 x32. v = C x31 x32 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
2. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
v = C x31_ x32_
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal (1 subgoal):
1. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
case P
[PROOF STATE]
proof (state)
this:
v = P x4_
goal (1 subgoal):
1. \<And>x4. v = P x4 \<Longrightarrow> \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
v = P x4_
goal (1 subgoal):
1. \<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>\<beta>\<in>benv_in_d (\<A> v \<beta> ve). Q \<beta>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4615, "file": "Shivers-CFA_ExCFSV", "length": 41}
|
###_______________________________ SymPy ___________________________________###
# SymPy is a Python library for symbolic mathematics. It aims to become a
# full-featured computer algebra system (CAS) while keeping the code as simple
# as possible so that it stays comprehensible and easily extensible.
# SymPy is written entirely in Python and requires no additional libraries.
# The project started in 2005, was released to the public in 2007, and
# hundreds of people have contributed to it over the years.
##_________________________ Creating Symbols ___________________________##
from sympy import init_session, Symbol, symbols, pi, I, E, cos, sin, exp, tan, simplify, expand, factor, collect, apart, cancel, expand_trig, diff, Derivative, Function, integrate, limit, series, Eq, solve, dsolve, Matrix, N
init_session(use_latex='matplotlib')
# Create the symbol a
a = Symbol('a')
print((a + pi) ** 2)
print(type(a))
b = 2 * a
print(type(b))
print(b)
# Since Python allows the type of a variable to change, if we now assign
# a float value to a, it stops being a symbol
a = 2.26492
print(type(a))
# __The conclusions are:__
# * __If we want to use a variable as a symbol, we must create it first.__
# * Operations on symbols return symbols.
# * If a variable that held a symbol receives another assignment, its type changes.
# Variables of type Symbol act as containers whose contents we do not know
# (a real, a complex, a list...). Keep in mind that the name of the variable
# is one thing and the symbol it is represented by is another
# symbol creation
coef_traccion = Symbol('c_T')
# Difference between a variable and a symbol
a = symbols('b')
x, y, z, t = symbols('x y z t')
# Greek symbols:
w = symbols('omega')
W = symbols('Omega')
# By default, SymPy assumes that symbols are complex numbers. This can
# produce unexpected results for certain operations such as, for example,
# logarithms. We can state that a variable is real, integer... at creation
# time
# Create real symbols
x, y, z, t = symbols('x y z t', real=True)
# We can inspect the assumptions of a symbol
print(x.assumptions0)
##_________________________ Expressions ___________________________##
expr = cos(x)**2 + sin(x)**2
print(expr)
# We can ask it to simplify the previous expression
print(simplify(expr))
# Sometimes we will need to substitute one variable for another variable,
# for another expression or for a value, using 'subs'
# Substitute x with y ** 2
print(expr.subs(x, y**2))
# But the expression itself does not change!
# To make it change
expr = expr.subs(sin(x), exp(x))
print(expr)
# Evaluate the expression sin(x)+3x at x=pi
print((sin(x) + 3 * x).subs(x, pi))
# Although if what we want is the numerical value, the best option is .evalf()
print((sin(x) + 3 * x).subs(x, pi).evalf(25)) # number of decimal places is the n inside ()
# the same result is obtained with the N() function
print(N(pi,25))
##_________________________ Simplification ___________________________##
# SymPy offers numerous functions to __simplify and manipulate expressions__.
# Among others, the following stand out:
# * `expand()`
# * `factor()`
# * `collect()`
# * `apart()`
# * `cancel()`
# You can check in the SymPy documentation what each one does, with some examples.
# There are also simplification functions specific to trigonometric functions,
# powers and logarithms. Open [this documentation](http://docs.sympy.org/latest/tutorial/simplification.html) if you need it.
#_____________ Examples _____________##
expr1 = (x**3 + 3*y +2)**2
print(expand(expr1))
expr2 = (3*x**2 - 2*x +1) / ((x-1)**2)
print(apart(expr2))
expr3 = (x**3 + 9*x**2 +27*x + 27)
print(factor(expr3))
expr4 = sin(x + 2 * y)
print(expand_trig(expr4))
print(expand(expr4, trig=True))
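# collect() and cancel() were listed above without an example; quick sketches
# (expr5 and expr6 are names of our own choosing):
expr5 = x**3 + x*y + 2*x**2 - z*x**2 + x - 3
print(collect(expr5, x)) # group terms by powers of x
expr6 = (x**2 + 2*x + 1) / (x**2 + x)
print(cancel(expr6)) # cancel common factors -> (x + 1)/x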
##_____________ Derivatives _____________##
# You can differentiate an expression using the .diff() method or the diff() function
# create an expression
expr = cos(x)
# get the first derivative with the function
print(diff(expr, x)) #or:
print(expr.diff(x))
# Third derivative?
print(expr.diff(x,x,x))
print(expr.diff(x,3))
# Many derivatives?
expr_xy = y ** 3 * sin(x) ** 2 + x ** 2 * cos(y)
print(diff(expr_xy, x, 2, y, 2))
# If we want to leave the derivative unevaluated, we use Derivative()
print(Derivative(expr_xy, x, 2, y))
# Can SymPy apply the chain rule?
# Create a function F #
F = Function('F')
print(F(x))
# Create a function G
G = Function('G')
print(G(x))
# Differentiate the composite function F(G(x))
print(F(G(x)).diff(x))
# If we know the functions: #
# define an f
f = 2 * y * exp(x)
# define a g(f)
g = f **2 * cos(x) + f
# differentiate it
print(diff(g,x))
##_____________ Integrals _____________##
int1 = cos(x)**2
print(int1.integrate(x))
int2 = 1 / sin(x)
print(int2.integrate(x))
x, a = symbols('x a', real=True)
int3 = 1 / (x**2 + a**2)**2
print(int3.integrate(x))
##_________________________ Limits ___________________________##
# Let's compute this limit, taken from the book _Cálculo: definiciones,
# teoremas y resultados_ by Juan de Burgos:
# $$\lim_{x \to 0} \left(\frac{x}{\tan{\left (x \right )}}\right)^{\frac{1}{x^{2}}}$$
# First we create the expression:
x = symbols('x', real=True)
expr = (x / tan(x)) ** (1 / x**2)
# We obtain the limit with the limit() function; if we want to leave it
# unevaluated, we can use Limit():
print(limit(expr, x, 0))
##_________________________ Series ___________________________##
# create the expression
expr = exp(x)
print(series(expr))
# The number of terms can be specified by passing an argument n=....
# The number we pass in will be the first term that gets discarded.
# Specifying the number of terms
print(series(expr, n=10))
# If the O(x**10) bothers us, we can remove it with removeO():
print(series(expr, n=10).removeO())
print(series(sin(x), n=8, x0=pi/3).removeO().subs(x, x-pi/3))
##_________________________ Eq resolution ___________________________##
# As mentioned above, equations cannot be created with =
# create the equation
equation = Eq(x ** 2 - x, 3)
# We can also create it as (passing 0 explicitly as the right-hand side)
print(Eq(x ** 2 - x - 3, 0))
# Solve it:
print(solve(equation))
# To solve using symbols: #
# Create the symbols and the equation
a, x, t, C = symbols('a, x, t, C', real=True)
equation2 = Eq(a * exp(x/t), C)
print(solve(equation2 ,x))
##_________________________ Diff equations ___________________________##
# dsolve is the solver for differential equations
x = symbols('x')
y = Function('y')
ecuacion_dif = Eq(y(x).diff(x,2) + y(x).diff(x) + y(x), cos(x))
# solve it (the unknown is y(x); the original draft passed an undefined f(x))
print(dsolve(ecuacion_dif, y(x)))
##_________________________ Matrices ___________________________##
# create a matrix full of symbols
a, b, c, d = symbols('a b c d')
A = Matrix([
[a, b],
[c, d]
])
# get the eigenvalues
print(A.eigenvals())
# inverse
print(A.inv())
# square the matrix
print(A ** 2)
|
{"hexsha": "9cf8d0e957967da7b5eceea824704c0222e6b602", "size": 7000, "ext": "py", "lang": "Python", "max_stars_repo_path": "MyScripts/040-SymPy.py", "max_stars_repo_name": "diegoomataix/Curso_AeroPython", "max_stars_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MyScripts/040-SymPy.py", "max_issues_repo_name": "diegoomataix/Curso_AeroPython", "max_issues_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MyScripts/040-SymPy.py", "max_forks_repo_name": "diegoomataix/Curso_AeroPython", "max_forks_repo_head_hexsha": "c2cf71a938062bc70dbbf7c2f21e09653fa2cedd", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3333333333, "max_line_length": 224, "alphanum_fraction": 0.714, "include": true, "reason": "from sympy", "num_tokens": 2095}
|
[STATEMENT]
lemma PO_m3_inv1_keys_init [iff]:
"init m3 \<subseteq> m3_inv1_keys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. init m3 \<subseteq> m3_inv1_keys
[PROOF STEP]
by (auto simp add: PO_hoare_def m3_defs intro!: m3_inv1_keysI)
|
{"llama_tokens": 112, "file": "Security_Protocol_Refinement_Auth_simple_m3_enc", "length": 1}
|
theory Proof_1_6
imports HandDryer VCTheoryLemmas Extra
begin
theorem proof_1_6:
"VC6 inv1 s0 hands_value"
apply(simp only: VC6_def inv1_def R1_def dryer_def)
apply(rule impI; rule conjI)
proof -
print_state
assume VC: "((toEnvP s0 \<and>
(\<forall>s1 s2.
substate s1 s2 \<and>
substate s2 s0 \<and>
toEnvP s1 \<and>
toEnvP s2 \<and>
toEnvNum s1 s2 = hands \<and>
getVarBool s1 hands = OFF \<and>
getVarBool s1 (Suc (Suc 0)) = OFF \<and>
getVarBool s2 hands = ON \<longrightarrow>
(\<exists>s4. toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4 s0 \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)))) \<and>
extraInv s0) \<and>
env (setVarAny s0 hands_value) hands_value \<and>
getPstate (setVarAny s0 hands_value) Ctrl =
drying \<and>
getVarBool (setVarAny s0 hands_value) hands \<noteq>
ON \<and>
10 \<le> ltimeEnv (setVarAny s0 hands_value)
Ctrl"
show " toEnvP
(toEnv
(setPstate (setVarAny s0 hands_value) Ctrl
waiting)) \<and>
(\<forall>s1 s2.
substate s1 s2 \<and>
substate s2
(toEnv
(setPstate (setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvP s1 \<and>
toEnvP s2 \<and>
toEnvNum s1 s2 = hands \<and>
getVarBool s1 hands = OFF \<and>
getVarBool s1 (Suc (Suc 0)) = OFF \<and>
getVarBool s2 hands = ON \<longrightarrow>
(\<exists>s4. toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)))"
apply(rule conjI)
apply(simp)
proof(rule allI; rule allI; rule impI)
fix s1 s2
assume req_prems: " substate s1 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvP s1 \<and>
toEnvP s2 \<and>
toEnvNum s1 s2 = hands \<and>
getVarBool s1 hands = OFF \<and>
getVarBool s1 (Suc (Suc 0)) = OFF \<and>
getVarBool s2 hands = ON "
show " \<exists>s4. toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and>
s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)"
proof cases
assume 1: "s2 = (toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting))"
have " (toEnvP s2 \<and>
substate s2 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvNum s2 s2 \<le> hands \<and>
getVarBool s2 (Suc (Suc 0)) = ON) \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s2 \<and>
s3 \<noteq> s2 \<longrightarrow>
getVarBool s3 hands = ON)"
proof
from 1 VC req_prems show "toEnvP s2 \<and>
substate s2 s2 \<and>
substate s2
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvNum s2 s2 \<le> hands \<and>
getVarBool s2 (Suc (Suc 0)) = ON" by auto
next
from substate_asym show " \<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s2 \<and> s3 \<noteq> s2 \<longrightarrow>
getVarBool s3 hands = ON"
by auto
qed
thus ?thesis by blast
next
assume 1: "s2 \<noteq> (toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting))"
with req_prems have 2: "substate s2 s0"
by (simp split: if_splits)
from VC req_prems 2 obtain "\<exists>s4. toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4 s0 \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)"by auto
then obtain s4 where 3: "toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4 s0 \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)" ..
have "toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4
(toEnv
(setPstate
(setVarAny s0 hands_value) Ctrl
waiting)) \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)"
using 3 by auto
thus ?thesis ..
qed
qed
next
assume VC: "((toEnvP s0 \<and>
(\<forall>s1 s2.
substate s1 s2 \<and>
substate s2 s0 \<and>
toEnvP s1 \<and>
toEnvP s2 \<and>
toEnvNum s1 s2 = hands \<and>
getVarBool s1 hands = OFF \<and>
getVarBool s1 (Suc (Suc 0)) = OFF \<and>
getVarBool s2 hands = ON \<longrightarrow>
(\<exists>s4. toEnvP s4 \<and>
substate s2 s4 \<and>
substate s4 s0 \<and>
toEnvNum s2 s4 \<le> hands \<and>
getVarBool s4 (Suc (Suc 0)) = ON \<and>
(\<forall>s3. toEnvP s3 \<and>
substate s2 s3 \<and>
substate s3 s4 \<and> s3 \<noteq> s4 \<longrightarrow>
getVarBool s3 hands = ON)))) \<and>
extraInv s0) \<and>
env (setVarAny s0 hands_value) hands_value \<and>
getPstate (setVarAny s0 hands_value) Ctrl =
drying \<and>
getVarBool (setVarAny s0 hands_value) hands \<noteq>
ON \<and>
10 \<le> ltimeEnv (setVarAny s0 hands_value)
Ctrl"
print_state
with extra6 show "extraInv
(toEnv
(setPstate (setVarAny s0 hands_value) Ctrl
waiting))"
by (auto simp add: VC6_def)
qed
end
|
{"author": "ivchernenko", "repo": "post_vcgenerator", "sha": "fadfff131086870a027d6bd1c78b8d5a3baf183b", "save_path": "github-repos/isabelle/ivchernenko-post_vcgenerator", "path": "github-repos/isabelle/ivchernenko-post_vcgenerator/post_vcgenerator-fadfff131086870a027d6bd1c78b8d5a3baf183b/case-studies/HandDryer/Proof_1_6.thy"}
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module abstracts the ctypes EstimatorReplay interface for python."""
import copy
import ctypes
import re
import sys
import gflags
from makani.analysis.control.replay import estimator_replay as replay
from makani.control import control_types
import numpy as np
import scipy.io as sio
def LoadMessages(filename,
path='/messages/kAioNodeControllerA/kMessageTypeControlDebug',
flight=None):
"""Load ControlTelemetry messages for reprocessing the estimator.
Args:
filename: A string describing the full path to the HDF5 kite log file.
path: A string describing the HDF5 message path.
flight: A string describing the flight name (e.g., rpx02) or None.
Returns:
A ctypes array of ControlTelemetry messages.
"""
num_messages = replay.H5GetNumMessages(filename, path)
messages = (replay.ControlTelemetry * num_messages)()
replay.H5GetControlTelemetryMessages(filename, path, flight, num_messages,
messages)
return messages
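# Example usage (a sketch; the log filename and flight name below are
# illustrative placeholders rather than references to a real log):
#   messages = LoadMessages('20190101-rpx02_controls.h5', flight='rpx02')
#   estimator = Estimator('baseline')
#   estimator.SetGpsFaults()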
class Estimator(object):
"""Python interface to EstimatorReplay."""
def __init__(self, name=None):
self.name = name
self._params = copy.deepcopy(replay.GetControlParams().contents.estimator)
self._fault_subsystems = set()
self._outputs = []
self.Reset()
def _SetParamByPath(self, params, path, value):
p = path.split('.', 1)
if len(p) == 1 and hasattr(params, p[0]):
setattr(params, p[0], value)
elif len(p) > 1 and hasattr(params, p[0]):
self._SetParamByPath(getattr(params, p[0]), p[1], value)
else:
raise ValueError('Invalid parameter path: ' + path)
def UpdateParam(self, path, value):
"""Update a parameter in EstimatorParams.
Args:
path: A string describing the dot path to the parameter.
value: A numerical value to assign.
"""
self._SetParamByPath(self._params, path, value)
def UpdateParams(self, params):
"""Update multiple parameters in EstimatorParams.
Args:
params: A dict mapping the parameter dot path to the assignment value.
"""
for k, v in params.iteritems():
self.UpdateParam(k, v)
def SaturateIndices(self, messages, first_index=0, last_index=None):
"""Ensure message indices are within the bounds of the messages array.
Args:
messages: A ctypes array of ControlTelemetry messages.
first_index: An integer describing the first index in the messages array.
last_index: An integer describing the last index in the messages array.
Returns:
Tuple (first_index, last_index), where first_index and last_index are on
interval [0, num_messages - 1].
"""
num_messages = len(messages)
if last_index is None:
last_index = num_messages - 1
first_index = max(0, min(first_index, num_messages - 1))
last_index = max(0, min(last_index, num_messages - 1))
return first_index, last_index
def Reset(self):
"""Reinitialize the estimator."""
self._outputs = []
self.ResetState(replay.FlightMode(), replay.EstimatorState())
replay.EstimatorReplayInit(ctypes.byref(self._params),
ctypes.byref(self._flight_mode),
ctypes.byref(self._state))
def ResetState(self, flight_mode, state):
"""Reset the estimator state.
Args:
flight_mode: A ctypes FlightMode enum value.
state: A ctypes EstimatorState structure.
"""
self._flight_mode = copy.deepcopy(flight_mode)
self._state = copy.deepcopy(state)
def ResetMessages(self, original, first_index, last_index, messages):
"""Reset the fault state of messages on interval [first_index, last_index].
Args:
original: A ctypes array of unmodified ControlTelemetry messages.
first_index: An integer describing the first index in the messages array.
last_index: An integer describing the last index in the messages array.
messages: A ctypes array of modified ControlTelemetry messages. This
function updates interval [first_index, last_index].
"""
first_index, last_index = self.SaturateIndices(original, first_index,
last_index)
# Reset all faults in all subsystems back to their original value.
subsystems = range(control_types.kNumSubsystems)
labels = self._GetSubsystemLabelsArray(subsystems)
replay.ClearControlTelemetryFaults(first_index, last_index, original,
len(labels), labels, 0xFFFFFFFF,
messages)
# Set all possible faults in selected subsystems.
set_fault_mask = 0xFFFFFFFF
labels = self._GetSubsystemLabelsArray(self._fault_subsystems)
replay.SetControlTelemetryFaults(first_index, last_index, len(labels),
labels, set_fault_mask, messages)
def _SubsystemsParameterToSet(self, subsystems):
"""Convert subsystems parameters to a set."""
if not isinstance(subsystems, (set, list)):
subsystems = [subsystems]
return set(subsystems)
def _GetSubsystemLabelsArray(self, subsystems):
"""Translate a list of subsystems to a ctypes SubsystemLabel array."""
subsystems = self._SubsystemsParameterToSet(subsystems)
labels = (replay.SubsystemLabel * len(subsystems))()
for i, subsys in enumerate(subsystems):
labels[i] = subsys
return labels
def ClearFaults(self, subsystems):
"""Clear faults for a list of subsystems."""
subsystems = self._SubsystemsParameterToSet(subsystems)
self._fault_subsystems -= subsystems
def ClearAllFaults(self):
"""Clear all faults."""
self.ClearFaults(range(control_types.kNumSubsystems))
def SetFaults(self, subsystems):
"""Set faults for a list of subsystems."""
subsystems = self._SubsystemsParameterToSet(subsystems)
self._fault_subsystems |= subsystems
def SetAllFaults(self):
"""Set faults in all controller subsystems."""
self.SetFaults(range(control_types.kNumSubsystems))
def ClearImuAccelGyroFaults(self):
"""Clear IMU accelerometer and gyro subsystem faults."""
subsystems = [control_types.kSubsysImuAAcc,
control_types.kSubsysImuAGyro,
control_types.kSubsysImuBAcc,
control_types.kSubsysImuBGyro,
control_types.kSubsysImuCAcc,
control_types.kSubsysImuCGyro]
self.ClearFaults(subsystems)
def SetGpsCrosswindFaults(self):
"""Set faults in the GPS subsystem for the crosswind antenna."""
subsystems = [control_types.kSubsysWingGpsCrosswindPos,
control_types.kSubsysWingGpsCrosswindVel]
self.SetFaults(subsystems)
def SetGpsHoverFaults(self):
"""Set faults in the GPS subsystem for the hover antenna."""
subsystems = [control_types.kSubsysWingGpsHoverPos,
control_types.kSubsysWingGpsHoverVel]
self.SetFaults(subsystems)
def SetGpsPortFaults(self):
"""Set faults in the GPS subsystem for the port wingtip antenna."""
subsystems = [control_types.kSubsysWingGpsPortPos,
control_types.kSubsysWingGpsPortVel]
self.SetFaults(subsystems)
def SetGpsStarboardFaults(self):
"""Set faults in the GPS subsystem for the starboard wingtip antenna."""
subsystems = [control_types.kSubsysWingGpsStarPos,
control_types.kSubsysWingGpsStarVel]
self.SetFaults(subsystems)
def SetGpsFaults(self):
"""Set faults in all wing GPS subsystems."""
self.SetGpsCrosswindFaults()
self.SetGpsHoverFaults()
self.SetGpsPortFaults()
self.SetGpsStarboardFaults()
def SetGsGpsFaults(self):
"""Set faults in the ground station GPS subsystem."""
subsystems = [control_types.kSubsysGsCompass,
control_types.kSubsysGsGpsPos,
control_types.kSubsysGsGpsVel]
self.SetFaults(subsystems)
def SetGsgFaults(self):
"""Set faults in the ground side gimble subsystems."""
subsystems = [control_types.kSubsysGsgAAzi,
control_types.kSubsysGsgAEle,
control_types.kSubsysGsgBAzi,
control_types.kSubsysGsgBEle]
self.SetFaults(subsystems)
def SetGlasFaults(self):
"""Set faults in the ground line angle sensing subsystems."""
self.SetGsgFaults()
self.SetLevelwindFaults()
self.SetLoadcellFaults()
self.SetPerchAziFaults()
def SetLevelwindFaults(self):
"""Set faults in the levelwind subsystems."""
subsystems = [control_types.kSubsysLevelwindEleA,
control_types.kSubsysLevelwindEleB]
self.SetFaults(subsystems)
def SetLoadcellFaults(self):
"""Set faults in the loadcell subsystems."""
subsystems = [control_types.kSubsysLoadcellSensorPort0,
control_types.kSubsysLoadcellSensorPort1,
control_types.kSubsysLoadcellSensorStarboard0,
control_types.kSubsysLoadcellSensorStarboard1]
self.SetFaults(subsystems)
def SetMagFaults(self):
"""Set faults in the magnetometer subsystems."""
subsystems = [control_types.kSubsysImuAMag,
control_types.kSubsysImuBMag,
control_types.kSubsysImuCMag]
self.SetFaults(subsystems)
def SetPerchAziFaults(self):
"""Set faults in the perch azimuth subsystems."""
subsystems = [control_types.kSubsysPerchAziA,
control_types.kSubsysPerchAziB]
self.SetFaults(subsystems)
def SetPitotFaults(self):
"""Set faults in the pitot tube subsystems."""
subsystems = [control_types.kSubsysPitotSensorHighSpeedAlpha,
control_types.kSubsysPitotSensorHighSpeedBeta,
control_types.kSubsysPitotSensorHighSpeedDynamic,
control_types.kSubsysPitotSensorHighSpeedStatic,
control_types.kSubsysPitotSensorLowSpeedAlpha,
control_types.kSubsysPitotSensorLowSpeedBeta,
control_types.kSubsysPitotSensorLowSpeedDynamic,
control_types.kSubsysPitotSensorLowSpeedStatic]
self.SetFaults(subsystems)
def SetWeatherFaults(self):
"""Set faults in the weather subsystems."""
subsystems = [control_types.kSubsysWeather]
self.SetFaults(subsystems)
def SetWindFaults(self):
"""Set faults in the wind subsystems."""
subsystems = [control_types.kSubsysWindSensor]
self.SetFaults(subsystems)
def Iterate(self, messages, first_index, last_index, states, estimates):
"""Iterate the state estimate from first_index to last_index.
Args:
messages: A ctypes array of ControlTelemetry messages to process.
first_index: An integer describing the first index to process.
last_index: An integer describing the last index to process.
states: A ctypes array of EstimatorStates output states, equal in length
to the messages array.
estimates: A ctypes array of StateEstimate output estimates, equal in
length to the messages array.
"""
assert first_index <= last_index
first_index, last_index = self.SaturateIndices(messages, first_index,
last_index)
replay.EstimatorReplayIterateArray(ctypes.byref(self._params),
first_index, last_index, messages,
ctypes.byref(self._flight_mode),
ctypes.byref(self._state), states,
estimates)
def IterateSegment(self, flight_mode_z1, state_z1, first_index, last_index,
messages, modified_messages, states, estimates):
self.ResetState(flight_mode_z1, state_z1)
self.ResetMessages(messages, first_index, last_index, modified_messages)
self.Iterate(modified_messages, first_index, last_index, states, estimates)
def ComputeOutputs(self, messages, first_index, last_index, states,
estimates):
"""Compute estimator outputs from first_index to last_index.
    Note that this function also stores the outputs in a list for each
    interval. Use the 'outputs' property to access this list.
Args:
messages: A ctypes array of ControlTelemetry messages to process.
first_index: An integer describing the first index to process.
last_index: An integer describing the last index to process.
states: A ctypes array of EstimatorStates output states, equal in length
to the messages array.
estimates: A ctypes array of StateEstimate output estimates, equal in
length to the messages array.
Returns:
An EstimatorOutput object.
"""
output = EstimatorOutput(self.initializing, messages, states, estimates,
first_index, last_index)
self._outputs.append(output)
return output
def ComputeErrorMetrics(self, references):
return [ErrorMetrics(o, r) for o, r in zip(self._outputs, references)]
@property
def params(self):
return self._params
@property
def flight_mode(self):
return copy.deepcopy(self._flight_mode)
@property
def state(self):
return copy.deepcopy(self._state)
@property
def initializing(self):
return replay.GetEstimatorTelemetry().contents.initializing
@property
def outputs(self):
return self._outputs
@property
def output_interval(self):
return max([o.segment_time for o in self._outputs])
class EstimatorMetrics(object):
"""Base class to store estimator outputs over a given interval."""
def __init__(self, messages, first_index, last_index):
self._first_index = first_index
self._last_index = last_index
self._indices = range(first_index, last_index + 1)
self._num_messages = last_index - first_index + 1
self._valid = np.zeros(self._num_messages, dtype=bool)
self._time = np.array([messages[i].time for i in self._indices])
self._position = np.zeros((self._num_messages, 3))
self._velocity = np.zeros((self._num_messages, 3))
self._attitude = np.zeros((self._num_messages, 3, 3))
self._gyro_bias = np.zeros((self._num_messages, 3))
self._flight_modes = np.unique([messages[i].flight_mode
for i in self._indices])
def SetValid(self, valid):
if isinstance(valid, bool):
self._valid = valid * np.ones(self._num_messages, dtype=bool)
else:
self._valid = valid
def SetPosition(self, position):
self._position = position
def SetVelocity(self, velocity):
self._velocity = velocity
def SetAttitude(self, attitude):
self._attitude = attitude
def SetGyroBias(self, gyro_bias):
self._gyro_bias = gyro_bias
@property
def first_index(self):
return self._first_index
@property
def last_index(self):
return self._last_index
@property
def indices(self):
return self._indices
@property
def num_messages(self):
return self._num_messages
@property
def segment_time(self):
return self._num_messages * replay.GetSystemParams().contents.ts
@property
def valid(self):
return self._valid
@property
def position(self):
return self._position
@property
def velocity(self):
return self._velocity
@property
def attitude(self):
return self._attitude
@property
def gyro_bias(self):
return self._gyro_bias
@property
def time(self):
return self._time
@property
def flight_modes(self):
return self._flight_modes
class EstimatorOutput(EstimatorMetrics):
"""Store the estimator outputs."""
def __init__(self, initializing, messages, states, estimates, first_index,
last_index):
super(EstimatorOutput, self).__init__(messages, first_index, last_index)
self.SetPosition(self.ExtractPositionEstimate(estimates))
self.SetVelocity(self.ExtractVelocityEstimate(estimates))
self.SetAttitude(self.ExtractAttitudeEstimate(estimates))
self.SetGyroBias(self.ExtractGyroBiasEstimate(states))
self.SetValid(not initializing)
def ExtractPositionEstimate(self, estimates):
"""Extract the estimator position estimates."""
position = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
position[i, 0] = estimates[m].Xg.x
position[i, 1] = estimates[m].Xg.y
position[i, 2] = estimates[m].Xg.z
return position
def ExtractVelocityEstimate(self, estimates):
"""Extract the estimator velocity estimates."""
velocity = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
velocity[i, 0] = estimates[m].Vg.x
velocity[i, 1] = estimates[m].Vg.y
velocity[i, 2] = estimates[m].Vg.z
return velocity
def ExtractAttitudeEstimate(self, estimates):
"""Extract the estimator attitude estimates."""
attitude = np.zeros((self.num_messages, 3, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
for j in range(3):
for k in range(3):
attitude[i, j, k] = estimates[m].dcm_g2b.d[j][k]
return attitude
def ExtractGyroBiasEstimate(self, states):
"""Extract the estimator gyro bias estimates."""
gyro_bias = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
imu = states[m].nav.last_used_imu
gyro_bias[i, 0] = states[m].nav.attitude[imu].filter.gyro_bias.x
gyro_bias[i, 1] = states[m].nav.attitude[imu].filter.gyro_bias.y
gyro_bias[i, 2] = states[m].nav.attitude[imu].filter.gyro_bias.z
return gyro_bias
class ErrorMetrics(object):
"""Compute error between two EstimatorMetrics objects."""
def __init__(self, a, b):
"""Instantiate an ErrorMetrics object.
Args:
a: An EstimatorMetrics object.
b: An EstimatorMetrics object.
"""
assert a.first_index == b.first_index
assert a.last_index == b.last_index
# Compute error over valid trajectory indices.
ii = np.where(a.valid)[0]
ii = ii[b.valid[ii]]
# Compute position error.
self._position_error = np.linalg.norm(a.position[ii] - b.position[ii],
axis=1)
# Compute velocity error.
self._velocity_error = np.linalg.norm(a.velocity[ii] - b.velocity[ii],
axis=1)
# Compute attitude error as the norm of the small angle rotation vector.
attitude_error = np.zeros((len(a.attitude), 3))
for i in ii:
dcm_a = np.matrix(a.attitude[i])
dcm_b = np.matrix(b.attitude[i])
delta = dcm_b.transpose() * dcm_a
attitude_error[i, 0] = -delta[1, 2]
attitude_error[i, 1] = delta[0, 2]
attitude_error[i, 2] = -delta[0, 1]
self._attitude_error = np.linalg.norm(attitude_error[ii], axis=1)
# Compute gyro bias error.
self._gyro_bias_error = np.linalg.norm(a.gyro_bias[ii] - b.gyro_bias[ii],
axis=1)
# Store time and flight modes. These quantities should be common.
self._time = a.time[ii]
self._flight_modes = a.flight_modes
@property
def flight_modes(self):
return self._flight_modes
@property
def time(self):
return self._time
@property
def position_error(self):
return self._position_error
@property
def position_mae(self):
return np.sum(np.abs(self._position_error)) / len(self._position_error)
@property
def position_maxe(self):
return np.max(self._position_error)
@property
  def position_rmse(self):
    # Note: computed as the standard deviation of the error signal; the other
    # *_rmse properties follow the same convention.
    return np.std(self._position_error)
@property
def velocity_error(self):
return self._velocity_error
@property
def velocity_mae(self):
return np.sum(np.abs(self._velocity_error)) / len(self._velocity_error)
@property
def velocity_maxe(self):
return np.max(self._velocity_error)
@property
def velocity_rmse(self):
return np.std(self._velocity_error)
@property
def attitude_error(self):
return self._attitude_error
@property
def attitude_mae(self):
return np.sum(np.abs(self._attitude_error)) / len(self._attitude_error)
@property
def attitude_maxe(self):
return np.max(self._attitude_error)
@property
def attitude_rmse(self):
return np.std(self._attitude_error)
@property
def gyro_bias_error(self):
return self._gyro_bias_error
@property
def gyro_bias_mae(self):
return np.sum(np.abs(self._gyro_bias_error)) / len(self._gyro_bias_error)
@property
def gyro_bias_maxe(self):
return np.max(self._gyro_bias_error)
@property
def gyro_bias_rmse(self):
return np.std(self._gyro_bias_error)
def ComputeCdf(error_metrics, attribute):
  """Compute the empirical CDF of 'attribute' over non-empty error metrics."""
  x = np.sort(np.array([getattr(e, attribute) for e in error_metrics
                        if len(e.time)]))
  y = np.linspace(0.0, 1.0, len(x))
  return x, y
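# Worked example (hypothetical values): for per-segment errors [0.5, 2.0, 1.0],
# ComputeCdf returns x = [0.5, 1.0, 2.0] and y = [0.0, 0.5, 1.0], i.e. the
# empirical CDF sampled at the sorted error values.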
def ComputeEstimatorErrorCdfs(ref_estimator, test_estimators, t0=-float('inf'),
t1=float('inf')):
"""Compute error CDFs by comparing each test estimator against the reference.
Args:
ref_estimator: An Estimator object.
test_estimators: A list of Estimator objects.
t0: A float describing the minimum time to consider.
t1: A float describing the maximum time to consider.
Returns:
A dict that maps the test estimator name to its error metric CDFs.
"""
output = {}
for est in test_estimators:
error_metrics = est.ComputeErrorMetrics(ref_estimator.outputs)
error_metrics = [o for o in error_metrics
if o.time.size > 0 and t0 <= np.min(o.time)
and np.max(o.time) <= t1]
pos_maxe, prob = ComputeCdf(error_metrics, 'position_maxe')
pos_mae, _ = ComputeCdf(error_metrics, 'position_mae')
pos_rmse, _ = ComputeCdf(error_metrics, 'position_rmse')
vel_maxe, _ = ComputeCdf(error_metrics, 'velocity_maxe')
vel_mae, _ = ComputeCdf(error_metrics, 'velocity_mae')
vel_rmse, _ = ComputeCdf(error_metrics, 'velocity_rmse')
att_maxe, _ = ComputeCdf(error_metrics, 'attitude_maxe')
att_mae, _ = ComputeCdf(error_metrics, 'attitude_mae')
att_rmse, _ = ComputeCdf(error_metrics, 'attitude_rmse')
bg_maxe, _ = ComputeCdf(error_metrics, 'gyro_bias_maxe')
bg_mae, _ = ComputeCdf(error_metrics, 'gyro_bias_mae')
bg_rmse, _ = ComputeCdf(error_metrics, 'gyro_bias_rmse')
output[est.name] = {
'name': est.name,
'prob': prob,
'pos_maxe': pos_maxe,
'pos_mae': pos_mae,
'pos_rmse': pos_rmse,
'vel_maxe': vel_maxe,
'vel_mae': vel_mae,
'vel_rmse': vel_rmse,
'att_maxe': att_maxe,
'att_mae': att_mae,
'att_rmse': att_rmse,
'bg_maxe': bg_maxe,
'bg_mae': bg_mae,
'bg_rmse': bg_rmse,
}
return output
def SaveEstimatorErrorCdfsToMatFile(output, filename):
# Replace invalid variable name characters with an underscore.
  mat = {re.sub(r'[^A-Za-z0-9_]', r'_', k): v for k, v in output.iteritems()}
sio.savemat(filename, mat)
def ProcessEstimatorSegments(messages, increment, seg_length, ref_estimator,
test_estimators):
"""Periodically process test estimator segments from the reference estimator.
  This function helps compare the relative performance of two or more
  estimator configurations. It iterates the reference estimator forward in
  steps of 'increment' messages. At each increment, it iterates all estimators
  for 'seg_length' messages from the current reference estimator state. Each
  estimator then stores its output trajectory within its own object.
Args:
messages: A ctypes array of ControlTelemetry messages.
increment: An integer number of messages to iterate between each segment.
seg_length: An integer number of messages to iterate for each segment.
ref_estimator: An Estimator object.
test_estimators: A list of Estimator objects.
"""
assert increment > 0
assert seg_length > 0
# Allocate memory.
num_messages = len(messages)
states = (replay.EstimatorState * num_messages)()
estimates = (replay.StateEstimate * num_messages)()
modified_messages = copy.deepcopy(messages)
num_segments = (num_messages + increment - 1) / increment
# Set initial state and clear previous outputs.
ref_estimator.Reset()
for est in test_estimators:
est.Reset()
first_index_z1 = 0
flight_mode_z1 = ref_estimator.flight_mode
state_z1 = ref_estimator.state
# Iterate for each increment.
for segment in range(num_segments):
first_index = segment * increment
last_index = min(segment * increment + seg_length, num_messages) - 1
# Advance reference estimator to the segment start.
if first_index_z1 < first_index - 1:
ref_estimator.IterateSegment(flight_mode_z1, state_z1, first_index_z1,
first_index - 1, messages, modified_messages,
states, estimates)
first_index_z1 = first_index
flight_mode_z1 = ref_estimator.flight_mode
state_z1 = ref_estimator.state
# Iterate reference estimator over the current segment.
ref_estimator.IterateSegment(flight_mode_z1, state_z1, first_index,
last_index, messages, modified_messages,
states, estimates)
ref_estimator.ComputeOutputs(modified_messages, first_index, last_index,
states, estimates)
# Iterate test configurations over the current segment.
for est in test_estimators:
est.IterateSegment(flight_mode_z1, state_z1, first_index, last_index,
messages, modified_messages, states, estimates)
est.ComputeOutputs(modified_messages, first_index, last_index, states,
estimates)
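# A minimal usage sketch (mirroring main() below; 'messages' comes from
# LoadMessages):
#   ref = Estimator('Reference')
#   test = CreateGpsDropoutScenario(ref)
#   ProcessEstimatorSegments(messages, 100, 1000, ref, [test])
#   cdfs = ComputeEstimatorErrorCdfs(ref, [test])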
def CreatePureInertialScenario(ref_estimator, name='Pure inertial'):
est = copy.deepcopy(ref_estimator)
est.name = name
est.SetAllFaults()
est.ClearImuAccelGyroFaults()
return est
def CreateGpsDropoutScenario(ref_estimator, name='Full GPS dropout'):
est = copy.deepcopy(ref_estimator)
est.name = name
est.SetGpsFaults()
return est
def main(argv):
"""Implement a simple demo for computing error CDFs."""
# Input/output flags.
gflags.DEFINE_string('input_file', None, 'Full path to wing HDF5 log file.')
gflags.MarkFlagAsRequired('input_file')
gflags.DEFINE_string('output_file', None, 'Full path to output MAT file.')
gflags.MarkFlagAsRequired('output_file')
# Segment processing flags.
gflags.DEFINE_integer('increment', 100,
'Integer number of messages between segments.')
gflags.DEFINE_integer('seg_length', 1000,
'Integer number of messages in each segment.')
# Evaluate segments over a specific time interval.
gflags.DEFINE_float('start_time', -float('inf'),
'Start time to evaluate segment errors.')
gflags.DEFINE_float('end_time', float('inf'),
'End time to evaluate segment errors.')
# Override default parameters.
gflags.DEFINE_list('params', [],
'A comma-separated list of param=value tokens, where '
'each param describes the dot path to a parameter in '
'EstimatorParams.')
gflags.RegisterValidator('params',
lambda l: all(len(s.split('=')) == 2 for s in l),
message='Invalid key=value parameter syntax.')
# Scenarios to process.
gflags.DEFINE_bool('scenario_pure_inertial', False,
'Process pure inertial scenario.')
gflags.DEFINE_bool('scenario_gps_dropout', False,
'Process GPS dropout scenario.')
# Common faults to introduce.
gflags.DEFINE_bool('fault_weather', False,
'Fault weather subsystems to avoid an assert when '
'reprocessing historical data.')
gflags.DEFINE_bool('fault_glas', False, 'Fault GLAS subsystems.')
# Specify flight for special handling.
gflags.DEFINE_string('flight', None,
'Fix known issues associated with the given flight.')
try:
argv = gflags.FLAGS(argv)
except gflags.FlagsError, e:
print '{}\nUsage: {} ARGS\n{}'.format(e, sys.argv[0], gflags.FLAGS)
sys.exit(1)
flags = gflags.FLAGS
ref_estimator = Estimator('Reference')
if flags.fault_glas:
ref_estimator.SetGlasFaults()
if flags.fault_weather:
ref_estimator.SetWeatherFaults()
for param_value in flags.params:
param, value = param_value.split('=', 1)
ref_estimator.UpdateParam(param, float(value))
test_estimators = []
if flags.scenario_pure_inertial:
test_estimators.append(CreatePureInertialScenario(ref_estimator))
if flags.scenario_gps_dropout:
test_estimators.append(CreateGpsDropoutScenario(ref_estimator))
messages = LoadMessages(flags.input_file, flight=flags.flight)
ProcessEstimatorSegments(messages, flags.increment, flags.seg_length,
ref_estimator, test_estimators)
output = ComputeEstimatorErrorCdfs(ref_estimator, test_estimators,
t0=flags.start_time, t1=flags.end_time)
SaveEstimatorErrorCdfsToMatFile(output, flags.output_file)
if __name__ == '__main__':
main(sys.argv)
|
{"hexsha": "31b2f46e7786004b90af6206c189ee907be116a8", "size": 29994, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/control/replay/estimator_helper.py", "max_stars_repo_name": "leozz37/makani", "max_stars_repo_head_hexsha": "c94d5c2b600b98002f932e80a313a06b9285cc1b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1178, "max_stars_repo_stars_event_min_datetime": "2020-09-10T17:15:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:59:35.000Z", "max_issues_repo_path": "analysis/control/replay/estimator_helper.py", "max_issues_repo_name": "leozz37/makani", "max_issues_repo_head_hexsha": "c94d5c2b600b98002f932e80a313a06b9285cc1b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-22T05:22:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-22T05:22:35.000Z", "max_forks_repo_path": "analysis/control/replay/estimator_helper.py", "max_forks_repo_name": "leozz37/makani", "max_forks_repo_head_hexsha": "c94d5c2b600b98002f932e80a313a06b9285cc1b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 107, "max_forks_repo_forks_event_min_datetime": "2020-09-10T17:29:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T09:00:14.000Z", "avg_line_length": 35.0397196262, "max_line_length": 80, "alphanum_fraction": 0.6836033873, "include": true, "reason": "import numpy,import scipy", "num_tokens": 7056}
|
[STATEMENT]
lemma sinvar_mono_I_proofrule_simple:
"\<lbrakk> (\<forall> G nP. sinvar G nP = (\<forall> (e1, e2) \<in> edges G. P e1 e2 nP) ) \<rbrakk> \<Longrightarrow> sinvar_mono"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>G nP. sinvar G nP = (\<forall>(e1, e2)\<in>edges G. P e1 e2 nP) \<Longrightarrow> sinvar_mono
[PROOF STEP]
apply(simp add: sinvar_mono_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>G nP. sinvar G nP = (\<forall>x\<in>edges G. case x of (e1, e2) \<Rightarrow> P e1 e2 nP) \<Longrightarrow> \<forall>nP N E'. (\<exists>E. wf_graph \<lparr>nodes = N, edges = E\<rparr> \<and> E' \<subseteq> E \<and> (\<forall>x\<in>E. case x of (e1, e2) \<Rightarrow> P e1 e2 nP)) \<longrightarrow> (\<forall>x\<in>E'. case x of (e1, e2) \<Rightarrow> P e1 e2 nP)
[PROOF STEP]
apply(clarify)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>nP N E' a b E. \<lbrakk>\<forall>G nP. sinvar G nP = (\<forall>(e1, e2)\<in>edges G. P e1 e2 nP); (a, b) \<in> E'; wf_graph \<lparr>nodes = N, edges = E\<rparr>; E' \<subseteq> E; \<forall>(e1, e2)\<in>E. P e1 e2 nP\<rbrakk> \<Longrightarrow> P a b nP
[PROOF STEP]
apply(fast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 559, "file": "Network_Security_Policy_Verification_TopoS_withOffendingFlows", "length": 4}
|
from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from sid.msm import _flatten_index
from sid.msm import _harmonize_input
from sid.msm import _is_diagonal
from sid.msm import get_diag_weighting_matrix
from sid.msm import get_flat_moments
from sid.msm import get_msm_func
def dummy_simulate(_params): # noqa: U101
return pd.Series([1, 2])
def dummy_calc_moments(df):
return df
@pytest.mark.end_to_end
def test_estimation_with_msm():
s = pd.Series([1, 2])
msm_func = get_msm_func(
simulate=dummy_simulate,
calc_moments=dummy_calc_moments,
empirical_moments=s,
replace_nans=lambda x: x,
additional_outputs={"sr": lambda x: x},
)
result = msm_func(None)
expected = {
"value": 0,
"root_contributions": pd.Series([0.0, 0.0], ["0_0", "0_1"]),
"empirical_moments": {0: s},
"simulated_moments": {0: s},
"sr": pd.Series([1, 2]),
}
    for k, v in result.items():
        if k == "value":
            assert v == expected[k]
        else:
            if isinstance(v, dict):
                for kk, vv in v.items():
                    assert vv.equals(expected[k][kk])
            else:
                assert v.equals(expected[k])
@pytest.mark.integration
@pytest.mark.parametrize(
"empirical_moments, weights, expected",
[({"a": pd.Series([1]), "b": pd.Series([2])}, None, np.eye(2))],
)
def test_get_diag_weighting_matrix(empirical_moments, weights, expected):
result = get_diag_weighting_matrix(empirical_moments, weights)
assert np.all(result == expected)
@pytest.mark.integration
def test_get_diag_weighting_matrix_with_scalar_weights():
emp_moms = {0: pd.Series([1, 2]), 1: pd.Series([2, 3, 4])}
weights = {0: 0.3, 1: 0.7}
result = get_diag_weighting_matrix(emp_moms, weights)
expected = np.diag([0.3] * 2 + [0.7] * 3)
assert np.all(result == expected)
@pytest.mark.integration
@pytest.mark.parametrize(
"moments, expected",
[
({0: pd.Series([1]), 1: pd.Series([2])}, pd.Series([1, 2], ["0_0", "1_0"])),
(
{"a": pd.DataFrame([[1, 2]], columns=["b", "c"])},
pd.Series([1, 2], ["a_b_0", "a_c_0"]),
),
],
)
def test_get_flat_moments(moments, expected):
result = get_flat_moments(moments)
assert result.equals(expected)
def _func(): # pragma: no cover
pass
@pytest.mark.unit
@pytest.mark.parametrize(
"data, expectation, expected",
[
(pd.Series([1]), does_not_raise(), {0: pd.Series([1])}),
(pd.DataFrame([[1]]), does_not_raise(), {0: pd.DataFrame([[1]])}),
(_func, does_not_raise(), {0: _func}),
({1: 2}, does_not_raise(), {1: 2}),
({1, 2}, pytest.raises(ValueError, match="Moments must be"), None),
],
)
def test_harmonize_input(data, expectation, expected):
with expectation:
result = _harmonize_input(data)
for k, v in result.items():
if isinstance(v, (pd.Series, pd.DataFrame)):
assert v.equals(expected[k])
else:
assert result == expected
@pytest.mark.unit
def test_flatten_index():
data = {
"a": pd.Series(data=[1]),
"b": pd.Series(index=["b"], data=[2]),
"c": pd.DataFrame({"c": [3]}),
"d": pd.Series(data=[4], name="e", index=[4]),
}
result = _flatten_index(data)
expected = pd.Series(index=["a_0", "b_b", "c_c_0", "d_4"], data=[1, 2, 3, 4])
assert result.equals(expected)
@pytest.mark.unit
@pytest.mark.parametrize(
"mat, expected", [(np.arange(4).reshape(2, 2), False), (np.eye(2), True)]
)
def test_is_diagonal(mat, expected):
result = _is_diagonal(mat)
assert result == expected
|
{"hexsha": "15dea6b997cdf3f126269b3a6e883217dcfdae17", "size": 3776, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_msm.py", "max_stars_repo_name": "covid-19-impact-lab/sid", "max_stars_repo_head_hexsha": "d867f55d4d005b01c672bd2edd0e1dc974cb182b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-04-18T09:18:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-19T02:42:39.000Z", "max_issues_repo_path": "tests/test_msm.py", "max_issues_repo_name": "covid-19-impact-lab/sid", "max_issues_repo_head_hexsha": "d867f55d4d005b01c672bd2edd0e1dc974cb182b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 143, "max_issues_repo_issues_event_min_datetime": "2020-04-18T16:58:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T22:16:03.000Z", "max_forks_repo_path": "tests/test_msm.py", "max_forks_repo_name": "covid-19-impact-lab/sid", "max_forks_repo_head_hexsha": "d867f55d4d005b01c672bd2edd0e1dc974cb182b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-07T07:38:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-07T07:38:53.000Z", "avg_line_length": 28.3909774436, "max_line_length": 84, "alphanum_fraction": 0.6003707627, "include": true, "reason": "import numpy", "num_tokens": 1064}
|
"""
This module is part of a system for the automatic enrichment
of a WordNet-like taxonomy.
Copyright 2020 Ivan Bondarenko, Tatiana Batura
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import codecs
from datetime import datetime
import json
import multiprocessing
import os
import random
import nltk
import numpy as np
from hyponyms_loading import load_terms_for_submission, inflect_terms_for_submission
from text_processing import load_news, load_wiki, prepare_senses_index_for_search
from text_processing import calculate_sense_occurrences_in_texts, join_sense_occurrences_in_texts
from text_processing import load_sense_occurrences_in_texts
N_MAX_SENTENCES_PER_MORPHO = 5
MIN_SENTENCE_LENGTH = 7
MAX_SENTENCE_LENGTH = 30
def main():
random.seed(142)
np.random.seed(142)
parser = ArgumentParser()
parser.add_argument('-d', '--data', dest='data_source', type=str, required=True,
choices=['wiki', 'news', 'librusec'],
help='A data source kind (wiki, news or librusec), prepared for '
'the Taxonomy Enrichment competition.')
parser.add_argument('-p', '--path', dest='source_path', required=True, type=str,
help='Path to the source data file or directory.')
parser.add_argument('-j', '--json', dest='json_file', type=str, required=True,
help='The JSON file with found contexts of terms for submission.')
parser.add_argument('-b', '--public', dest='public_data', type=str, required=True,
help='A text file with a list of unseen hyponyms for public submission.')
parser.add_argument('-r', '--private', dest='private_data', type=str, required=True,
help='A text file with a list of unseen hyponyms for private submission.')
parser.add_argument('-t', '--track', dest='track_name', type=str, required=True, choices=['nouns', 'verbs'],
help='A competition track name (nouns or verbs).')
args = parser.parse_args()
nltk.download('punkt')
public_data_name = os.path.normpath(args.public_data)
assert os.path.isfile(public_data_name)
private_data_name = os.path.normpath(args.private_data)
assert os.path.isfile(private_data_name)
data_for_public_submission = load_terms_for_submission(public_data_name)
print('Number of hyponyms for public submission is {0}.'.format(len(data_for_public_submission)))
data_for_private_submission = load_terms_for_submission(private_data_name)
print('Number of hyponyms for private submission is {0}.'.format(len(data_for_private_submission)))
print('')
full_path = os.path.normpath(args.source_path)
if args.data_source == "news":
assert os.path.isdir(full_path), 'The directory "{0}" does not exist!'.format(full_path)
else:
assert os.path.isfile(full_path), 'The file "{0}" does not exist!'.format(full_path)
result_file_name = os.path.normpath(args.json_file)
result_file_dir = os.path.dirname(result_file_name)
if len(result_file_dir) > 0:
assert os.path.isdir(result_file_dir), 'The directory "{0}" does not exist!'.format(result_file_dir)
assert not os.path.isdir(result_file_name), '"{0}" is a directory, but a file is expected.'.format(result_file_name)
senses = inflect_terms_for_submission(data_for_public_submission + data_for_private_submission,
"NOUN" if args.track_name == 'nouns' else "VERB")
print("All terms for submission have been inflected using the PyMorphy2.")
print("")
search_index = prepare_senses_index_for_search(senses)
if os.path.isfile(result_file_name):
all_occurrences_of_senses = load_sense_occurrences_in_texts(result_file_name)
else:
all_occurrences_of_senses = dict()
generator = load_news(full_path) if args.data_source == "news" else load_wiki(full_path)
counter = 0
n_processes = os.cpu_count()
if n_processes > 1:
pool = multiprocessing.Pool(processes=n_processes)
else:
pool = None
max_buffer_size = 30000 * max(1, n_processes)
buffer = []
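    # Texts accumulate in 'buffer'; once it reaches max_buffer_size, the buffer
    # is processed in one go (split into near-equal chunks for pool.starmap when
    # several CPUs are available) and the resulting occurrence dicts are merged.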
for new_text in generator:
buffer.append(new_text)
if len(buffer) >= max_buffer_size:
if pool is None:
new_occurrences_of_senses = calculate_sense_occurrences_in_texts(
source_texts=buffer, senses_dict=senses, search_index_for_senses=search_index,
min_sentence_length=MIN_SENTENCE_LENGTH, max_sentence_length=MAX_SENTENCE_LENGTH,
n_sentences_per_morpho=N_MAX_SENTENCES_PER_MORPHO
)
else:
n_data_part = int(np.ceil(len(buffer) / float(n_processes)))
parts_of_buffer = [(buffer[(idx * n_data_part):((idx + 1) * n_data_part)], senses, search_index,
N_MAX_SENTENCES_PER_MORPHO, MIN_SENTENCE_LENGTH, MAX_SENTENCE_LENGTH)
for idx in range(n_processes - 1)]
parts_of_buffer.append((buffer[((n_processes - 1) * n_data_part):], senses, search_index,
N_MAX_SENTENCES_PER_MORPHO, MIN_SENTENCE_LENGTH, MAX_SENTENCE_LENGTH))
parts_of_result = list(pool.starmap(calculate_sense_occurrences_in_texts, parts_of_buffer))
new_occurrences_of_senses = join_sense_occurrences_in_texts(parts_of_result, N_MAX_SENTENCES_PER_MORPHO)
del parts_of_buffer, parts_of_result
all_occurrences_of_senses = join_sense_occurrences_in_texts(
[all_occurrences_of_senses, new_occurrences_of_senses],
N_MAX_SENTENCES_PER_MORPHO
)
del new_occurrences_of_senses
counter += len(buffer)
buffer.clear()
print("{0}: {1} texts have been processed.".format(
datetime.now().strftime("%A, %d %B %Y, %I:%M %p"), counter
))
print(' {0} terms (senses) from {1} have been found.'.format(len(all_occurrences_of_senses), len(senses)))
if len(buffer) > 0:
if pool is None:
new_occurrences_of_senses = calculate_sense_occurrences_in_texts(
source_texts=buffer, senses_dict=senses, search_index_for_senses=search_index,
min_sentence_length=MIN_SENTENCE_LENGTH, max_sentence_length=MAX_SENTENCE_LENGTH,
n_sentences_per_morpho=N_MAX_SENTENCES_PER_MORPHO
)
else:
n_data_part = int(np.ceil(len(buffer) / float(n_processes)))
parts_of_buffer = [(buffer[(idx * n_data_part):((idx + 1) * n_data_part)], senses, search_index,
N_MAX_SENTENCES_PER_MORPHO, MIN_SENTENCE_LENGTH, MAX_SENTENCE_LENGTH)
for idx in range(n_processes - 1)]
parts_of_buffer.append((buffer[((n_processes - 1) * n_data_part):], senses, search_index,
N_MAX_SENTENCES_PER_MORPHO, MIN_SENTENCE_LENGTH, MAX_SENTENCE_LENGTH))
parts_of_result = list(pool.starmap(calculate_sense_occurrences_in_texts, parts_of_buffer))
new_occurrences_of_senses = join_sense_occurrences_in_texts(parts_of_result, N_MAX_SENTENCES_PER_MORPHO)
all_occurrences_of_senses = join_sense_occurrences_in_texts(
[all_occurrences_of_senses, new_occurrences_of_senses],
N_MAX_SENTENCES_PER_MORPHO
)
del new_occurrences_of_senses
counter += len(buffer)
print("{0}: {1} texts have been processed.".format(
datetime.now().strftime("%A, %d %B %Y, %I:%M:%S %p"), counter
))
print(' {0} terms (senses) from {1} have been found.'.format(len(all_occurrences_of_senses), len(senses)))
with codecs.open(filename=result_file_name, mode="w", encoding="utf-8", errors="ignore") as fp:
json.dump(all_occurrences_of_senses, fp, ensure_ascii=False, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
|
{"hexsha": "e4c1efdfa3ced5b4314eede90dfcbb9ae2201e9b", "size": 8607, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepare_contexts_for_submission.py", "max_stars_repo_name": "CT2020Hypernym/Hypernym", "max_stars_repo_head_hexsha": "50ab2c38f93d596dd78cdfe84cb6c8adae21b6ca", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prepare_contexts_for_submission.py", "max_issues_repo_name": "CT2020Hypernym/Hypernym", "max_issues_repo_head_hexsha": "50ab2c38f93d596dd78cdfe84cb6c8adae21b6ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-31T19:33:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:28:32.000Z", "max_forks_repo_path": "prepare_contexts_for_submission.py", "max_forks_repo_name": "CT2020Hypernym/Hypernym", "max_forks_repo_head_hexsha": "50ab2c38f93d596dd78cdfe84cb6c8adae21b6ca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.5389221557, "max_line_length": 120, "alphanum_fraction": 0.6794469618, "include": true, "reason": "import numpy", "num_tokens": 1931}
|
import argparse
import os
import subprocess
import SimpleITK as sitk
import numpy as np
from nipype.interfaces.ants import N4BiasFieldCorrection
from natsort import natsorted
def ReadImage(file_path):
    '''Return the NumPy ndarray for an MR image at the given path.'''
return sitk.GetArrayFromImage(sitk.ReadImage(file_path)).astype(np.float32)
def N4ITK(filepath, output_name):
print('N4ITK working on: %s' %filepath)
n4 = N4BiasFieldCorrection()
n4.inputs.dimension = 3
n4.inputs.input_image = filepath
n4.inputs.output_image = output_name
n4.run()
def RegisterBrain(t1_path, ref_path, subject2mni_mat, mni2subject_mat):
print('Working on registration!')
# Create the affine transformation matrix from subject space to MNI152 1mm space
subprocess.call(["flirt", "-in", t1_path, "-ref", ref_path, "-omat", subject2mni_mat])
subprocess.call(["convert_xfm", "-omat", mni2subject_mat, "-inverse", subject2mni_mat])
print('Finish this subject!')
def RegisterLabels2Subject(refVol_path, bp_filepaths, mni2subject_mat, temp_dir):
    '''Register individual labels from MNI152 space to subject space.'''
for j in range(len(bp_filepaths)):
label_name = os.path.join(temp_dir, "lab"+str(j+1)+".nii.gz")
# Register Brain Labels to Subject Space
subprocess.call(["flirt", "-in", bp_filepaths[j], "-ref", refVol_path, "-out", label_name, "-init", mni2subject_mat, "-applyxfm"])
def SubjectLabels2ParcellationArgmax(subject_bp_filepaths, subject_name):
print('Mapping brain parcellation to subject')
subjectBrainParcellations = np.zeros((len(subject_bp_filepaths)+1, 155, 240, 240), dtype=np.float32)
img = sitk.ReadImage(subject_bp_filepaths[0])
for j, bp in enumerate(subject_bp_filepaths):
subjectBrainParcellations[j+1,:] = ReadImage(bp)
brainParcellation = np.argmax(subjectBrainParcellations, axis=0)
brainParcellationFloat = brainParcellation.astype(np.float32)
brainParcellationFloat_img = sitk.GetImageFromArray(brainParcellationFloat)
brainParcellationFloat_img.CopyInformation(img)
sitk.WriteImage(brainParcellationFloat_img, subject_name)
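# Note: channel 0 of the stacked array stays all-zero and acts as the implicit
# background class, so np.argmax over axis 0 yields 0 where no registered label
# responds and the 1-based label index elsewhere.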
def Remove(filepaths):
for file in filepaths:
os.remove(file)
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="the input directory to a BraTS subject T1 image", type=str)
parser.add_argument("-o", "--output", help="the output directory to save the subject's brain parcellation", type=str)
parser.add_argument("-n", "--name", help="subject name", type=str)
args = parser.parse_args()
print("Creating a HarvardOxford Subcortical Brain Parcellation in the Subject Space!!!")
filepath = args.input
output_dir = args.output
root_dir = os.path.split(filepath)[0]
file_name = os.path.split(filepath)[1]
temp_dir = os.path.join(root_dir, '.temp_bp')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
# Apply N4ITK bias correction on MR t1 images
N4ITK_name = file_name[:file_name.find(".nii.gz")]+'_temp.nii.gz'
N4ITK_path = os.path.join(temp_dir, N4ITK_name)
#N4ITK(filepath, N4ITK_path)
# Registration
mni152_1mm_path = './MNI152_T1_1mm_brain.nii.gz'
subject2mni_path = os.path.join(temp_dir, file_name[:file_name.index('.nii.gz')]+'_invol2refvol.mat')
mni2subject_path = os.path.join(temp_dir, file_name[:file_name.index('.nii.gz')]+'_refvol2invol.mat')
#RegisterBrain(N4ITK_path, mni152_1mm_path, subject2mni_path, mni2subject_path)
# Mapping individual brain parcellation to subject
brain_parcellation_path = './atlases/HarvardOxford'
bp_filepaths = [os.path.join(root, name) for root, dirs, files in os.walk(brain_parcellation_path) for name in files if name.endswith('.nii.gz')]
bp_filepaths = natsorted(bp_filepaths, key=lambda y: y.lower())
refVol_path = filepath
#RegisterLabels2Subject(refVol_path, bp_filepaths, mni2subject_path, temp_dir)
# Merge individual labels into the subject-space brain parcellation using argmax
subject_bp_filepaths = [os.path.join(root, name) for root, dirs, files in os.walk(temp_dir) for name in files if 'HarvardOxford' not in name and 'lab' in name and name.endswith('.nii.gz')]
subject_bp_filepaths = natsorted(subject_bp_filepaths, key=lambda y: y.lower())
subject_name = os.path.join(output_dir, args.name+'_HarvardOxford-sub.nii.gz')
SubjectLabels2ParcellationArgmax(subject_bp_filepaths, subject_name)
Remove(subject_bp_filepaths)
# Remove temporary files if the corresponding pipeline steps produced them.
for temp_file in (N4ITK_path, subject2mni_path, mni2subject_path):
    if os.path.exists(temp_file):
        os.remove(temp_file)
os.rmdir(temp_dir)
|
{"hexsha": "2c332ecb8add7b7f1f4549b682051e9b4dfd96f0", "size": 4429, "ext": "py", "lang": "Python", "max_stars_repo_path": "createBrainParcellation.py", "max_stars_repo_name": "pykao/BraTS2018-tumor-segmentation", "max_stars_repo_head_hexsha": "6c81ab670f7bd035312f7ccd729776c5c05c47a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 86, "max_stars_repo_stars_event_min_datetime": "2018-09-07T08:45:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T18:26:24.000Z", "max_issues_repo_path": "createBrainParcellation.py", "max_issues_repo_name": "pykao/BraTS2018-tumor-segmentation", "max_issues_repo_head_hexsha": "6c81ab670f7bd035312f7ccd729776c5c05c47a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-09-16T07:59:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-16T07:20:26.000Z", "max_forks_repo_path": "createBrainParcellation.py", "max_forks_repo_name": "pykao/BraTS2018-tumor-segmentation", "max_forks_repo_head_hexsha": "6c81ab670f7bd035312f7ccd729776c5c05c47a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2018-09-11T05:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T16:02:35.000Z", "avg_line_length": 44.7373737374, "max_line_length": 188, "alphanum_fraction": 0.7735380447, "include": true, "reason": "import numpy", "num_tokens": 1202}
|
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import copy
import utils.pytorch_util as ptu
def eval_np(module, *args, **kwargs):
"""
Eval this module with a numpy interface
Same as a call to __call__ except all Variable input/outputs are
replaced with numpy equivalents.
Assumes the output is either a single object or a tuple of objects.
"""
torch_args = tuple(torch_ify(x) for x in args)
torch_kwargs = {k: torch_ify(v) for k, v in kwargs.items()}
outputs = module(*torch_args, **torch_kwargs)
if isinstance(outputs, tuple):
return tuple(np_ify(x) for x in outputs)
else:
return np_ify(outputs)
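# A minimal usage sketch (the module and shapes below are hypothetical):
#   q_net = torch.nn.Linear(4, 1)
#   q_values = eval_np(q_net, np.zeros((8, 4), dtype=np.float32))
# numpy inputs are converted to tensors, the module is called, and the outputs
# are converted back to numpy arrays.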
def torch_ify(np_array_or_other):
if isinstance(np_array_or_other, np.ndarray):
return ptu.from_numpy(np_array_or_other)
else:
return np_array_or_other
def np_ify(tensor_or_other):
if isinstance(tensor_or_other, torch.autograd.Variable):
return ptu.get_numpy(tensor_or_other)
else:
return tensor_or_other
def _elem_or_tuple_to_variable(elem_or_tuple):
if isinstance(elem_or_tuple, tuple):
return tuple(
_elem_or_tuple_to_variable(e) for e in elem_or_tuple
)
return ptu.from_numpy(elem_or_tuple).float()
def _filter_batch(np_batch):
for k, v in np_batch.items():
if v.dtype == np.bool:
yield k, v.astype(int)
else:
yield k, v
def np_to_pytorch_batch(np_batch):
return {
k: _elem_or_tuple_to_variable(x)
for k, x in _filter_batch(np_batch)
if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)
}
def optimize_policy(policy, policy_optimizer, buffer, init_policy, action_space, obj_func,
batch_size=128, num_actions=10, upper_bound=False, iterations=150, out_dir='', epoch=0,
save_fig=False):
dataset = np.copy(buffer.get_dataset())
ptu.copy_model_params_from_to(init_policy, policy)
zero_tensor = torch.tensor(0.)
losses = []
norms = []
best_loss = -np.inf
for it in range(iterations):
random.shuffle(dataset)
start = 0
losses_ = []
norms_ = []
        batch_size = dataset.shape[0]  # full batch: overrides the batch_size argument
while start < dataset.shape[0]:
states = torch_ify(dataset[start:start + batch_size])
iters = 1
prev_actions, policy_mean, policy_log_std, log_pi, *_ = policy(
obs=states, reparameterize=True, return_log_prob=True, deterministic=True
)
for i in range(iters):
target_actions, policy_mean, policy_log_std, log_pi, *_ = policy(
obs=states, reparameterize=True, return_log_prob=True, deterministic=True
)
if torch.isclose(torch.norm(prev_actions - target_actions), zero_tensor, atol=1e-3) and i != 0:
#print("Actions are the same, Stoping")
break
obj = obj_func(states, target_actions, upper_bound)
                # obj_func may evaluate an optimistic upper bound of the
                # objective when upper_bound is True; ascend it via -obj.
policy_loss = (-obj).mean()
policy_optimizer.zero_grad()
policy_loss.backward()
policy_optimizer.step()
norm = grad_norm(policy)
losses_.append(np.asscalar(ptu.get_numpy(policy_loss)))
norms_.append(np.asscalar(norm))
#print("Gradient Norm:", norm)
if torch.isclose(norm, zero_tensor, atol=1e-3):
#print("Gradient Norm is zero, Stopping")
break
prev_actions = target_actions
start += batch_size
curr_loss = -np.mean(losses_)
losses.append(curr_loss)
norms.append(np.mean(norms_))
if curr_loss > best_loss:
best_loss = curr_loss
best_params = copy.deepcopy(policy.state_dict())
# if curr_loss != best_loss:
# policy.load_state_dict(best_params)
if save_fig:
fig, ax = plt.subplots()
# make a plot
ax.plot(losses, color="red", label='Q')
        # set y-axis label
ax.set_ylabel("Q", color="red", fontsize=14)
# twin object for two different y-axis on the sample plot
ax2 = ax.twinx()
# make a plot with different y-axis using second axis object
ax2.plot(norms, color="blue", label='grad norm')
ax2.set_ylabel("Grad Norm", color="blue", fontsize=14)
#plt.show()
# save the plot as a file
fig.savefig(out_dir + '/' + ('upper_bound_' if upper_bound else '') + 'policy_opt_' + str(epoch) + '.jpg',
format='jpeg',
dpi=100,
bbox_inches='tight')
plt.close(fig)
print("Optimized")
return policy
def grad_norm(model):
total_norm = 0
for p in model.parameters():
try:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
except:
pass
total_norm = total_norm ** (1. / 2)
return total_norm
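# Note: grad_norm returns the 2-norm of all parameter gradients concatenated,
# i.e. sqrt(sum_p ||grad_p||_2^2).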
|
{"hexsha": "7d6befd6cc380fb082bc726abc53978029f4893a", "size": 5153, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/core.py", "max_stars_repo_name": "amarildolikmeta/oac-explore", "max_stars_repo_head_hexsha": "e3d63992a4ff33c8df593941f498457e94f81eb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/core.py", "max_issues_repo_name": "amarildolikmeta/oac-explore", "max_issues_repo_head_hexsha": "e3d63992a4ff33c8df593941f498457e94f81eb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/core.py", "max_forks_repo_name": "amarildolikmeta/oac-explore", "max_forks_repo_head_hexsha": "e3d63992a4ff33c8df593941f498457e94f81eb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-13T15:38:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T15:38:41.000Z", "avg_line_length": 34.5838926174, "max_line_length": 114, "alphanum_fraction": 0.5982922569, "include": true, "reason": "import numpy", "num_tokens": 1170}
|
import os
import numpy as np
from torch.utils import data
from .parsers.atis import readATISFile
class PropheseeNCars(data.Dataset):
"""Prophesee N-Cars dataset from:
Amos Sironi, Manuele Brambilla, Nicolas Bourdis, Xavier Lagorce, Ryad Benosman
“HATS: Histograms of Averaged Time Surfaces for Robust Event-based Object Classification”.
To appear in IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018
Available for download: https://www.prophesee.ai/2018/03/13/dataset-n-cars/
"""
def __init__(self, path: str, is_train: bool = True, transforms=None):
sub_path = "train" if is_train else "test"
path = os.path.join(path, sub_path)
assert os.path.exists(path)
self._files = []
self._labels = []
for root, dirs, files in os.walk(path):
label = os.path.basename(root)
for file in files:
if file.endswith(".dat"):
self._files.append(os.path.join(root, file))
self._labels.append(label)
self._files = np.asarray(self._files)
self._labels = np.asarray(self._labels)
self.transforms = transforms
def __len__(self):
return self._labels.size
def __getitem__(self, index):
spike_train = readATISFile(self._files[index])
spike_train.width = 120
spike_train.height = 100
spike_train.duration = 100000 # 100ms
if self.transforms is not None:
spike_train = self.transforms(spike_train)
return spike_train, self._labels[index]
|
{"hexsha": "b34633ed569a725692defe073ddc391a99d60d1e", "size": 1597, "ext": "py", "lang": "Python", "max_stars_repo_path": "ebdataset/vision/prophesee_ncars.py", "max_stars_repo_name": "tihbe/python-ebdataset", "max_stars_repo_head_hexsha": "4d16822a3a6b45882124a8d7f7e124bd39a75868", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-07-30T09:31:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T10:49:23.000Z", "max_issues_repo_path": "ebdataset/vision/prophesee_ncars.py", "max_issues_repo_name": "tihbe/python-ebdataset", "max_issues_repo_head_hexsha": "4d16822a3a6b45882124a8d7f7e124bd39a75868", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-01-15T07:12:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-07T02:59:32.000Z", "max_forks_repo_path": "ebdataset/vision/prophesee_ncars.py", "max_forks_repo_name": "tihbe/python-ebdataset", "max_forks_repo_head_hexsha": "4d16822a3a6b45882124a8d7f7e124bd39a75868", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-01T13:27:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-01T13:27:06.000Z", "avg_line_length": 36.2954545455, "max_line_length": 94, "alphanum_fraction": 0.6487163431, "include": true, "reason": "import numpy", "num_tokens": 384}
|
[STATEMENT]
lemma continuous_on_const[continuous_intros,simp]: "continuous_on s (\<lambda>x. c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on s (\<lambda>x. c)
[PROOF STEP]
unfolding continuous_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x\<in>s. ((\<lambda>x. c) \<longlongrightarrow> c) (at x within s)
[PROOF STEP]
by auto
|
{"llama_tokens": 150, "file": null, "length": 2}
|
# Adapted from the VGG 16 model: https://github.com/machrisaa/tensorflow-vgg
import time
import os
import inspect
import numpy as np
from termcolor import colored
import tensorflow as tf
from fcn.losses import sigmoid_cross_entropy_balanced
from fcn.utils.io import IO
class PickNet():
def __init__(self, cfgs, run='training'):
self.cfgs = cfgs
self.io = IO()
self.inputs = tf.placeholder(tf.float32, [None, None, self.cfgs[run]['data_width'], self.cfgs[run]['n_channels']])
self.groundtruths = tf.placeholder(tf.float32, [None , None, self.cfgs[run]['data_width'], 1])
self.define_model()
def define_model(self):
"""
        PickNet model based on RSRN.
        Adds a side branch (with deconv upsampling) after each conv layer.
"""
start_time = time.time()
# block 1
self.conv1_1 = self.conv_layer_vgg(self.inputs, name="conv1_1",
kh=self.cfgs['b1_convh'], kw=self.cfgs['b1_convw'],
n_out=64, dh=1, dw=1)
self.conv1_2 = self.conv_layer_vgg(self.conv1_1, name="conv1_2",
kh=self.cfgs['b1_convh'], kw=self.cfgs['b1_convw'],
n_out=64, dh=1, dw=1)
self.pool1 = self.max_pool(self.conv1_2, name="pool1", kh=1, kw=2, dh=1, dw=2)
# block 2
self.conv2_1 = self.conv_layer_vgg(self.pool1, name="conv2_1",
kh=self.cfgs['b2_convh'], kw=self.cfgs['b2_convw'],
n_out=128, dh=1, dw=1)
self.conv2_2 = self.conv_layer_vgg(self.conv2_1, name="conv2_2",
kh=self.cfgs['b2_convh'], kw=self.cfgs['b2_convw'],
n_out=128, dh=1, dw=1)
self.pool2 = self.max_pool(self.conv2_2, name="pool2", kh=1, kw=2, dh=1, dw=2)
# # block 3
self.conv3_1 = self.conv_layer_vgg(self.pool2, name="conv3_1",
kh=self.cfgs['b3_convh'], kw=self.cfgs['b3_convw'],
n_out=256, dh=1, dw=1)
self.conv3_2 = self.conv_layer_vgg(self.conv3_1, name="conv3_2",
kh=self.cfgs['b3_convh'], kw=self.cfgs['b3_convw'],
n_out=256, dh=1, dw=1)
self.conv3_3 = self.conv_layer_vgg(self.conv3_2, name="conv3_3",
kh=self.cfgs['b3_convh'], kw=self.cfgs['b3_convw'],
n_out=256, dh=1, dw=1)
self.pool3 = self.max_pool(self.conv3_3, name="pool3", kh=1, kw=2, dh=1, dw=2)
# block 4
self.conv4_1 = self.conv_layer_vgg(self.pool3, name="conv4_1",
kh=self.cfgs['b4_convh'], kw=self.cfgs['b4_convw'],
n_out=512, dh=1, dw=1)
self.conv4_2 = self.conv_layer_vgg(self.conv4_1, name="conv4_2",
kh=self.cfgs['b4_convh'], kw=self.cfgs['b4_convw'],
n_out=512, dh=1, dw=1)
self.conv4_3 = self.conv_layer_vgg(self.conv4_2, name="conv4_3",
kh=self.cfgs['b4_convh'], kw=self.cfgs['b4_convw'],
n_out=512, dh=1, dw=1)
self.pool4 = self.max_pool(self.conv4_3, name="pool4", kh=1, kw=2, dh=1, dw=2)
# block 5
self.conv5_1 = self.conv_layer_vgg(self.pool4, name="conv5_1",
kh=self.cfgs['b5_convh'], kw=self.cfgs['b5_convw'],
n_out=512, dh=1, dw=1)
self.conv5_2 = self.conv_layer_vgg(self.conv5_1, name="conv5_2",
kh=self.cfgs['b5_convh'], kw=self.cfgs['b5_convw'],
n_out=512, dh=1, dw=1)
self.conv5_3 = self.conv_layer_vgg(self.conv5_2, name="conv5_3",
kh=self.cfgs['b5_convh'], kw=self.cfgs['b5_convw'],
n_out=512, dh=1, dw=1)
self.side_1_1 = self.side_layer(self.conv1_1,'side_1_1')
self.side_1_2 = self.side_layer(self.conv1_2,'side_1_2')
self.side_2_1 = self.side_layer(self.conv2_1,'side_2_1')
self.side_2_2 = self.side_layer(self.conv2_2,'side_2_2')
self.side_3_1 = self.side_layer(self.conv3_1,'side_3_1')
self.side_3_2 = self.side_layer(self.conv3_2,'side_3_2')
self.side_3_3 = self.side_layer(self.conv3_3,'side_3_3')
self.side_4_1 = self.side_layer(self.conv4_1,'side_4_1')
self.side_4_2 = self.side_layer(self.conv4_2,'side_4_2')
self.side_4_3 = self.side_layer(self.conv4_3,'side_4_3')
self.side_5_1 = self.side_layer(self.conv5_1,'side_5_1')
self.side_5_2 = self.side_layer(self.conv5_2,'side_5_2')
self.side_5_3 = self.side_layer(self.conv5_3,'side_5_3')
        # RSRN fusion in a deep-to-shallow fashion
#block 5
self.dsnout_5_3 = self.deconv_layer(x = self.side_5_3,upscale=16,
name='{}_dsnout_{}'.format('side_5_3', 16),
w_init=tf.truncated_normal_initializer(stddev=0.1))
self.residual_5_2, self.dsnout_5_2 = self.RU_layer(side_inputs = self.side_5_2,
res_inputs = self.side_5_3,
name = 'RU_5_2',
upscale_out = 16,
upscale_res = 1)
self.residual_5_1, self.dsnout_5_1 = self.RU_layer(self.side_5_1, self.residual_5_2,'RU_5_1', 16, 1)
#block 4
self.residual_4_3, self.dsnout_4_3 = self.RU_layer(self.side_4_3, self.residual_5_1,'RU_4_3', 8, 2)
self.residual_4_2, self.dsnout_4_2 = self.RU_layer(self.side_4_2, self.residual_4_3,'RU_4_2', 8, 1)
self.residual_4_1, self.dsnout_4_1 = self.RU_layer(self.side_4_1, self.residual_4_2,'RU_4_1', 8, 1)
#block 3
self.residual_3_3, self.dsnout_3_3 = self.RU_layer(self.side_3_3, self.residual_4_1,'RU_3_3', 4, 2)
self.residual_3_2, self.dsnout_3_2 = self.RU_layer(self.side_3_2, self.residual_3_3,'RU_3_2', 4, 1)
self.residual_3_1, self.dsnout_3_1 = self.RU_layer(self.side_3_1, self.residual_3_2,'RU_3_1', 4, 1)
#block 2
self.residual_2_2, self.dsnout_2_2 = self.RU_layer(self.side_2_2, self.residual_3_1,'RU_2_2', 2, 2)
self.residual_2_1, self.dsnout_2_1 = self.RU_layer(self.side_2_1, self.residual_2_2,'RU_2_1', 2, 1)
#block 1
self.residual_1_2, self.dsnout_1_2 = self.RU_layer(self.side_1_2, self.residual_2_1,'RU_1_2', 1, 2)
self.residual_1_1, self.dsnout_1_1 = self.RU_layer(self.side_1_1, self.residual_1_2,'RU_2_1', 1, 1)
self.side_outputs = [self.dsnout_1_1, self.dsnout_1_2,
self.dsnout_2_1, self.dsnout_2_2,
self.dsnout_3_1, self.dsnout_3_2, self.dsnout_3_3,
self.dsnout_4_1, self.dsnout_4_2, self.dsnout_4_3,
self.dsnout_5_1,self.dsnout_5_2,self.dsnout_5_3]
"""
self.side_outputs = [self.dsnout_1_1, self.dsnout_1_2]
"""
w_shape = [1, 1, len(self.side_outputs), 1]
self.fuse = self.conv_layer(tf.concat(self.side_outputs, axis=3),
w_shape, name='fuse_1', use_bias=False,
w_init=tf.constant_initializer(0.2))
self.outputs = self.side_outputs + [self.fuse]
self.io.print_info("Build model finished: {:.4f}s".format(time.time() - start_time))
    def max_pool(self, input_tensor, name, kh, kw, dh, dw):
        return tf.nn.max_pool(input_tensor,
                              ksize=[1, kh, kw, 1],
                              strides=[1, dh, dw, 1],
                              padding='SAME', name=name)
def conv_layer_vgg(self,input_tensor, name,kh,kw,n_out,dh,dw):
n_in = input_tensor.get_shape().as_list()[3]
with tf.variable_scope(name):
filt = tf.get_variable("weight",[kh,kw,n_in,n_out],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv = tf.nn.conv2d(input_tensor, filt, [1, dh, dw, 1], padding='SAME')
conv_biases = tf.get_variable("bias",[n_out],initializer=tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
return relu
def conv_layer(self, x, W_shape, b_shape=None, name=None,
padding='SAME', use_bias=False, w_init=None, b_init=None):
W = self.weight_variable(W_shape, w_init)
tf.summary.histogram('weights_{}'.format(name), W)
if use_bias:
b = self.bias_variable([b_shape], b_init)
tf.summary.histogram('biases_{}'.format(name), b)
conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
return conv + b if use_bias else conv
def deconv_layer(self, x, upscale, name, padding='SAME', w_init=None):
x_shape = tf.shape(x)
in_shape = x.shape.as_list()
w_shape = [1, upscale * 2, in_shape[-1], 1]
strides = [1, 1, upscale, 1]
W = self.weight_variable(w_shape, w_init)
tf.summary.histogram('weights_{}'.format(name), W)
out_shape = tf.stack([x_shape[0], x_shape[1], x_shape[2], w_shape[2]]) * tf.constant(strides, tf.int32)
deconv = tf.nn.conv2d_transpose(x, W, out_shape, strides=strides, padding=padding)
return deconv
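    # Note on the shapes above: tf.nn.conv2d_transpose takes a filter of layout
    # [height, width, out_channels, in_channels], so w_shape assumes a
    # single-channel input (in_channels = 1), matching the 1-channel side
    # outputs fed through this layer; the strides scale only the width, so the
    # map is upsampled horizontally by `upscale`.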
def side_layer(self, inputs, name):
with tf.variable_scope(name):
in_shape = inputs.shape.as_list()
w_shape = [1, 1, in_shape[-1], 1]
classifier = self.conv_layer(inputs, w_shape, b_shape=1,
w_init=tf.constant_initializer(),
b_init=tf.constant_initializer(),
name=name + '_side')
return classifier
def RU_layer(self, side_inputs, res_inputs, name, upscale_out, upscale_res):
res_inputs_deconv = self.deconv_layer(x = res_inputs,
upscale=upscale_res,
name='{}_RUdeconv_{}'.format(name, upscale_res),
w_init=tf.truncated_normal_initializer(stddev=0.1))
classifier = tf.concat(values=[side_inputs,res_inputs_deconv], axis=3, name = name + '_concat')
with tf.variable_scope(name):
#in_shape = classifier.shape.as_list()
w_shape = [1, 3, 2, 1]
res_outputs = self.conv_layer(x = classifier, W_shape = w_shape, b_shape=1,
w_init=tf.constant_initializer(),
b_init=tf.constant_initializer(),
name=name + '_RU')
side_outputs = self.deconv_layer(x = res_outputs,
upscale=upscale_out,
name='{}_dsnout_{}'.format(name, upscale_out),
w_init=tf.truncated_normal_initializer(stddev=0.1))
return res_outputs, side_outputs
"""
def side_layer(self, inputs, name, upscale, slice_num):
with tf.variable_scope(name):
in_shape = inputs.shape.as_list()
w_shape = [1, 1, in_shape[-1], slice_num]
classifier = self.conv_layer(inputs, w_shape, b_shape=1,
w_init=tf.constant_initializer(),
b_init=tf.constant_initializer(),
name=name + '_reduction')
classifier = self.deconv_layer(classifier,
upscale=upscale,
name='{}_deconv_{}'.format(name, upscale),
w_init=tf.truncated_normal_initializer(stddev=0.1))
return classifier
"""
def get_conv_filter(self, name):
return tf.constant(self.data_dict[name][0], name="filter")
def get_bias(self, name):
return tf.constant(self.data_dict[name][1], name="biases")
def weight_variable(self, shape, initial):
init = initial(shape)
return tf.Variable(init)
def bias_variable(self, shape, initial):
init = initial(shape)
return tf.Variable(init)
def setup_testing(self, session):
self.predictions = []
for idx, b in enumerate(self.outputs):
output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
#output = tf.nn.relu(b, name='output_{}'.format(idx))
self.predictions.append(output)
def setup_training(self, session):
"""
        Apply sigmoid non-linearity to side layer outputs + fuse layer outputs
Compute total loss := side_layer_loss + fuse_layer_loss
Compute predicted edge maps from fuse layer as pseudo performance metric to track
"""
self.predictions = []
self.loss = 0
self.io.print_warning('Deep supervision application set to {}'.format(self.cfgs['deep_supervision']))
for idx, b in enumerate(self.side_outputs):
output = tf.nn.sigmoid(b, name='output_{}'.format(idx))
cost = sigmoid_cross_entropy_balanced(b, self.groundtruths, name='cross_entropy{}'.format(idx))
self.predictions.append(output)
if self.cfgs['deep_supervision']:
self.loss += (self.cfgs['loss_weights'] * cost)
fuse_output = tf.nn.sigmoid(self.fuse, name='fuse')
fuse_cost = sigmoid_cross_entropy_balanced(self.fuse, self.groundtruths, name='cross_entropy_fuse')
self.predictions.append(fuse_output)
self.loss += (self.cfgs['loss_weights'] * fuse_cost)
pred = tf.cast(tf.greater(fuse_output, 0.5), tf.int32, name='predictions')
error = tf.cast(tf.not_equal(pred, tf.cast(self.groundtruths, tf.int32)), tf.float32)
self.error = tf.reduce_mean(error, name='pixel_error')
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('error', self.error)
self.merged_summary = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.cfgs['save_dir'] + '/train', session.graph)
self.val_writer = tf.summary.FileWriter(self.cfgs['save_dir'] + '/val')
|
{"hexsha": "1026b7909d61272bcf49c9b31b89dbd696c52cbc", "size": 15318, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/PickNet/fcn/models/picknet.py", "max_stars_repo_name": "MrXiaoXiao/ESPRH", "max_stars_repo_head_hexsha": "c4bbebba001523fbd86f9de4b09cb931665b7a71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-12-02T03:26:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T04:26:02.000Z", "max_issues_repo_path": "src/PickNet/fcn/models/picknet.py", "max_issues_repo_name": "Damin1909/ESPRH", "max_issues_repo_head_hexsha": "2b26a7e698fe7c411d44ce5f51d52fffdb742d48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-12-04T17:00:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T04:02:11.000Z", "max_forks_repo_path": "src/PickNet/fcn/models/picknet.py", "max_forks_repo_name": "Damin1909/ESPRH", "max_forks_repo_head_hexsha": "2b26a7e698fe7c411d44ce5f51d52fffdb742d48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-02T01:38:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T05:37:47.000Z", "avg_line_length": 47.1323076923, "max_line_length": 122, "alphanum_fraction": 0.5372764068, "include": true, "reason": "import numpy", "num_tokens": 3999}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This set of functions is used for plotting the results from
CT Crash data analysis
@author: Anna Konstorum (konstorum.anna@gmail.com)
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def myround(x, base=5):
return int(base * round(float(x)/base))
def plot_accidents_bytown(exit_mile,town_mile,town_name,town_data, y_max, y_min, title_out):
"""Plot accidents by milemarker"""
exit_mile_E = exit_mile[exit_mile["Direction"]=='E']
exit_mile_W = exit_mile[exit_mile["Direction"]=='W']
town_data_east = town_data[town_data["Direction"]=='E']
town_data_west = town_data[town_data["Direction"]=='W']
if town_name=="All":
town_min_ind = 0.00
town_max_ind = 98.25
else:
town_ind = town_mile[town_mile["Town_Name"]==town_name]
town_min_ind= town_ind["Mile"].values[0]
town_max_ind = town_min_ind + town_ind["Town_Miles"].values[0]
x_index = np.arange(town_min_ind,town_max_ind,0.25)
town_data_east_loc = town_data_east["Milemarker"]
town_data_west_loc = town_data_west["Milemarker"]
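    # Bin the accident milemarkers into quarter-mile intervals (x_index has a
    # 0.25-mile step) and count the accidents per bin for each direction.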
town_data_east_bin = pd.cut(town_data_east_loc, x_index, include_lowest = 1)
town_data_west_bin = pd.cut(town_data_west_loc, x_index, include_lowest = 1)
town_data_east_bin_count = town_data_east_loc.groupby(town_data_east_bin).size()
town_data_west_bin_count = town_data_west_loc.groupby(town_data_west_bin).size()
max_acc = max(town_data_east_bin_count.max(),town_data_west_bin_count.max())
exit_mile_town_E = exit_mile_E[exit_mile_E["Mile"]>=town_min_ind]
exit_mile_town_E = exit_mile_town_E[exit_mile_town_E["Mile"]<town_max_ind]
exit_mile_town_W = exit_mile_W[exit_mile_W["Mile"]>=town_min_ind]
exit_mile_town_W = exit_mile_town_W[exit_mile_town_W["Mile"]<town_max_ind]
if max_acc > 250:
steps = 100
elif max_acc <=250 and max_acc > 100:
steps = 50
elif max_acc<= 100 and max_acc > 50:
steps = 25
elif max_acc<=50 and max_acc > 25:
steps = 5
else:
steps = 2
if y_max == 'not_set' and y_min == 'not_set':
y_max = myround(town_data_east_bin_count.max(),steps)+steps/2
y_min = -1*myround(town_data_west_bin_count.max(),steps)-steps/2
fig, ax = plt.subplots(figsize=[15,10])
plt.rcParams['figure.figsize'] = [15, 10]
plt.bar(x_index[:-1],town_data_east_bin_count,align='edge', width = 0.25,color='darksalmon')
plt.bar(x_index[:-1],-town_data_west_bin_count,align='edge', width=0.25, color='cornflowerblue')
plt.ylim(y_min,y_max)
plt.yticks(np.arange(y_min,y_max,step=steps),abs(np.arange(y_min,y_max,step=steps)))
plt.ylabel('Number of accidents',fontsize=14)
plt.xlabel('I-84 Milemarker',fontsize=14)
plt.title(title_out,fontsize=15)
# create custom legend
leg_elements = [Line2D([0], [0], color='coral', lw=3, label='East'),
Line2D([0], [0], color='cornflowerblue', lw=3, label='West')]
ax.legend(handles=leg_elements, loc='upper right')
# Add exit ramp delineations and names
max_height_mile = int(max(steps/4,2))
plt.vlines(exit_mile_town_E["Mile"],ymin = 0, ymax = max_height_mile, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_E.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!="All"):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,max_height_mile + min(max_height_mile,5),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
plt.vlines(exit_mile_town_W["Mile"],ymin = -1*max_height_mile, ymax =0, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_W.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!='All'):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,-1*(max_height_mile+min(max_height_mile,5)),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
# Add town names
if town_name=="All":
loc_town_names = town_data_west_bin_count.max()
plt.vlines(town_mile["Mile"], ymin=-int(max_acc/3), ymax=int(max_acc/3),color='dimgrey', linewidth=0.5,linestyle='-.')
for i, row in enumerate(town_mile.values):
Mile, Town_Number, Town_Name, Town_Miles = row
loc_print = Mile + Town_Miles/2
            if (i!=2 and i!=3):  # 'and' — the original 'or' was always true, making the elif branches unreachable
plt.text(loc_print,y_min+5,Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==2:
plt.text(loc_print+5,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==3:
plt.text(loc_print+8,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
|
{"hexsha": "433c6973882a25d71d4c0d9e420bb0f036eba472", "size": 5831, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/plot_results.py", "max_stars_repo_name": "akonstodata/CT_crash_analysis", "max_stars_repo_head_hexsha": "66a8ecce5279f4dfc9f1cc3766a00573229812ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-04T20:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-04T20:54:25.000Z", "max_issues_repo_path": "code/plot_results.py", "max_issues_repo_name": "akonstodata/CT_crash_analysis", "max_issues_repo_head_hexsha": "66a8ecce5279f4dfc9f1cc3766a00573229812ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/plot_results.py", "max_forks_repo_name": "akonstodata/CT_crash_analysis", "max_forks_repo_head_hexsha": "66a8ecce5279f4dfc9f1cc3766a00573229812ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.1900826446, "max_line_length": 130, "alphanum_fraction": 0.6058995027, "include": true, "reason": "import numpy", "num_tokens": 1567}
|
#!/usr/bin/env python
# coding: utf-8
# # import required library
# In[1]:
# Import numpy, pandas for data manipulation
import numpy as np
import pandas as pd
# Import matplotlib, seaborn for visualization
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# In[2]:
# Import the data
weather_data = pd.read_csv('weather.csv')
weather_data.head()
# In[8]:
rain_df = weather_data[['Date','Rainfall']]
rain_df.head()
# In[9]:
rain_df.shape
# In[10]:
rain_df.info()
# **Using 50 values**
# In[15]:
rain_df = rain_df.loc[:49]
rain_df.head()
# In[16]:
rain_df.shape
# In[17]:
# Convert the time column into datetime
rain_df['Date'] = pd.to_datetime(rain_df['Date'])
rain_df['Date'].head()
# In[18]:
rain_df.info()
# In[24]:
# Fill missing values with the mean rainfall
rain_df = rain_df.fillna(rain_df['Rainfall'].mean())
rain_df.head()
# ### Dataset Explanation
# In[27]:
rain_df.describe()
# In[29]:
# Output the maximum and minimum rain date
print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].max()])
print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].min()])
# In[30]:
# Set the Date column as the index
rain_df.set_index("Date", inplace=True)
# ### Data Visualization
# In[32]:
# Plot the daily rainfall
plt.figure(figsize=(16,10), dpi=100)
plt.plot(rain_df.index, rain_df.Rainfall, color='tab:red')
plt.gca().set(title="Daily Rain", xlabel='Date', ylabel="rain value")
plt.show()
# In[35]:
# Smooth the series with a moving average over a 10-day window.
rain_df_mean = rain_df.Rainfall.rolling(window=10).mean()
rain_df_mean.plot(figsize=(16,10))
plt.show()
# In[37]:
from statsmodels.tsa.seasonal import seasonal_decompose
# Additive Decomposition
result_add = seasonal_decompose(rain_df.Rainfall, model='additive', extrapolate_trend=0)
# Plot
plt.rcParams.update({'figure.figsize': (10,10)})
result_add.plot().suptitle('Additive Decomposition', fontsize=22)
plt.show()
# ### Baseline Model
# In[38]:
# Shift the current rain to the next day.
predicted_df = rain_df["Rainfall"].to_frame().shift(1).rename(columns = {"Rainfall": "rain_pred" })
actual_df = rain_df["Rainfall"].to_frame().rename(columns = {"Rainfall": "rain_actual" })
# Concatenate the actual and predicted rain
one_step_df = pd.concat([actual_df,predicted_df],axis=1)
# Keep rows from the second one onward: the first row has no prediction because of the shift.
one_step_df = one_step_df[1:]
one_step_df.head(10)
# > Here we have two columns: the **actual rain** values and the **predicted rain** values that we will reuse for the models below
# We can validate how well our model performs by looking at the Root Mean Squared Error (RMSE) between the predicted and actual rain
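# As a reminder, RMSE = sqrt( (1/n) * sum_i (y_i - yhat_i)^2 ), i.e. the typical
# size of a one-day-ahead error, in the same units as the rainfall itself.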
# In[41]:
from sklearn.metrics import mean_squared_error as MSE
from math import sqrt
# Calculate the RMSE
rain_pred_err = MSE(one_step_df.rain_actual, one_step_df.rain_pred, squared=False)
print("The RMSE is",rain_pred_err)
# > Our RMSE value of 4.002 (around 4) is reasonably good for such a simple baseline model.
# ## Using SARIMA model
# ### Parameter Selection
# #### Grid Search
# We are going to apply one of the most commonly used methods for time-series forecasting, known as SARIMA, which stands for Seasonal Autoregressive Integrated Moving Average. SARIMA models are denoted SARIMA(p,d,q)(P,D,Q,s): (p,d,q) are the non-seasonal autoregressive, differencing and moving-average orders, and (P,D,Q,s) are their seasonal counterparts together with the season length s.
#
# We will use a “grid search” to iteratively explore different combinations of parameters. For each combination of parameters, we fit a new seasonal SARIMA model with the SARIMAX() function from the statsmodels module and assess its overall quality.
# In[42]:
import itertools
# Define the p, d and q parameters to take any value between 0 and 2
p = d = q = range(0, 2)
# Generate all different combinations of p, d and q triplets
pdq = list(itertools.product(p, d, q))
# Generate all different combinations of seasonal p, d and q triplets
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
print('Examples of parameter combinations for Seasonal ARIMA...')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))
# In[43]:
import statsmodels.api as sm  # sm is used in this cell but was originally imported only in a later cell
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print('SARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
except:
continue
# ### Fitting the Model
# In[47]:
import warnings
warnings.filterwarnings("ignore") # specify to ignore warning messages
# Import the statsmodels library for using SARIMAX model
import statsmodels.api as sm
# Fit the SARIMAX model using optimal parameters
mod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual,
order=(1,1,1),
seasonal_order=(1,1,1,12),
enforce_stationarity=False,
enforce_invertibility=False)
# In[48]:
results = mod.fit()
# In[49]:
results.summary()
# **Predictions**
# In[51]:
pred = results.predict(start=0,end=49)[1:]
pred
# In[52]:
pred = results.get_prediction(start=0,end = 49, dynamic=False)
pred_ci = pred.conf_int()
# In[53]:
pred_ci.head()
# In[55]:
print(pred)
# In[58]:
ax = one_step_df.rain_actual.plot(label='observed',figsize=(16,10))
ax.set_xlabel('Date')
ax.set_ylabel('value')
plt.ylim([0,2.0])
plt.legend()
plt.show()
# ### Forecast Diagnostic
# It is also useful to quantify the accuracy of our forecasts. We will use the MSE (Mean Squared Error), in which for each predicted value, we compute its distance to the true value and square the result
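# That is, MSE = (1/n) * sum_i (y_i - yhat_i)^2; note it is in squared units, so
# its square root is what should be compared with the RMSE values above.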
# In[65]:
y_forecasted = pred.predicted_mean[:49]
y_truth = one_step_df.rain_actual
print(y_forecasted.shape)
print(y_truth.shape)
# Compute the mean square error
mse = MSE(y_truth, y_forecasted, squared=True)
print('The Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))
# Amazing! Our forecast model predicts the rain with an MSE of only 25.85.
#
# In the weather-forecasting field, a prediction error of this size (an RMSE of roughly 5.1) seems promising and sufficient, as many other factors contribute to changes in rainfall, including but not limited to wind speed, air pressure, etc.
# ### Validating the Dynamic Forecast
# In this case, we only use information from the time series up to a certain point, and after that, forecasts are generated using values from previous forecasted time points.
#
# In[66]:
pred_dynamic = results.get_prediction(start=0,end = 49, dynamic=True, full_results=True)
pred_dynamic_ci = pred_dynamic.conf_int()
# In[67]:
pred_dynamic_ci.head()
# Once again, we plot the real and forecasted values of the average daily rain to assess how well we did:
# In[71]:
ax = one_step_df.rain_actual.plot(label='observed', figsize=(15, 11))
pred_dynamic.predicted_mean.plot(label='Dynamic Forecast', ax=ax)
ax.fill_between(pred_dynamic_ci.index,
pred_dynamic_ci.iloc[:, 0],
pred_dynamic_ci.iloc[:, 1], color='k', alpha=.25)
ax.set_xlabel('Date')
ax.set_ylabel('Rainfall')
plt.ylim([0,2.0])
plt.legend()
plt.show()
# > In this case, the model predicts the rain poorly, with large gaps between the true and predicted values.
# ### Forecast Diagnostic
# In[73]:
# Extract the predicted and true values of our time series
y_forecasted = pred_dynamic.predicted_mean[:49]
y_truth = one_step_df.rain_actual
# Compute the mean square error
mse = sqrt(MSE(y_truth, y_forecasted).mean())
print('The Root Mean Squared Error of our forecasts is {}'.format(round(mse, 2)))
# The **predicted** values obtained from the dynamic forecasts yield an RMSE of 3.68. Dynamic forecasting is generally expected to be less accurate than one-step-ahead forecasting, since it relies on less historical data from the time series.
# # Conclusion
# I described how to implement a seasonal SARIMA model in Python. I made extensive use of the pandas and statsmodels libraries and showed how to run model diagnostics, as well as how to produce forecasts of the rainfall.
# Recall the assumption made in Section 2.2 (Baseline Model): these results reinforce the belief that the rainfall today depends on the rainfall yesterday, the rainfall yesterday depends on the day before, and so on.
#
# It is best to use the history up to the point at which we would like to make **predictions**. This holds especially for weather forecasting, where today's rainfall does not change much from yesterday's, and the transition to another season should show up only gradually in the rainfall, barring disastrous factors such as storms, droughts, etc.
|
{"hexsha": "f2551e934ec02c72327ff261b91868d8569a6187", "size": 9361, "ext": "py", "lang": "Python", "max_stars_repo_path": "Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.py", "max_stars_repo_name": "shreejitverma/Data-Scientist", "max_stars_repo_head_hexsha": "03c06936e957f93182bb18362b01383e5775ffb1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-12T04:53:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T12:39:21.000Z", "max_issues_repo_path": "Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.py", "max_issues_repo_name": "shreejitverma/Data-Scientist", "max_issues_repo_head_hexsha": "03c06936e957f93182bb18362b01383e5775ffb1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Time Series Analysis/Weather Forecasting using SRIMAX Model/weather prediction.py", "max_forks_repo_name": "shreejitverma/Data-Scientist", "max_forks_repo_head_hexsha": "03c06936e957f93182bb18362b01383e5775ffb1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-12T04:52:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T12:45:32.000Z", "avg_line_length": 24.9626666667, "max_line_length": 372, "alphanum_fraction": 0.7051597052, "include": true, "reason": "import numpy,import statsmodels,from statsmodels", "num_tokens": 2298}
|
@testset "Robots: biped" begin
q0 = [0.0; 0.0; 0.5 * π * ones(7)]
v0 = zeros(9)
@test norm(lagrangian(biped, q0, v0)) < 1.0e-8
# visualize
vis = RoboDojo.Visualizer();
@test visualize!(vis, biped, [q0], Δt=0.1);
end
|
{"hexsha": "def11798a6d8bf8bf45dba96832e4c872e2f727f", "size": 241, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/robots/biped.jl", "max_stars_repo_name": "mcx/RoboDojo.jl", "max_stars_repo_head_hexsha": "b31fa17ee84285f45b76de78d9e660a83f5ddc9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-11-14T00:59:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T08:17:58.000Z", "max_issues_repo_path": "test/robots/biped.jl", "max_issues_repo_name": "alpv95/RobotDojo.jl", "max_issues_repo_head_hexsha": "331c7240bb2ca888d3e36180be86bbd405a989ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-13T02:28:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T21:08:50.000Z", "max_forks_repo_path": "test/robots/biped.jl", "max_forks_repo_name": "alpv95/RobotDojo.jl", "max_forks_repo_head_hexsha": "331c7240bb2ca888d3e36180be86bbd405a989ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T07:24:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T07:24:01.000Z", "avg_line_length": 26.7777777778, "max_line_length": 50, "alphanum_fraction": 0.5643153527, "num_tokens": 106}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Twenty Seconds Resume/CV
% LaTeX Template
% Version 1.0 (14/7/16)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Carmine Spagnuolo (cspagnuolo@unisa.it) with major modifications by
% Vel (vel@LaTeXTemplates.com)
%
% License:
% The MIT License (see included LICENSE file)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[letterpaper]{twentysecondcv} % a4paper for A4
% Command for printing skill progress bars
\newcommand\skills{
~
\smartdiagram[bubble diagram]{
\textbf{Full Stack}\\\textbf{Dev},
\textbf{Relational/}\\\textbf{Document}\\\textbf{Databases},
\textbf{~~~~OOP~~~~~},
\textbf{Mobile}\\\textbf{Dev},
\textbf{Machine}\\\textbf{Learning},
\textbf{Test}\\\textbf{~~Automation~~},
\textbf{Statistical}\\\textbf{Analysis}
}
}
\interests{{Dev Ops/4.5},{Test Automation/5},{Data Science/6},{Software Engineering/6}}
%----------------------------------------------------------------------------------------
% PERSONAL INFORMATION
%----------------------------------------------------------------------------------------
% If you don't need one or more of the below, just remove the content leaving the command, e.g. \cvnumberphone{}
\cvname{Harsh Gadgil} % Your name
\cvjobtitle{ Data Scientist, \\ Full Stack Developer} % Job title/career
\cvlinkedin{https://linkedin.com/in/hsgadgil}
\cvnumberphone{+1 205 282 9201} % Phone number
\cvsite{http://www.hgadgil.com/} % Personal website
\cvmail{harsh.gadgil@gmail.com} % Email address
%----------------------------------------------------------------------------------------
\begin{document}
\makeprofile % Print the sidebar
%----------------------------------------------------------------------------------------
% EDUCATION
%----------------------------------------------------------------------------------------
\section{Education}
\begin{twenty} % Environment for a list with descriptions
\twentyitem
{Expected \\ Dec 2016}
{MSc., Computer Science}
{\href{http://www.uoguelph.ca/}{University of Guelph}}
{Guelph, Ontario, Canada}
{Current GPA: 3.7/4, 83\%}
\twentyitem
{2009 - 2013}
{BEng., Computer Engineering}
{\href{http://www.unipune.ac.in/}{University of Pune}}
{Pune, Maharashtra, India}
{GPA: 4.0, First Class with Distinction}
%\twentyitem{<dates>}{<title>}{<organization>}{<location>}{<description>}
\end{twenty}
\section{Research}
\begin{twenty}
\twentyitem
{2015 - 2016}
{Graduate Research Assistant}
{\href{http://www.uoguelph.ca/}{University of Guelph}}
{}
{
{\begin{itemize}
\item Performed classification using Support Vector Machine and multivariate regression analyses using Logistic Regression
\item Proposed a novel method to reliably integrate large, noisy datasets. Evaluated the method on datasets containing six million records
\item A paper describing the method and preliminary results is accepted for publication in the proceedings of the IEEE International Conference of Data Mining 2016
\end{itemize}}
}
\end{twenty}
%----------------------------------------------------------------------------------------
% EXPERIENCE
%----------------------------------------------------------------------------------------
\section{Experience}
\begin{twenty} % Environment for a list with descriptions
\twentyitem
{Sep 2015 - \\May 2016}
{Co-Founder \& Full Stack Developer}
{\href{http://www.localxchange.ca/}{LocalXChange Inc.}}
{}
{
{\begin{itemize}
\item In a team of two, won \$8,000 in funding from The Hub incubator (CBaSE, University of Guelph) to kick start a startup venture
\item In a team of three, built a prototype hyperlocal content platform. The goal was to deliver hyperlocal news, events and other content from community organizations to local users
\item Built hybrid mobile and web apps using Node.js, Ionic, AngularJS and MongoDB. Performed automated testing using Selenium.
			\item Met with city officials, including the Mayor, and university executives to discuss marketing \& business strategies for the platform
\end{itemize}}
}
\twentyitem
{Sept 2015 - \\ Present}
{Graduate Teaching Assistant}
{\href{http://www.uoguelph.ca}{University of Guelph}}
{}
{
{\begin{itemize}
\item TA for CIS*2430 (OOP), CIS*4150 (Software Reliability \& Testing) and CIS*3530 (Database Systems \& Concepts) courses
\end{itemize}}
}
\twentyitem
{Dec 2013 - \\ Apr 2015}
{Test Automation Engineer}
{\href{http://www.synechron.com/}{Synechron}}
{}
{
\begin{itemize}
\item Developed a \textit{Keyword Driven} and \textit{Behavior Driven} test automation framework for \href{https://www.microsoft.com/en-ca/dynamics/crm.aspx}{Microsoft Dynamics CRM}. Wrote an efficient recursive function to search within multi-level nested frames, reducing development time by several weeks. Won SPOT award {\includegraphics[scale=0.05]{img/trophy.png}}
\textit{My work opened up a new position in the organization, enabling it to earn additional revenue}
\item Involved in development of a Coded UI test automation framework for \href{https://www.microsoft.com/en-ca/dynamics/erp-ax-overview.aspx}{Microsoft Dynamics AX} in C\#
\item Involved in manual testing for \href{http://www.fico.com/en/products/fico-blaze-advisor-decision-rules-management-system}{FICO Blaze Advisor}. Participated in standup meetings and Three Amigos sessions with onshore/offshore developers and BAs. Responsible for sending daily status reports and testing user stories. Used JIRA and Quality Center for defect reporting, and Quick Test Professional for executing automated test cases
\end{itemize}
}
%\twentyitem{<dates>}{<title>}{<location>}{<description>}
\end{twenty}
\end{document}
|
{"hexsha": "282a49fcbf7d94a35b76159512de83de5af722d9", "size": 6382, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "template.tex", "max_stars_repo_name": "latexstudio/Data-Engineer-Resume-LaTeX", "max_stars_repo_head_hexsha": "0ca7e94b54375276550a10da8041eea2d26786ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "template.tex", "max_issues_repo_name": "latexstudio/Data-Engineer-Resume-LaTeX", "max_issues_repo_head_hexsha": "0ca7e94b54375276550a10da8041eea2d26786ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "template.tex", "max_forks_repo_name": "latexstudio/Data-Engineer-Resume-LaTeX", "max_forks_repo_head_hexsha": "0ca7e94b54375276550a10da8041eea2d26786ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-15T18:38:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T18:38:53.000Z", "avg_line_length": 42.2649006623, "max_line_length": 442, "alphanum_fraction": 0.59276089, "num_tokens": 1473}
|
library(RMySQL)
lin_sem_distance <- function (data) {
options(warn = -1)
con <- dbConnect(MySQL(),
user="user", password="password",
dbname="snomed_20160731", host="127.0.0.1")
codes <- sort(na.omit(unique(as.vector(data))))
n_codes <- length(codes)
weight <- matrix(nrow=n_codes, ncol=n_codes)
for(i in 1:n_codes) {
weight[i,i] <- 1
if(i > 1) {
for(j in 1:(i-1)) {
descIrs <- dbSendQuery(con, paste("SELECT descendants FROM concepts WHERE Id = ", codes[i]))
descI <- as.numeric(dbFetch(descIrs))
dbClearResult(descIrs)
descJrs <- dbSendQuery(con, paste("SELECT descendants FROM concepts WHERE Id = ", codes[j]))
descJ <- as.numeric(dbFetch(descJrs))
dbClearResult(descJrs)
lcsRs <- dbSendQuery(con, paste("SELECT t1.SupertypeId, c.descendants FROM transitiveclosure t1 join transitiveclosure t2 on t1.SupertypeId = t2.SupertypeId JOIN concepts c ON t1.SupertypeId = c.Id WHERE t1.SubtypeId = ",
codes[i], " AND t2.SubtypeId = ", codes[j], " ORDER BY (t1.PathLength + t2.PathLength) ASC LIMIT 1"))
lcs <- dbFetch(lcsRs)
descLcs <- as.numeric(lcs[2])
dbClearResult(lcsRs)
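        # Lin (1998) similarity: sim(i,j) = 2*IC(lcs) / (IC(i) + IC(j)), with
        # IC(c) = -log(descendants(c) / N). The constant N = 321901 is presumably
        # the total number of concepts in this SNOMED CT release.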
lin <- 2 * -log(descLcs / 321901 ) / (-log(descI / 321901) + -log(descJ / 321901))
#print(lin)
weight[i,j] <- lin
weight[j,i] <- lin
}
}
}
dbDisconnect(con)
return(weight)
}
|
{"hexsha": "26745d90e8e9072c2b3753f1a941045f0a1cb4fe", "size": 1515, "ext": "r", "lang": "R", "max_stars_repo_path": "sem_distance.r", "max_stars_repo_name": "danka74/SnomedAgreement", "max_stars_repo_head_hexsha": "4eb3d0764846c8bd25f178915f671603978058d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sem_distance.r", "max_issues_repo_name": "danka74/SnomedAgreement", "max_issues_repo_head_hexsha": "4eb3d0764846c8bd25f178915f671603978058d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sem_distance.r", "max_forks_repo_name": "danka74/SnomedAgreement", "max_forks_repo_head_hexsha": "4eb3d0764846c8bd25f178915f671603978058d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4318181818, "max_line_length": 229, "alphanum_fraction": 0.5795379538, "num_tokens": 433}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 denglixi edenglixi@xgpd2>
#
# Distributed under terms of the MIT license.
"""
"""
import numpy
import os
from xml_process import parse_rec
def create_dishes(canteen):
"""create_dishes"""
# each dish may have more than 1 image (mostly is 2).
# construct dish-> [image1(anno) , image2(anno)]
# dishes = [dish1, ..., dishn]
root_path = '../data/Food/Food_{}/'.format(canteen)
anno_path = os.path.join(root_path, 'Annotations')
dishes = []
dish = []
# sort is important since listdir func do not return a ordered results.
all_xml_fs = sorted(os.listdir(anno_path))
for x_f in all_xml_fs:
x_f_path = os.path.join(anno_path, x_f)
x_f_name = x_f.split('.')[0]
# get cls
objs = parse_rec(x_f_path)
cls_of_image = []
for obj in objs:
cls_of_image.append(obj['name'])
cls_of_image = sorted(cls_of_image)
# process dish & x_f_name
if not dish: # first image of dish
dish = [x_f_name]
cls_of_dish = cls_of_image
else:
# the same dish. TODO some samples are wrong in the following condition
if cls_of_image == cls_of_dish or set(cls_of_image) > set(cls_of_dish) or set(cls_of_dish) > set(cls_of_image):
dish.append(x_f_name)
else:
# new dish
dishes.append(dish)
dish = [x_f_name]
cls_of_dish = sorted(cls_of_image)
return dishes
def clean_validation(val_set, dishes):
root_path = '../data/Food/Food_All/'
imageset_path = os.path.join(root_path, 'ImageSets')
valmt_sets_path = os.path.join(imageset_path, 'valmt10.txt')
with open(valmt_sets_path, 'r') as f:
valmt_names = [x.strip('\n') for x in f.readlines()]
print(len(dishes))
left_val = []
for valmt_name in valmt_names:
for dish in dishes:
if valmt_name in dish:
# if left_of_dish not in valmt_names
all_in_val = True
for img in dish:
                    if img not in valmt_names:  # check against the full list (was: valmt_name, a single string)
all_in_val = False
break
if all_in_val:
left_val.append(valmt_name)
break
with open("all_in_val.txt", 'w') as f:
f.writelines([x+"\n" for x in left_val])
if __name__ == '__main__':
    dishes = create_dishes("Arts")
    # drop into the debugger to inspect the grouped dishes interactively
    import pdb
    pdb.set_trace()
|
{"hexsha": "bf6c2224e63c9953cc6804e19ea902a1893da37b", "size": 2537, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/datasets/clean_val.py", "max_stars_repo_name": "denglixi/faster-rcnn.pytorch", "max_stars_repo_head_hexsha": "12158fa2ec998ba3733a4696b7a4e08a35c157e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/datasets/clean_val.py", "max_issues_repo_name": "denglixi/faster-rcnn.pytorch", "max_issues_repo_head_hexsha": "12158fa2ec998ba3733a4696b7a4e08a35c157e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/datasets/clean_val.py", "max_forks_repo_name": "denglixi/faster-rcnn.pytorch", "max_forks_repo_head_hexsha": "12158fa2ec998ba3733a4696b7a4e08a35c157e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5056179775, "max_line_length": 123, "alphanum_fraction": 0.5798186835, "include": true, "reason": "import numpy", "num_tokens": 643}
|
from pathlib import Path
from typing import Union, Dict, List
import numpy as np
from .eeg import EEG
from .transforms import HighPass, RemoveBeginning, RemoveLineNoise, Standardize
def ingest_session(
data_path: Path, output_dir: Path
) -> Dict[str, Union[int, List[int]]]:
eeg = EEG.from_hdf5(data_path)
transforms = (HighPass(1.0), RemoveBeginning(), RemoveLineNoise(), Standardize())
for transform in transforms:
eeg = transform(eeg)
statistics = {
'dataset_size': 0,
'action_count': [0] * 5,
'preparation_count': [0] * 5,
'action_length_q': None,
'preparation_length_q': None,
}
action_lengths = []
preparation_lengths = []
for i, episode in enumerate(eeg.split_session()):
action_label = np.max(np.unique(episode.action_state))
preparation_label = np.max(np.unique(episode.preparation_state))
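        # The first-order difference below is negative exactly at a falling edge
        # of preparation_state; e.g. [2, 2, 2, 0, 0] gives diff = [0, 0, -2, 0],
        # so the single nonzero entry of diff * (diff < 0) marks the preparation
        # length (here 3).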
diff = episode.preparation_state[1:] - episode.preparation_state[:-1]
candidate_lengths = (diff * (diff < 0)).nonzero()[0] + 1
assert len(candidate_lengths) == 1
preparation_length = candidate_lengths[0]
assert episode.preparation_state[preparation_length] == 0
assert episode.preparation_state[preparation_length - 1] > 0
np.savez(
output_dir / f'{data_path.stem}_{i:03d}.npz',
data=episode.data,
action_label=action_label,
preparation_label=preparation_label,
preparation_length=preparation_length,
)
action_lengths.append(episode.data.shape[1])
preparation_lengths.append(preparation_length)
statistics['dataset_size'] += 1
statistics['action_count'][action_label] += 1
statistics['preparation_count'][max(0, preparation_label - 4)] += 1
statistics['action_length_q'] = np.quantile(action_lengths, (0, 0.25, 0.5, 0.75, 1))
statistics['action_length_q'] = (
statistics['action_length_q'].astype(np.int64).tolist()
)
statistics['preparation_length_q'] = np.quantile(
preparation_lengths, (0, 0.25, 0.5, 0.75, 1)
)
statistics['preparation_length_q'] = (
statistics['preparation_length_q'].astype(np.int64).tolist()
)
return statistics
|
{"hexsha": "a294aa3fce6d2cc0bb3d7713b0da7200fc507b43", "size": 2262, "ext": "py", "lang": "Python", "max_stars_repo_path": "eegdrive/ingestion/ingest.py", "max_stars_repo_name": "lucagrementieri/eegdrive", "max_stars_repo_head_hexsha": "65b122246e2a75c0c7c80db3e544f6a6741ceb53", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eegdrive/ingestion/ingest.py", "max_issues_repo_name": "lucagrementieri/eegdrive", "max_issues_repo_head_hexsha": "65b122246e2a75c0c7c80db3e544f6a6741ceb53", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eegdrive/ingestion/ingest.py", "max_forks_repo_name": "lucagrementieri/eegdrive", "max_forks_repo_head_hexsha": "65b122246e2a75c0c7c80db3e544f6a6741ceb53", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0, "max_line_length": 88, "alphanum_fraction": 0.657382847, "include": true, "reason": "import numpy", "num_tokens": 562}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 18:16:26 2020
@author: arslan
"""
from pyit2fls import IT2FS_Gaussian_UncertMean, join, IT2FS_plot, \
max_s_norm, probabilistic_sum_s_norm, bounded_sum_s_norm, \
drastic_s_norm, nilpotent_maximum_s_norm, einstein_sum_s_norm
from numpy import linspace
domain = linspace(-1., 7., 8001)
A1 = IT2FS_Gaussian_UncertMean(domain, [0., 0.2, 0.25, 1.])
A2 = IT2FS_Gaussian_UncertMean(domain, [1., 0.2, 0.25, 1.])
A3 = IT2FS_Gaussian_UncertMean(domain, [2., 0.2, 0.25, 1.])
A4 = IT2FS_Gaussian_UncertMean(domain, [3., 0.2, 0.25, 1.])
A5 = IT2FS_Gaussian_UncertMean(domain, [4., 0.2, 0.25, 1.])
A6 = IT2FS_Gaussian_UncertMean(domain, [5., 0.2, 0.25, 1.])
A7 = IT2FS_Gaussian_UncertMean(domain, [6., 0.2, 0.25, 1.])
IT2FS_plot(A1, A2, A3, A4, A5, A6, A7, title="Sets",
legends=["Set 1", "Set 2", "Set 3", "Set 4",
"Set 5", "Set 6", "Set 7"])
M1 = join(domain, A1, A2, max_s_norm)
M2 = join(domain, A2, A3, probabilistic_sum_s_norm)
M3 = join(domain, A3, A4, bounded_sum_s_norm)
M4 = join(domain, A4, A5, drastic_s_norm)
M5 = join(domain, A5, A6, nilpotent_maximum_s_norm)
M6 = join(domain, A6, A7, einstein_sum_s_norm)
IT2FS_plot(M1, M2, M3, M4, M5, M6,
legends=["Maximum (1, 2)", "Probabilistic Sum (2, 3)",
"Bounded Sum (3, 4)", "Drastic (4, 5)",
"Nilpotent Maximum (5, 6)", "Einstein Sum (6, 7)"])
|
{"hexsha": "7656caa9f56b779574285fec981fef6ed383ae94", "size": 1472, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ex_13.py", "max_stars_repo_name": "Haghrah/PyIT2FLS", "max_stars_repo_head_hexsha": "ca2763032a4f441c3c4456570c18faa68cfee3e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-01-06T19:33:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:34:54.000Z", "max_issues_repo_path": "examples/ex_13.py", "max_issues_repo_name": "sirojkhuja/PyIT2FLS", "max_issues_repo_head_hexsha": "ca2763032a4f441c3c4456570c18faa68cfee3e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-04-10T10:43:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-18T19:06:31.000Z", "max_forks_repo_path": "examples/ex_13.py", "max_forks_repo_name": "sirojkhuja/PyIT2FLS", "max_forks_repo_head_hexsha": "ca2763032a4f441c3c4456570c18faa68cfee3e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-10-13T05:09:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T04:36:23.000Z", "avg_line_length": 31.3191489362, "max_line_length": 71, "alphanum_fraction": 0.6351902174, "include": true, "reason": "from numpy", "num_tokens": 597}
|
import math, torch
import numpy as np
from numpy.random import normal as normrnd
from scipy.stats import multivariate_normal, norm
from scipy.linalg import sqrtm, expm
from pdb import set_trace as bp
from include.DNN import DNN
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from include.dataStructures.particle import Particle
class localize:
def __init__(self, numP, su, sz, distMap, mat, wayPts, R, dim, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dists = distMap
self.dim = dim
self.wayPts = wayPts
self.pts = self.convert(wayPts)
self.nAP = mat.numAPs
self.tx = mat.Tx
self.R = R
self.start = self.wayPts[0]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
if self.dim == 3: start = start
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
'''
    load the PyTorch model from the saved checkpoint (state_dict)
'''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), np.diag(sig))
return mvn.pdf(val)
'''
    no explicit return is needed: the particle list is
    mutated in place, since Python passes object
    references rather than copies
'''
def motion_model(self, samples, point, su):
for i in range(self.np):
dx = point[0] - normrnd(0, su[0])
dy = point[1] - normrnd(0, su[1])
if self.dim == 2: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
if self.dim == 3: dz = point[2] - normrnd(0, su[2])
if self.dim == 3: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy, samples[i].pose[2] + dz]
samples[i].pose = pose
'''
measurement model for the particle filter
label for dMap = 1 : NLOS , 0 : LOS
'''
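    # Weighting rule implemented below: each particle's weight is scaled by
    # prod_j N(dz_j; 0, sz_j), where dz_j = |rssi_j - d_j| is the range
    # innovation for AP j within radius R (APs with dz_j = 0 contribute no factor).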
def measure_model(self, samples, z):
totalWt = 0 ; nAP = len(z)
for i in range(self.np):
dz = [0 for x in range(nAP)]
for j in range(nAP):
tx = self.tx[j] ; pos = samples[i].pose
d = self.distance(tx, pos)
if d <= self.R:
if self.use:
if self.hard:
label = self.classify(z[j].rssi, d)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                            elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
                            elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                            elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
if label==0:
dz[j] = abs(z[j].rssi-d)
else:
inp = torch.tensor([z[j].rssi, d])
out = self.model(inp.float()).detach().numpy()
dz[j] = out[0]*abs(z[j].rssi-d) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
dz[j] = abs(z[j].rssi-d)
wt = self.getWeight(dz)
samples[i].w *= wt
totalWt += wt
if totalWt!=0:
for i in range(self.np):
samples[i].w = samples[i].w / totalWt
else:
for i in range(self.np):
samples[i].w = 1/self.np
'''
measurement model for fast slam v1
label for dMap = 1 : NLOS , 0 : LOS
'''
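    # Per-particle EKF update applied to each AP landmark below, for the
    # range-only measurement h(mu) = ||pos - mu||:
    #   H = dh/dmu            (unit vector from the particle to the landmark estimate)
    #   Q = H Sigma H^T + sz  (innovation covariance)
    #   K = Sigma H^T / Q     (Kalman gain)
    #   mu <- mu + K*innov,   Sigma <- (I - K H) Sigma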
def fast_measure_model(self, samples, z):
if self.dim == 2: Qt = np.diag([10,10])
if self.dim == 3: Qt = np.diag([10,10,10])
Qt = Qt.tolist() ; nAP = len(z) ; totWt = 0
for i in range(self.np):
for j in range(nAP):
tx = np.array(self.tx[j]) ; pos = np.array(samples[i].pose)
d = self.distance(tx, pos)
if d <= self.R:
# initialize particle map
if j not in samples[i].mapID:
samples[i].mapMu.append(tx)
samples[i].mapSigma.append(Qt)
samples[i].mapID.append(j)
samples[i].hashMap[j] = len(samples[i].mapID) - 1
samples[i].w = 1/self.np
# update particle map
else:
ID = samples[i].hashMap[j]
# prediction step
muHat = samples[i].mapMu[ID]
sigHat = np.array(samples[i].mapSigma[ID])
# update step
dHat = self.distance(pos, muHat)
# use classifier or not
if self.use:
if self.hard:
label = self.classify(z[j].rssi, dHat)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
                                elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
if label==0:
innov = abs(z[j].rssi-dHat)
else:
continue
else:
inp = torch.tensor([z[j].rssi, dHat])
out = self.model(inp.float()).detach().numpy()
innov = out[0]*abs(z[j].rssi - dHat) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
                                elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
innov = abs(z[j].rssi - dHat)
dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
den = math.sqrt(dx**2 + dy**2)
H = np.array([dx/den, dy/den])
if self.dim==3:
dz = muHat[2] - pos[2]
den = math.sqrt(dx**2 + dy**2 + dz**2)
H = np.array([dx/den, dy/den, dz/den])
try:
Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
except:
bp()
# Kalman Gain
K = np.matmul(sigHat, H)/Q
# update pose/ covar
mu = muHat + innov*K
K = K.reshape((self.dim,1))
sig = (np.identity(self.dim) - K*H)*sigHat
samples[i].mapMu[ID] = mu.reshape((self.dim,))
samples[i].mapSigma[ID] = sig.tolist()
                        # Gaussian measurement likelihood: exp(-innov^2/(2Q)) / sqrt(2*pi*Q)
                        samples[i].w = max(samples[i].w, math.exp(-0.5*(innov**2)/Q)/math.sqrt(2*math.pi*Q))
totWt += samples[i].w
# normalize the weights
if totWt==0:
for i in range(self.np):
samples[i].w = 1/self.np
else:
for i in range(self.np):
samples[i].w = samples[i].w/totWt
'''
resampling algorithm applicable to both
particle filter and fast slam because of
common structure of particle
'''
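    # Multinomial resampling via sorted uniform draws: Q is the cumulative sum
    # of the weights, T a sorted batch of uniforms, and idx[i] is the ancestor
    # chosen for particle i. The copy step only runs when more than 20% of the
    # ancestors are distinct, guarding against collapse onto a few particles.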
def resample(self, samples):
idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
for i in range(1, self.np):
Q[i] = samples[i].w + Q[i-1]
t = np.random.rand(self.np+1, 1)
T = np.sort(t, axis=0)
T[self.np] = 1 ; i,j = 0,0
while i<self.np and j<self.np:
if T[i] < Q[j]:
idx[i] = j
i += 1
else:
j += 1
if len(set(idx))>0.2*self.np:
for i in range(self.np):
samples[i].pose = samples[idx[i]].pose
samples[i].w = 1/self.np
samples[i].mapMu = samples[idx[i]].mapMu
samples[i].mapID = samples[idx[i]].mapID
samples[i].mapSigma = samples[idx[i]].mapSigma
samples[i].hashMap = samples[idx[i]].hashMap
'''
Calculates the effective number of particles
in the sampled distribution.
'''
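    # N_eff = 1 / sum_i w_tilde_i^2 with normalized weights w_tilde_i; it equals
    # np for uniform weights and 1 when one particle carries all of the weight.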
def neff(self, samples):
wghts = [0]*self.np ; totWt = 0
for i in range(self.np):
wghts[i] = samples[i].w
totWt += samples[i].w
den = 0
for i in range(self.np):
wghts[i] = (wghts[i]/totWt)**2
den += wghts[i]
return 1/den
'''
    Calculates the (unweighted) sample mean and
    covariance of the particle poses
'''
def meanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].pose[0]
mu[1] += samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/self.np
return mu, sig
'''
Calculates weighted mean and variance of the
sample distribution
'''
def weightedMeanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].w*samples[i].pose[0]
mu[1] += samples[i].w*samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/totWt
return mu, sig
'''
Get the maximum weighted particle and use it
to calculate the IDs of the APs discovered &
the locations of the discovered APs
'''
def getAPLocs(self, samples):
maxWeight = -9999999999 ; idx = 0
for i in range(self.np):
if samples[i].w > maxWeight:
maxWeight = samples[i].w
idx = i
self.APLocs = samples[idx].mapMu
self.IDs = samples[idx].mapID
'''
Plot the particle poses for each particle. Can
only be used for debugging as of now as animation
support is yet to be added
'''
def plot(self, samples):
x = [] ; y = []
for i in range(self.np):
x.append(samples[i].pose[0])
y.append(samples[i].pose[1])
plt.plot(x,y,'c.')
mXY,_ = self.meanVar(samples)
wmXY,_ = self.weightedMeanVar(samples)
plt.plot(mXY[0],mXY[1],'ro')
plt.plot(wmXY[0],wmXY[1],'bo')
plt.xlim([-100,300])
plt.ylim([-100,300])
plt.show()
'''
The main Particle filter class
'''
def particleFilter(self):
self.path.append(self.wayPts[0])
samples = self.distrib()
print("Running Particle Filter ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.measure_model(samples, self.dists[i])
# resample only when number of effective particle drops
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
print("Particle Filter has finished running ..")
'''
The main Fast SLAM v1 class
'''
def FastSLAM(self):
self.path.append(self.wayPts[0])
samples = self.distrib()
print("Running Fast SLAM ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.fast_measure_model(samples, self.dists[i])
# resample only when number of effective particle drops
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
self.getAPLocs(samples)
print("FastSLAM has finished running ..")
'''
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
'''
'''
Localizer for Experimental Setup:
1. Contains only FastSlam
2. Measurement Model updated to read data from experiments
'''
class localizeExp:
def __init__(self, numP, su, sz, map, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dim = map.dim
self.wayPts = map.wayPts
self.pts = self.convert(self.wayPts)
self.dim = map.dim
self.TXName = map.TXName
self.numPts = map.numPts
self.numAPs = map.numAPs
self.maxZ = map.maxZ
self.dists = map.distMap
self.name2MAC = map.name2MAC
self.name2Pos = map.name2Pos
self.MAC2Name = map.MAC2Name
self.start = self.wayPts[0][:2]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
if self.dim == 3: start = start
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
'''
load the PyTorch model weights from the saved state dict
'''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
def rssi2Dist(self, rssi):
'''
https://stackoverflow.com/questions/11217674/how-to-calculate-distance-from-wifi-router-using-signal-strength
http://pylayers.github.io/pylayers/notebook/2-AP/CoverageMetis.html
'''
if abs(rssi) > 60: exp = (abs(rssi) - 32.44)/20
else : exp = (abs(rssi) - 12.55)/20
val = (10**exp) / 60
return val
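# Worked example of the conversion above (illustrative values only,
# using the path-loss constants hard-coded in rssi2Dist):
#   rssi = -70  ->  exp = (70 - 32.44)/20 = 1.878
#                   val = 10**1.878 / 60  ~= 1.26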
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), np.diag(sig))
return mvn.pdf(val)
'''
no return value is needed: Python passes object references
(call-by-sharing), so mutating the Particle objects held in
the samples list updates them for the caller
'''
def motion_model(self, samples, point, su):
for i in range(self.np):
dx = point[0] - normrnd(0, su[0])
dy = point[1] - normrnd(0, su[1])
pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
samples[i].pose = pose
'''
measurement model for fast slam v1
label for dMap = 1 : NLOS , 0 : LOS
'''
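# Each particle carries one EKF per access point: mapMu/mapSigma hold the
# per-AP mean and covariance, and the innovation below is the gap between
# the RSSI-based range estimate and the range predicted from the particle pose.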
def fast_measure_model(self, samples, wpID):
Qt = np.diag([5,5])
Qt = Qt.tolist() ; totWt = 0
print("Iteration: " , wpID, end='\r')
for i in range(self.np):
for j in range(len(self.name2Pos)):
name = self.TXName[j]
tx = np.array(self.name2Pos[name])
pos = np.array(samples[i].pose)
# initialize particle map
if name not in samples[i].mapID:
samples[i].mapMu.append(tx[:2])
samples[i].mapSigma.append(Qt)
samples[i].mapID.append(name)
samples[i].hashMap[name] = len(samples[i].mapID) - 1
samples[i].w = 1/self.np
# update particle map
else:
ID = samples[i].hashMap[name]
# prediction step
muHat = samples[i].mapMu[ID]
sigHat = np.array(samples[i].mapSigma[ID])
# update step
dHat = self.distance(pos, muHat)
rssiDist = self.dists[wpID][j].rssi
# use classifier or not
if self.use:
if self.hard:
label = self.classify(rssiDist, dHat)
if label==0:
innov = abs(rssiDist-dHat)
else:
continue
else:
inp = torch.tensor([rssiDist, dHat])
out = self.model(inp.float()).detach().numpy()
innov = out[0]*abs(rssiDist - dHat) + out[1]*abs(rssiDist - normrnd(15,3))
else:
innov = abs(rssiDist - dHat)
dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
den = math.sqrt(dx**2 + dy**2)
H = np.array([dx/den, dy/den])
try:
Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
except:
bp()
# Kalman Gain
K = np.matmul(sigHat, H)/Q
# update pose/ covar
mu = muHat + innov*K
K = K.reshape((self.dim,1))
sig = (np.identity(self.dim) - K*H)*sigHat
samples[i].mapMu[ID] = mu.reshape((self.dim,))
samples[i].mapSigma[ID] = sig.tolist()
samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
totWt += samples[i].w
# normalize the weights
if totWt==0:
for i in range(self.np):
samples[i].w = 1/self.np
else:
for i in range(self.np):
samples[i].w = samples[i].w/totWt
'''
resampling algorithm applicable to both
particle filter and fast slam because of
common structure of particle
'''
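# The sorted uniform draws in T are matched against the cumulative weight
# vector Q, so particle j is copied once for each draw that lands in
# (Q[j-1], Q[j]] -- multinomial resampling in a single linear pass.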
def resample(self, samples):
idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
for i in range(1, self.np):
Q[i] = samples[i].w + Q[i-1]
t = np.random.rand(self.np+1, 1)
T = np.sort(t, axis=0)
T[self.np] = 1 ; i,j = 0,0
while i<self.np and j<self.np:
if T[i] < Q[j]:
idx[i] = j
i += 1
else:
j += 1
if len(set(idx))>0.2*self.np:
for i in range(self.np):
samples[i].pose = samples[idx[i]].pose
samples[i].w = 1/self.np
samples[i].mapMu = samples[idx[i]].mapMu
samples[i].mapID = samples[idx[i]].mapID
samples[i].mapSigma = samples[idx[i]].mapSigma
samples[i].hashMap = samples[idx[i]].hashMap
'''
Calculates the effective number of particles
in the sampled distribution.
'''
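# Equivalently, N_eff = (sum_i w_i)^2 / sum_i w_i^2, which is what the
# normalization below computes; the filters above resample once N_eff
# drops below a third of the particle count.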
def neff(self, samples):
wghts = [0]*self.np ; totWt = 0
for i in range(self.np):
wghts[i] = samples[i].w
totWt += samples[i].w
den = 0
for i in range(self.np):
wghts[i] = (wghts[i]/totWt)**2
den += wghts[i]
return 1/den
'''
Calculates the unweighted mean and variance of the
sample distribution
'''
def meanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].pose[0]
mu[1] += samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/self.np
return mu, sig
'''
Calculates weighted mean and variance of the
sample distribution
'''
def weightedMeanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].w*samples[i].pose[0]
mu[1] += samples[i].w*samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/totWt
return mu, sig
'''
Get the maximum weighted particle and use it
to calculate the IDs of the APs discovered &
the locations of the discovered APs
'''
def getAPLocs(self, samples):
maxWeight = -9999999999 ; idx = 0
for i in range(self.np):
if samples[i].w > maxWeight:
maxWeight = samples[i].w
idx = i
self.APLocs = samples[idx].mapMu
self.IDs = samples[idx].mapID
'''
Plot the particle poses for each particle. Can
only be used for debugging for now, as animation
support has yet to be added
'''
def plot(self):
print("Displaying Floor Plan.")
wayPts = self.wayPts
path = self.path
TX = self.APLocs
ID = self.IDs
# display the waypoints by RRT
if wayPts!=None:
rows = []; cols = []
for x,y in wayPts:
rows.append(x); cols.append(y)
plt.plot(cols, rows, 'b.-')
# display the actual AP locations
if self.TXName!=None:
rows = []; cols = []
for i in self.TXName:
rows.append(self.name2Pos[i][0]); cols.append(self.name2Pos[i][1])
plt.text(self.name2Pos[i][1],self.name2Pos[i][0]," NAME-"+str(i), color='black')
plt.plot(rows, cols, 'rx')
# display the localized path
if path!=None:
rows = []; cols = []
for i in path:
rows.append(i[0]); cols.append(i[1])
plt.plot(cols, rows, 'c.-')
# display the estimated AP locations
if TX!=None and ID!=None:
rows = []; cols = []; ctr = 0
for i in TX:
rows.append(i[0]); cols.append(i[1])
plt.text(i[1],i[0]," NAME "+str(ID[ctr]), color='red')
ctr += 1
plt.plot(cols, rows, 'rx')
plt.gca().invert_yaxis()
plt.show()
'''
The main Fast SLAM v1 class
'''
def FastSLAM(self):
self.path.append(self.wayPts[0][:2])
samples = self.distrib()
print("Running Fast SLAM ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.fast_measure_model(samples, i)
# resample only when the number of effective particles drops below a third of the total
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
self.getAPLocs(samples)
print("FastSLAM has finished running ..")
|
{"hexsha": "c28ebb519de9bc9d9ac37cfb68bb3d8715cb4b51", "size": 31290, "ext": "py", "lang": "Python", "max_stars_repo_path": "include/localize.py", "max_stars_repo_name": "sahibdhanjal/DeepLocNet", "max_stars_repo_head_hexsha": "a3a5973a0cb549d0a16f17b96a9c78c200cf0c7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2019-04-10T17:49:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T23:25:35.000Z", "max_issues_repo_path": "include/localize.py", "max_issues_repo_name": "guhaopython/DeepLocNet", "max_issues_repo_head_hexsha": "a3a5973a0cb549d0a16f17b96a9c78c200cf0c7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/localize.py", "max_forks_repo_name": "guhaopython/DeepLocNet", "max_forks_repo_head_hexsha": "a3a5973a0cb549d0a16f17b96a9c78c200cf0c7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-04-10T17:49:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T13:53:35.000Z", "avg_line_length": 35.9242250287, "max_line_length": 129, "alphanum_fraction": 0.4782678172, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 8195}
|
import numpy as np
from typing import List


class Solution:
    def minMoves2(self, nums: List[int]) -> int:
        # after sorting, the median sits at index len(nums)//2, and the
        # sum of absolute differences to a point is minimized at the median
        nums.sort()
        median = nums[len(nums) // 2]
        total = 0
        for num in nums:
            total += abs(num - median)
        return total
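# Quick illustrative check (hypothetical input):
#   Solution().minMoves2([1, 2, 3]) == 2   # move 1 -> 2 and 3 -> 2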
|
{"hexsha": "71b7ef56ab5f6800c93ce1452c4e63bd4f259deb", "size": 804, "ext": "py", "lang": "Python", "max_stars_repo_path": "leetcode/462.py", "max_stars_repo_name": "strawsyz/straw", "max_stars_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-06T09:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-24T03:59:55.000Z", "max_issues_repo_path": "leetcode/462.py", "max_issues_repo_name": "strawsyz/straw", "max_issues_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "leetcode/462.py", "max_forks_repo_name": "strawsyz/straw", "max_forks_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.935483871, "max_line_length": 68, "alphanum_fraction": 0.4353233831, "include": true, "reason": "import numpy", "num_tokens": 190}
|
//=============================================================================
//
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
//=============================================================================
#include <remus/worker/detail/MessageRouter.h>
#include <remus/server/PortNumbers.h>
#include <remus/common/SleepFor.h>
#include <remus/proto/Message.h>
#include <remus/proto/Response.h>
#include <remus/worker/detail/JobQueue.h>
#include <remus/proto/zmqHelper.h>
#include <remus/testing/Testing.h>
#include <boost/uuid/uuid.hpp>
using namespace remus::worker::detail;
namespace {
//------------------------------------------------------------------------------
remus::worker::ServerConnection bindToTCPSocket(zmq::socket_t &socket)
{
//try a new port each time we are called; this helps speed up the test
int port_offset = 94;
zmq::socketInfo<zmq::proto::tcp> socketInfo("127.0.0.1",
remus::server::WORKER_PORT + port_offset);
socketInfo = zmq::bindToAddress(socket,socketInfo);
return remus::worker::ServerConnection(socketInfo);
}
//------------------------------------------------------------------------------
void test_worker_terminate_routing_call(MessageRouter& mr,
remus::worker::ServerConnection serverConn,
zmq::socket_t& workerSocket,
JobQueue& jq)
{
mr.start( serverConn, *(serverConn.context()) );
REMUS_ASSERT( (mr.valid()) )
REMUS_ASSERT( (mr.isForwardingToServer()) )
//now send it a terminate message over the worker channel
remus::proto::Message sent = remus::proto::send_Message(remus::common::MeshIOType(),
remus::TERMINATE_WORKER,
&workerSocket);
REMUS_ASSERT( sent.isValid() )
//cheap block while we wait for the router thread to get the message
for(int i=0; i < 10 && jq.size() == 0; ++i)
{
remus::common::SleepForMillisec(250);
}
REMUS_ASSERT( (jq.size() > 0) )
remus::common::SleepForMillisec(250);
REMUS_ASSERT( (jq.size() == 1) )
remus::worker::Job invalid_job = jq.take();
REMUS_ASSERT( (!invalid_job.valid()) )
REMUS_ASSERT( (invalid_job.validityReason() ==
remus::worker::Job::TERMINATE_WORKER) )
//MessageRouter is still forwarding messages to the server
//we could have a worker shutting down and still have
//a message we need to route to the server ( a large result )
REMUS_ASSERT( (mr.valid()) )
REMUS_ASSERT( (mr.isForwardingToServer()) )
}
}
int UnitTestMessageRouterWorkerTermination(int, char *[])
{
zmq::socketInfo<zmq::proto::inproc> worker_channel(remus::testing::UniqueString());
zmq::socketInfo<zmq::proto::inproc> queue_channel(remus::testing::UniqueString());
//bind the serverSocket
boost::shared_ptr<zmq::context_t> context = remus::worker::make_ServerContext();
zmq::socket_t serverSocket(*context, ZMQ_ROUTER);
remus::worker::ServerConnection serverConn = bindToTCPSocket(serverSocket);
//set the context on the server connection to the one we just created
serverConn.context(context);
//we need to bind to the inproc sockets before constructing the MessageRouter
//this is a required implementation detail caused by zmq design, also we have
//to share the same zmq context with the inproc protocol
zmq::socket_t worker_socket(*context, ZMQ_PAIR);
zmq::bindToAddress(worker_socket, worker_channel);
JobQueue jq(*context,queue_channel); //bind the jobqueue to the worker channel
//It should be noted that once you send a terminate call to a JobQueue
//or MessageRouter it can't be started again
MessageRouter mr(worker_channel, queue_channel);
test_worker_terminate_routing_call(mr,serverConn,worker_socket,jq);
return 0;
}
|
{"hexsha": "3e9944e5dec445a88596757d8add3d863ba2c729", "size": 4145, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "remus/worker/detail/testing/UnitTestMessageRouterWorkerTermination.cxx", "max_stars_repo_name": "robertmaynard/Remus", "max_stars_repo_head_hexsha": "090a14c9a4b0e628a86590dcfa7e46ba728e9c04", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2015-08-11T03:42:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T11:36:16.000Z", "max_issues_repo_path": "remus/worker/detail/testing/UnitTestMessageRouterWorkerTermination.cxx", "max_issues_repo_name": "robertmaynard/Remus", "max_issues_repo_head_hexsha": "090a14c9a4b0e628a86590dcfa7e46ba728e9c04", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 25.0, "max_issues_repo_issues_event_min_datetime": "2015-01-22T18:35:50.000Z", "max_issues_repo_issues_event_max_datetime": "2016-02-23T16:42:56.000Z", "max_forks_repo_path": "remus/worker/detail/testing/UnitTestMessageRouterWorkerTermination.cxx", "max_forks_repo_name": "Kitware/Remus", "max_forks_repo_head_hexsha": "090a14c9a4b0e628a86590dcfa7e46ba728e9c04", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2015-03-23T20:41:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T09:09:05.000Z", "avg_line_length": 38.3796296296, "max_line_length": 86, "alphanum_fraction": 0.6337756333, "num_tokens": 951}
|
[STATEMENT]
theorem min_of_list3_correct: "(min_of_list3,min_of_list) \<in> (array_assn nat_assn)\<^sup>k \<rightarrow>\<^sub>a nat_assn"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (min_of_list3, min_of_list) \<in> (array_assn nat_assn)\<^sup>k \<rightarrow>\<^sub>a nat_assn
[PROOF STEP]
using min_of_list3.refine[FCOMP min_of_list2_refine, FCOMP min_of_list1_refine]
[PROOF STATE]
proof (prove)
using this:
(min_of_list3, min_of_list) \<in> (array_assn nat_assn)\<^sup>k \<rightarrow>\<^sub>a nat_assn
goal (1 subgoal):
1. (min_of_list3, min_of_list) \<in> (array_assn nat_assn)\<^sup>k \<rightarrow>\<^sub>a nat_assn
[PROOF STEP]
.
|
{"llama_tokens": 292, "file": "Refine_Imperative_HOL_Userguides_Sepref_Guide_Quickstart", "length": 2}
|
Name: Jesse Unger.
Office: FC
Personality:
Activities:
going to college... pretty much a full time job
work in Wickson Hall for Dr. Hildegarde Heymann
founder and prez of SBA AT UCD http://www.sbaatucd.com
working for qualcomm over the summer
going abroad
20051225 19:23:33 hey i called your number and it didnt work Users/DomenicSantangelo
20051226 00:11:24 Jesses is the best freshman I know...he helped me out in school! Users/LizethCazares
20060216 22:36:10 Hey! Users/JulieWeiss
20090523 23:58:04 Hee! Its one of my favorites! Users/CurlyGirl26
|
{"hexsha": "a8dc5785c46c690cb8937dbaf976315e67a3828f", "size": 587, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/JesseUnger.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/JesseUnger.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/JesseUnger.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5217391304, "max_line_length": 107, "alphanum_fraction": 0.7717206133, "num_tokens": 185}
|
import os
import scipy.io as io
import numpy as np
import torch
from .. import LIB_DATA_PATH
from .spatial import SpatialModel
from .spatial_OLD.spatial_model import SpatialModel as SpatialModelOriginal
from .spatial_OLD.spatial_hist import SpatialHist
from ..util.general import aeq
class Library(object):
"""
LIBRARY: hyper-parameters for the BPL model
Parameters
----------
lib_dir : string
path to the library files
use_hist : bool
if true, the original BPL spatial histogram model will
be used. The default (False) uses a new, differentiable
spatial distribution
"""
def __init__(self, lib_dir=None, use_hist=False):
if lib_dir is None:
lib_dir = LIB_DATA_PATH
# get contents of dir
contents = os.listdir(lib_dir)
# save lists of structs and single elements
structs = ['shape', 'scale', 'rel', 'tokenvar', 'affine', 'stat']
singles = [
'logT', 'logStart', 'pkappa', 'pmat_nsub', 'newscale',
'smooth_bigrams', 'diagSigma'
]
# load structs
for elt in structs:
assert elt in contents
value = get_dict(os.path.join(lib_dir, elt))
if elt == 'shape':
value = fix_shape_params(value)
setattr(self, elt, value)
# load individual properties
for elt in singles:
assert elt+'.mat' in contents
value = get_data(elt+'.mat', lib_dir)
setattr(self, elt, value)
# change type of 'diagSigma' to torch.uint8 since this is a boolean
self.diagSigma = self.diagSigma.byte()
# Finally, load SpatialModel
if use_hist:
# use original BPL spatial histograms
spatial_path = os.path.join(lib_dir, 'Spatial')
hists = sorted(os.listdir(spatial_path))
list_SH = []
for hist in hists:
SH = load_hist(os.path.join(spatial_path, hist))
list_SH.append(SH)
SM = SpatialModelOriginal()
SM.set_properties(list_SH)
self.Spatial = SM
else:
# use new spatial model that is differentiable
clump_ID = 2
xlim = torch.tensor([0, 105], dtype=torch.float)
ylim = torch.tensor([-105, 0], dtype=torch.float)
spatial_model = SpatialModel(xlim, ylim, clump_ID)
spatial_model.initialize_unif()
self.Spatial = spatial_model
# Check consistency of the library
self.check_consistent()
@property
def ncpt(self):
"""
Get the number of control points
Returns
-------
ncpt : int
the number of control points
"""
dim = self.shape['mu'].shape[1]
assert dim % 2 == 0 # dimension must be even
ncpt = int(dim/2)
return ncpt
@property
def N(self):
"""
Get the number of primitives
Returns
-------
N: int
the number of primitives
"""
N = self.shape['mu'].shape[0]
return N
def check_consistent(self):
"""
Check consistency of the number of primitives in the model
"""
N = self.N
ncpt = self.ncpt
assert len(self.shape['mu'].shape) == 2
assert len(self.shape['Sigma'].shape) == 3
assert self.shape['mu'].shape[1] == ncpt*2
assert self.shape['Sigma'].shape[0] == N
assert self.shape['Sigma'].shape[1] == ncpt*2
assert self.shape['Sigma'].shape[2] == ncpt*2
assert self.logT.shape[0] == N
assert self.logStart.shape[0] == N
assert self.shape['mixprob'].shape[0] == N
assert self.shape['freq'].shape[0] == N
assert self.shape['vsd'].shape[0] == N
assert self.scale['theta'].shape[0] == N
assert aeq(torch.sum(torch.exp(self.logStart)), torch.tensor(1.))
for sid in range(N):
pT = self.pT(torch.tensor(sid))
assert aeq(torch.sum(pT), torch.tensor(1.))
def pT(self, prev_state):
"""
Get the probability of transitioning to a new state, given your current
state is "prev_state"
Parameters
----------
prev_state : tensor
current state of the model
Returns
-------
p : tensor
probability vector; probabilities of transitioning to
each potential new state
"""
assert prev_state.shape == torch.Size([])
logR = self.logT[prev_state]
R = torch.exp(logR)
p = R / torch.sum(R)
return p
@property
def isunif(self):
return torch.isnan(self.shape['mu']).any()
def get_dict(path):
"""
load folder of arrays as dictionary of tensors
"""
field = {}
contents = os.listdir(path)
for item in contents:
key = item.split('.')[0]
field[key] = get_data(item, path)
return field
def get_data(item, path):
"""
load single array as a tensor
"""
item_path = os.path.join(path, item)
data = io.loadmat(item_path)['value']
data = data.astype(np.float32) # convert to float32
out = torch.squeeze(torch.tensor(data, dtype=torch.float))
return out
def load_hist(path):
"""
load spatial histogram
"""
# load all hist properties
logpYX = io.loadmat(os.path.join(path, 'logpYX'))['value']
xlab = io.loadmat(os.path.join(path, 'xlab'))['value']
ylab = io.loadmat(os.path.join(path, 'ylab'))['value']
rg_bin = io.loadmat(os.path.join(path, 'rg_bin'))['value']
prior_count = io.loadmat(os.path.join(path, 'prior_count'))['value']
# fix some of the properties, convert to torch tensors
logpYX = torch.tensor(logpYX, dtype=torch.float)
xlab = torch.tensor(xlab[0], dtype=torch.float)
ylab = torch.tensor(ylab[0], dtype=torch.float)
rg_bin = torch.tensor(rg_bin[0], dtype=torch.float)
prior_count = prior_count.item()
# build the SpatialHist instance
H = SpatialHist()
H.set_properties(logpYX, xlab, ylab, rg_bin, prior_count)
return H
def fix_shape_params(shape):
"""
fix organization of shapes 'mu' and 'Sigma' arrays to account for
differences in the 'reshape' operation between MATLAB and numpy/pytorch
"""
shapes_mu = shape['mu']
shapes_Cov = shape['Sigma']
n, m = shapes_mu.shape
assert m % 2 == 0
ncpt = m // 2
# fix shapes mean
shapes_mu = shapes_mu.view(n, 2, ncpt) # (n, 2, ncpt)
shapes_mu = shapes_mu.permute(0, 2, 1) # (n, ncpt, 2)
shapes_mu = shapes_mu.contiguous()
shapes_mu = shapes_mu.view(n, ncpt * 2) # (n, ncpt*2)
shapes_mu = shapes_mu.contiguous()
# fix shapes covariance
shapes_Cov = shapes_Cov.permute(2, 0, 1) # (n, 2*ncpt, 2*ncpt)
shapes_Cov = shapes_Cov.view(n, 2, ncpt, 2, ncpt) # (n, 2, ncpt, 2, ncpt)
shapes_Cov = shapes_Cov.permute(0, 2, 1, 4, 3) # (n, ncpt, 2, ncpt, 2)
shapes_Cov = shapes_Cov.contiguous()
shapes_Cov = shapes_Cov.view(n, ncpt * 2, ncpt * 2) # (n, ncpt*2, ncpt*2)
shapes_Cov = shapes_Cov.contiguous()
# re-assign
shape['mu'] = shapes_mu
shape['Sigma'] = shapes_Cov
return shape
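# Illustrative note on the reordering above (inferred from the views):
# MATLAB stores each primitive's control points as [x_1..x_k, y_1..y_k];
# the permute/view sequence interleaves the coordinates into
# [x_1, y_1, ..., x_k, y_k], the layout the rest of this module expects.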
|
{"hexsha": "18aa18bb038b0bd03fa0b0fa9ef2aac028fa706b", "size": 7306, "ext": "py", "lang": "Python", "max_stars_repo_path": "pybpl/library/library.py", "max_stars_repo_name": "lucast4/pyBPL", "max_stars_repo_head_hexsha": "fc5d9a87266df4a7b0014d09feec713a7052bc39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2020-05-08T17:31:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T10:05:32.000Z", "max_issues_repo_path": "pybpl/library/library.py", "max_issues_repo_name": "lucast4/pyBPL", "max_issues_repo_head_hexsha": "fc5d9a87266df4a7b0014d09feec713a7052bc39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-05-22T03:39:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-05T23:31:20.000Z", "max_forks_repo_path": "pybpl/library/library.py", "max_forks_repo_name": "lucast4/pyBPL", "max_forks_repo_head_hexsha": "fc5d9a87266df4a7b0014d09feec713a7052bc39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-05-11T12:38:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T13:08:53.000Z", "avg_line_length": 31.7652173913, "max_line_length": 79, "alphanum_fraction": 0.5856830003, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1897}
|
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon
-/
import tactic.hint
namespace tactic
open expr
open tactic.interactive ( casesm constructor_matching )
/--
find all assumptions of the shape `¬ (p ∧ q)` or `¬ (p ∨ q)` and
replace them using de Morgan's law.
-/
meta def distrib_not : tactic unit :=
do hs ← local_context,
hs.for_each $ λ h,
all_goals' $
iterate_at_most' 3 $
do h ← get_local h.local_pp_name,
e ← infer_type h,
match e with
| `(¬ _ = _) := replace h.local_pp_name ``(mt iff.to_eq %%h)
| `(_ ≠ _) := replace h.local_pp_name ``(mt iff.to_eq %%h)
| `(_ = _) := replace h.local_pp_name ``(eq.to_iff %%h)
| `(¬ (_ ∧ _)) := replace h.local_pp_name ``(decidable.not_and_distrib'.mp %%h) <|>
replace h.local_pp_name ``(decidable.not_and_distrib.mp %%h)
| `(¬ (_ ∨ _)) := replace h.local_pp_name ``(not_or_distrib.mp %%h)
| `(¬ ¬ _) := replace h.local_pp_name ``(decidable.of_not_not %%h)
| `(¬ (_ → (_ : Prop))) := replace h.local_pp_name ``(decidable.not_imp.mp %%h)
| `(¬ (_ ↔ _)) := replace h.local_pp_name ``(decidable.not_iff.mp %%h)
| `(_ ↔ _) := replace h.local_pp_name ``(decidable.iff_iff_and_or_not_and_not.mp %%h) <|>
replace h.local_pp_name ``(decidable.iff_iff_and_or_not_and_not.mp (%%h).symm) <|>
() <$ tactic.cases h
| `(_ → _) := replace h.local_pp_name ``(decidable.not_or_of_imp %%h)
| _ := failed
end
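-- Illustrative effect (hypothetical hypothesis names): `h : ¬ (p ∧ q)` is
-- replaced by a disjunction of negations (`¬ p ∨ ¬ q`, up to the order the
-- lemma picks), and `h : ¬ ¬ p` is replaced by `h : p`.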
/-!
The following definitions maintain a path-compression data structure, i.e. a forest such that:
- every node is the type of a hypothesis
- there is an edge between two nodes only if they are provably equivalent
- every edge is labelled with a proof of equivalence for its vertices
- edges are added when normalizing propositions.
-/
meta def tauto_state := ref $ expr_map (option (expr × expr))
meta def modify_ref {α : Type} (r : ref α) (f : α → α) :=
read_ref r >>= write_ref r ∘ f
meta def add_refl (r : tauto_state) (e : expr) : tactic (expr × expr) :=
do m ← read_ref r,
p ← mk_mapp `rfl [none,e],
write_ref r $ m.insert e none,
return (e,p)
/--
If there exists a symmetry lemma that can be applied to the hypothesis `e`,
store it.
-/
meta def add_symm_proof (r : tauto_state) (e : expr) : tactic (expr × expr) :=
do env ← get_env,
let rel := e.get_app_fn.const_name,
some symm ← pure $ environment.symm_for env rel
| add_refl r e,
(do e' ← mk_meta_var `(Prop),
iff_t ← to_expr ``(%%e = %%e'),
(_,p) ← solve_aux iff_t
(applyc `iff.to_eq ; () <$ split ; applyc symm),
e' ← instantiate_mvars e',
m ← read_ref r,
write_ref r $ (m.insert e (e',p)).insert e' none,
return (e',p) )
<|> add_refl r e
meta def add_edge (r : tauto_state) (x y p : expr) : tactic unit :=
modify_ref r $ λ m, m.insert x (y,p)
/--
Retrieve the root of the hypothesis `e` from the proof forest.
If `e` has not been internalized, add it to the proof forest.
-/
meta def root (r : tauto_state) : expr → tactic (expr × expr) | e :=
do m ← read_ref r,
let record_e : tactic (expr × expr) :=
match e with
| v@(expr.mvar _ _ _) :=
(do (e,p) ← get_assignment v >>= root,
add_edge r v e p,
return (e,p)) <|>
add_refl r e
| _ := add_refl r e
end,
some e' ← pure $ m.find e | record_e,
match e' with
| (some (e',p')) :=
do (e'',p'') ← root e',
p'' ← mk_app `eq.trans [p',p''],
add_edge r e e'' p'',
pure (e'',p'')
| none := prod.mk e <$> mk_mapp `rfl [none,some e]
end
/--
Given hypotheses `a` and `b`, build a proof that `a` is equivalent to `b`,
applying congruence and recursing into arguments if `a` and `b`
are applications of function symbols.
-/
meta def symm_eq (r : tauto_state) : expr → expr → tactic expr | a b :=
do m ← read_ref r,
(a',pa) ← root r a,
(b',pb) ← root r b,
(unify a' b' >> add_refl r a' *> mk_mapp `rfl [none,a]) <|>
do p ← match (a', b') with
| (`(¬ %%a₀), `(¬ %%b₀)) :=
do p ← symm_eq a₀ b₀,
p' ← mk_app `congr_arg [`(not),p],
add_edge r a' b' p',
return p'
| (`(%%a₀ ∧ %%a₁), `(%%b₀ ∧ %%b₁)) :=
do p₀ ← symm_eq a₀ b₀,
p₁ ← symm_eq a₁ b₁,
p' ← to_expr ``(congr (congr_arg and %%p₀) %%p₁),
add_edge r a' b' p',
return p'
| (`(%%a₀ ∨ %%a₁), `(%%b₀ ∨ %%b₁)) :=
do p₀ ← symm_eq a₀ b₀,
p₁ ← symm_eq a₁ b₁,
p' ← to_expr ``(congr (congr_arg or %%p₀) %%p₁),
add_edge r a' b' p',
return p'
| (`(%%a₀ ↔ %%a₁), `(%%b₀ ↔ %%b₁)) :=
(do p₀ ← symm_eq a₀ b₀,
p₁ ← symm_eq a₁ b₁,
p' ← to_expr ``(congr (congr_arg iff %%p₀) %%p₁),
add_edge r a' b' p',
return p') <|>
do p₀ ← symm_eq a₀ b₁,
p₁ ← symm_eq a₁ b₀,
p' ← to_expr ``(eq.trans (congr (congr_arg iff %%p₀) %%p₁)
(iff.to_eq iff.comm ) ),
add_edge r a' b' p',
return p'
| (`(%%a₀ → %%a₁), `(%%b₀ → %%b₁)) :=
if ¬ a₁.has_var ∧ ¬ b₁.has_var then
do p₀ ← symm_eq a₀ b₀,
p₁ ← symm_eq a₁ b₁,
p' ← mk_app `congr_arg [`(implies),p₀,p₁],
add_edge r a' b' p',
return p'
else unify a' b' >> add_refl r a' *> mk_mapp `rfl [none,a]
| (_, _) :=
(do guard $ a'.get_app_fn.is_constant ∧
a'.get_app_fn.const_name = b'.get_app_fn.const_name,
(a'',pa') ← add_symm_proof r a',
guard $ a'' =ₐ b',
pure pa' )
end,
p' ← mk_eq_trans pa p,
add_edge r a' b' p',
mk_eq_symm pb >>= mk_eq_trans p'
meta def find_eq_type (r : tauto_state) : expr → list expr → tactic (expr × expr)
| e [] := failed
| e (H :: Hs) :=
do t ← infer_type H,
(prod.mk H <$> symm_eq r e t) <|> find_eq_type e Hs
private meta def contra_p_not_p (r : tauto_state) : list expr → list expr → tactic unit
| [] Hs := failed
| (H1 :: Rs) Hs :=
do t ← (extract_opt_auto_param <$> infer_type H1) >>= whnf,
(do a ← match_not t,
(H2,p) ← find_eq_type r a Hs,
H2 ← to_expr ``( (%%p).mpr %%H2 ),
tgt ← target,
pr ← mk_app `absurd [tgt, H2, H1],
tactic.exact pr)
<|> contra_p_not_p Rs Hs
meta def contradiction_with (r : tauto_state) : tactic unit :=
contradiction <|>
do tactic.try intro1,
ctx ← local_context,
contra_p_not_p r ctx ctx
meta def contradiction_symm :=
using_new_ref (native.rb_map.mk _ _) contradiction_with
meta def assumption_with (r : tauto_state) : tactic unit :=
do { ctx ← local_context,
t ← target,
(H,p) ← find_eq_type r t ctx,
mk_eq_mpr p H >>= tactic.exact }
<|> fail "assumption tactic failed"
meta def assumption_symm :=
using_new_ref (native.rb_map.mk _ _) assumption_with
/--
Configuration options for `tauto`.
If `classical` is `tt`, runs `classical` before the rest of `tauto`.
`closer` is run on any remaining subgoals left by `tauto_core; basic_tauto_tacs`.
-/
meta structure tauto_cfg :=
(classical : bool := ff)
(closer : tactic unit := pure ())
meta def tautology (cfg : tauto_cfg := {}) : tactic unit := focus1 $
let basic_tauto_tacs : list (tactic unit) :=
[reflexivity, solve_by_elim,
constructor_matching none [``(_ ∧ _),``(_ ↔ _),``(Exists _),``(true)]],
tauto_core (r : tauto_state) : tactic unit :=
do try (contradiction_with r);
try (assumption_with r);
repeat (do
gs ← get_goals,
repeat (() <$ tactic.intro1);
distrib_not;
casesm (some ()) [``(_ ∧ _),``(_ ∨ _),``(Exists _),``(false)];
try (contradiction_with r);
try (target >>= match_or >> refine ``( or_iff_not_imp_left.mpr _));
try (target >>= match_or >> refine ``( or_iff_not_imp_right.mpr _));
repeat (() <$ tactic.intro1);
constructor_matching (some ()) [``(_ ∧ _),``(_ ↔ _),``(true)];
try (assumption_with r),
gs' ← get_goals,
guard (gs ≠ gs') ) in
do when cfg.classical classical,
using_new_ref (expr_map.mk _) tauto_core;
repeat (first basic_tauto_tacs); cfg.closer, done
namespace interactive
local postfix `?`:9001 := optional
setup_tactic_parser
/--
`tautology` breaks down assumptions of the form `_ ∧ _`, `_ ∨ _`, `_ ↔ _` and `∃ _, _`
and splits a goal of the form `_ ∧ _`, `_ ↔ _` or `∃ _, _` until it can be discharged
using `reflexivity` or `solve_by_elim`.
This is a finishing tactic: it either closes the goal or raises an error.
The variant `tautology!` uses the law of excluded middle.
`tautology {closer := tac}` will use `tac` on any subgoals created by `tautology`
that it is unable to solve before failing.
-/
meta def tautology (c : parse $ (tk "!")?) (cfg : tactic.tauto_cfg := {}) :=
tactic.tautology $ { classical := c.is_some, ..cfg }
-- Now define a shorter name for the tactic `tautology`.
/--
`tauto` breaks down assumptions of the form `_ ∧ _`, `_ ∨ _`, `_ ↔ _` and `∃ _, _`
and splits a goal of the form `_ ∧ _`, `_ ↔ _` or `∃ _, _` until it can be discharged
using `reflexivity` or `solve_by_elim`.
This is a finishing tactic: it either closes the goal or raises an error.
The variant `tauto!` uses the law of excluded middle.
`tauto {closer := tac}` will use `tac` on any subgoals created by `tauto`
that it is unable to solve before failing.
-/
meta def tauto (c : parse $ (tk "!")?) (cfg : tactic.tauto_cfg := {}) : tactic unit :=
tautology c cfg
add_hint_tactic "tauto"
/--
This tactic (with shorthand `tauto`) breaks down assumptions of the form
`_ ∧ _`, `_ ∨ _`, `_ ↔ _` and `∃ _, _`
and splits a goal of the form `_ ∧ _`, `_ ↔ _` or `∃ _, _` until it can be discharged
using `reflexivity` or `solve_by_elim`. This is a finishing tactic: it
either closes the goal or raises an error.
The variants `tautology!` and `tauto!` use the law of excluded middle.
For instance, one can write:
```lean
example (p q r : Prop) [decidable p] [decidable r] : p ∨ (q ∧ r) ↔ (p ∨ q) ∧ (r ∨ p ∨ r) := by tauto
```
and the decidability assumptions can be dropped if `tauto!` is used
instead of `tauto`.
`tauto {closer := tac}` will use `tac` on any subgoals created by `tauto`
that it is unable to solve before failing.
-/
add_tactic_doc
{ name := "tautology",
category := doc_category.tactic,
decl_names := [`tactic.interactive.tautology, `tactic.interactive.tauto],
tags := ["logic", "decision procedure"] }
end interactive
end tactic
|
{"author": "JLimperg", "repo": "aesop3", "sha": "a4a116f650cc7403428e72bd2e2c4cda300fe03f", "save_path": "github-repos/lean/JLimperg-aesop3", "path": "github-repos/lean/JLimperg-aesop3/aesop3-a4a116f650cc7403428e72bd2e2c4cda300fe03f/src/tactic/tauto.lean"}
|
program set_threads
! Load the OpenMP functions library
use omp_lib
! Set variables
implicit none
integer :: tnum
! Create a parallel block of four threads (including master thread)
!$OMP PARALLEL PRIVATE(tnum) NUM_THREADS(4)
tnum = OMP_GET_THREAD_NUM()
print *, "I am thread number", tnum
!$OMP END PARALLEL
! Create a parallel block, without specifying the number of threads
!$OMP PARALLEL PRIVATE(tnum)
tnum = OMP_GET_THREAD_NUM()
print *, "Second block, I am thread number", tnum
!$OMP END PARALLEL
end program set_threads
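! Illustrative build-and-run, assuming gfortran with OpenMP support:
!   gfortran -fopenmp setthreads.f90 -o setthreads
!   ./setthreads
! The first block always prints four lines; the second prints one line
! per thread in the default team (typically the core count, or the
! value of OMP_NUM_THREADS if it is set).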
|
{"hexsha": "561d9b4744e8dad78f94450968f59cdf016ae88b", "size": 608, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "files/arc_openmp/fortran/setthreads.f90", "max_stars_repo_name": "ARCCA/Introduction-to-Parallel-Programming-using-OpenMP", "max_stars_repo_head_hexsha": "830c240a041c32928b6c1fb0f76693268114cd2e", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "files/arc_openmp/fortran/setthreads.f90", "max_issues_repo_name": "ARCCA/Introduction-to-Parallel-Programming-using-OpenMP", "max_issues_repo_head_hexsha": "830c240a041c32928b6c1fb0f76693268114cd2e", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "files/arc_openmp/fortran/setthreads.f90", "max_forks_repo_name": "ARCCA/Introduction-to-Parallel-Programming-using-OpenMP", "max_forks_repo_head_hexsha": "830c240a041c32928b6c1fb0f76693268114cd2e", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4347826087, "max_line_length": 71, "alphanum_fraction": 0.6726973684, "num_tokens": 150}
|
-- Andreas, 2012-01-30, bug reported by Nisse
-- {-# OPTIONS -v tc.term.absurd:50 -v tc.signature:30 -v tc.conv.atom:30 -v tc.conv.elim:50 #-}
module Issue557 where
data ⊥ : Set where
postulate
A : Set
a : (⊥ → ⊥) → A
F : A → Set
f : (a : A) → F a
module M (I : Set → Set) where
x : A
x = a (λ ())
y : A
y = M.x (λ A → A)
z : F y
z = f y
-- cause was absurd lambda in a module, i.e., under a telescope (I : Set -> Set)
-- (λ ()) must be replaced by (absurd I) not just by (absurd)
|
{"hexsha": "4e0ca7e6bd1ca737b7b4b7252e4f18163452cd0f", "size": 500, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Succeed/Issue557.agda", "max_stars_repo_name": "cruhland/agda", "max_stars_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Succeed/Issue557.agda", "max_issues_repo_name": "cruhland/agda", "max_issues_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Succeed/Issue557.agda", "max_forks_repo_name": "cruhland/agda", "max_forks_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 19.2307692308, "max_line_length": 96, "alphanum_fraction": 0.562, "num_tokens": 194}
|
! This file is part of HolmMHD
! Copyright (C) 2019 Daniel Verscharen (d.verscharen@ucl.ac.uk)
!All rights reserved.
!
!Redistribution and use in source and binary forms, with or without
!modification, are permitted provided that the following conditions are met:
!
!1. Redistributions of source code must retain the above copyright notice, this
! list of conditions and the following disclaimer.
!2. Redistributions in binary form must reproduce the above copyright notice,
! this list of conditions and the following disclaimer in the documentation
! and/or other materials provided with the distribution.
!
!THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
!ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
!WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
!DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
!ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
!(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
!LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
!ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
!(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
!SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!
!The views and conclusions contained in the software and documentation are those
!of the authors and should not be interpreted as representing official policies,
!either expressed or implied, of the HolmMHD project.
subroutine set_parameters()
use globals
implicit none
! Root blocks ( RBx * RBy * RBz has to be equal to the number of MPI processes):
RBx=2
RBy=2
RBz=1
! Number of grid points per root block:
Nx = 150
Ny = 150
Nz = 2
! Total number of time steps for the simulation:
Nt = 7000
! Upper bound for time step (will be adjusted according to the CFL criterion):
dt = 0.01d0
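! For reference, the CFL criterion bounds the step roughly as
!   dt <= C * min(dx,dy,dz) / v_max
! with v_max the fastest wave speed on the grid and C <= 1; the value
! above is only the initial cap before that adjustment is applied.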
! Step size in spatial coordinates (can be changed in initialise.f90):
dx = 1.d0/(1.d0*(RBx*Nx))
dy = 1.d0/(1.d0*(RBy*Ny))
dz = 1.d0/(1.d0*(RBz*Nz))
! Adiabatic index:
gamma = 5.d0/3.d0
! Output occurs every outsteps steps:
outsteps=10
end subroutine
|
{"hexsha": "03dcbee201d118583b2d5db0ce8c7a114f108cdf", "size": 2222, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/parameters.f90", "max_stars_repo_name": "danielver02/HolmMHD", "max_stars_repo_head_hexsha": "e32a42e77377bbc6632cb0e1aa975c45a83d1a66", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-07-16T01:44:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-27T01:55:53.000Z", "max_issues_repo_path": "src/parameters.f90", "max_issues_repo_name": "danielver02/HolmMHD", "max_issues_repo_head_hexsha": "e32a42e77377bbc6632cb0e1aa975c45a83d1a66", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/parameters.f90", "max_forks_repo_name": "danielver02/HolmMHD", "max_forks_repo_head_hexsha": "e32a42e77377bbc6632cb0e1aa975c45a83d1a66", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.71875, "max_line_length": 80, "alphanum_fraction": 0.7704770477, "num_tokens": 554}
|
// Copyright 2005 Alexander Nasonov.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef FILE_boost_type_traits_integral_promotion_hpp_INCLUDED
#define FILE_boost_type_traits_integral_promotion_hpp_INCLUDED
#include <boost/config.hpp>
#include <boost/type_traits/integral_constant.hpp>
#include <boost/type_traits/is_const.hpp>
#include <boost/type_traits/is_enum.hpp>
#include <boost/type_traits/is_volatile.hpp>
#include <boost/type_traits/remove_cv.hpp>
namespace boost
{
namespace type_traits
{
namespace detail
{
// 4.5/2
template <class T>
struct need_promotion : public boost::is_enum<T>
{
};
// 4.5/1
template <>
struct need_promotion<char> : public true_type
{
};
template <>
struct need_promotion<signed char> : public true_type
{
};
template <>
struct need_promotion<unsigned char> : public true_type
{
};
template <>
struct need_promotion<signed short int> : public true_type
{
};
template <>
struct need_promotion<unsigned short int> : public true_type
{
};
// Specializations for non-standard types.
// Type is promoted if it's smaller then int.
#define BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(T) \
template <> \
struct need_promotion<T> \
: public integral_constant<bool, (sizeof(T) < sizeof(int))> \
{ \
};
// Same set of integral types as in boost/type_traits/is_integral.hpp.
// Please, keep in sync.
#if (defined(BOOST_INTEL_CXX_VERSION) && defined(_MSC_VER) && \
(BOOST_INTEL_CXX_VERSION <= 600)) || \
(defined(__BORLANDC__) && (__BORLANDC__ == 0x600) && (_MSC_VER < 1300))
// TODO: common macro for this #if. Or better yet, PP SEQ of non-standard types.
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(__int8)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(unsigned __int8)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(__int16)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(unsigned __int16)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(__int32)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(unsigned __int32)
#ifdef __BORLANDC__
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(unsigned __int64)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(__int64)
#endif
#endif
#if defined(BOOST_HAS_LONG_LONG)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(boost::ulong_long_type)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(boost::long_long_type)
#elif defined(BOOST_HAS_MS_INT64)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(unsigned __int64)
BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE(__int64)
#endif
#undef BOOST_TT_AUX_PROMOTE_NONSTANDARD_TYPE
#ifndef BOOST_NO_INTRINSIC_WCHAR_T
// 4.5/2
template <>
struct need_promotion<wchar_t> : public true_type
{
};
#endif
// 4.5/3 (integral bit-field) is not supported.
// 4.5/4
template <>
struct need_promotion<bool> : public true_type
{
};
// Get promoted type by index and cv qualifiers.
template <int Index, int IsConst, int IsVolatile>
struct promote_from_index;
#define BOOST_TT_AUX_PROMOTE_FROM_INDEX(N, T) \
template <> \
struct promote_from_index<N, 0, 0> \
{ \
typedef T type; \
}; \
template <> \
struct promote_from_index<N, 0, 1> \
{ \
typedef T volatile type; \
}; \
template <> \
struct promote_from_index<N, 1, 0> \
{ \
typedef T const type; \
}; \
template <> \
struct promote_from_index<N, 1, 1> \
{ \
typedef T const volatile type; \
};
BOOST_TT_AUX_PROMOTE_FROM_INDEX(1, int)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(2, unsigned int)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(3, long)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(4, unsigned long)
// WARNING: integral promotions to non-standard types
// long long and __int64 are not defined by the standard.
// Additional specialisations and overloads shouldn't
// introduce ambiguity, though.
#if defined(BOOST_HAS_LONG_LONG)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(5, boost::long_long_type)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(6, boost::ulong_long_type)
#elif defined(BOOST_HAS_MS_INT64)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(7, __int64)
BOOST_TT_AUX_PROMOTE_FROM_INDEX(8, unsigned __int64)
#endif
#undef BOOST_TT_AUX_PROMOTE_FROM_INDEX
// Define BOOST_TT_AUX_PROMOTED_INDEX_TESTER:
#if !defined(BOOST_MSVC)
template <int N>
struct sized_type_for_promotion
{
typedef char (&type)[N];
};
#define BOOST_TT_AUX_PROMOTED_INDEX_TESTER(I, T) \
sized_type_for_promotion<I>::type promoted_index_tester(T);
#else
#define BOOST_TT_AUX_PROMOTED_INDEX_TESTER(I, T) \
char(&promoted_index_tester(T))[I];
#endif
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(1, int)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(2, unsigned int)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(3, long)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(4, unsigned long)
#if defined(BOOST_HAS_LONG_LONG)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(5, boost::long_long_type)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(6, boost::ulong_long_type)
#elif defined(BOOST_HAS_MS_INT64)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(7, __int64)
BOOST_TT_AUX_PROMOTED_INDEX_TESTER(8, unsigned __int64)
#endif
#undef BOOST_TT_AUX_PROMOTED_INDEX_TESTER
// Get an index of promoted type for type T.
// Precondition: need_promotion<T>
template <class T>
struct promoted_index
{
static T testee; // undefined
BOOST_STATIC_CONSTANT(int, value = sizeof(promoted_index_tester(+testee)));
// Unary plus promotes testee LOOK HERE ---> ^
};
template <class T>
struct integral_promotion_impl
{
typedef BOOST_DEDUCED_TYPENAME promote_from_index<
(boost::type_traits::detail::promoted_index<T>::value),
(boost::is_const<T>::value), (boost::is_volatile<T>::value)>::type type;
};
template <class T, bool b>
struct integral_promotion
{
typedef T type;
};
template <class T>
struct integral_promotion<T, true> : public integral_promotion_impl<T>
{
};
} // namespace detail
} // namespace type_traits
template <class T>
struct integral_promotion
{
private:
typedef boost::type_traits::detail::need_promotion<
typename remove_cv<T>::type>
tag_type;
public:
typedef typename boost::type_traits::detail::integral_promotion<
T, tag_type::value>::type type;
};
#if !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
template <class T>
using integral_promotion_t = typename integral_promotion<T>::type;
#endif
} // namespace boost
#endif // #ifndef FILE_boost_type_traits_integral_promotion_hpp_INCLUDED
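// Illustrative use of the public interface above (a compile-time sketch,
// assuming <type_traits> for std::is_same):
//
//   static_assert(std::is_same<
//       boost::integral_promotion<short>::type, int>::value, "");
//   static_assert(std::is_same<
//       boost::integral_promotion<int>::type, int>::value, "");
//
// short needs promotion (4.5/1) and becomes int; int is left unchanged.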
|
{"hexsha": "92d2a6745017f91d14b4bee89396e1faa4d05053", "size": 7804, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/boost/type_traits/integral_promotion.hpp", "max_stars_repo_name": "sotaoverride/backup", "max_stars_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/boost/type_traits/integral_promotion.hpp", "max_issues_repo_name": "sotaoverride/backup", "max_issues_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/boost/type_traits/integral_promotion.hpp", "max_forks_repo_name": "sotaoverride/backup", "max_forks_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3817427386, "max_line_length": 80, "alphanum_fraction": 0.6200666325, "num_tokens": 1693}
|
!> @brief This function returns the overall index for any step within 2 nested do loops
!>
!> @param[in] i1 the index of the first do loop
!>
!> @param[in] i2 the index of the second do loop
!>
!> @param[in] n2 the end of the second do loop
!>
!> @warning This function assumes that all indexes go from 1 to n (inclusive)
!>
ELEMENTAL FUNCTION func_overall_index_2loops(i1, i2, n2) RESULT(ans)
USE ISO_FORTRAN_ENV
IMPLICIT NONE
! Declare inputs/outputs ...
INTEGER(kind = INT64), INTENT(in) :: i1
INTEGER(kind = INT64), INTENT(in) :: i2
INTEGER(kind = INT64), INTENT(in) :: n2
INTEGER(kind = INT64) :: ans
ans = i2 + n2 * (i1 - 1_INT64)
END FUNCTION
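! Illustrative check with n2 = 4:
!   (i1,i2) = (1,1) -> 1, (1,4) -> 4, (2,1) -> 5, (3,2) -> 10
! i.e. the two nested loops are flattened in row-major order.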
|
{"hexsha": "b9698895e529b59eac695d871a80736ca3fee923", "size": 866, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "mod_safe/func_overall_index/func_overall_index_2loops.f90", "max_stars_repo_name": "Guymer/fortranlib", "max_stars_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-28T02:05:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-16T16:50:21.000Z", "max_issues_repo_path": "mod_safe/func_overall_index/func_overall_index_2loops.f90", "max_issues_repo_name": "Guymer/fortranlib", "max_issues_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-06-17T16:49:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T18:47:36.000Z", "max_forks_repo_path": "mod_safe/func_overall_index/func_overall_index_2loops.f90", "max_forks_repo_name": "Guymer/fortranlib", "max_forks_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-11T04:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-11T04:51:33.000Z", "avg_line_length": 34.64, "max_line_length": 87, "alphanum_fraction": 0.5288683603, "num_tokens": 218}
|
import LinearAlgebra.dot
import GeometryBasics.Point
import Random.GLOBAL_RNG
export random_vector_matrix, perlin_noise
"""
random_vector_matrix([rng,] rows, cols)
Produce a matrix with the given number of rows and columns, in which every
entry is a random unit vector. If provided, the given random number generator
is used to determine the vectors.
"""
function random_vector_matrix(rng, rows, cols)
map(frac_rotation.(rand(rng, rows, cols))) do rot
rot * unitvec
end
end
function random_vector_matrix(rows, cols)
random_vector_matrix(GLOBAL_RNG, rows, cols)
end
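# `smoothstep` below blends v1 and v2 with Perlin's quintic fade curve
# 6w^5 - 15w^4 + 10w^3, whose first and second derivatives vanish at
# w = 0 and w = 1, so the interpolation shows no visible grid seams.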
function smoothstep(v1, v2, w)
w = clamp(w, 0., 1.)
w = 6. * w^5 - 15. * w^4 + 10. * w^3
(1. - w) * v1 + w * v2
end
"""
perlin_noise(control_points, point)
Compute the noise gradient for the given point, given a grid of control points.
"""
function perlin_noise(control_points, point::Point)
rows, cols = size(control_points)
cell = convert.(Int, (floor.(point)))
xi, yi = cell
offset = point - cell
xf, yf = offset
n00 = dot(control_points[xi % rows + 1, yi % cols + 1],
Point(xf, yf))
n01 = dot(control_points[xi % rows + 1, (yi + 1) % cols + 1],
Point(xf, yf - 1))
n10 = dot(control_points[(xi + 1) % rows + 1, yi % cols + 1],
Point(xf - 1, yf))
n11 = dot(control_points[(xi + 1) % rows + 1, (yi + 1) % cols + 1],
Point(xf - 1, yf - 1))
smoothstep(
smoothstep(n00, n01, yf),
smoothstep(n10, n11, yf),
xf
)
end
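# A minimal usage sketch (hypothetical sizes; `Point` comes from
# GeometryBasics as imported above):
#
#   grid = random_vector_matrix(8, 8)
#   value = perlin_noise(grid, Point(2.5, 3.25))
#
# The result blends the four corner-gradient dot products with the
# quintic fade applied in each axis.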
|
{"hexsha": "8b294a533be8d6da707ec7c15a761d02e223284f", "size": 1507, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/noise.jl", "max_stars_repo_name": "fonsp/PenPlots.jl", "max_stars_repo_head_hexsha": "f330f8014049fad0c915759181798f823dd42757", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-12-16T17:25:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T18:27:21.000Z", "max_issues_repo_path": "src/noise.jl", "max_issues_repo_name": "fonsp/PenPlots.jl", "max_issues_repo_head_hexsha": "f330f8014049fad0c915759181798f823dd42757", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-07T07:24:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T16:17:15.000Z", "max_forks_repo_path": "src/noise.jl", "max_forks_repo_name": "fonsp/PenPlots.jl", "max_forks_repo_head_hexsha": "f330f8014049fad0c915759181798f823dd42757", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-08T22:51:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T21:55:49.000Z", "avg_line_length": 25.9827586207, "max_line_length": 79, "alphanum_fraction": 0.6390179164, "num_tokens": 473}
|
function codepart(cnt, prevc)
    # emit a count prefix only for runs longer than one
    prefix = cnt > 1 ? string(cnt) : ""
    return prefix * prevc
end

function encode(s)
    prevc = length(s) > 0 ? s[1] : s
    coded = ""
    cnt = 0
    for c in s
        if c == prevc
            cnt = cnt + 1
        else
            # the run ended: flush it and start counting the new character
            coded = coded * codepart(cnt, prevc)
            cnt = 1
        end
        prevc = c
    end
    return coded * codepart(cnt, prevc)
end

function decode(s)
    numstr = ""
    decoded = ""
    for c in s
        if isnumeric(c)
            # collect consecutive digits so multi-digit counts survive
            numstr = numstr * c
        else
            cnt = isempty(numstr) ? 1 : parse(Int64, numstr)
            decoded = decoded * repeat(c, cnt)
            numstr = ""
        end
    end
    return decoded
end
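# Round-trip sanity check (illustrative):
#   encode("aaabccdddd") == "3ab2c4d"
#   decode("3ab2c4d") == "aaabccdddd"
#   decode(encode("a"^12)) == "a"^12   # multi-digit run lengths survive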
|
{"hexsha": "6e94bfd99e75414006ca21bb0b23e6b3d633cc59", "size": 1002, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "run-length-encoding/run-length-encoding.jl", "max_stars_repo_name": "stepinski/julia-in", "max_stars_repo_head_hexsha": "08c82e2de236ead3b1eb356d60a04c61e275d4fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-01T11:27:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T11:27:13.000Z", "max_issues_repo_path": "run-length-encoding/run-length-encoding.jl", "max_issues_repo_name": "stepinski/julia-init", "max_issues_repo_head_hexsha": "e70b08c72c4db3677483480e72a9f79827a41fff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run-length-encoding/run-length-encoding.jl", "max_forks_repo_name": "stepinski/julia-init", "max_forks_repo_head_hexsha": "e70b08c72c4db3677483480e72a9f79827a41fff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2692307692, "max_line_length": 56, "alphanum_fraction": 0.4590818363, "num_tokens": 272}
|
#if COMPILATION_INSTRUCTIONS
(echo "#include\""$0"\"" > $0x.cpp) && mpic++ -O3 -std=c++14 -Wall -Wextra -Wfatal-errors -D_TEST_MPI3_SHARED_COMMUNICATOR $0x.cpp -o $0x.x && time mpirun -n 3 $0x.x $@ && rm -f $0x.x $0x.cpp; exit
#endif
#ifndef MPI3_SHARED_COMMUNICATOR_HPP
#define MPI3_SHARED_COMMUNICATOR_HPP
#include "../mpi3/communicator.hpp"
#include "../mpi3/environment.hpp" // processor_name
//#include "/usr/src/kernels/4.18.16-300.fc29.x86_64/include/linux/getcpu.h"
#include<sched.h>
//#include<numa.h> // sudo dnf install numactl-devel
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
namespace boost{
namespace mpi3{
template<class T = void>
struct shared_window;
struct shared_communicator : communicator{
shared_communicator() = default;
shared_communicator(shared_communicator&&) = default;
shared_communicator(shared_communicator const&) = default;
private:
template<class T> static auto data_(T&& t){
using detail::data;
return data(std::forward<T>(t));
}
explicit shared_communicator(communicator&& c) : communicator(std::move(c)){}
explicit shared_communicator(communicator const& comm, int key = 0){
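		// MPI_COMM_TYPE_SHARED groups the ranks that can share memory windows,
		// typically all the ranks running on the same node.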
auto e = static_cast<enum error>(MPI_Comm_split_type(comm.get(), MPI_COMM_TYPE_SHARED, key, MPI_INFO_NULL, &impl_));
if(e != mpi3::error::success) throw std::system_error{e, "cannot split"};
name(mpi3::processor_name());
}
shared_communicator(communicator const& comm, mpi3::communicator_type t, int key = 0){
auto e = static_cast<enum error>(MPI_Comm_split_type(comm.get(), static_cast<int>(t), key, MPI_INFO_NULL, &impl_));
		if(e != mpi3::error::success) throw std::system_error{e, "cannot split"};
boost::uuids::uuid tag = boost::uuids::random_generator{}(); static_assert(sizeof(unsigned int)<=sizeof(boost::uuids::uuid), "!");
auto utag = reinterpret_cast<unsigned int const&>(tag);
this->broadcast_n(&utag, 1, 0);
auto Tag = std::to_string(utag);
std::string const& base = comm.name();
// !!! switch-case don't work here because in some MPI impls there are repeats !!!
if(communicator_type::shared==t){
#if __linux__
set_name(base+":core/pu" + std::to_string(::sched_getcpu())); //same as ::getcpu()
#else
set_name(base+":core/pu" + Tag);
#endif
}
else if(communicator_type::hw_thread==t) set_name(base+":hw_thread"+Tag);
else if(communicator_type::l1_cache ==t) set_name(base+":l1_cache" +Tag);
else if(communicator_type::l2_cache ==t) set_name(base+":l2_cache" +Tag);
else if(communicator_type::l3_cache ==t) set_name(base+":l3_cache" +Tag);
else if(communicator_type::socket ==t) set_name(base+":socket" +Tag);
else if(communicator_type::numa ==t) set_name(base+":numa" +Tag);
else if(communicator_type::board ==t) set_name(base+":board" +Tag);
		else if(communicator_type::host      ==t) set_name(base+":host"     +Tag);
else if(communicator_type::cu ==t) set_name(base+":cu" +Tag);
else if(communicator_type::cluster ==t) set_name(base+":cluster" +Tag);
}
/*
enum class communicator_type : int{
node = OMPI_COMM_TYPE_NODE,
hw_thread = OMPI_COMM_TYPE_HWTHREAD,
core = OMPI_COMM_TYPE_CORE,
l1_cache = OMPI_COMM_TYPE_L1CACHE,
l2_cache = OMPI_COMM_TYPE_L2CACHE,
l3_cache = OMPI_COMM_TYPE_L3CACHE,
socket = OMPI_COMM_TYPE_SOCKET,
numa = OMPI_COMM_TYPE_NUMA,
board = OMPI_COMM_TYPE_BOARD,
host = OMPI_COMM_TYPE_HOST,
cu = OMPI_COMM_TYPE_CU,
cpu = OMPI_COMM_TYPE_CU,
cluster = OMPI_COMM_TYPE_CLUSTER
};
*/
friend class communicator;
public:
shared_communicator& operator=(shared_communicator const& other) = default;
shared_communicator& operator=(shared_communicator&& other) = default;
inline shared_communicator split(int key) const{return split_shared(key);}
auto split(int color, int key) const{
return shared_communicator{communicator::split(color, key)};
}
template<class T = char>
shared_window<T> make_shared_window(mpi3::size_t size);
template<class T = char>
shared_window<T> make_shared_window();
};
inline shared_communicator communicator::split_shared(int key /*= 0*/) const{
return shared_communicator(*this, key);
}
inline shared_communicator communicator::split_shared(communicator_type t, int key /*= 0*/) const{
return shared_communicator(*this, t, key);
}
}}
#ifdef _TEST_MPI3_SHARED_COMMUNICATOR
#include "../mpi3/main.hpp"
#include "../mpi3/operation.hpp"
#include "../mpi3/shared_window.hpp"
#include<iostream>
namespace mpi3 = boost::mpi3;
using std::cout;
int mpi3::main(int, char*[], mpi3::communicator world){
auto numa = world.split_shared(communicator_type::numa); // fallback to world.split_shared() if OMPI is not available
auto win = numa.make_shared_window<int>(numa.rank()?0:1);
assert(win.base() != nullptr and win.size() == 1);
win.lock_all();
if(numa.rank() == 0){
*win.base() = 42;
win.sync();
}
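	// The zero-length message exchange below pairs rank 0 with every other rank,
	// ordering the win.sync() calls so rank 0's write is visible before the readers proceed.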
for(int j=1; j != numa.size(); ++j){
if(numa.rank()==0) numa.send_n((int*)nullptr, 0, j, 666);
else if(numa.rank()==j) numa.receive_n((int*)nullptr, 0, 0, 666);
}
if(numa.rank() != 0) win.sync();
// int l = *win.base();
win.unlock_all();
#if 0
auto win = node.make_shared_window<int>(node.rank()?0:1);
assert(win.base() != nullptr and win.size() == 1);
win.lock_all();
if(node.rank()==0){
*win.base() = 42;
win.sync();
}
for(int j=1; j != node.size(); ++j){
if(node.rank()==0) node.send_n((int*)nullptr, 0, j, 666);
else if(node.rank()==j) node.receive_n((int*)nullptr, 0, 0, 666);
}
if(node.rank() != 0) win.sync();
int l = *win.base();
win.unlock_all();
int minmax[2] = {-l,l};
// node.reduce_in_place_n(&minmax[0], 2, mpi3::max<>{}, 0);
node.all_reduce_n(&minmax[0], 2, mpi3::max<>{});
assert( -minmax[0] == minmax[1] );
#endif
return 0;
}
#endif
#endif
|
{"hexsha": "287c377a7a2b398404977b5db7e54b744febd198", "size": 5728, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "external_codes/mpi_wrapper/mpi3/shared_communicator.hpp", "max_stars_repo_name": "djstaros/qmcpack", "max_stars_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2", "max_stars_repo_licenses": ["NCSA"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "external_codes/mpi_wrapper/mpi3/shared_communicator.hpp", "max_issues_repo_name": "djstaros/qmcpack", "max_issues_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2", "max_issues_repo_licenses": ["NCSA"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2019-11-05T16:26:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T18:32:59.000Z", "max_forks_repo_path": "external_codes/mpi_wrapper/mpi3/shared_communicator.hpp", "max_forks_repo_name": "djstaros/qmcpack", "max_forks_repo_head_hexsha": "280f67e638bae280448b47fa618f05b848c530d2", "max_forks_repo_licenses": ["NCSA"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9268292683, "max_line_length": 197, "alphanum_fraction": 0.6998952514, "num_tokens": 1691}
|
"""
Keep different states of pipeline using reposition and position.
If we add preprocessing and postprocessing to pipeline steps, we can play with state and capture specific
inputs and outputs as separate elements of the state. In this example the final state elements are:
- 0: initial dataframe with two columns (subject and body)
- 1: target with infrequent classes filtered out after pruning
- 3: SVD of TF-IDF of email body truncated to 100 most significant components
- 4: subj and body merged together using MergeSubjectBody transformer, the input to SVC classifier
- 5: predictions on the training dataset
- 6: sparse matrix of TF-IDF from subject column
The element with index 2 is not present, because it would enter the SVC classifier as
the parameter sample_weights, which is not desired.
"""
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.calibration import CalibratedClassifierCV
from sklearn.base import BaseEstimator
from src.e2epipeline import E2EPipeline, Step, position, reposition, object_info
from src.custom_transformers import PruneHierarchicalTarget, InfrequentClassFilter
class MergeSubjectBody(BaseEstimator):
def __init__(self):
self.features_ = None
def fit(self, subject, body):
features_subj = [f"subject_{i+1:04d}" for i in range(subject.shape[1])]
features_body = [f"body_{i+1:04d}" for i in range(body.shape[1])]
self.features_ = features_subj + features_body
return self
def transform(self, subject, body):
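        # subject arrives as a sparse TF-IDF matrix and body as a dense SVD output;
        # densify subject so the two blocks can be stacked column-wise.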
merged = np.concatenate([subject.todense(), body], axis=1)
assert merged.shape[1] == len(self.features_)
return merged
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(module)s.%(funcName)s#%(lineno)d - %(message)s',
handlers=[
logging.StreamHandler(),
]
)
df = pd.read_csv('data/all_tickets_1000.csv')
pipeline = E2EPipeline([
Step('pruner', PruneHierarchicalTarget(
size_threshold=50
), postprocessing=position([4, 1])),
Step('icf', InfrequentClassFilter(
size_threshold=100
), postprocessing=position([4, 1])),
Step('tfsubj', TfidfVectorizer(
sublinear_tf=True,
min_df=5,
max_features=10,
norm='l2',
ngram_range=(1, 2),
stop_words='english',
), preprocessing=lambda s: {'raw_documents': s[4]['title'], 'y': s.get(1, None)},
postprocessing=position(6)),
Step('tfbody', TfidfVectorizer(
sublinear_tf=True,
min_df=5,
norm='l2',
ngram_range=(1, 2),
stop_words='english',
), preprocessing=lambda s: {'raw_documents': s[4]['body'], 'y': s.get(1, None)},
postprocessing=position(3)),
Step('svd', TruncatedSVD(
n_components=100
), preprocessing=reposition({3: 0}), postprocessing=position(3)),
Step('merge', MergeSubjectBody(),
preprocessing=reposition({6: 0, 3: 1}),
postprocessing=position(4)),
Step('svc', CalibratedClassifierCV(
base_estimator=svm.LinearSVC(),
cv=5
), preprocessing=reposition({4: 0, 1: 1}), postprocessing=position(5)),
])
x = df[['title', 'body']]
y = df['target_path']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
pipeline.fit(x_train, y_train)
logging.info(f"pipeline.state={object_info(pipeline.state)}")
y_pred = pipeline.predict(x_test)
logging.info(f"y_pred={object_info(y_pred)}")
logging.debug(f"features={pipeline.named_steps['merge'].features_}")
logging.debug("completed")
|
{"hexsha": "ca3c6b83c17a96c033200c8b1b619bb03bc322e3", "size": 3753, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/example_reposition.py", "max_stars_repo_name": "ftrojan/e2epipeline", "max_stars_repo_head_hexsha": "e337539010aa3128d021ebcb48a473c712b271b1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-30T15:54:30.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-30T15:54:30.000Z", "max_issues_repo_path": "examples/example_reposition.py", "max_issues_repo_name": "ftrojan/e2epipeline", "max_issues_repo_head_hexsha": "e337539010aa3128d021ebcb48a473c712b271b1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/example_reposition.py", "max_forks_repo_name": "ftrojan/e2epipeline", "max_forks_repo_head_hexsha": "e337539010aa3128d021ebcb48a473c712b271b1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.53, "max_line_length": 105, "alphanum_fraction": 0.7029043432, "include": true, "reason": "import numpy", "num_tokens": 935}
|
\documentclass[12pt,a4paper]{article}
\usepackage[]{graphicx}
\usepackage[]{color}
\usepackage{chngcntr}
\usepackage{pdfpages}
\usepackage{pdflscape}
\usepackage{subfig}
\usepackage[backend=biber]{biblatex}
\addbibresource{biblio_supmat.bib}
\usepackage[
colorlinks,
citecolor=blue,
urlcolor=cyan,
bookmarks=true,
hypertexnames=true
]{hyperref}
\usepackage{alltt}
\usepackage{arxiv}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
\usepackage{url} % simple URL typesetting
\usepackage{booktabs} % professional-quality tables
\usepackage{amsfonts} % blackboard math symbols
\usepackage{nicefrac} % compact symbols for 1/2, etc.
\usepackage{microtype} % microtypography
\usepackage{setspace}
\renewcommand{\theequation}{S\arabic{equation}}
% Stuff for the table
\usepackage{array, booktabs, caption, pdflscape, makecell, siunitx, multirow, caption}
\renewcommand\theadfont{\bfseries}
\setcounter{table}{0}
\renewcommand{\thetable}{S\arabic{table}}
\setlength\extrarowheight{7pt}
\renewcommand{\thesection}{S\arabic{section}}
%\renewcommand{\thesubsection}{\thesubsection.S\arabic{subsection}}
% maxwidth is the original width if it is less than linewidth
% otherwise use linewidth (to make sure the graphics do not exceed the margin)
\makeatletter
\def\maxwidth{ %
\ifdim\Gin@nat@width>\linewidth
\linewidth
\else
\Gin@nat@width
\fi
}
\makeatother
\definecolor{fgcolor}{rgb}{0.345, 0.345, 0.345}
\newcommand{\hlnum}[1]{\textcolor[rgb]{0.686,0.059,0.569}{#1}}%
\newcommand{\hlstr}[1]{\textcolor[rgb]{0.192,0.494,0.8}{#1}}%
\newcommand{\hlcom}[1]{\textcolor[rgb]{0.678,0.584,0.686}{\textit{#1}}}%
\newcommand{\hlopt}[1]{\textcolor[rgb]{0,0,0}{#1}}%
\newcommand{\hlstd}[1]{\textcolor[rgb]{0.345,0.345,0.345}{#1}}%
\newcommand{\hlkwa}[1]{\textcolor[rgb]{0.161,0.373,0.58}{\textbf{#1}}}%
\newcommand{\hlkwb}[1]{\textcolor[rgb]{0.69,0.353,0.396}{#1}}%
\newcommand{\hlkwc}[1]{\textcolor[rgb]{0.333,0.667,0.333}{#1}}%
\newcommand{\hlkwd}[1]{\textcolor[rgb]{0.737,0.353,0.396}{\textbf{#1}}}%
\let\hlipl\hlkwb
\usepackage{framed}
\makeatletter
\newenvironment{kframe}{%
\def\at@end@of@kframe{}%
\ifinner\ifhmode%
\def\at@end@of@kframe{\end{minipage}}%
\begin{minipage}{\columnwidth}%
\fi\fi%
\def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep
\colorbox{shadecolor}{##1}\hskip-\fboxsep
% There is no \\@totalrightmargin, so:
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}%
\MakeFramed {\advance\hsize-\width
\@totalleftmargin\z@ \linewidth\hsize
\@setminipage}}%
{\par\unskip\endMakeFramed%
\at@end@of@kframe}
\makeatother
\definecolor{shadecolor}{rgb}{.97, .97, .97}
\definecolor{messagecolor}{rgb}{0, 0, 0}
\definecolor{warningcolor}{rgb}{1, 0, 1}
\definecolor{errorcolor}{rgb}{1, 0, 0}
\newenvironment{knitrout}{}{} % an empty environment to be redefined in TeX
\newcommand{\I}{\mathbb{I}}
\title{Long-term demographic dynamics of a keystone scavenger disrupted by
human-induced shifts in food availability}
\author{
Pablo Almaraz
\\
Department of Ecology and Coastal Management, \\
ICMAN-CSIC, \\
Campus Río San Pedro, 11510, Puerto Real, Spain.\\ \texttt{\href{mailto:pablo.almaraz@csic.es}{\nolinkurl{pablo.almaraz@csic.es}}} \\
\And
Félix Martínez
\\
Sociedad para la Conservación de los Vertebrados, \\
Avda. de los Pinos 17, 58B, ES-28914 Leganés, Madrid, Spain. \\
\texttt{} \\
\And
Zebensui Morales-Reyes
\\
Departamento de Biología Aplicada, \\
Universidad Miguel Hernández \\
Avda. de la Universidad, s/n, 03202 Elche, Alicante, Spain. \\
\texttt{} \\
\And
José A. Sánchez-Zapata
\\
Departamento de Biología Aplicada \\
Universidad Miguel Hernández \\
Avda. de la Universidad, s/n, 03202 Elche, Alicante, Spain. \\
\texttt{} \\
\And
Guillermo Blanco
\\
Department of Evolutionary Ecology \\
MNCN-CSIC \\
José Gutiérrez Abascal 2, 28006 Madrid, Spain. \\
\texttt{} \\
}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
\large{\textbf{Supporting Information}. Almaraz, P., Martínez, F., Morales-Reyes, Z., Sánchez-Zapata, J. A., Blanco, G. 2021. Long-term demographic dynamics of a keystone scavenger disrupted by human-induced shifts in food availability. Ecological Applications.}
\vspace{0.1in}
%\maketitle
%\tableofcontents
%\counterwithin{figure}{section}
%\newpage
\vspace{0.7in}
\def\tightlist{}
\section{The inverse Bayesian demographic model}
Here we outline in more detail the mathematical and statistical construction of the inverse state-space, stage-structured density-dependent demographic model (hereafter S4D3M). A life-cycle graph of the model is represented in Figure \ref{fig:FigLifeCycle}.\\
\renewcommand{\thefigure}{S1}
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=10cm]{figs/FigS1.pdf}
\end{center}
\caption{Life-cycle graph for the Eurasian griffon vulture with three sequential life stages (fledglings, sub-adults and adults) linked by thick gray arrows, and five density-dependent vital rates. The true (latent) abundances of fledglings ($n_{f,t}$), sub-adults ($n_{s,t}$) and adults ($n_{a,t}$) are linked to the observed abundances (the data; $y_{f,t}$, $y_{s,t}$, $y_{a,t}$, respectively) through an observation model (open arrows). This accounts for errors in the assignment of a demographic stage to the monitored individuals. An average fledgling enters the sub-adult stage with a probability ($G_{f}$; fledgling recruitment). Once in that stage, it may either survive as a sub-adult with a probability ($S_{s}$; sub-adult survival), or enter the adult population with a transition probability ($G_{s}$; sub-adult recruitment). Finally, an adult may survive with a probability ($S_{a}$; adult survival) and breed with an average fecundity ($F$; fecundity), which closes the graph. A stochastic term including both environmental ($\sigma_{i}^2$) and demographic stochasticity ($\delta_{i}^2$) impacts each life-stage. $ N_{t-1} $ is the total population size at time $t-1$.}
\label{fig:FigLifeCycle}
\end{figure}
\subsection{The full state-space specification}
The state-space model used in the present paper is composed of a set of state equations, representing the temporal dynamics of each stage according to a set of vital rates and both demographic and environmental stochastic noise. In full matrix form, these state equations can be written as:
\begin{align}\label{FullModel}
\left(\begin{array}{c}{n_{f, t}} \\ {n_{s, t}} \\ {n_{a, t}}\end{array}\right)=
\left(\begin{array}{ccc}{0} & {0} & {\frac{F}{1+\beta_{1} N_{t-1}}} \\
{\frac{G_{f}}{1+\beta_{2} N_{t-1}}} & {\frac{S_{s}}{1+\beta_{3} N_{t-1}}} & {0} \\
{0} & {\frac{G_{s}}{1+\beta_{4} N_{t-1}}} & {\frac{S_{a}}{1+\beta_{5} N_{t-1}}}\end{array}\right)
\times\left(\begin{array}{c}{n_{f, t-1}} \\ {n_{s, t-1}} \\ {n_{a, t-1}}\end{array}\right)+ \notag \\
MVN\left(0,\left(\begin{array}{ccc}{\sigma_{f}^{2}} & {\zeta_{f, s}} & {\zeta_{f, a}} \\
{\zeta_{s, f}} & {\sigma_{s}^{2}} & {\zeta_{s, a}} \\
{\zeta_{a, f}} & {\zeta_{a, s}} & {\sigma_{a}^{2}}\end{array}\right)+\left(\begin{array}{ccc}{\frac{\delta_{f}^{2}}{n_{f, t}}} & {0} & {0} \\
{0} & {\frac{\delta_{s}^{2}}{n_{s, t}}} & {0} \\
{0} & {0} & {\frac{\delta_{a}^{2}}{n_{a, t}}}\end{array}\right)\right)
\end{align}
The observations made on each stage at each time $t$, $y_{f,t}, y_{s,t}$ and $y_{a,t}$, are linked to the latent states of the set of state equations in \ref{FullModel}, $n_{f,t}, n_{s,t}$ and $n_{a,t}$, through a second set of observation equations:
\begin{equation}\label{ObsEqns}
\begin{array}
{l}{y_{f, t} \sim \mathcal{P}(n_{f, t})} \\
{y_{s, t} \sim \mathcal{P}(n_{s, t})} \\
{y_{a, t} \sim \mathcal{P}(n_{a, t})}
\end{array}
\end{equation}
Finally, the model is fully specified with a set of initial conditions for each latent state. In this case, the initial observations for each stage are used as the priors for the initial states:
\begin{equation}\label{InitStat}
\begin{array}
{l}{n_{f, 1} \sim \mathcal{P}(y_{f, 1})} \\
{n_{s, 1} \sim \mathcal{P}(y_{s, 1})} \\
{n_{a, 1} \sim \mathcal{P}(y_{a, 1})}
\end{array}
\end{equation}
Overall, the set of equations in \ref{FullModel}-\ref{InitStat} represents a non-linear, non-Gaussian state-space model \cite{Durbin2001}. The S4D3M is non-linear due to the density-dependent specification of the vital rates, and it is non-Gaussian due to the Poisson distribution specified for the observation equations and initial states.\\
\subsection{Prior specification}
We performed a literature review to assemble a database of natural history data on vital rate estimates of wild Eurasian griffon vultures. The data assembled from the literature are shown in Table \ref{t_sim}. We used this information to construct weakly informative priors for each vital rate. The average meta-analytic adult survival compiled from the literature, $S_{a}$, as well as the meta-analytic variance in adult survival, $\sigma^2S_{a}$ (see Table \ref{t_sim}), were used as the mean and variance for the prior distribution. To allow the Bayesian MCMC scheme to explore a wider range of the posterior distribution, a 10-fold increased prior variance was used. Thus, taking into account that survival is a probability, the distribution takes the form of a normal distribution truncated at (0, 1), $S_{a} \sim \textnormal{N}(\bar S_{a}, 10 \times \sigma^2S_{a})T(0,1)$, where $T(a, b)$ denotes truncation at $a$ and $b$. This strategy of prior specification was used for the rest of the vital rates: the prior for fledgling recruitment ($G_{f}$) was specified as $G_{f} \sim \textnormal{N}(\bar G_{f}, 10 \times \sigma^2G_{f})T(0,1)$. The brood size of the Eurasian griffon vulture is a single fledgling, so a normal distribution truncated at 0 and 1 was also implemented for fecundity. We took advantage of the fecundity data compiled in our study area during the 42-year period to construct the prior for fecundity; since the maximum number of fledglings per breeding pair is 1, the prior for fecundity was specified as $F \sim \textnormal{N}(\bar{F}, \sigma^2{F})T(0,1)$. Finally, sub-adult recruitment ($G_{s}$) and survival ($S_{s}$) represent transition probabilities from the same stage; that is, assuming no emigration, a sub-adult can either remain a sub-adult or survive to the adult stage. Therefore, the two rates are not independent, since their sum must equal 1, and they are constrained to be positive. A Dirichlet(1,1) distribution, which is the continuous multivariate generalization of the beta distribution, is traditionally used as a prior for these rates (e.g., \cite{Gross2002}). In our case, we take advantage of the natural history data on sub-adult survival available in the literature (Table \ref{t_sim}) and implement a truncated normal prior distribution for sub-adult survival, $S_{s} \sim \textnormal{N}(\bar S_{s}, 10 \times \sigma^2S_{s})T(0,1)$. We thus model sub-adult recruitment as $G_{s} = 1 - S_{s}$.\\
The covariance matrix for environmental noise in $\Sigma_{t}$ (C) was modeled with a scaled inverse Wishart distribution (\cite{Huang2013}), $C^{-1} \sim \textnormal{SWishart}(\zeta, S)$. While the inverse Wishart distribution is the conjugate prior for the covariance matrix of a multivariate normal distribution (\cite{Gelman2014}), it is known to severely constrain the variance parameters. In contrast, the scaled inverse Wishart, also known as the Huang-Wand distribution (\cite{Huang2013}), allows for the separate estimation of a diagonal matrix of scale parameters ($\zeta$) and an unscaled covariance matrix ($S$) following the Wishart distribution. Interestingly, with this distribution the standard deviation and correlation parameters are marginally non-informative, in contrast to the inverse Wishart (see \cite{Huang2013}), while remaining conditionally conjugate on the expanded space. The same prior scale ($\boldsymbol{\zeta} = \I$) was placed on all the elements of the variance-covariance matrix. The number of degrees of freedom was set to the number of stages, $S$ = 3, which is the value placing marginal uniform distributions on the correlation parameters of the covariance matrix ($S \sim \textnormal{U}(0,1)$). Finally, prior uniform distributions were placed on the standard deviations of demographic noise for each stage, $\delta_{i} \sim \textnormal{U}(0,10)$. \\
\section{Evaluating the evidence for density-dependence in the life-cycle}
The monitored population of the Eurasian griffon vulture showed a steadily increasing trend during the 42-year period, only interrupted by the BSE outbreak and the corresponding sanitary legislation. However, given the long time period considered, it is highly likely that density-dependent processes were operating on some vital rates. The state-space model constructed (Eqns. \ref{FullModel}-\ref{InitStat}) is relatively complex in terms of the number of parameters, with non-linear density-dependent terms in the demographic transitions among stages. To control for potential issues with MCMC convergence and posterior cross-correlations among parameters, we used a regularization (sparsity-inducing) scheme on the density-dependent parameters impacting each vital rate, $\beta_{i}$. In brief, with regularization methods it is possible to set some parameters to 0 during the MCMC simulation if their effect on the posterior probability is negligible, while simultaneously letting the simulation scheme freely estimate those parameters with non-negligible effects on the posterior (see \cite{OHara2009} for an introduction to regularization methods, and \cite{Almaraz2011,Mutshinda2011} for ecological examples). \\
Here we use a Stochastic Search Variable Selection scheme (SSVS; \cite{George1993}) to automatically set close to 0 those density-dependent parameters with a negligible effect on demography during the MCMC simulation. Specifically, we used the following spike-and-slab prior distribution for the $\beta_{i}$'s in the demographic model:
\begin{equation}\label{dd_dist}
\beta_{i} \sim \textnormal{N}(0, \tau_{i}^2)T(0,\infty)
\end{equation}
Note that this parameter can only take positive values, so it is truncated at 0. The hyperprior for the variance of the parameter $\beta_{i}$, $\tau_{i}^2$, is further modeled as:
\begin{equation}\label{ssvs}
\tau_{i}^2 = (1 - p_{i}) \times \sigma_{0}^2 + p_{i} \times \sigma_{1}^2
\end{equation}
where $p_{i}$ is the probability that a given density-dependent parameter is included in the model, while the constants $\sigma_{0}^2$ and $\sigma_{1}^2$ are the variances when the parameter is either not included or included in the model, respectively. We set these variances to $\sigma_{0}^2 = 10^{-4}$ and $\sigma_{1}^2 = 0.1$ (see below). Finally, the probabilities of inclusion of a given parameter, $p_{i}$, are given a Bernoulli distribution,
\begin{equation}\label{dist_bern}
p_{i} \sim \textnormal{Bern}(\rho)
\end{equation}
where $\rho$ is the prior probability of inclusion of the density-dependent parameters. Recall that a Bernoulli distribution can only take two values, either 0 or 1. In previous approaches a given constant was used as the prior $\rho$ (e.g., 0.2; see \cite{Almaraz2011,Mutshinda2011}). Here, we used a weakly informative beta distribution for $\rho$ to let the model learn the degree of sparsity during the posterior simulation:
\begin{equation}\label{dist_beta}
\rho \sim \textnormal{beta}(2,2)
\end{equation}
The beta distribution is a family of distributions defined on the continuous interval [0, 1]. They are parameterized by two shape parameters, and the family is the conjugate prior of the Bernoulli distribution \cite{Gelman2014}. In our case, a $\textnormal{beta}(2,2)$ is weakly informative and centered on 0.5; this stabilized the posterior simulation. In contrast, one can use a $\textnormal{beta}(1,1)$ as a completely uninformative distribution. Indeed, this latter distribution is just a uniform probability distribution defined on the [0, 1] interval.\\
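For reference (a standard property, not derived here), the $\textnormal{beta}(2,2)$ density is $f(\rho) = 6\rho(1-\rho)$, which vanishes at 0 and 1 and peaks at $\rho = 0.5$, whereas the $\textnormal{beta}(1,1)$ density is flat, $f(\rho) = 1$, on [0, 1].\\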
Overall, if the probability of inclusion $p_{i}$ of a given density-dependent parameter $\beta_{i}$ is 1 during the simulation, according to some prior $\rho$, this means that this parameter is active, so its variance (eqn. \ref{ssvs}) becomes $\tau_{i}^2 = p_{i} \times \sigma_{1}^2 = 0.1$, and the prior for the density-dependent parameter is converted into $\beta_{i} \sim \textnormal{N}(0, 0.1)T(0,\infty)$. This is the slab. On the other hand, if the probability of inclusion $p_{i}$ of a given density-dependent parameter $\beta_{i}$ is 0, this parameter is inactivated because, in this case, $\tau_{i}^2 = (1 - p_{i}) \times \sigma_{0}^2 = 10^{-4}$, and then the prior for the density-dependent parameter is converted into $\beta_{i} \sim \textnormal{N}(0, 10^{-4})T(0,\infty)$. This is the spike. During the posterior simulation, those density-dependent parameters $\beta_{i}$ significantly affecting the posterior distribution will more often be in an activated state. This means that the posterior probability of inclusion of a given parameter in the model is simply the proportion of times that the parameter was activated during the MCMC simulation (that is, the number of times that $p_{i} = 1$). This posterior probability can be compared to the prior probability of inclusion, $\rho$, which is common to all parameters (more specifically, to the posterior of the prior inclusion probability). This is very convenient, because a Bayes factor, $\textnormal{BF}_{i}$, \cite{Kass1995,Almaraz2011,Mutshinda2011} can then be calculated for every density-dependent parameter in the life-cycle:
\begin{equation}
\textnormal{BF}_{i} = \frac{p_{i}}{1-p_{i}} \times \frac{1-\rho}{\rho}
\end{equation}
According to the Kass-Raftery scale \cite{Kass1995}, it is possible to evaluate the evidence in favor of the inclusion of a given density-dependent parameter in the demographic model based on its Bayes factor.\\
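As a quick arithmetic check (using the values reported below in Table \ref{t_dd} for sub-adult survival before the BSE outbreak, $p_{i} = 0.466$ and $\rho = 0.280$):
\[
\textnormal{BF} = \frac{0.466}{1-0.466} \times \frac{1-0.280}{0.280} \approx 0.873 \times 2.571 \approx 2.24,
\]
which recovers, up to rounding, the Bayes factor of 2.248 reported in the table, falling in the ``barely worth mentioning'' band (1 to 3.2) of the scale.\\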
\section{Variance component estimation}
Given the availability of a 42-year database of time-series abundance data it is straightforward to estimate the relative contribution of each vital rate and stochastic component in the stage-structured model to the observed temporal variance in population dynamics (see also \cite{Almaraz2011,Mutshinda2011}). A variance component was derived for the temporal variance in the abundance of each demographic stage \textit{i} ($var_{i}$). Denoting the temporal variance of the demographic stage \textit{i} as $\sigma^2_{n_{i}}$, and the temporal variance of the total population size as $\sigma^2_{N}$, this partitioning is:
\begin{equation}\label{VarCompSingle}
\begin{array}{l}
{var_{f} = \frac{F^{2}\sigma^2_{n_{a}}}{\left(1 + \beta_{1}\sigma^2_{N}\right)^2} + \sigma_{f}^{2}+\frac{\delta_{f}^{2}}{\sigma^2_{n_{f}}}} \\
{var_{s} = \frac{G_{f}^{2}\sigma^2_{n_{f}}}{\left(1 + \beta_{2}\sigma^2_{N}\right)^2} + \frac{S_{s}^{2}\sigma^2_{n_{s}}}{\left(1 + \beta_{3}\sigma^2_{N}\right)^2} + \sigma_{s}^{2}+\frac{\delta_{s}^{2}}{\sigma^2_{n_{s}}}} \\
{var_{a} = \frac{G_{s}^{2}\sigma^2_{n_{s}}}{\left(1 + \beta_{4}\sigma^2_{N}\right)^2} + \frac{S_{a}^{2}\sigma^2_{n_{a}}}{\left(1 + \beta_{5}\sigma^2_{N}\right)^2} + \sigma_{a}^{2} + \frac{\delta_{a}^{2}}{\sigma^2_{n_{a}}}}
\end{array}
\end{equation}
for the fledgling (\textit{f}), sub-adult (\textit{s}) and adult (\textit{a}) stages, respectively. The total temporal variance of the stage-structured population, $Var_{T}$, is just the sum of the above components. This total variance can thus be partitioned into the additive variance components of each demographic stage of the Lefkovitch matrix and both the environmental and demographic stochastic components in eqn. \ref{FullModel}:
\begin{align}\label{VarCompFull}
Var_{T}=
\underbrace{\frac{F^{2}\sigma^2_{n_{a}}}{\left(1 + \beta_{1}\sigma^2_{N}\right)^2}}_{\text{Fecundity}} +
\underbrace{\frac{G_{f}^{2}\sigma^2_{n_{f}}}{\left(1 + \beta_{2}\sigma^2_{N}\right)^2}}_{\text{Fledgling recruitment}} +
\underbrace{\frac{S_{s}^{2}\sigma^2_{n_{s}}}{\left(1 + \beta_{3}\sigma^2_{N}\right)^2}}_{\text{Sub-adult survival}} +
\underbrace{\frac{G_{s}^{2}\sigma^2_{n_{s}}}{\left(1 + \beta_{4}\sigma^2_{N}\right)^2}}_{\text{Sub-adult recruitment}} +
\notag\\
\underbrace{\frac{S_{a}^{2}\sigma^2_{n_{a}}}{\left(1 + \beta_{5}\sigma^2_{N}\right)^2}}_{\text{Adult survival}} +
\underbrace{\sum_{i=f}^{a} \sigma_{i}^{2}}_{\text{Environmental stochasticity}} +
\underbrace{\sum_{i=f}^{a} \frac{\delta_{i}^{2}}{\sigma^2_{n_{i}}}}_{\text{Demographic stochasticity}}
\end{align}
\vspace{0.2in}
\section{Equilibrium population size and stability of the density-dependent model}
The transient rate of increase of a stage-structured population growing according to the S4D3M in eqns. \ref{FullModel}-\ref{InitStat} is defined as the rate at which the population grows when it is not at equilibrium; that is, whenever $N_{t} \neq N^* \quad \forall t$. When the population is at equilibrium, the population growth rate is, by definition, 0, since $N_{t + \Delta t} = N_{t} \quad \forall \Delta t, t$. If a population persists indefinitely in a stochastic environment, the asymptotic, long-term rate of increase ($\lambda_{s}$, see \cite{Caswell2001}) is a probability distribution with an average around 1, $\bar \lambda_{s} \approx 1$. In particular, the asymptotic long-term population growth rate in stochastic environments is the logarithm of the asymptotic rate of increase ($\log \lambda_{s} \approx 0$) for persistent populations (see \cite{Caswell2010,Caswell2019}), but we focus here on the asymptotic rate of increase. Therefore, in a population regulated by density dependence and growing in a stochastic environment, stability is achieved whenever $\bar \lambda_{s} \approx 1$. Then, the frequency distribution of population sizes for which $\bar \lambda_{s} \approx 1$ in the long term is, indeed, the population at equilibrium, $N^*$. Note that we are always making a fundamental distinction between deterministic environments with analytic solutions (in which the asymptotic rate of increase $\lambda$ is exactly 1), and stochastic environments with inverse numeric solutions, in which $\lambda_{s}$ is a probability distribution centered around 1. \\
Analytically, it is relatively straightforward to estimate the population at equilibrium, $N^*$, from the demographic model in eqns. \ref{FullModel}-\ref{InitStat}. However, we take advantage of the inverse state-space Bayesian approach and calculate $N^*$ numerically: we first fitted the S4D3M to the stage-structured time series for each period, and then simulated the fitted model for an additional 200-year period into the future until the population stabilized. The equilibrium population size $N^*$ for each time period is then the posterior simulated distribution for the total population size after stabilization. These simulated posteriors are shown in Figure \ref{fig:Neqs}.\\
The posterior distributions of the asymptotic rates of increase ($\lambda_{s}$) of the stage-structured populations evaluated at the equilibrium $N^*$ are shown in Figure 3 of the accompanying paper. Interestingly, in spite of the impact of different sources of observational and process stochasticity on the dynamics, the distributions are centered around 1, which suggests that the stage-structured Eurasian Griffon vulture population is stabilized in the long-term. \\
Finally, it is interesting to evaluate the dynamical stability (stability in the sense of Lyapunov, \cite{Elaydi2005}) of the S4D3M evaluated at the equilibrium $N^*$. Under some assumptions (see \cite{Caswell2001,Elaydi2005}) it is possible to use a version of the S4D3M linearized around the equilibrium $N^*$ to assess the local, linear stability of the dynamics at this point. This is achieved through the construction of the Jacobian matrix, which is obtained by taking the partial derivatives of all the functions in the model with respect to the state variables (see \cite{Caswell2001} and \cite{Otto2011} for a gentle introduction). In our case, the Jacobian matrix is derived by Taylor-expanding the Lefkovitch matrix in eqn. \ref{FullModel} around the equilibrium population size $N^*$. The Jacobian matrix (\textbf{J}) can then be written as
\vspace{0.2in}
\begin{equation}\label{Jacobian}
\mathbf{J} = \left(
\begin{array}{ccc}
{0} & {0} & {\frac{-F \beta_{1}}{(1 + \beta_{1} N^*)^2}} \\
{\frac{-G_{f} \beta_{2}}{(1 + \beta_{2} N^*)^2}} & {\frac{-S_{s} \beta_{3}}{(1 + \beta_{3} N^*)^2}} & {0} \\
{0} & {\frac{-G_{s} \beta_{4}}{(1 + \beta_{4} N^*)^2}} & {\frac{-S_{a} \beta_{5}}{(1 + \beta_{5} N^*)^2}}
\end{array}\right)
\end{equation}
\vspace{0.2in}
The stability of the equilibrium $N^*$ is determined by the eigenvalue spectrum of the Jacobian matrix \ref{Jacobian}. Given that the S4D3M is a discrete-time model, the dynamic stability criterion states that the equilibrium $N^*$ of the linearized model is \textbf{asymptotically stable} iff the modulus of the dominant eigenvalue of the Jacobian (the spectral radius) is strictly smaller than 1 \cite{Elaydi2005}. In this case, a trajectory perturbed away from $N^*$ will eventually return to it, and the equilibrium is said to be asymptotically stable. In contrast, if the spectral radius is $>1$, the perturbation will grow indefinitely over time and the equilibrium is said to be \textbf{asymptotically unstable}.\\
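As an illustration with hypothetical values, a spectral radius of $0.3$ would imply that a small perturbation $\epsilon_{0}$ away from $N^*$ decays roughly geometrically, $\|\epsilon_{t}\| \approx 0.3^{t}\,\|\epsilon_{0}\|$, returning the population to equilibrium within a few time steps; a spectral radius of $1.2$ would instead amplify the perturbation by about 20\% per time step.\\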
Again, we take advantage of the inverse state-space Bayesian approach and construct a posterior distribution for the spectral radius during each time period, incorporating all the uncertainty arising from observation and process error. These distributions are shown in Figure \ref{fig:Asympt_resil}. Interestingly, in all time periods the spectral radius (the modulus of the dominant eigenvalue of the Jacobian) is very small ($\ll 1$), which suggests that the equilibrium $N^*$ is dynamically highly stable across time.
\renewcommand{\thefigure}{S2}
\begin{figure}[htbp]
\centering
\subfloat[\textbf{Before} the BSE outbreak] {{\includegraphics[width=0.3\linewidth]{../output/PreBSE/N_equil/Empirical_Dist_Neq} }}%
\qquad
\subfloat[\textbf{During} the BSE outbreak] {{\includegraphics[width=0.3\linewidth]{../output/BSE/N_equil/Empirical_Dist_Neq} }}%
\qquad
\subfloat[\textbf{After} the BSE outbreak] {{\includegraphics[width=0.3\linewidth]{../output/PostBSE/N_equil/Empirical_Dist_Neq} }}%
\caption{Posterior distributions of the density at equilibrium ($N^*$) before (\textbf{a}), during (\textbf{b}) and after (\textbf{c}) the BSE outbreak.}%
\label{fig:Neqs}%
\end{figure}
\renewcommand{\thefigure}{S3}
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=10cm]{figs/FigS3.pdf}
\end{center}
\caption{Posterior distributions of the spectral radius, the modulus of the dominant eigenvalue of the Jacobian (eqn. \ref{Jacobian}) of the S4D3M evaluated at the equilibrium $N^*$, for the three time periods considered. PreBSE: before the BSE outbreak (1978-2001); BSE: during the BSE outbreak (2002-2011); and PostBSE: after the BSE outbreak (2012-2020).}
\label{fig:Asympt_resil}
\end{figure}
\section{Testing the performance of the model on simulated data}
The stage-structured demographic model we propose is structurally identifiable \cite{Bellman1970,Cole2020,Villaverde2016}: the Lefkovitch matrix is full-rank, and all of the parameters are conditioned on data. This means that, given sufficient data, all the demographic parameters, process variances and latent states can be recovered with accuracy. However, in real situations there might be issues with parameter identifiability due to, e.g., low sample sizes \cite{Cole2020}. In contrast to structural identifiability, this is called practical non-identifiability. To evaluate this potential problem, we build two synthetic scenarios in which a large number of stochastic time series are generated from models with known demographic parameters and stochastic effects. The S4D3M model is then fitted to each of these synthetic time series using the same Bayesian approach described in the accompanying paper. If the fitted parameter values correlate strongly with the ground-truth values, this would be strong evidence that the S4D3M is strongly identifiable in practice.\\
In the first scenario, we consider a process model with density-independent demographic dynamics with the following parameter values:
\begin{align}\label{FullModel_DI}
\left(\begin{array}{c}
{n_{f, t}} \\
{n_{s, t}} \\
{n_{a, t}}
\end{array}\right)=
\left(\begin{array}{ccc}{0} & {0} & {0.5} \\
{0.25} & {0.4} & {0} \\
{0} & {0.6} & {0.95}
\end{array}\right)
\times\left(\begin{array}{c}
{n_{f, t-1}} \\
{n_{s, t-1}} \\
{n_{a, t-1}}
\end{array}\right)+ \notag \\
MVN\left(0,\left(\begin{array}{ccc}
{62.5} & {-37.5} & {-12.5} \\
{-37.5} & {62.5} & {-12.5} \\
{-12.5} & {-12.5} & {62.5}
\end{array}\right)+\left(\begin{array}{ccc}
{\frac{25}{n_{f, t}}} & {0} & {0} \\
{0} & {\frac{25}{n_{s, t}}} & {0} \\
{0} & {0} & {\frac{25}{n_{a, t}}}\end{array}\right)\right)\end{align}
In the second scenario, we consider a process model with density-dependent fecundity:
\begin{align}\label{FullModel_DD}
\left(\begin{array}{c}
{n_{f, t}} \\
{n_{s, t}} \\
{n_{a, t}}
\end{array}\right)=
\left(\begin{array}{ccc}{0} & {0} & {\frac{0.5}{1+0.001N_{t-1}}} \\
{0.25} & {0.4} & {0} \\
{0} & {0.6} & {0.95}
\end{array}\right)
\times\left(\begin{array}{c}
{n_{f, t-1}} \\
{n_{s, t-1}} \\
{n_{a, t-1}}
\end{array}\right)+ \notag \\
MVN\left(0,\left(\begin{array}{ccc}
{62.5} & {-37.5} & {-12.5} \\
{-37.5} & {62.5} & {-12.5} \\
{-12.5} & {-12.5} & {62.5}
\end{array}\right)+\left(\begin{array}{ccc}
{\frac{25}{n_{f, t}}} & {0} & {0} \\
{0} & {\frac{25}{n_{s, t}}} & {0} \\
{0} & {0} & {\frac{25}{n_{a, t}}}\end{array}\right)\right)\end{align}
In both scenarios, the same observation equations were used:
\begin{equation}\label{ObsEqns2}
\begin{array}
{l}{y_{f, t} \sim \mathcal{P}(n_{f, t})} \\
{y_{s, t} \sim \mathcal{P}(n_{s, t})} \\
{y_{a, t} \sim \mathcal{P}(n_{a, t})}
\end{array}
\end{equation}
The same initial values were used for the three stages in both scenarios: 50 adults, 40 sub-adults and 100 fledglings.\\
We simulated an ensemble of 100 time series in each scenario over a 30-year period. The results of the Bayesian fittings of the S4D3M model to each synthetic time series are shown in Figure \ref{fig:FigSupp_PPC}. Even in the presence of relatively large amounts of process stochasticity and sampling variability, the distributions of the posterior estimates of the demographic rates obtained from the fitting of the S4D3M to the ensemble of synthetic time series correlate strongly with their ground-truth values, in both the density-independent and density-dependent fecundity scenarios. These results suggest that the S4D3M is strongly identifiable in practice.\\
\renewcommand{\thefigure}{S4}
\begin{figure}[htbp]
\begin{center}
\includegraphics[width=15cm]{figs/FigS4.pdf}
\end{center}
\caption{Results of the stochastic simulation testing the practical identifiability of the S4D3M model on synthetic scenarios. A) and B) show the results of the density-independent scenario (model \ref{FullModel_DI}), while C) and D) show the results for the density-dependent fecundity scenario (model \ref{FullModel_DD}). In A) and C) the posterior estimates for the vital rates of the S4D3M fitted to each of the 100 synthetic time series are plotted against the ground-truth values for the density-independent and density-dependent synthetic scenarios, respectively. The box-plots show the median (horizontal line), inter-quantile range (box) and 95\% percentiles (whiskers). The thickness of the box is proportional to the posterior density of the estimates within the inter-quantile range. The Y=X regression line (in black) is plotted as a reference. Sub-figures B) and D) show the average of the 100 time series of abundances for the three demographic stages simulated from the density-independent and density-dependent fecundity synthetic scenarios, respectively. The averages of the synthetic time series are shown for each stage as lines, and shaded regions stand for the 95\% confidence interval.}
\label{fig:FigSupp_PPC}
\end{figure}
\newpage
\pagestyle{empty}
\begin{landscape}
\section{Supplementary Tables}
\begin{table}[!htbp]
\centering
\caption{Summary of vital rate estimates for the Eurasian griffon vulture (\textit{Gyps fulvus}) obtained in several areas of the western Palearctic.}
\label{t_sim}
\begin{tabular}{cccccccc}
\toprule
\thead{Fecundity} & \thead{\makecell{Fledgling \\ recruitment}} & \thead{\makecell{Sub-adult \\ recruitment}} & \thead{\makecell{Sub-adult \\ survival}} & \thead{\makecell{Adult \\ survival}} & \thead{Location} & \thead{Comment} & \thead{Reference} \\
\midrule
0.818 & 0.6462 & - & 0.9485 & \makecell{0.9463 \\ 0.9485 \\ 0.8240 ($>$ 28 years)} & \makecell{Grand Causses, \\ Massif Central, \\ France} & \makecell{Late onset of senescence ($>$ 28 years).\\ Different adult annual survivals \\ depending on assumed model.} & \cite{Chantepie2016a} \\
\hline
- & - & - & 0.858 & 0.987 & \makecell{Grand Causses, \\ Massif Central, \\ France} & \makecell{A release effect on adult survival \\ was detected (0.743 during first year).} & \cite{Sarrazin1996} \\
\hline
0.785 & 0.871 & - & - & - & \makecell{Grand Causses, \\ Massif Central, \\ France} & \makecell{Average figures for 11 years. \\ Only for first clutches.} & \cite{Sarrazin2000} \\
\hline
0.740 & - & - & - & - & \makecell{Island of Crete, \\ Greece} & \makecell{5 years of monitoring.} & \cite{Xirouchakis2010} \\
\hline
0.698 & - & - & - & - & \makecell{25 areas of the \\ western Palearctic.} & \makecell{Figure is the average for \\ 25 areas of the western Palearctic. \\ Range: 0.450-0.890} & \cite{Xirouchakis2010} \\
\hline
0.770 & 0.710 & - & - & - & \makecell{Eastern Rhodopes, \\ Bulgaria} & \makecell{25-year survey of two sites.} & \cite{Demerdzhiev2014} \\
\hline
- & - & - & 0.955-0.970 & 0.955-0.970 & \makecell{Causses, Baronnies, \\ Verdon, Navacelles, \\France} & \makecell{Range of long-term survival rates \\ with release-recapture models.} & \cite{Gouar2008a} \\
\hline
0.670-0.710 & 0.560-0.60 & - & - & - & \makecell{Esla, Porma, \\ Picos de Europa, \\ Spain} & \makecell{41 pairs monitored \\ during year 1997.} & \cite{OleaGarciaFalagan1999} \\
\hline
- & 0.688 & - & 0.946-0.964 & 0.952-0.969 & \makecell{Several areas, \\ Northeast Portugal} & Data from sensitivity analysis & \cite{VanBeest2008} \\
\bottomrule
\end{tabular}
\end{table}
\end{landscape}
\newpage
\begin{table}[!htbp]
\centering
\caption{\label{t_dd} Probability of density-dependence in vital rates and associated Bayes factors for each time period. Note that the prior probability of inclusion of the density-dependent parameter in the stage-structured demographic model is common to all vital rates for each time period. In bold type, vital rates for which the Bayes factor suggests that the evidence of density-dependence for that rate is barely worth mentioning (1 to 3.2) according to the Kass-Raftery scale \cite{Kass1995}. }
\begin{tabular}{cccccccccc}
\toprule
\multirow{1}{*}{\textbf{Vital rate}} &
\multicolumn{3}{c}{\textbf{Pre-BSE}} &
\multicolumn{3}{c}{\textbf{BSE}} &
\multicolumn{3}{c}{\textbf{Post-BSE}} \\
& \textbf{Prior} & \textbf{Posterior} & \textbf{Bayes factor}
& \textbf{Prior} & \textbf{Posterior} & \textbf{Bayes factor}
& \textbf{Prior} & \textbf{Posterior} & \textbf{Bayes factor} \\
\hline
\thead{Fecundity} & 0.280 & 0.016 & 0.043 & 0.264 & 0.012 & 0.034 & 0.255 & 0.014 & 0.042 \\
\hline
\thead{\makecell{Fledgling \\ recruitment}} & 0.280 & 0.014 & 0.037 & 0.264 & 0.018 & 0.052 & 0.255 & 0.015 & 0.044 \\
\hline
\thead{\makecell{Sub-adult \\ survival}} & 0.280 & \textbf{0.466} & \textbf{2.248} & 0.264 & 0.064 & 0.191 & 0.255 & 0.023 & 0.068 \\
\hline
\thead{\makecell{Sub-adult \\ recruitment}} & 0.280 & 0.019 & 0.049 & 0.264 & 0.241 & 0.882 & 0.255 & 0.207 & 0.766 \\
\hline
\thead{\makecell{Adult \\ survival}} & 0.280 & 0.018 & 0.046 & 0.264 & 0.014 & 0.039 & 0.255 & 0.015 & 0.045 \\
\bottomrule
\end{tabular}
\end{table}
\newpage
\printbibliography
\end{document}
|
{"hexsha": "689a250619235aea976e3b3ab1435d6d52282a50", "size": 36426, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ms/appendix.tex", "max_stars_repo_name": "palmaraz/SaniVult", "max_stars_repo_head_hexsha": "4ad91b093c5b553c13a70a3c3946e9580f03ac33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-20T16:02:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T12:20:26.000Z", "max_issues_repo_path": "ms/appendix.tex", "max_issues_repo_name": "palmaraz/SaniVult", "max_issues_repo_head_hexsha": "4ad91b093c5b553c13a70a3c3946e9580f03ac33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ms/appendix.tex", "max_forks_repo_name": "palmaraz/SaniVult", "max_forks_repo_head_hexsha": "4ad91b093c5b553c13a70a3c3946e9580f03ac33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 77.3375796178, "max_line_length": 2448, "alphanum_fraction": 0.7257727996, "num_tokens": 11195}
|
import torch
import torchvision
#import skimage.io as io
import numpy as np
import torchvision.transforms as t
import torch.nn as nn
import os
import matplotlib.pyplot as plt
import torchvision.models as model
#from sklearn.metrics import accuracy_score
torch.cuda.set_device(0)
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#print("device = ", device)
train_path="/content/drive/My Drive/SIPaKMeD/Train"
test_path="/content/drive/My Drive/SIPaKMeD/Test"
val_path="/content/drive/My Drive/SIPaKMeD/Validation"
plot_path="/content/drive/My Drive/SIPaKMeD"
snapshot_path="/content/drive/My Drive/SIPaKMeD"
# set a proper model name; used for checkpoint and plot filenames
model_name='GoogLeNet'
batch_s = 100
transform=t.Compose([t.Resize((224,224)),
#t.RandomCrop((224,224)),
#t.RandomHorizontalFlip(),
#t.RandomVerticalFlip(),
#t.RandomAffine(degrees=(-180,180), translate=(0.1,0.1), scale=(0.9,1.1), shear=(-5,5)),
t.ToTensor()])
dset_train=torchvision.datasets.ImageFolder(root=train_path,transform=transform)
test_trans=t.Compose([t.Resize((224,224)),t.ToTensor()])
dset_test=torchvision.datasets.ImageFolder(root=test_path,transform=test_trans)
dset_val=torchvision.datasets.ImageFolder(root=val_path,transform=test_trans)
train_loader=torch.utils.data.DataLoader(dset_train,batch_size=batch_s,shuffle=True,num_workers=16)#,drop_last=True)
val_loader=torch.utils.data.DataLoader(dset_val,batch_size=batch_s,shuffle=False,num_workers=16)#,drop_last=True)
test_loader=torch.utils.data.DataLoader(dset_test,batch_size=batch_s,num_workers=16)#, drop_last=True)
num_classes = 5  # SIPaKMeD distinguishes 5 cell classes (matches Linear3's output size below)
#net=model.googlenet()
############################## MODEL : GOOGLENET ########################################
models = torchvision.models.googlenet(pretrained=True)
#net.fc = nn.Linear(net.fc.in_features,num_classes)
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
img_modules = list(models.children())[:-1]
self.ModelA = nn.Sequential(*img_modules)
self.Linear1 = nn.Linear(1024, 256)
self.relu = nn.ReLU()
self.Linear2 = nn.Linear(256, 2)
#self.softmax = nn.Softmax(dim = 1)
self.Linear3 = nn.Linear(1024, 5, bias = True)
#self.Linear4 = nn.Linear(1024, 2, bias = True)
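    # forward() returns a pair: the flattened 1024-d GoogLeNet embedding
    # (exported to CSV later in this script) and the 5-way class logits from Linear3.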
def forward(self, x):
x = self.ModelA(x)
x1 = torch.flatten(x, 1)
x2 = self.Linear3(x1)
#x2 = self.relu(x1)
#x2 = self.Linear4(x2)
#x2 = self.softmax(x2)
return x1, x2
#net.fc=nn.Linear(1024,2,True)
net = MyModel()
net=net.cuda()
criterion=nn.CrossEntropyLoss()
params = net.parameters()
optimizer=torch.optim.Adam(net.parameters())
#optimizer = torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
#optimizer = torch.optim.Adagrad(net.parameters(), lr=0.005, lr_decay=0.01, weight_decay=0.005, initial_accumulator_value=0, eps=1e-10)
model_name1 = 'GoogLeNet_last'  # name for the checkpoint saved after the final training epoch
load_model=snapshot_path+'/model_'+model_name+'.pth'
loaded_flag=False
if os.path.exists(load_model):
checkpoint=torch.load(load_model)
net.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
print("model loaded successfully")
print('starting training after epoch: ',checkpoint['epoch'])
loaded_flag=True
def plot(val_loss,train_loss):
plt.title("Loss after epoch: {}".format(len(train_loss)))
plt.xlabel("epoch")
plt.ylabel("loss")
plt.plot(list(range(len(train_loss))),train_loss,color="r",label="Train_loss")
plt.plot(list(range(len(val_loss))),val_loss,color="b",label="Validation_loss")
plt.legend()
plt.savefig(os.path.join(plot_path,"loss_"+model_name+".png"))
# plt.figure()
plt.close()
val_interval=1
min_loss=99999
val_loss_gph=[]
train_loss_gph=[]
if loaded_flag:
min_loss=checkpoint['loss']
val_loss_gph=checkpoint["val_graph"]
train_loss_gph=checkpoint["train_graph"]
########################## TRAIN ##################
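# train() runs for up to `epoch` epochs, validates every val_interval epochs,
# and checkpoints whenever the validation loss improves on the best seen so far.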
def train(epoch=5):
i=0
global min_loss
flag=True
while i+1<=epoch and flag:
print("Epoch {}".format(i+1 if not loaded_flag else i+1+checkpoint['epoch']))
train_loss=0.0
i+=1
data1 = []
correct=total=0
#net = net.train()
for (image,label) in train_loader:
net.train()
optimizer.zero_grad()
outputs1, outputs2=net(image.cuda())
#data1.append(outputs1)
loss=criterion(outputs2 ,label.cuda())
loss.backward()
optimizer.step()
train_loss+=loss.item()*image.size(0)
_, predicted = torch.max(outputs2.data, 1)
total += label.size(0)
correct += (predicted == label.cuda()).sum().item()
print("Train accuracy", (100*correct/total))
train_loss_gph.append(train_loss/len(dset_train))
#net = net.eval()
if (i+1)%val_interval==0 or (i+1)==epoch:
net.eval()
with torch.no_grad():
val_loss=0
correct=total=0
for (img_v,lab_v ) in val_loader:
output_v1, output_v2=net(img_v.cuda())
#data1.append(output_v1)
#val_loss+=criterion(output_v2,lab_v.cuda())
val_loss+=criterion(output_v2,lab_v.cuda())*img_v.size(0)
_, predicted = torch.max(output_v2.data, 1)
total += lab_v.size(0)
correct += (predicted == lab_v.cuda()).sum().item()
print("Val accuracy", (100*correct/total))
val_loss_gph.append(val_loss/len(dset_val))
if val_loss<min_loss:
state={
"epoch":i if not loaded_flag else i+checkpoint['epoch'],
"model_state":net.cpu().state_dict(),
"optimizer_state":optimizer.state_dict(),
"loss":min_loss,
"train_graph":train_loss_gph,
"val_graph":val_loss_gph,
}
min_loss=val_loss
torch.save(state,os.path.join(snapshot_path,"model_"+model_name+'.pth'))
net.cuda()
print("validation loss : {:.6f} ".format(val_loss/len(dset_val)))
plot(val_loss_gph,train_loss_gph)
print("Train loss : {:.6f}".format(train_loss/len(dset_train)))
if i==epoch:
flag=False
break
train(50)
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data1 = []
with torch.no_grad():
for data in train_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data1.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the train images: %d %%' % (
100 * correct / total))
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data2 = []
with torch.no_grad():
for data in val_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data2.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the validation images: %d %%' % (
100 * correct / total))
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data3 = []
with torch.no_grad():
for data in test_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data3.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
########### SAVING THE MODEL OF LAST EPOCH ##############
state1 = {
"Model_State": net.cpu().state_dict(),
"Optimiser_State": optimizer.state_dict(),
}
torch.save(state1,os.path.join(snapshot_path,"model_"+model_name1+'.pth'))
'''
p = train(100)
temp=p
import csv
labels=[]
for i in range(len(temp[0].tolist()[0])):
labels.append("Plain"+str(i+1))
with open ("/content/drive/My Drive/ND Sir's Project/HErlev_PlainNet.csv",'w+',newline='') as file:
writer=csv.writer(file)
writer.writerow(labels)
for i in range(len(temp)):
row=temp[i].tolist()[0]
writer.writerow(row)
'''
############### LOADING THE MODEL SAVED AT LAST EPOCH ##########
load_model1 = os.path.join(snapshot_path, "model_"+model_name1+'.pth')
if os.path.exists(load_model1):
checkpoint=torch.load(load_model1)
net.load_state_dict(checkpoint['Model_State'])
optimizer.load_state_dict(checkpoint['Optimiser_State'])
print("model loaded successfully")
#print('starting training after epoch: ',checkpoint['epoch'])
#loaded_flag=True
###### LOADING DATA WITH BATCH SIZE 1 ################
test_trans=t.Compose([t.Resize((224,224)),t.ToTensor()])
dset_train=torchvision.datasets.ImageFolder(root=train_path,transform=test_trans)
dset_test=torchvision.datasets.ImageFolder(root=test_path,transform=test_trans)
dset_val=torchvision.datasets.ImageFolder(root=val_path,transform=test_trans)
train_loader=torch.utils.data.DataLoader(dset_train,batch_size=1,shuffle=False,num_workers=16)#,drop_last=True)
val_loader=torch.utils.data.DataLoader(dset_val,batch_size=1,shuffle=False,num_workers=16)#,drop_last=True)
test_loader=torch.utils.data.DataLoader(dset_test,batch_size=1,num_workers=16)#, drop_last=True)
############### EXTRACTION OF FEATURES ############
net = net.cuda()
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data1 = []
with torch.no_grad():
for data in train_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data1.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the train images: %d %%' % (
100 * correct / total))
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data2 = []
with torch.no_grad():
for data in val_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data2.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the validation images: %d %%' % (
100 * correct / total))
print("validation MIN loss obtained: {:.6f}".format(min_loss))
net=net.eval()
correct = 0
total = 0
data3 = []
with torch.no_grad():
for data in test_loader:
images, labels = data
labels=labels.cuda()
outputs1, outputs2 = net(images.cuda())
data3.append(outputs1)
_, predicted = torch.max(outputs2.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
data_all = data1+data2+data3
####### WRITING THE CSV #############
temp=data_all
import csv
labels=[]
for i in range(len(temp[0].tolist()[0])):
labels.append("ResNet18"+str(i+1))
with open ("/content/drive/My Drive/SIPaKMeD/SIPaKMeD_ResNet18_b_original_.csv",'w+',newline='') as file:
writer=csv.writer(file)
writer.writerow(labels)
for i in range(len(temp)):
row=temp[i].tolist()[0]
writer.writerow(row)
|
{"hexsha": "5312b6629308f46336fbdf54c9a5d501cd3126ac", "size": 12178, "ext": "py", "lang": "Python", "max_stars_repo_path": "DeepFeatureExtraction/EXTRACTOR_GoogLeNet.py", "max_stars_repo_name": "SohamChattopadhyayEE/DeepGA", "max_stars_repo_head_hexsha": "034e0b254244b00bc1bc6daaaeec610d5c73dd55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DeepFeatureExtraction/EXTRACTOR_GoogLeNet.py", "max_issues_repo_name": "SohamChattopadhyayEE/DeepGA", "max_issues_repo_head_hexsha": "034e0b254244b00bc1bc6daaaeec610d5c73dd55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DeepFeatureExtraction/EXTRACTOR_GoogLeNet.py", "max_forks_repo_name": "SohamChattopadhyayEE/DeepGA", "max_forks_repo_head_hexsha": "034e0b254244b00bc1bc6daaaeec610d5c73dd55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7963446475, "max_line_length": 136, "alphanum_fraction": 0.62465101, "include": true, "reason": "import numpy", "num_tokens": 3021}
|
using HDF5
using JLD2
using Pkg
using SparseVertex
include("./triqs_conv_functions.jl")
file = ARGS[1]
outdir = ARGS[2]
# Gimp
gImp = triqs_read_gf(file, "G_imp")
# chi
chiupdo_mesh = h5read(file, "chi_updn_ph_imp/mesh")
mesh = triqs_build_freqGrid(chiupdo_mesh);
chiupdo_raw = h5read(file, "chi_updn_ph_imp/data")
chiupup_raw = h5read(file, "chi_upup_ph_imp/data")
χupdo = permutedims(chiupdo_raw[1,:,:,:] .+ 1im .* chiupdo_raw[2,:,:,:],[3,1,2])
χupup = permutedims(chiupup_raw[1,:,:,:] .+ 1im .* chiupup_raw[2,:,:,:],[3,1,2])
χch = χupup .+ χupdo
χsp = χupup .- χupdo
freqList = triqs_linearize_mesh(mesh)
bGrid = freqList[1][1]:freqList[end][1]
fGrid = freqList[1][2]:freqList[end][2]
nBose = length(bGrid)
nFermi = length(fGrid)
shift = 0
println("Generating freqency mesh with bosonic $(bGrid), fermionic $(fGrid) indices")
β = h5read(file, "chi_updn_ph_imp/mesh/MeshComponent0/domain/beta")
χ0_full = computeχ0(bGrid, fGrid, gImp, β)
Γch = computeΓ(freqList, χch, χ0_full, bGrid, fGrid)
Γsp = computeΓ(freqList, χsp, χ0_full, bGrid, fGrid)
Σ = triqs_read_gf(file, "Sigma_imp")
mkpath(outdir)
mkpath(outdir*"/chi_dir")
mkpath(outdir*"/gamma_dir")
SparseVertex.write_fort_dir("gamma", freqList, Γch, Γsp, outdir*"/gamma_dir", nBose, nFermi)
SparseVertex.write_fort_dir("chi", freqList, χupup, χupdo, outdir*"/chi_dir", nBose, nFermi)
# Grid stuff
include("./genFreqMesh.jl")
gen_mesh(freqList[end][1], freqList[end][2]+1, shift, outdir)
JLD2.save(outdir*"/triqs_out.jld2", "Γch", Γch, "Γsp", Γsp, "χDMFTch", χch, "χDMFTsp", χsp, "gImp", gImp, "SigmaLoc", Σ, "beta", β)
|
{"hexsha": "46e0e81fd0188c133ed4092850f302d3cb533fca", "size": 1584, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/triqs_conv.jl", "max_stars_repo_name": "Atomtomate/LadderDGA.jl", "max_stars_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-04T12:31:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T10:00:25.000Z", "max_issues_repo_path": "scripts/triqs_conv.jl", "max_issues_repo_name": "Atomtomate/LadderDGA.jl", "max_issues_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/triqs_conv.jl", "max_forks_repo_name": "Atomtomate/LadderDGA.jl", "max_forks_repo_head_hexsha": "8cd39fe2ae2aa1130bff706171266d3cf2d4c8e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-04T12:34:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T12:34:34.000Z", "avg_line_length": 31.0588235294, "max_line_length": 131, "alphanum_fraction": 0.7127525253, "num_tokens": 617}
|
# Copyright (c) 2003-2015 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
# Note: This file was originally written for GalSim. But since I was the author
# of it there, I can republish it here under the TreeCorr license too. I've
# made a few (slight) modifications from the GalSim version.
"""
.. module:: celestial
"""
angle_units = {
'arcsec' : 4.84813681109537e-6,
'arcmin' : 2.90888208665722e-4,
'deg' : 1.74532925199433e-2,
'hour' : 2.61799387799149e-1,
'rad' : 1.,
}
"""The value of various angle units in radians for easy conversion.
Note that there are also shorthand equivalents for these. e.g. Either::
4. * treecorr.angle_units['arcsec']
4. * treecorr.arcsec
would represent 4 arcsec in radians.
"""
arcsec = angle_units['arcsec'] #: 1 arcsec in radians
arcmin = angle_units['arcmin'] #: 1 arcmin in radians
degrees = angle_units['deg'] #: 1 degree in radians
hours = angle_units['hour'] #: 1 hour of angle in radians
radians = angle_units['rad'] #: 1 radian in radians
class CelestialCoord(object):
"""This class defines a position on the celestial sphere, normally given by
two angles, `ra` and `dec`.
This class is used to perform various calculations in spherical coordinates, such
as the angular distance between two points in the sky, the angles in spherical triangles,
projecting from sky coordinates onto a Euclidean tangent plane, etc.
None of these calculations are used directly by TreeCorr, but the projection routines
were useful when developing code for working directly in spherical coordinates, and it
could still be useful for people who need to massage their input catalogs before passing
them to TreeCorr.
A `CelestialCoord` object is constructed from the right ascension and declination:
>>> coord = treecorr.CelestialCoord(ra, dec)
The input angles are assumed to be in radians, but we have some helper variables to
convert from other units:
- :const:`treecorr.arcsec` The value of 1 arcsec in radians = pi/(180*3600)
- :const:`treecorr.arcmin` The value of 1 arcmin in radians = pi/(180*60)
- :const:`treecorr.degrees` The value of 1 degree in radians = pi/180
- :const:`treecorr.hours` The value of 1 hour in radians = pi/12
- :const:`treecorr.radians` The value of 1 radian in radians = 1
So if you have ra in hours, and dec in degrees, you can write:
>>> coord = treecorr.CelestialCoord(ra * treecorr.hours, dec * treecorr.degrees)
After construction, you can access the ra and dec values as read-only attributes.
>>> ra = coord.ra
>>> dec = coord.dec
:param ra: The right ascension in radians.
:param dec: The declination in radians.
"""
def __init__(self, ra, dec):
self._ra = ra
self._dec = dec
self._x = None # Indicate that x,y,z are not set yet.
@property
def ra(self): return self._ra
@property
def dec(self): return self._dec
@staticmethod
def radec_to_xyz(ra, dec, r=1.):
"Convert ra, dec (in radians) to 3D x,y,z coordinates on the unit sphere."
import numpy
cosdec = numpy.cos(dec)
x = cosdec * numpy.cos(ra) * r
y = cosdec * numpy.sin(ra) * r
z = numpy.sin(dec) * r
return x,y,z
@staticmethod
def xyz_to_radec(x, y, z):
"Convert 3D x,y,z coordinates to ra, dec (in radians)."
import numpy
ra = numpy.arctan2(y,x)
dec = numpy.arctan2(z,numpy.sqrt(x**2+y**2))
return ra,dec
def _set_aux(self):
if self._x is None:
import math
self._cosdec = math.cos(self._dec)
self._sindec = math.sin(self._dec)
self._cosra = math.cos(self._ra)
self._sinra = math.sin(self._ra)
self._x = self._cosdec * self._cosra
self._y = self._cosdec * self._sinra
self._z = self._sindec
def distanceTo(self, other):
"""Returns the great circle distance between this coord and another one.
The return value is in radians.
:param other: Another `CelestialCoord` object.
:returns: The great circle distance in radians between this coord and `other`.
"""
# The easiest way to do this in a way that is stable for small separations
# is to calculate the (x,y,z) position on the unit sphere corresponding to each
# coordinate position.
#
# x = cos(dec) cos(ra)
# y = cos(dec) sin(ra)
# z = sin(dec)
self._set_aux()
other._set_aux()
# Then the direct distance between the two points is
#
# d^2 = (x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2
dsq = (self._x-other._x)**2 + (self._y-other._y)**2 + (self._z-other._z)**2
# This direct distance can then be converted to a great circle distance via
#
# sin(theta/2) = d/2
import math
theta = 2. * math.asin(0.5 * math.sqrt(dsq))
return theta
def angleBetween(self, coord1, coord2):
"""Find the open angle at the location of the current coord between `coord1` and `coord2`.
Note that this returns a signed angle. The angle is positive if the sweep direction from
coord1 to coord2 is counter-clockwise (as observed from Earth). It is negative if
the direction is clockwise.
:param coord1: Another `CelestialCoord` object.
:param coord2: A third `CelestialCoord` object.
:returns: The angle in radians between the great circles to the two other coords.
"""
# Call A = coord1, B = coord2, C = self
# Then we are looking for the angle ACB.
# If we treat each coord as a (x,y,z) vector, then we can use the following spherical
# trig identities:
#
# (A x C) . B = sina sinb sinC
# (A x C) . (B x C) = sina sinb cosC
#
# Then we can just use atan2 to find C, and atan2 automatically gets the sign right.
# And we only need 1 trig call, assuming that x,y,z are already set up, which is often
# the case.
self._set_aux()
coord1._set_aux()
coord2._set_aux()
AxC = ( coord1._y * self._z - coord1._z * self._y ,
coord1._z * self._x - coord1._x * self._z ,
coord1._x * self._y - coord1._y * self._x )
BxC = ( coord2._y * self._z - coord2._z * self._y ,
coord2._z * self._x - coord2._x * self._z ,
coord2._x * self._y - coord2._y * self._x )
sinC = AxC[0] * coord2._x + AxC[1] * coord2._y + AxC[2] * coord2._z
cosC = AxC[0] * BxC[0] + AxC[1] * BxC[1] + AxC[2] * BxC[2]
import math
C = math.atan2(sinC, cosC)
return C
def area(self, coord1, coord2):
"""Find the area of the spherical triangle defined by the current coordinate, `coord1`,
and `coord2`, returning the area in steradians.
:param coord1: Another `CelestialCoord` object.
:param coord2: A third `CelestialCoord` object.
:returns: The area in steradians of the spherical triangle defined by the three
coords.
"""
# The area of a spherical triangle is defined by the "spherical excess", E.
# There are several formulae for E:
# (cf. http://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess)
#
# E = A + B + C - pi
# tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2)
# tan(E/2) = tan(a/2) tan(b/2) sin(C) / (1 + tan(a/2) tan(b/2) cos(C))
#
# We use the last formula, which is stable both for small triangles and ones that are
# nearly degenerate (which the middle formula may have trouble with).
#
# Furthermore, we can use some of the math for angleBetween and distanceTo to simplify
# this further:
#
# In angleBetween, we have formulae for sina sinb sinC and sina sinb cosC.
# In distanceTo, we have formulae for sin(a/2) and sin(b/2).
#
# Define: F = sina sinb sinC
# G = sina sinb cosC
# da = 2 sin(a/2)
# db = 2 sin(b/2)
#
# tan(E/2) = sin(a/2) sin(b/2) sin(C) / (cos(a/2) cos(b/2) + sin(a/2) sin(b/2) cos(C))
# = sin(a) sin(b) sin(C) / (4 cos(a/2)^2 cos(b/2)^2 + sin(a) sin(b) cos(C))
# = F / (4 (1-sin(a/2)^2) (1-sin(b/2)^2) + G)
# = F / ( (4-da^2) (4-db^2) / 4 + G )
import math
self._set_aux()
coord1._set_aux()
coord2._set_aux()
AxC = ( coord1._y * self._z - coord1._z * self._y ,
coord1._z * self._x - coord1._x * self._z ,
coord1._x * self._y - coord1._y * self._x )
BxC = ( coord2._y * self._z - coord2._z * self._y ,
coord2._z * self._x - coord2._x * self._z ,
coord2._x * self._y - coord2._y * self._x )
F = AxC[0] * coord2._x + AxC[1] * coord2._y + AxC[2] * coord2._z
G = AxC[0] * BxC[0] + AxC[1] * BxC[1] + AxC[2] * BxC[2]
dasq = (self._x-coord1._x)**2 + (self._y-coord1._y)**2 + (self._z-coord1._z)**2
dbsq = (self._x-coord2._x)**2 + (self._y-coord2._y)**2 + (self._z-coord2._z)**2
tanEo2 = F / ( 0.25 * (4.-dasq) * (4.-dbsq) + G)
E = 2. * math.atan( abs(tanEo2) )
return E
def project(self, other, projection='lambert'):
"""Use the currect coord as the center point of a tangent plane projection to project
the `other` coordinate onto that plane.
This function returns the position (u,v) in the Euclidean coordinate system defined by
a tangent plane projection around the current coordinate, with +v pointing north and
+u pointing west.
There are currently four options for the projection, which you can specify as a string
value for the `projection` keyword argument:
- 'lambert' Uses a Lambert azimuthal projection, which preserves the area of small
patches, but not the angles between points in these patches. For more information, see
http://mathworld.wolfram.com/LambertAzimuthalEqual-AreaProjection.html
- 'stereographic' Uses a stereographic projection, which preserves angles between points
in small patches, but not area. For more information, see
http://mathworld.wolfram.com/StereographicProjection.html
- 'gnomonic' Uses a gnomonic projection (i.e. a projection from the center of the sphere),
which has the property that all great circles become straight lines. For more
information, see http://mathworld.wolfram.com/GnomonicProjection.html
- 'postel' Uses a Postel equidistant projection, which preserves distances from the
projection point, but not area or angles. For more information, see
http://mathworld.wolfram.com/AzimuthalEquidistantProjection.html
The distance or angle errors increase with distance from the projection point of course.
:param other: The coordinate to be projected relative to the current coord.
:param projection: Which kind of projection to use. (default: 'lambert')
:returns: The projected position (u,v) in radians as a tuple.
"""
if projection not in [ 'lambert', 'stereographic', 'gnomonic', 'postel' ]:
raise ValueError('Unknown projection ' + projection)
self._set_aux()
other._set_aux()
# The core calculation is done in a helper function:
return self._project_core(other._cosra, other._sinra, other._cosdec, other._sindec,
projection)
def _project_core(self, cosra, sinra, cosdec, sindec, projection):
# The equations are given at the above mathworld websites. They are the same except
# for the definition of k:
#
# x = k cos(dec) sin(ra-ra0)
# y = k ( cos(dec0) sin(dec) - sin(dec0) cos(dec) cos(ra-ra0) )
#
# Lambert:
# k = sqrt( 2 / ( 1 + cos(c) ) )
# Stereographic:
# k = 2 / ( 1 + cos(c) )
# Gnomonic:
# k = 1 / cos(c)
# Postel:
# k = c / sin(c)
# where cos(c) = sin(dec0) sin(dec) + cos(dec0) cos(dec) cos(ra-ra0)
# cos(dra) = cos(ra-ra0) = cos(ra0) cos(ra) + sin(ra0) sin(ra)
cosdra = self._cosra * cosra + self._sinra * sinra
# sin(dra) = -sin(ra - ra0)
# Note: - sign here is to make +x correspond to -ra,
# so x increases for decreasing ra.
# East is to the left on the sky!
# sin(dra) = -cos(ra0) sin(ra) + sin(ra0) cos(ra)
sindra = -self._cosra * sinra + self._sinra * cosra
# Calculate k according to which projection we are using
cosc = self._sindec * sindec + self._cosdec * cosdec * cosdra
if projection[0] == 'l':
import numpy
k = numpy.sqrt( 2. / (1.+cosc) )
elif projection[0] == 's':
k = 2. / (1. + cosc)
elif projection[0] == 'g':
k = 1. / cosc
else:
import numpy
c = numpy.arccos(cosc)
k = c / numpy.sin(c)
u = k * cosdec * sindra
v = k * ( self._cosdec * sindec - self._sindec * cosdec * cosdra )
return u, v
def project_rad(self, ra, dec, projection):
"""This is basically identical to the :meth:`~treecorr.CelestialCoord.project` method
except that the input `ra`, `dec` are given in radians rather than packaged as a
`CelestialCoord` object.
The main advantage to this is that it will work if `ra` and `dec` are NumPy arrays,
in which case the output `x`, `y` will also be NumPy arrays.
See the doc for :meth:`~treecorr.CelestialCoord.project` for more information about the
kinds of projection.
:param ra: The RA of the coordinate to be projected relative to the current coord.
:param dec: The Dec of the coordinate to be projected relative to the current coord.
:param projection: Which kind of projection to use. (default: 'lambert')
:returns: The projected position (u,v) in radians as a tuple.
"""
if projection not in [ 'lambert', 'stereographic', 'gnomonic', 'postel' ]:
raise ValueError('Unknown projection ' + projection)
self._set_aux()
import numpy
cosra = numpy.cos(ra)
sinra = numpy.sin(ra)
cosdec = numpy.cos(dec)
sindec = numpy.sin(dec)
return self._project_core(cosra, sinra, cosdec, sindec, projection)
def deproject(self, u, v, projection='lambert'):
"""Do the reverse process from the project() function.
i.e. This takes in a position (u,v) as a tuple and returns the corresponding celestial
coordinate, using the current coordinate as the center point of the tangent plane
projection.
See the doc for :meth:`~treecorr.CelestialCoord.project` for more information about the
kinds of projection.
:param u: The projected u value to be deprojected.
:param v: The projected v value to be deprojected.
:param projection: Which kind of projection to use. (default: 'lambert')
:returns: The `CelestialCoord` corresponding to the given projected position.
"""
if projection not in [ 'lambert', 'stereographic', 'gnomonic', 'postel' ]:
raise ValueError('Unknown projection ' + projection)
# Again, do the core calculations in a helper function
ra, dec = self._deproject_core(u, v, projection)
return CelestialCoord(ra,dec)
def _deproject_core(self, u, v, projection):
# The inverse equations are also given at the same web sites:
#
# sin(dec) = cos(c) sin(dec0) + v sin(c) cos(dec0) / r
# tan(ra-ra0) = u sin(c) / (r cos(dec0) cos(c) - v sin(dec0) sin(c))
#
# where
#
# r = sqrt(u^2+v^2)
# c = 2 sin^(-1)(r/2) for lambert
# c = 2 tan^(-1)(r/2) for stereographic
# c = tan^(-1)(r) for gnomonic
# c = r for postel
# Note that we can rewrite the formulae as:
#
# sin(dec) = cos(c) sin(dec0) + v (sin(c)/r) cos(dec0)
# tan(ra-ra0) = u (sin(c)/r) / (cos(dec0) cos(c) - v sin(dec0) (sin(c)/r))
#
# which means we only need cos(c) and sin(c)/r. For most of the projections,
# this saves us from having to take sqrt(rsq).
import numpy
rsq = u*u + v*v
if projection[0] == 'l':
# c = 2 * arcsin(r/2)
# Some trig manipulations reveal:
# cos(c) = 1 - r^2/2
# sin(c) = r sqrt(4-r^2) / 2
cosc = 1. - rsq/2.
sinc_over_r = numpy.sqrt(4.-rsq) / 2.
elif projection[0] == 's':
# c = 2 * arctan(r/2)
# Some trig manipulations reveal:
# cos(c) = (4-r^2) / (4+r^2)
# sin(c) = 4r / (4+r^2)
cosc = (4.-rsq) / (4.+rsq)
sinc_over_r = 4. / (4.+rsq)
elif projection[0] == 'g':
# c = arctan(r)
# cos(c) = 1 / sqrt(1+r^2)
# sin(c) = r / sqrt(1+r^2)
cosc = sinc_over_r = 1./numpy.sqrt(1.+rsq)
else:
r = numpy.sqrt(rsq)
cosc = numpy.cos(r)
sinc_over_r = numpy.sinc(r/numpy.pi)
# Compute sindec, tandra
self._set_aux()
sindec = cosc * self._sindec + v * sinc_over_r * self._cosdec
# Remember the - sign so +dra is -u. East is left.
tandra_num = -u * sinc_over_r
tandra_denom = cosc * self._cosdec - v * sinc_over_r * self._sindec
dec = numpy.arcsin(sindec)
ra = self.ra + numpy.arctan2(tandra_num, tandra_denom)
return ra, dec
def deproject_rad(self, u, v, projection='lambert'):
"""This is basically identical to the deproject() function except that the output `ra`,
`dec` are returned as a tuple (ra, dec) in radians rather than packaged as a
`CelestialCoord` object.
The main advantage to this is that it will work if `u` and `v` are NumPy arrays,
in which case the output `ra`, `dec` will also be NumPy arrays.
See the doc for :meth:`~treecorr.CelestialCoord.project` for more information about the
kinds of projection.
:param u: The projected u value to be deprojected.
:param v: The projected v value to be deprojected.
:param projection: Which kind of projection to use. (default: 'lambert')
:returns: A tuple (ra, dec) of the deprojected coordinates.
"""
if projection not in [ 'lambert', 'stereographic', 'gnomonic', 'postel' ]:
raise ValueError('Unknown projection ' + projection)
return self._deproject_core(u, v, projection)
def deproject_jac(self, u, v, projection='lambert'):
"""Return the jacobian of the deprojection.
i.e. if the input position is (u,v) (in radians) then the return matrix is
J = ( dra/du cos(dec) dra/dv cos(dec) )
( ddec/du ddec/dv )
See the doc for :meth:`~treecorr.CelestialCoord.project` for more information about the
kinds of projection.
:param u: The projected u value to be deprojected.
:param v: The projected v value to be deprojected.
:param projection: Which kind of projection to use. (default: 'lambert')
:returns: The matrix as a tuple (J00, J01, J10, J11)
"""
if projection not in [ 'lambert', 'stereographic', 'gnomonic', 'postel' ]:
raise ValueError('Unknown projection ' + projection)
# sin(dec) = cos(c) sin(dec0) + v sin(c)/r cos(dec0)
# tan(ra-ra0) = u sin(c)/r / (cos(dec0) cos(c) - v sin(dec0) sin(c)/r)
#
# d(sin(dec)) = cos(dec) ddec = s0 dc + (v ds + s dv) c0
# dtan(ra-ra0) = sec^2(ra-ra0) dra
# = ( (u ds + s du) A - u s (dc c0 - (v ds + s dv) s0 ) )/A^2
# where s = sin(c) / r
# c = cos(c)
# s0 = sin(dec0)
# c0 = cos(dec0)
# A = c c0 - v s s0
import numpy
rsq = u*u + v*v
rsq1 = (u+1.e-4)**2 + v**2
rsq2 = u**2 + (v+1.e-4)**2
if projection[0] == 'l':
c = 1. - rsq/2.
s = numpy.sqrt(4.-rsq) / 2.
dcdu = -u
dcdv = -v
dsdu = -u/(4.*s)
dsdv = -v/(4.*s)
elif projection[0] == 's':
s = 4. / (4.+rsq)
c = 2.*s-1.
ssq = s*s
dcdu = -u * ssq
dcdv = -v * ssq
dsdu = 0.5*dcdu
dsdv = 0.5*dcdv
elif projection[0] == 'g':
c = s = 1./numpy.sqrt(1.+rsq)
s3 = s*s*s
dcdu = dsdu = -u*s3
dcdv = dsdv = -v*s3
else:
r = numpy.sqrt(rsq)
if r == 0.:
c = s = 1
else:
c = numpy.cos(r)
s = numpy.sin(r)/r
dcdu = -s*u
dcdv = -s*v
dsdu = (c-s)*u/rsq
dsdv = (c-s)*v/rsq
self._set_aux()
s0 = self._sindec
c0 = self._cosdec
sindec = c * s0 + v * s * c0
cosdec = numpy.sqrt(1.-sindec*sindec)
dddu = ( s0 * dcdu + v * dsdu * c0 ) / cosdec
dddv = ( s0 * dcdv + (v * dsdv + s) * c0 ) / cosdec
tandra_num = u * s
tandra_denom = c * c0 - v * s * s0
# Note: A^2 sec^2(dra) = denom^2 (1 + tan^2(dra)) = denom^2 + num^2
A2sec2dra = tandra_denom**2 + tandra_num**2
drdu = ((u * dsdu + s) * tandra_denom - u * s * ( dcdu * c0 - v * dsdu * s0 ))/A2sec2dra
drdv = (u * dsdv * tandra_denom - u * s * ( dcdv * c0 - (v * dsdv + s) * s0 ))/A2sec2dra
drdu *= cosdec
drdv *= cosdec
return drdu, drdv, dddu, dddv
def precess(self, from_epoch, to_epoch):
"""This function precesses equatorial ra and dec from one epoch to another.
It is adapted from a set of fortran subroutines based on (a) pages 30-34 of
the Explanatory Supplement to the AE, (b) Lieske, et al. (1977) A&A 58, 1-16,
and (c) Lieske (1979) A&A 73, 282-284.
:param from_epoch: The epoch to use for the current coord.
:param to_epoch: The new epoch to precess to.
:returns: A new `CelestialCoord` of the coordinates in the new epoch.
"""
if from_epoch == to_epoch: return self
# t0, t below correspond to Lieske's big T and little T
t0 = (from_epoch-2000.)/100.
t = (to_epoch-from_epoch)/100.
t02 = t0*t0
t2 = t*t
t3 = t2*t
# a,b,c below correspond to Lieske's zeta_A, z_A and theta_A. They are all in arcsec.
a = ( (2306.2181 + 1.39656*t0 - 0.000139*t02) * t +
(0.30188 - 0.000344*t0) * t2 + 0.017998 * t3 ) * angle_units['arcsec']
b = ( (2306.2181 + 1.39656*t0 - 0.000139*t02) * t +
(1.09468 + 0.000066*t0) * t2 + 0.018203 * t3 ) * angle_units['arcsec']
c = ( (2004.3109 - 0.85330*t0 - 0.000217*t02) * t +
(-0.42665 - 0.000217*t0) * t2 - 0.041833 * t3 ) * angle_units['arcsec']
import math
cosa = math.cos(a)
sina = math.sin(a)
cosb = math.cos(b)
sinb = math.sin(b)
cosc = math.cos(c)
sinc = math.sin(c)
# This is the precession rotation matrix:
xx = cosa*cosc*cosb - sina*sinb
yx = -sina*cosc*cosb - cosa*sinb
zx = -sinc*cosb
xy = cosa*cosc*sinb + sina*cosb
yy = -sina*cosc*sinb + cosa*cosb
zy = -sinc*sinb
xz = cosa*sinc
yz = -sina*sinc
zz = cosc
# Perform the rotation:
self._set_aux()
x2 = xx*self._x + yx*self._y + zx*self._z
y2 = xy*self._x + yy*self._y + zy*self._z
z2 = xz*self._x + yz*self._y + zz*self._z
new_dec = math.atan2(z2,math.sqrt(x2**2+y2**2))
new_ra = math.atan2(y2,x2)
new_coord = CelestialCoord(new_ra,new_dec)
return new_coord
def galactic(self, epoch=2000.):
"""Get the longitude and latitude in galactic coordinates corresponding to this position.
The formulae are implemented in terms of the 1950 coordinates, so we need to
precess from the current epoch to 1950. The current epoch is assumed to be 2000
by default, but you may also specify a different value with the epoch parameter.
:param epoch: The epoch to assume for the current coordinates (default: 2000)
:returns: The longitude and latitude as a tuple (el, b) in radians.
"""
# cf. Lang, Astrophysical Formulae, page 13
# cos(b) cos(el-33) = cos(dec) cos(ra-282.25)
# cos(b) sin(el-33) = sin(dec) sin(62.6) + cos(dec) sin(ra-282.25) cos(62.6)
# sin(b) = sin(dec) sin(62.6) - cos(dec) sin(ra-282.25) sin(62.6)
import math
el0 = 33. * angle_units['deg']
r0 = 282.25 * angle_units['deg']
d0 = 62.6 * angle_units['deg']
cosd0 = math.cos(d0)
sind0 = math.sin(d0)
temp = self.precess(epoch, 1950.)
d = temp.dec
r = temp.ra
cosd = math.cos(d)
sind = math.sin(d)
cosr = math.cos(r - r0)
sinr = math.sin(r - r0)
cbcl = cosd*cosr
cbsl = sind*sind0 + cosd*sinr*cosd0
sb = sind*cosd0 - cosd*sinr*sind0
b = math.asin(sb)
el = math.atan2(cbsl,cbcl) + el0
return (el, b)
def ecliptic(self, epoch=2000.):
"""Get the longitude and latitude in ecliptic coordinates corresponding to this position.
Note that the ecliptic coordinate system precesses with time. This function takes
an optional epoch parameter to indicate what epoch to use for both the input coordinates
and the output ecliptic coordinates.
:param epoch: The epoch to assume for the coordinates (default: 2000).
:returns: The longitude and latitude as a tuple (lambda, beta) in radians.
"""
import math
self._set_aux()
# From http://en.wikipedia.org/wiki/Axial_tilt
# ep = 23deg 26m +
# (21.406 + T*(-46.836769 + T*(-0.0001831 + T*(0.00200340 + T*(-0.576e-6 - 4.34e-8*T))))) s
ep = 84381.406 # = 23d 26m 21.406s in arcsec
if epoch != 2000.:
T = (epoch - 2000.) / 100.
ep += T*(-46.836769 + T*(-0.0001831 + T*(0.0020034 + T*(-0.576e-6 - T*4.34e-8))))
ep *= arcsec
# cf. http://en.wikipedia.org/wiki/Ecliptic_coordinate_system
# xe = x
# ye = cos(ep) y + sin(ep) z
# ze = -sin(ep) y + cos(ep) z
cos_ep = math.cos(ep)
sin_ep = math.sin(ep)
xe = self._x
ye = cos_ep*self._y + sin_ep*self._z
ze = -sin_ep*self._y + cos_ep*self._z
beta = math.asin(ze)
lam = math.atan2(ye, xe)
return (lam, beta)
def copy(self): return CelestialCoord(self._ra, self._dec)
def __repr__(self): return 'CelestialCoord('+repr(self._ra)+','+repr(self._dec)+')'
|
{"hexsha": "b22b2b71e2a6528482a735a0f37e83b4736e4ad2", "size": 28203, "ext": "py", "lang": "Python", "max_stars_repo_path": "treecorr/celestial.py", "max_stars_repo_name": "kstoreyf/TreeCorr", "max_stars_repo_head_hexsha": "f81b4b762c8672b9047ed045c300730cc2983eb0", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "treecorr/celestial.py", "max_issues_repo_name": "kstoreyf/TreeCorr", "max_issues_repo_head_hexsha": "f81b4b762c8672b9047ed045c300730cc2983eb0", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "treecorr/celestial.py", "max_forks_repo_name": "kstoreyf/TreeCorr", "max_forks_repo_head_hexsha": "f81b4b762c8672b9047ed045c300730cc2983eb0", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4447552448, "max_line_length": 100, "alphanum_fraction": 0.569762082, "include": true, "reason": "import numpy", "num_tokens": 8236}
|
@testset "Main" begin
@testset "Simulate with trace" begin
n = 3
ε = .1
reward = Distribution[Bernoulli(.5 + ((i == j) ? ε : 0.)) for i in 1:n, j in 1:n]
instance = UncorrelatedPerfectBipartiteMatching(reward, PerfectBipartiteMatchingMunkresSolver())
Random.seed!(1)
n_rounds = 2
s, t = simulate(instance, ThompsonSampling(), n_rounds, with_trace=true)
@test s.round == n_rounds
@test length(t.states) == n_rounds
@test length(t.arms) == n_rounds
@test length(t.reward) == n_rounds
@test t.states[n_rounds].round == s.round
@test t.states[n_rounds].regret == s.regret
@test t.states[n_rounds].reward == s.reward
@test t.states[n_rounds].arm_counts == s.arm_counts
@test t.states[n_rounds].arm_reward == s.arm_reward
@test t.states[n_rounds].arm_average_reward == s.arm_average_reward
end
end
|
{"hexsha": "08a2ff674b08f44929172efe982c2104c9e0e442", "size": 867, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/main.jl", "max_stars_repo_name": "dourouc05/-CombinatorialBandits.jl", "max_stars_repo_head_hexsha": "05470f136ac8832e6f8afc2ee07705ac3f769271", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-02-18T22:32:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T07:02:11.000Z", "max_issues_repo_path": "test/main.jl", "max_issues_repo_name": "dourouc05/-CombinatorialBandits.jl", "max_issues_repo_head_hexsha": "05470f136ac8832e6f8afc2ee07705ac3f769271", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-27T21:06:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-04T20:59:31.000Z", "max_forks_repo_path": "test/main.jl", "max_forks_repo_name": "dourouc05/-CombinatorialBandits.jl", "max_forks_repo_head_hexsha": "05470f136ac8832e6f8afc2ee07705ac3f769271", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-02-26T18:31:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T08:01:20.000Z", "avg_line_length": 34.68, "max_line_length": 100, "alphanum_fraction": 0.6735870819, "num_tokens": 272}
|
//
// Boost.Process
// ~~~~~~~~~~~~~
//
// Copyright (c) 2006, 2007 Julio M. Merino Vidal
// Copyright (c) 2008 Ilya Sokolov, Boris Schaeling
// Copyright (c) 2009 Boris Schaeling
// Copyright (c) 2010 Felipe Tanus, Boris Schaeling
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
/**
* \file boost/process.hpp
*
* Convenience header that includes all public Boost.Process header files.
*/
#ifndef BOOST_PROCESS_HPP
#define BOOST_PROCESS_HPP
#include <boost/process/all.hpp>
#endif
|
{"hexsha": "98e3c89ad9d9692502f2bdfd5d75a85166fbc8a6", "size": 625, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libraries/fc/vendor/boost_1.51/include/boost/process.hpp", "max_stars_repo_name": "techsharesteam/techshares", "max_stars_repo_head_hexsha": "47c58630a578204147057b7504e571e19546444f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 77.0, "max_stars_repo_stars_event_min_datetime": "2015-06-09T14:39:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T07:21:48.000Z", "max_issues_repo_path": "libraries/fc/vendor/boost_1.51/include/boost/process.hpp", "max_issues_repo_name": "CocosBlockchainExpedition/cocos-mainnet", "max_issues_repo_head_hexsha": "e0dc376c191f9b18a3a86f7705bfd0e0ca67cb5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2020-02-10T11:06:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-18T07:24:08.000Z", "max_forks_repo_path": "libraries/fc/vendor/boost_1.51/include/boost/process.hpp", "max_forks_repo_name": "CocosBlockchainExpedition/cocos-mainnet", "max_forks_repo_head_hexsha": "e0dc376c191f9b18a3a86f7705bfd0e0ca67cb5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25.0, "max_forks_repo_forks_event_min_datetime": "2016-03-18T17:36:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T15:09:05.000Z", "avg_line_length": 24.0384615385, "max_line_length": 80, "alphanum_fraction": 0.6832, "num_tokens": 164}
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module defines a widget designed to display data using the most adapted
view from the ones provided by silx.
"""
from __future__ import division
from silx.gui.data import DataViews
from silx.gui.data.DataViews import _normalizeData
import logging
from silx.gui import qt
from silx.gui.data.NumpyAxesSelector import NumpyAxesSelector
from silx.utils import deprecation
from silx.utils.property import classproperty
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "24/04/2018"
_logger = logging.getLogger(__name__)
class DataViewer(qt.QFrame):
"""Widget to display any kind of data
.. image:: img/DataViewer.png
The method :meth:`setData` allows to set any data to the widget. Mostly
`numpy.array` and `h5py.Dataset` are supported with adapted views. Other
data types are displayed using a text viewer.
A default view is automatically selected when data is set. The method
:meth:`setDisplayMode` allows to change the view. To have a graphical tool
to select the view, prefer using the widget :class:`DataViewerFrame`.
The dimension of the input data and the expected dimension of the selected
view can differ. For example you can display an image (2D) from 4D
data. In this case a :class:`NumpyAxesSelector` is displayed to allow the
user to select the axis mapping and the slicing of other axes.
.. code-block:: python
import numpy
data = numpy.random.rand(500,500)
viewer = DataViewer()
viewer.setData(data)
viewer.setVisible(True)
"""
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.EMPTY_MODE", since_version="0.7", skip_backtrace_count=2)
def EMPTY_MODE(self):
return DataViews.EMPTY_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.PLOT1D_MODE", since_version="0.7", skip_backtrace_count=2)
def PLOT1D_MODE(self):
return DataViews.PLOT1D_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.PLOT2D_MODE", since_version="0.7", skip_backtrace_count=2)
def PLOT2D_MODE(self):
return DataViews.PLOT2D_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.PLOT3D_MODE", since_version="0.7", skip_backtrace_count=2)
def PLOT3D_MODE(self):
return DataViews.PLOT3D_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.RAW_MODE", since_version="0.7", skip_backtrace_count=2)
def RAW_MODE(self):
return DataViews.RAW_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.RAW_ARRAY_MODE", since_version="0.7", skip_backtrace_count=2)
def RAW_ARRAY_MODE(self):
return DataViews.RAW_ARRAY_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.RAW_RECORD_MODE", since_version="0.7", skip_backtrace_count=2)
def RAW_RECORD_MODE(self):
return DataViews.RAW_RECORD_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.RAW_SCALAR_MODE", since_version="0.7", skip_backtrace_count=2)
def RAW_SCALAR_MODE(self):
return DataViews.RAW_SCALAR_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.STACK_MODE", since_version="0.7", skip_backtrace_count=2)
def STACK_MODE(self):
return DataViews.STACK_MODE
# TODO: Can be removed for silx 0.8
@classproperty
@deprecation.deprecated(replacement="DataViews.HDF5_MODE", since_version="0.7", skip_backtrace_count=2)
def HDF5_MODE(self):
return DataViews.HDF5_MODE
displayedViewChanged = qt.Signal(object)
"""Emitted when the displayed view changes"""
dataChanged = qt.Signal()
"""Emitted when the data changes"""
currentAvailableViewsChanged = qt.Signal()
"""Emitted when the current available views (which support the current
data) change"""
def __init__(self, parent=None):
"""Constructor
:param QWidget parent: The parent of the widget
"""
super(DataViewer, self).__init__(parent)
self.__stack = qt.QStackedWidget(self)
self.__numpySelection = NumpyAxesSelector(self)
self.__numpySelection.selectedAxisChanged.connect(self.__numpyAxisChanged)
self.__numpySelection.selectionChanged.connect(self.__numpySelectionChanged)
self.__numpySelection.customAxisChanged.connect(self.__numpyCustomAxisChanged)
self.setLayout(qt.QVBoxLayout(self))
self.layout().addWidget(self.__stack, 1)
group = qt.QGroupBox(self)
group.setLayout(qt.QVBoxLayout())
group.layout().addWidget(self.__numpySelection)
group.setTitle("Axis selection")
self.__axisSelection = group
self.layout().addWidget(self.__axisSelection)
self.__currentAvailableViews = []
self.__currentView = None
self.__data = None
self.__info = None
self.__useAxisSelection = False
self.__userSelectedView = None
self.__hooks = None
self.__views = []
self.__index = {}
"""store stack index for each views"""
self._initializeViews()
def _initializeViews(self):
"""Inisialize the available views"""
views = self.createDefaultViews(self.__stack)
self.__views = list(views)
self.setDisplayMode(DataViews.EMPTY_MODE)
def setGlobalHooks(self, hooks):
"""Set a data view hooks for all the views
:param DataViewHooks context: The hooks to use
"""
self.__hooks = hooks
for v in self.__views:
v.setHooks(hooks)
def createDefaultViews(self, parent=None):
"""Create and returns available views which can be displayed by default
by the data viewer. It is called internally by the widget. It can be
overwritten to provide a different set of viewers.
:param QWidget parent: QWidget parent of the views
:rtype: List[silx.gui.data.DataViews.DataView]
"""
viewClasses = [
DataViews._EmptyView,
DataViews._Hdf5View,
DataViews._NXdataView,
DataViews._Plot1dView,
DataViews._ImageView,
DataViews._Plot3dView,
DataViews._RawView,
DataViews._StackView,
]
views = []
for viewClass in viewClasses:
try:
view = viewClass(parent)
views.append(view)
except Exception:
_logger.warning("%s instantiation failed. View is ignored" % viewClass.__name__)
_logger.debug("Backtrace", exc_info=True)
return views
def clear(self):
"""Clear the widget"""
self.setData(None)
def normalizeData(self, data):
"""Returns a normalized data if the embed a numpy or a dataset.
Else returns the data."""
return _normalizeData(data)
def __getStackIndex(self, view):
"""Get the stack index containing the view.
:param silx.gui.data.DataViews.DataView view: The view
"""
if view not in self.__index:
widget = view.getWidget()
index = self.__stack.addWidget(widget)
self.__index[view] = index
else:
index = self.__index[view]
return index
def __clearCurrentView(self):
"""Clear the current selected view"""
view = self.__currentView
if view is not None:
view.clear()
def __numpyCustomAxisChanged(self, name, value):
view = self.__currentView
if view is not None:
view.setCustomAxisValue(name, value)
def __updateNumpySelectionAxis(self):
"""
Update the numpy-selector according to the needed axis names
"""
previous = self.__numpySelection.blockSignals(True)
self.__numpySelection.clear()
info = self._getInfo()
axisNames = self.__currentView.axesNames(self.__data, info)
if info.isArray and info.size != 0 and self.__data is not None and axisNames is not None:
self.__useAxisSelection = True
self.__numpySelection.setAxisNames(axisNames)
self.__numpySelection.setCustomAxis(self.__currentView.customAxisNames())
data = self.normalizeData(self.__data)
self.__numpySelection.setData(data)
if hasattr(data, "shape"):
isVisible = not (len(axisNames) == 1 and len(data.shape) == 1)
else:
isVisible = True
self.__axisSelection.setVisible(isVisible)
else:
self.__useAxisSelection = False
self.__axisSelection.setVisible(False)
self.__numpySelection.blockSignals(previous)
def __updateDataInView(self):
"""
Update the views using the current data
"""
if self.__useAxisSelection:
self.__displayedData = self.__numpySelection.selectedData()
else:
self.__displayedData = self.__data
qt.QTimer.singleShot(10, self.__setDataInView)
def __setDataInView(self):
self.__currentView.setData(self.__displayedData)
def setDisplayedView(self, view):
"""Set the displayed view.
Change the displayed view according to the view itself.
:param silx.gui.data.DataViews.DataView view: The DataView to use to display the data
"""
self.__userSelectedView = view
self._setDisplayedView(view)
def _setDisplayedView(self, view):
"""Internal set of the displayed view.
Change the displayed view according to the view itself.
:param silx.gui.data.DataViews.DataView view: The DataView to use to display the data
"""
if self.__currentView is view:
return
self.__clearCurrentView()
self.__currentView = view
self.__updateNumpySelectionAxis()
self.__updateDataInView()
stackIndex = self.__getStackIndex(self.__currentView)
if self.__currentView is not None:
self.__currentView.select()
self.__stack.setCurrentIndex(stackIndex)
self.displayedViewChanged.emit(view)
def getViewFromModeId(self, modeId):
"""Returns the first available view which have the requested modeId.
Return None if modeId does not correspond to an existing view.
:param int modeId: Requested mode id
:rtype: silx.gui.data.DataViews.DataView
"""
for view in self.__views:
if view.modeId() == modeId:
return view
return None
def setDisplayMode(self, modeId):
"""Set the displayed view using display mode.
Change the displayed view according to the requested mode.
:param int modeId: Display mode, one of
- `DataViews.EMPTY_MODE`: display nothing
- `DataViews.PLOT1D_MODE`: display the data as a curve
- `DataViews.IMAGE_MODE`: display the data as an image
- `DataViews.PLOT3D_MODE`: display the data as an isosurface
- `DataViews.RAW_MODE`: display the data as a table
- `DataViews.STACK_MODE`: display the data as a stack of images
- `DataViews.HDF5_MODE`: display the data as a table of HDF5 info
- `DataViews.NXDATA_MODE`: display the data as NXdata
"""
view = self.getViewFromModeId(modeId)
if view is None:
raise ValueError("Display mode %s is unknown" % modeId)
self._setDisplayedView(view)
def displayedView(self):
"""Returns the current displayed view.
:rtype: silx.gui.data.DataViews.DataView
"""
return self.__currentView
def addView(self, view):
"""Allow to add a view to the dataview.
If the current data support this view, it will be displayed.
:param DataView view: A dataview
"""
if self.__hooks is not None:
view.setHooks(self.__hooks)
self.__views.append(view)
# TODO It can be skipped if the view does not support the data
self.__updateAvailableViews()
def removeView(self, view):
"""Allow to remove a view which was available from the dataview.
If the view was displayed, the widget will be updated.
:param DataView view: A dataview
"""
self.__views.remove(view)
self.__stack.removeWidget(view.getWidget())
# invalidate the full index. It will be updated as expected
self.__index = {}
if self.__userSelectedView is view:
self.__userSelectedView = None
if view is self.__currentView:
self.__updateView()
else:
# TODO It can be skipped if the view is not part of the
# available views
self.__updateAvailableViews()
def __updateAvailableViews(self):
"""
Update available views from the current data.
"""
data = self.__data
info = self._getInfo()
# sort available views according to priority
priorities = [v.getDataPriority(data, info) for v in self.__views]
views = zip(priorities, self.__views)
views = filter(lambda t: t[0] > DataViews.DataView.UNSUPPORTED, views)
views = sorted(views, reverse=True)
# store available views
if len(views) == 0:
self.__setCurrentAvailableViews([])
available = []
else:
available = [v[1] for v in views]
self.__setCurrentAvailableViews(available)
def __updateView(self):
"""Display the data using the widget which fit the best"""
data = self.__data
# update available views for this data
self.__updateAvailableViews()
available = self.__currentAvailableViews
# display the view with the most priority (the default view)
view = self.getDefaultViewFromAvailableViews(data, available)
self.__clearCurrentView()
try:
self._setDisplayedView(view)
except Exception as e:
# in case there is a problem to read the data, try to use a safe
# view
view = self.getSafeViewFromAvailableViews(data, available)
self._setDisplayedView(view)
raise e
def getSafeViewFromAvailableViews(self, data, available):
"""Returns a view which is sure to display something without failing
on rendering.
:param object data: data which will be displayed
:param List[view] available: List of available views, from highest
priority to lowest.
:rtype: DataView
"""
hdf5View = self.getViewFromModeId(DataViews.HDF5_MODE)
if hdf5View in available:
return hdf5View
return self.getViewFromModeId(DataViews.EMPTY_MODE)
def getDefaultViewFromAvailableViews(self, data, available):
"""Returns the default view which will be used according to available
views.
:param object data: data which will be displayed
:param List[view] available: List of available views, from highest
priority to lowest.
:rtype: DataView
"""
if len(available) > 0:
# returns the view with the highest priority
if self.__userSelectedView in available:
return self.__userSelectedView
self.__userSelectedView = None
view = available[0]
else:
# else returns the empty view
view = self.getViewFromModeId(DataViews.EMPTY_MODE)
return view
def __setCurrentAvailableViews(self, availableViews):
"""Set the current available viewa
:param List[DataView] availableViews: Current available viewa
"""
self.__currentAvailableViews = availableViews
self.currentAvailableViewsChanged.emit()
def currentAvailableViews(self):
"""Returns the list of available views for the current data
:rtype: List[DataView]
"""
return self.__currentAvailableViews
def availableViews(self):
"""Returns the list of registered views
:rtype: List[DataView]
"""
return self.__views
def setData(self, data):
"""Set the data to view.
It mostly can be a h5py.Dataset or a numpy.ndarray. Other kind of
objects will be displayed as text rendering.
:param numpy.ndarray data: The data.
"""
self.__data = data
self._invalidateInfo()
self.__displayedData = None
self.__updateView()
self.__updateNumpySelectionAxis()
self.__updateDataInView()
self.dataChanged.emit()
def __numpyAxisChanged(self):
"""
Called when axis selection of the numpy-selector changed
"""
self.__clearCurrentView()
def __numpySelectionChanged(self):
"""
Called when data selection of the numpy-selector changed
"""
self.__updateDataInView()
def data(self):
"""Returns the data"""
return self.__data
def _invalidateInfo(self):
"""Invalidate DataInfo cache."""
self.__info = None
def _getInfo(self):
"""Returns the DataInfo of the current selected data.
This value is cached.
:rtype: DataInfo
"""
if self.__info is None:
self.__info = DataViews.DataInfo(self.__data)
return self.__info
def displayMode(self):
"""Returns the current display mode"""
return self.__currentView.modeId()
def replaceView(self, modeId, newView):
"""Replace one of the builtin data views with a custom view.
Return True in case of success, False in case of failure.
.. note::
This method must be called just after instantiation, before
the viewer is used.
:param int modeId: Unique mode ID identifying the DataView to
be replaced. One of:
- `DataViews.EMPTY_MODE`
- `DataViews.PLOT1D_MODE`
- `DataViews.IMAGE_MODE`
- `DataViews.PLOT2D_MODE`
- `DataViews.COMPLEX_IMAGE_MODE`
- `DataViews.PLOT3D_MODE`
- `DataViews.RAW_MODE`
- `DataViews.STACK_MODE`
- `DataViews.HDF5_MODE`
- `DataViews.NXDATA_MODE`
- `DataViews.NXDATA_INVALID_MODE`
- `DataViews.NXDATA_SCALAR_MODE`
- `DataViews.NXDATA_CURVE_MODE`
- `DataViews.NXDATA_XYVSCATTER_MODE`
- `DataViews.NXDATA_IMAGE_MODE`
- `DataViews.NXDATA_STACK_MODE`
:param DataViews.DataView newView: New data view
:return: True if replacement was successful, else False
"""
assert isinstance(newView, DataViews.DataView)
isReplaced = False
for idx, view in enumerate(self.__views):
if view.modeId() == modeId:
if self.__hooks is not None:
newView.setHooks(self.__hooks)
self.__views[idx] = newView
isReplaced = True
break
elif isinstance(view, DataViews.CompositeDataView):
isReplaced = view.replaceView(modeId, newView)
if isReplaced:
break
if isReplaced:
self.__updateAvailableViews()
return isReplaced
|
{"hexsha": "4db2863999400d13815344a9cbed90531de6e1af", "size": 21174, "ext": "py", "lang": "Python", "max_stars_repo_path": "silx/gui/data/DataViewer.py", "max_stars_repo_name": "vallsv/silx", "max_stars_repo_head_hexsha": "834bfe9272af99096faa360e1ad96291bf46a2ac", "max_stars_repo_licenses": ["CC0-1.0", "MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-08-03T15:51:42.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-03T15:51:42.000Z", "max_issues_repo_path": "silx/gui/data/DataViewer.py", "max_issues_repo_name": "vallsv/silx", "max_issues_repo_head_hexsha": "834bfe9272af99096faa360e1ad96291bf46a2ac", "max_issues_repo_licenses": ["CC0-1.0", "MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2016-10-19T09:27:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-24T13:26:56.000Z", "max_forks_repo_path": "silx/gui/data/DataViewer.py", "max_forks_repo_name": "payno/silx", "max_forks_repo_head_hexsha": "13301e61627f98fa837008250ac74a0627a7a560", "max_forks_repo_licenses": ["CC0-1.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4673366834, "max_line_length": 113, "alphanum_fraction": 0.6430055729, "include": true, "reason": "import numpy", "num_tokens": 4686}
|
import os
from tesserocr import PSM, PyTessBaseAPI
import cv2
import numpy as np
from PIL import Image
from typing import List, Optional
from constants import SIDES_DIR
from cv_helpers import contour_bounding_box_for_contour, extract_color, four_point_transform,\
get_center_for_contour, get_classifier_directories, get_contours, inflate_classifier,\
ls_debug, rotate_image_180, rotate_image_clockwise, rotate_image_counter_clockwise,\
show
SERIAL_IS_ZERO_CLASSIFIER_DIR = os.path.join(SIDES_DIR, "serial", "is_zero")
LABEL_TO_IS_ZERO = {
1: False,
2: True,
}
def get_serial_number_from_side(side):
# type: (np.array) -> Optional[List[str]]
text_threshold = _get_cleaned_up_text_subsection(side)
if text_threshold is None:
return None
letters = _get_letters(text_threshold)
text = _get_text_from_letters(letters)
return text
def _get_cleaned_up_text_subsection(im):
# type: (np.array) -> Optional[np.array]
red1 = extract_color(im, (0, 6), (200, 255), (100, 150))
red2 = extract_color(im, (176, 180), (200, 255), (100, 150))
red = red1 + red2
color = extract_color(im, 45 / 2, (20, 50), (200, 255))
# show(red, .25)
# show(yellow, .25)
# im = scale(im, .25)
# color = scale(color, .25)
red_contour = _get_box_for_largest_rect_contour(red)
text_contour = _get_box_for_largest_rect_contour(color)
if red_contour is None or text_contour is None:
# if red_contour is not None:
# print "RED"
# show(get_drawn_contours(red, [red_contour], True))
# show(color)
# if text_contour is not None:
# print "TEXT"
# show(get_drawn_contours(color, [text_contour], True))
# show(red)
# if not (red_contour is None and text_contour is None):
# show(red)
# show(color)
# assert red_contour is None and text_contour is None, \
# "Error parsing serial number, didn't find one of the text or its label."
return None
red_center = get_center_for_contour(red_contour)
text_center = get_center_for_contour(text_contour)
text_subsection = four_point_transform(im, text_contour)
# show(get_drawn_contours(color, text_contour, True), .25)
# show(get_drawn_contours(red, red_contour, True), .25)
# show(text_subsection)
height, width = im.shape[:2]
# Rotation logic from http://stackoverflow.com/a/5912847/3000133
if height > width:
# Determine if red is left or right of text
if text_center[0] < red_center[0]:
text_subsection = rotate_image_counter_clockwise(text_subsection)
else:
# Rotate clockwise 90
text_subsection = rotate_image_clockwise(text_subsection)
else:
if text_center[1] > red_center[1]:
# We're fine
pass
else:
# Rotate 180
text_subsection = rotate_image_180(text_subsection)
# show(get_drawn_contours(im, [text_contour], True))
# show(text_subsection)
text_subsection_gray = cv2.cvtColor(text_subsection, cv2.COLOR_BGR2GRAY)
# show(text_subsection_gray)
_, text_threshold = cv2.threshold(text_subsection_gray, 50, 255, 0)
text_threshold = 255 - text_threshold
# show(text_threshold)
height, width = text_threshold.shape[:2]
text_threshold[:height / 10, :] = 0
text_threshold[9 * height / 10:, :] = 0
return text_threshold
def _get_letters(text_threshold):
# type: (np.array) -> List[np.array]
height, width = text_threshold.shape[:2]
contours, _ = cv2.findContours(text_threshold.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
contours = [c for c in contours if cv2.boundingRect(c)[3] > height / 3]
centers = [get_center_for_contour(c) for c in contours]
centers = sorted(centers, key=lambda x: x[0])
distances = []
for idx in range(len(centers) - 1):
distances.append(centers[idx + 1][0] - centers[idx][0])
half_avg_dist = sum(distances) / (len(distances) * 2)
# contours = [contour_bounding_box_for_contour(c) for c in contours]
# show(get_drawn_contours(text_threshold, contours, True))
letters = []
for center in centers:
x = center[0] - half_avg_dist
y = 0
w = half_avg_dist * 2
h = height
contour = np.array([
[x, y],
[x + w, y],
[x + w, y + h],
[x, y + h],
]).reshape((4, 1, 2))
letters.append(four_point_transform(text_threshold, contour))
return letters
def _get_text_from_letters(letters):
# type: (List[np.array]) -> List[str]
is_zero_classifier = inflate_classifier(SERIAL_IS_ZERO_CLASSIFIER_DIR)
text = []
with PyTessBaseAPI() as api:
api.SetVariable("load_system_dawg", "F")
api.SetVariable("load_freq_dawg", "F")
api.SetVariable("load_punc_dawg", "F")
api.SetVariable("load_number_dawg", "F")
api.SetVariable("load_unambig_dawg", "F")
api.SetVariable("load_bigram_dawg", "F")
api.SetVariable("load_fixed_length_dawgs", "F")
api.SetVariable("classify_enable_learning", "F")
api.SetVariable("classify_enable_adaptive_matcher", "F")
api.SetVariable("segment_penalty_garbage", "F")
api.SetVariable("segment_penalty_dict_nonword", "F")
api.SetVariable("segment_penalty_dict_frequent_word", "F")
api.SetVariable("segment_penalty_dict_case_ok", "F")
api.SetVariable("segment_penalty_dict_case_bad", "F")
api.SetVariable("edges_use_new_outline_complexity", "T")
api.SetVariable("tessedit_char_whitelist", "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
api.SetPageSegMode(PSM.SINGLE_CHAR)
for letter in letters:
if LABEL_TO_IS_ZERO[is_zero_classifier(letter)]:
text.append("0")
continue
pil_image = Image.fromarray(letter)
api.SetImage(pil_image)
# show(np.array(api.GetThresholdedImage()))
text.append(api.GetUTF8Text().replace("\n\n", ""))
return text
def _get_box_for_largest_rect_contour(color):
# type: (np.array) -> Optional[np.array]
contours = get_contours(color)
# print [0.05 * cv2.arcLength(c, True) for c in contours]
# show(get_drawn_contours(color, contours, True), .25)
contours = [cv2.approxPolyDP(c, 0.05 * cv2.arcLength(c, True), True) for c in contours]
height, width = color.shape[:2]
total_area = height * width
contours = [c for c in contours if len(c) == 4 and cv2.contourArea(c) / total_area > 0.005]
if len(contours) == 0:
return None
contour = sorted(contours, key=cv2.contourArea)[-1]
contour = contour_bounding_box_for_contour(contour)
return contour
def _test():
vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
get_classifier_directories(SERIAL_IS_ZERO_CLASSIFIER_DIR)
letter_idx = 0
i = 0
group = 0
found_in_group = 0
# for path in ls(DATA_DIR + "module_classifier/unlabelled"):
for path in ls_debug(1294, 1297):
# if "-left.png" not in path:
# if "0036-edge-bottom.png" not in path:
# continue
# new_group = int(os.path.basename(path).split("-")[0])
# if new_group % 3 != 0:
# continue
# if new_group != group:
# if found_in_group != 1:
# "!!!! Found {} in group {} !!!!".format(found_in_group, group)
# found_in_group = 0
# group = new_group
i += 1
# if i == 1:
# continue
# if i > 1:
# break
print path
im = cv2.imread(path)
from sides import _extract_side
im = _extract_side(im, "-bottom" in path)
text = get_serial_number_from_side(im)
if text is None:
print "NO SERIAL NUMBER"
else:
print "-".join(text)
text_threshold = _get_cleaned_up_text_subsection(im)
if text_threshold is not None:
show(text_threshold)
# if text_threshold is None:
# continue
# found_in_group += 1
# letters = _get_letters(text_threshold)
# for letter in letters:
# cv2.imwrite(os.path.join(unlabelled_dir, "{:05}.png".format(letter_idx)), letter)
# letter_idx += 1
if __name__ == '__main__':
_test()
|
{"hexsha": "e001462526598e8938908ba58cb103101ba491c2", "size": 8516, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sides/serial_number_cv.py", "max_stars_repo_name": "FuegoFro/KeepTalkingBot", "max_stars_repo_head_hexsha": "c4a66750e253aae667f561a9863f163da0eeb68b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sides/serial_number_cv.py", "max_issues_repo_name": "FuegoFro/KeepTalkingBot", "max_issues_repo_head_hexsha": "c4a66750e253aae667f561a9863f163da0eeb68b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sides/serial_number_cv.py", "max_forks_repo_name": "FuegoFro/KeepTalkingBot", "max_forks_repo_head_hexsha": "c4a66750e253aae667f561a9863f163da0eeb68b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0847457627, "max_line_length": 95, "alphanum_fraction": 0.6345702208, "include": true, "reason": "import numpy", "num_tokens": 2240}
|
-- Student: Lucas Emanuel Resck Domingues
-- Exercise 1
section
parameters {A : Type} {R : A → A → Prop}
parameter (irreflR : irreflexive R)
parameter (transR : transitive R)
local infix < := R
def R' (a b : A) : Prop := R a b ∨ a = b
local infix ≤ := R'
theorem reflR' (a : A) : a ≤ a :=
show a < a ∨ a = a, from or.inr rfl
theorem transR' {a b c : A} (h1 : a ≤ b) (h2 : b ≤ c):
a ≤ c :=
have h3 : a < b ∨ a = b, from h1,
have h4 : b < c ∨ b = c, from h2,
show a < c ∨ a = c, from or.elim h3
(assume h5 : a < b,
or.elim h4
(assume h6 : b < c,
have h7 : a < c, from transR h5 h6,
or.inl h7)
(assume h6 : b = c,
have h7 : a < c, from eq.subst h6 h5,
or.inl h7))
(assume h5 : a = b,
or.elim h4
(assume h6 : b < c,
have h7 : a < c, from eq.substr h5 h6,
or.inl h7)
(assume h6 : b = c,
have h7 : a = c, from eq.substr h5 h6,
or.inr h7))
include irreflR
include transR
theorem antisymmR' {a b : A} (h1 : a ≤ b) (h2 : b ≤ a) :
a = b :=
begin
cases h1,
cases h2,
have h3, from transR h1 h2,
have h4, from irreflR a,
contradiction,
apply eq.symm,
assumption,
assumption
end
end
-- Exercise 2
section
parameters {A : Type} {R : A → A → Prop}
parameter (reflR : reflexive R)
parameter (transR : transitive R)
def S (a b : A) : Prop := R a b ∧ R b a
include transR
example : transitive S :=
begin
intros a b c h1 h2,
cases h1 with h3 h4,
cases h2 with h5 h6,
apply and.intro,
have h7, from transR h3 h5,
assumption,
exact transR h6 h4
end
end
-- Exercise 3
section
parameters {A : Type} {a b c : A} {R : A → A → Prop}
parameter (Rab : R a b)
parameter (Rbc : R b c)
parameter (nRac : ¬ R a c)
-- Prove one of the following two theorems:
theorem R_is_strict_partial_order :
irreflexive R ∧ transitive R :=
sorry
-- Because of nRac, R is not transitive, that is, it is not a strict partial order.
include Rab
include Rbc
include nRac
theorem R_is_not_strict_partial_order :
¬(irreflexive R ∧ transitive R) :=
begin
intro h1,
cases h1 with h2 h3,
have Rac, from h3 Rab Rbc,
contradiction
end
end
-- Exercise 4
section
open nat
example : 1 ≤ 4 :=
calc
1 ≤ 1 : le_refl 1
... ≤ 2 : le_succ 1
... ≤ 3 : le_succ 2
... ≤ 4 : le_succ 3
end
|
{"author": "lucasresck", "repo": "Discrete-Mathematics", "sha": "0a08081c5f393e5765259d3f1253c3a6dd043dac", "save_path": "github-repos/lean/lucasresck-Discrete-Mathematics", "path": "github-repos/lean/lucasresck-Discrete-Mathematics/Discrete-Mathematics-0a08081c5f393e5765259d3f1253c3a6dd043dac/Lists of exercises/List 7/cap14-LucasDomingues.lean"}
|
*DECK CARG
FUNCTION CARG (Z)
C***BEGIN PROLOGUE CARG
C***PURPOSE Compute the argument of a complex number.
C***LIBRARY SLATEC (FNLIB)
C***CATEGORY A4A
C***TYPE COMPLEX (CARG-C)
C***KEYWORDS ARGUMENT OF A COMPLEX NUMBER, ELEMENTARY FUNCTIONS, FNLIB
C***AUTHOR Fullerton, W., (LANL)
C***DESCRIPTION
C
C CARG(Z) calculates the argument of the complex number Z. Note
C that CARG returns a real result. If Z = X+iY, then CARG is ATAN2(Y,X),
C except when both X and Y are zero, in which case the result
C will be zero.
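C For example, CARG((0.,1.)) = PI/2 and CARG((-1.,0.)) = PI.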
C
C***REFERENCES (NONE)
C***ROUTINES CALLED (NONE)
C***REVISION HISTORY (YYMMDD)
C 770401 DATE WRITTEN
C 861211 REVISION DATE from Version 3.2
C 891214 Prologue converted to Version 4.0 format. (BAB)
C***END PROLOGUE CARG
COMPLEX Z
C***FIRST EXECUTABLE STATEMENT CARG
CARG = 0.0
IF (REAL(Z).NE.0. .OR. AIMAG(Z).NE.0.) CARG =
1 ATAN2 (AIMAG(Z), REAL(Z))
C
RETURN
END
|
{"hexsha": "f6e44aabc2242534dfcad53e33d0d6ef5618e49f", "size": 950, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "slatec/src/carg.f", "max_stars_repo_name": "andremirt/v_cond", "max_stars_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slatec/src/carg.f", "max_issues_repo_name": "andremirt/v_cond", "max_issues_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slatec/src/carg.f", "max_forks_repo_name": "andremirt/v_cond", "max_forks_repo_head_hexsha": "6b5c364d7cd4243686488b2bd4318be3927e07ea", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6875, "max_line_length": 72, "alphanum_fraction": 0.6726315789, "num_tokens": 332}
|
import os
import requests
from string import Template
import json
import boto3
import uuid
import shutil
import sys
from PIL import Image
from skimage import feature
from skimage.filters import gaussian
from fil_finder import FilFinder2D
from astropy import units as u
import numpy as np
import networkx as nx
# https://ladsweb.modaps.eosdis.nasa.gov/archive/allData/5000/VNP46A1/2020/150.json
baseUrl = "https://ladsweb.modaps.eosdis.nasa.gov/archive/allData/5000/VNP46A1/$year/$day"
translate_command = "gdal_translate -a_nodata 'nan' 'HDF5:$temp_file_name://HDFEOS/GRIDS/VNP_Grid_DNB/Data_Fields/DNB_At_Sensor_Radiance_500m' $jpg_name"
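# Example of the expanded command (hypothetical uuid-based names):
#   gdal_translate -a_nodata 'nan' \
#     'HDF5:temp<uuid>.h5://HDFEOS/GRIDS/VNP_Grid_DNB/Data_Fields/DNB_At_Sensor_Radiance_500m' \
#     <requested_file_name>.jpg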
s3 = boto3.client('s3')
db = boto3.client('dynamodb', region_name='eu-central-1')
USERAGENT = 'tis/download.py_1.0--' + sys.version.replace('\n','').replace('\r','')
def handler(event):
# Search for valid file names
url = Template(baseUrl).safe_substitute(event)
requested_file_name = ''
with requests.get(url + '.json') as response:
response.raise_for_status()
response_body = response.json()
found_file_descriptors = list(filter(
lambda descriptor: ('name' in descriptor) and (event['hv_coords'] in descriptor['name']),
response_body
))
requested_file_name = found_file_descriptors[0]['name']
# Prepare to download h5 file
token = os.environ['LAADS_TOKEN']
headers = {
'user-agent' : USERAGENT,
'Authorization' : 'Bearer ' + token
}
file_id = str(uuid.uuid4())
temp_file_name = 'temp' + file_id + '.h5'
# Download h5 file
with requests.get(url + '/' + requested_file_name, stream = True, headers = headers) as file_response:
file_response.raise_for_status()
file_response.raw.decode_content = True
with open(temp_file_name, 'wb') as temp_file:
shutil.copyfileobj(file_response.raw, temp_file)
# Prepare to translate h5 -> jpg
jpg_name = requested_file_name + '.jpg'
full_translate_command = Template(translate_command).safe_substitute(dict(
temp_file_name=temp_file_name,
jpg_name=jpg_name
))
print(full_translate_command)
# Translate h5 -> jpg
os.system(full_translate_command)
# Load image into numpy array, then preprocess with blur and edge detection
img = np.asarray(Image.open(jpg_name)) / 255
edges = feature.canny(img, sigma=2).astype(np.float64)
edges = gaussian(edges, sigma=3)
edges = edges.astype(np.float64)
# edges = edges[::1200, ::1200]
print('HAS NANS:')
print(np.isnan(edges).any())
# Load the edge detected image into a filament finder object
fil = FilFinder2D(edges)
# Preprocess the image by flattening to cap outliers and mask the image so that
# only the most significant structures are analyzed
fil.preprocess_image(flatten_percent=95)
print('CREATING MASK')
fil.create_mask(
adapt_thresh=10.0*u.pix,
smooth_size=1.0*u.pix,
size_thresh=500.0*u.pix**2,
fill_hole_size=1.0*u.pix**2
)
Image.fromarray((fil.mask * 255).astype(np.uint8)).convert('L').save('mask.jpg')
# Generate a rough skeleton overlayed on the image
print('CREATING MEDSKEL')
fil.medskel(verbose=True, save_png=True)
# Analyze the skeleton to generate a fine-grained graph and networkx graph
print('ANALYZING SKELETONS')
fil.analyze_skeletons(skel_thresh=1.0*u.pix)
print('ANALYZED SKELETONS')
skeleton_image_file_name = 'skeleton' + jpg_name
Image.fromarray((fil.skeleton * 255).astype(np.uint8)).convert('L').save(skeleton_image_file_name)
# Calculate the total clustering by summing the average clustering of each filament
total_clustering = 0.0
for filament in fil.filaments:
total_clustering += (nx.average_clustering(filament.graph))
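# nx.average_clustering returns a value in [0, 1] for each filament graph, so
# total_clustering grows both with the number of filaments and with how
# clustered each individual filament's graph is.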
# Upload jpg to S3 bucket
s3.upload_file(
jpg_name, 'space-apps-imagery', jpg_name,
ExtraArgs={'ACL':'public-read'}
)
s3.upload_file(
skeleton_image_file_name, 'space-apps-imagery', skeleton_image_file_name,
ExtraArgs={'ACL':'public-read'}
)
# Upload record to dynamodb
db.put_item(
TableName = 'space-apps-imagery',
Item = {
'label': { 'S': event['label'] },
'original_image': { 'S': jpg_name },
'skeleton_image': { 'S': skeleton_image_file_name },
'total_clustering': { 'N': str(total_clustering) },
'total_isolated_filaments': { 'N': str(len(fil.filaments)) },
'location_name': { 'S': event['location_name'] },
'day': { 'N': event['day'] },
'year': { 'N': event['year'] }
}
)
# Clean up
os.remove(temp_file_name)
os.remove(skeleton_image_file_name)
os.remove(jpg_name)
os.remove(jpg_name + '.aux.xml')
handler(dict(
label="new_york_before",
year="2019",
day="342",
hv_coords="h10v04",
location_name="an area around New York City"
))
handler(dict(
label="new_york_after",
year="2020",
day="078",
hv_coords="h10v04",
location_name="an area around New York City"
))
handler(dict(
label="wuhan_after",
year="2020",
day="083",
hv_coords="h29v05",
location_name="an area of Hubei province"
))
handler(dict(
label="wuhan_before",
year="2019",
day="361",
hv_coords="h29v05",
location_name="an area of Hubei province"
))
|
{"hexsha": "0fcb5344529f25b2eb5b1baf9130692de297d4e9", "size": 5102, "ext": "py", "lang": "Python", "max_stars_repo_path": "ingest-lamdba/src/lambda.py", "max_stars_repo_name": "Ricool06/TheGlowGetters", "max_stars_repo_head_hexsha": "e0f22073ec51671f0eb6f8ab49472f7981978039", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-02T11:49:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-15T09:50:01.000Z", "max_issues_repo_path": "ingest-lamdba/src/lambda.py", "max_issues_repo_name": "Ricool06/TheGlowGetters", "max_issues_repo_head_hexsha": "e0f22073ec51671f0eb6f8ab49472f7981978039", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-05-30T17:47:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T02:48:10.000Z", "max_forks_repo_path": "ingest-lamdba/src/lambda.py", "max_forks_repo_name": "Ricool06/TheGlowGetters", "max_forks_repo_head_hexsha": "e0f22073ec51671f0eb6f8ab49472f7981978039", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8362573099, "max_line_length": 153, "alphanum_fraction": 0.7058016464, "include": true, "reason": "import numpy,import networkx,from astropy", "num_tokens": 1417}
|
import numpy as np
def cls_type_to_id(cls_type):
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
def __init__(self, line, gt=False): # if read from ground truth label, the txt file looks different
if gt: # read from label file (annotation in velodyne coord sys)
bbox = line
# line is in this case the bbox itself
self.loc = np.array((float(bbox[0]), float(bbox[1]), float(bbox[2])), dtype=np.float32) # in velo coords
self.l, self.w, self.h = float(bbox[3]), float(bbox[4]), float(bbox[5])
self.ry = float(bbox[6]) # rotation angle around z-axis (instead of y as in camera coord.)
# self.dis_to_cam = np.linalg.norm(self.loc)
# According to KITTI definition
self.cls_type = 'Pedestrian'
self.cls_id = 2
beta = np.arctan2(self.loc[1], self.loc[0])
self.alpha = -np.sign(beta) * np.pi / 2 + beta + self.ry
self.score = -1.0
else: # read from detection file including more information
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.alpha = float(label[1])
# self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[5])
self.w = float(label[6])
self.l = float(label[7])
self.loc = np.array((float(label[2]), float(label[3]), float(label[4])), dtype=np.float32)
# self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[8])
self.score = float(label[9]) if label.__len__() == 10 else -1.0
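# A detection line is assumed to be space-separated as:
#   <cls_type> <alpha> <x> <y> <z> <h> <w> <l> <ry> [<score>]
# which matches the indices parsed in the `else` branch above.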
def to_str(self):
print_str = '%s %.3f pos: %s hwl: [%.3f %.3f %.3f] ry: %.3f' \
% (self.cls_type, self.alpha, self.loc, self.h, self.w, self.l,
self.ry)
return print_str
def to_det_format(self):
print_str = '%s %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f' \
% (self.cls_type, self.alpha, self.loc[0], self.loc[1], self.loc[2], self.h, self.w, self.l, self.ry)
return print_str
|
{"hexsha": "5eb0dd8f0b9476ec0a596f5ec7af8ea258374b82", "size": 2398, "ext": "py", "lang": "Python", "max_stars_repo_path": "pcdet/utils/object3d_jrdb.py", "max_stars_repo_name": "brudermueller/OpenPCDet", "max_stars_repo_head_hexsha": "dc6e16abb03363e5b307225e4c02297c231d56da", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-01T02:32:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T02:32:27.000Z", "max_issues_repo_path": "pcdet/utils/object3d_jrdb.py", "max_issues_repo_name": "brudermueller/OpenPCDet", "max_issues_repo_head_hexsha": "dc6e16abb03363e5b307225e4c02297c231d56da", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pcdet/utils/object3d_jrdb.py", "max_forks_repo_name": "brudermueller/OpenPCDet", "max_forks_repo_head_hexsha": "dc6e16abb03363e5b307225e4c02297c231d56da", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4074074074, "max_line_length": 123, "alphanum_fraction": 0.5521267723, "include": true, "reason": "import numpy", "num_tokens": 657}
|
import numpy as np
import math
from ..miniworld import MiniWorldEnv, Room
from ..entity import Box, ImageFrame
from gym import spaces
class Hallway(MiniWorldEnv):
"""
Environment in which the goal is to go to a red box
at the end of a hallway
"""
def __init__(self, length=10, stochastic=False, dense_reward=True, **kwargs):
assert length >= 5
self.length = length
self.stochastic = stochastic
self.dense_reward = dense_reward
super().__init__(
max_episode_steps=250,
domain_rand=True,
**kwargs
)
# Allow only movement actions (left/right/forward)
self.action_space = spaces.Discrete(self.actions.move_forward+1)
def _gen_world(self):
# Create a long rectangular room
room = self.add_rect_room(
min_x=-1, max_x=-1 + self.length,
min_z=-2, max_z=2
)
# Place the box at the end of the hallway
if not self.stochastic:
self.box = self.place_entity(
Box(color='red'),
pos=[room.max_x-2, 0, 1]
)
else:
self.box = self.place_entity(
Box(color='red'),
min_x=room.max_x - 1
)
self.entities.append(ImageFrame(
pos=[1, 1.35, 2],
dir=math.pi/2,
width=2.5,
tex_name='portraits/viktor_vasnetsov'
))
self.entities.append(ImageFrame(
pos=[1 + self.length / 2, 1.55, 2],
dir=math.pi/2,
width=2.5,
tex_name='portraits/robert_dampier'
))
self.entities.append(ImageFrame(
pos=[1, 1.35, -2],
dir=-math.pi/2,
width=2.5,
tex_name='portraits/nathaniel_jocelyn'
))
self.entities.append(ImageFrame(
pos=[1 + self.length / 2, 1.45, -2],
dir=-math.pi/2,
width=2.5,
tex_name='portraits/robert_leopold'
))
self.entities.append(ImageFrame(
pos=[-1+self.length, 1.35, 0],
dir=math.pi,
width=2,
tex_name='chars/ch_5'
))
self.entities.append(ImageFrame(
pos=[-1, 1.35, 0],
dir=0,
width=2,
tex_name='chars/ch_0'
))
# Place the agent a random distance away from the goal
self.place_agent(
dir=self.rand.float(-math.pi/4, math.pi/4),
max_x=room.max_x - 3
)
def step(self, action):
obs, reward, done, info = super().step(action)
# note that reward is always 0 here
if self.dense_reward:
dist = np.linalg.norm(self.agent.pos - self.box.pos)
max_dist = np.linalg.norm([self.min_z-self.max_z, self.max_x-self.min_x])
reward = -1 * dist / max_dist / self.max_episode_steps
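# dist/max_dist lies in [0, 1], so the shaping term above is at most
# 1/max_episode_steps in magnitude per step, bounding the total shaping
# penalty over an episode by -1.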
if self.near(self.box):
reward = 1
done = True
else:
if self.near(self.box):
reward += self._reward()
done = True
return obs, reward, done, info
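# Minimal usage sketch (hypothetical; assumes gym-miniworld is installed and
# this module is importable):
#   env = Hallway(length=10, stochastic=False, dense_reward=True)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.actions.move_forward)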
|
{"hexsha": "66c9a4391f84429e26357fcf0b7b5a1921a4a4b2", "size": 3221, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_miniworld/envs/hallway.py", "max_stars_repo_name": "PrieureDeSion/gym-miniworld", "max_stars_repo_head_hexsha": "896509fe59c18650ed8483e4df3394f098f07c3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-26T00:38:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-26T00:38:28.000Z", "max_issues_repo_path": "gym_miniworld/envs/hallway.py", "max_issues_repo_name": "PrieureDeSion/gym-miniworld", "max_issues_repo_head_hexsha": "896509fe59c18650ed8483e4df3394f098f07c3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym_miniworld/envs/hallway.py", "max_forks_repo_name": "PrieureDeSion/gym-miniworld", "max_forks_repo_head_hexsha": "896509fe59c18650ed8483e4df3394f098f07c3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5044247788, "max_line_length": 85, "alphanum_fraction": 0.5169202111, "include": true, "reason": "import numpy", "num_tokens": 785}
|
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
class Dataset(object):
def __init__(self, X, Y, T, n_candidate, n_safety, n_test, seed=None, include_T=False, include_intercept=True, standardize=False):
n_train = n_candidate + n_safety
n_samples = n_train + n_test
if include_T and not(T is None):
X = np.hstack((X, T[:,None]))
if standardize:
X = X - X.mean(0)[None,:]
X = X / X.std(0,ddof=1)[None,:]
if include_intercept:
X = np.hstack((X, np.ones(len(X))[:,None]))
# Store the base datasets
self._X = X
self._R = Y
self._T = T
self.n_classes = np.unique(Y).shape[0]
# Compute indices for the splits
self._inds = {
'all' : np.arange(0, n_samples),
'train' : np.arange(0, n_train),
'test' : np.arange(n_train, n_samples),
'opt' : np.arange(0, n_candidate),
'saf' : np.arange(n_candidate, n_train)
}
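# For example, with n_candidate=3, n_safety=2 and n_test=2 this yields
# opt=[0,1,2], saf=[3,4], train=[0,...,4] and test=[5,6].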
# Compute indices for T=0/T=1 splits
for k, inds in list(self._inds.items()):
self._inds['%s_0'%k] = inds[T[inds]==0]
self._inds['%s_1'%k] = inds[T[inds]==1]
# Store the default seed
self._seed = seed
@property
def X(self):
return self._X.copy()
@property
def Y(self):
return self._R.copy()
@property
def T(self):
return self._T.copy()
@property
def n_features(self):
return self._X.shape[1]
@property
def n_train(self):
return len(self._inds['train'])
@property
def n_test(self):
return len(self._inds['test'])
@property
def n_optimization(self):
return len(self._inds['opt'])
@property
def n_safety(self):
return len(self._inds['saf'])
def _get_splits(self, index_key, t=None):
if not(t is None):
index_key += ('_%d' % t)
inds = self._inds[index_key]
return self._X[inds], self._R[inds], self._T[inds]
def _get_splits_by_type(self, index_key, truncate=True, reorder=False, seed=None):
X0, Y0, _ = self._get_splits(index_key, t=0)
X1, Y1, _ = self._get_splits(index_key, t=1)
if reorder:
rnd = np.random.RandomState(self._seed if (seed is None) else seed)
I0 = rnd.choice(X0.shape[0], X0.shape[0], replace=False)
I1 = rnd.choice(X1.shape[0], X1.shape[0], replace=False)
X0, Y0 = X0[I0], Y0[I0]
X1, Y1 = X1[I1], Y1[I1]
if truncate:
k = min(X0.shape[0], X1.shape[0])
X0, Y0 = X0[:k], Y0[:k]
X1, Y1 = X1[:k], Y1[:k]
return X0, Y0, X1, Y1
def all_sets(self, t=None):
return self._get_splits('all', t=t)
def training_splits(self, t=None):
return self._get_splits('train', t=t)
def testing_splits(self, t=None):
return self._get_splits('test', t=t)
def optimization_splits(self, t=None):
return self._get_splits('opt', t=t)
def safety_splits(self, t=None):
return self._get_splits('saf', t=t)
def all_sets_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('all', truncate=truncate, reorder=reorder, seed=seed)
def training_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('train', truncate=truncate, reorder=reorder, seed=seed)
def testing_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('test', truncate=truncate, reorder=reorder, seed=seed)
def optimization_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('opt', truncate=truncate, reorder=reorder, seed=seed)
def safety_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('saf', truncate=truncate, reorder=reorder, seed=seed)
class RLDataset(object):
def __init__(self, S, A, R, T, P, n_actions, n_candidate, n_safety, n_test, min_reward, max_reward, gamma=1.0, seed=None, Rc_func=(lambda s,a,r,t: r)):
n_train = n_candidate + n_safety
n_samples = n_train + n_test
# Store the base datasets
T = T if not(T is None) else np.zeros(len(S))
self.gamma = gamma
self._S = S
self._A = A
self._R_raw = R
self._Rc = np.array([ Rc_func(s,a,r,t) for (s,a,r,t) in zip(S,A,R,T) ])
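# Rc_func lets callers redefine the per-trajectory return (the default lambda
# is the identity); both raw and corrected returns are kept so corrections can
# be toggled via enable_R_corrections()/disable_R_corrections().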
self._apply_corrections = True
self._T = T
self._P = P
self.n_actions = n_actions
self.max_reward = max_reward
self.min_reward = min_reward
# Compute indices for the splits
self._inds = {
'all' : np.arange(0, n_samples),
'train' : np.arange(0, n_train),
'test' : np.arange(n_train, n_samples),
'opt' : np.arange(0, n_candidate),
'saf' : np.arange(n_candidate, n_train)
}
# Compute indices for T=0/T=1 splits
for k, inds in list(self._inds.items()):
self._inds['%s_0'%k] = inds[T[inds]==0]
self._inds['%s_1'%k] = inds[T[inds]==1]
# Store the default seed
self._seed = seed
def enable_R_corrections(self):
self._apply_corrections = True
def disable_R_corrections(self):
self._apply_corrections = False
@property
def _R(self):
if self._apply_corrections:
return self._Rc
return self._R_raw
@property
def n_features(self):
return self._S[0].shape[1]
@property
def n_train(self):
return len(self._inds['train'])
@property
def n_test(self):
return len(self._inds['test'])
@property
def n_optimization(self):
return len(self._inds['opt'])
@property
def n_safety(self):
return len(self._inds['saf'])
def _get_splits(self, index_key, t=None, corrected_R=True):
if not(t is None):
index_key += ('_%d' % t)
inds = self._inds[index_key]
R = self._R[inds] if corrected_R else self._R_raw[inds]
return self._S[inds], self._A[inds], R, self._T[inds], self._P[inds]
def _get_splits_by_type(self, index_key, truncate=True, reorder=False, seed=None, corrected_R=True):
S0, A0, R0, _, P0 = self._get_splits(index_key, t=0, corrected_R=corrected_R)
S1, A1, R1, _, P1 = self._get_splits(index_key, t=1, corrected_R=corrected_R)
if reorder:
rnd = np.random.RandomState(self._seed if (seed is None) else seed)
I0 = rnd.choice(S0.shape[0], S0.shape[0], replace=False)
I1 = rnd.choice(S1.shape[0], S1.shape[0], replace=False)
S0, A0, R0, P0 = S0[I0], A0[I0], R0[I0], P0[I0]
S1, A1, R1, P1 = S1[I1], A1[I1], R1[I1], P1[I1]
if truncate:
k = min(S0.shape[0], S1.shape[0])
S0, A0, R0, P0 = S0[:k], A0[:k], R0[:k], P0[:k]
S1, A1, R1, P1 = S1[:k], A1[:k], R1[:k], P1[:k]
return S0, A0, R0, P0, S1, A1, R1, P1
@property
def S(self):
return self._S.copy()
@property
def A(self):
return self._A.copy()
@property
def R(self):
return self._R.copy()
@property
def T(self):
return self._T.copy()
@property
def P(self):
return self._P.copy()
def all_sets(self, t=None, corrected_R=True):
return self._get_splits('all', t=t, corrected_R=corrected_R)
def training_splits(self, t=None, corrected_R=True):
return self._get_splits('train', t=t, corrected_R=corrected_R)
def testing_splits(self, t=None, corrected_R=True):
return self._get_splits('test', t=t, corrected_R=corrected_R)
def optimization_splits(self, t=None, corrected_R=True):
return self._get_splits('opt', t=t, corrected_R=corrected_R)
def safety_splits(self, t=None, corrected_R=True):
return self._get_splits('saf', t=t, corrected_R=corrected_R)
def all_sets_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('all', truncate=truncate, reorder=reorder, seed=seed)
def training_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('train', truncate=truncate, reorder=reorder, seed=seed)
def testing_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('test', truncate=truncate, reorder=reorder, seed=seed)
def optimization_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('opt', truncate=truncate, reorder=reorder, seed=seed)
def safety_splits_by_type(self, truncate=True, reorder=False, seed=None):
return self._get_splits_by_type('saf', truncate=truncate, reorder=reorder, seed=seed)
class BanditDataset(RLDataset):
def __init__(self, S, A, R, n_actions, n_candidate, n_safety, n_test, min_reward, max_reward, seed=None, P=None, T=None, Rc_func=(lambda s,a,r,t: r)):
S = S[:,None,:] # Convert S into a sequence of length-1 trajectories
A = A[:,None]
# The GPs are trained lazily; make sure both attributes exist before any use.
self._proba_gp = None
self._return_gp = None
# Compute reference probabilities if they aren't provided
if P is None:
# train_proba_gp() reads self._S, self._A, self._T and self.n_actions, which
# are normally set by the superclass constructor below, so store them first.
self._S, self._A = S, A
self._T = T if T is not None else np.zeros(len(S))
self.n_actions = n_actions
self.train_proba_gp()
X = np.hstack((self._S[:,0,:],self._T[:,None]))
Ps = self._proba_gp.predict_proba(X)
P = np.array([ [Ps[i,a]] for i,a in enumerate(A[:,0]) ])
else:
P = P[:,None]
super().__init__(S, A, R, T, P, n_actions, n_candidate, n_safety, n_test, min_reward, max_reward, seed=seed, gamma=1.0, Rc_func=Rc_func)
def train_proba_gp(self, use_pct=0.1):
kernel = 1.0 * RBF(1.0)
self._proba_gp = GaussianProcessClassifier(kernel)
X = np.hstack((self._S[:,0,:],self._T[:,None]))
I = np.arange(X.shape[0])
np.random.shuffle(I)
n_attempts = 0
n_train = int(use_pct*X.shape[0])
while len(np.unique(self._A[I[:n_train]])) < self.n_actions and n_attempts < 100:
np.random.shuffle(I)
n_attempts += 1
if len(np.unique(self._A[I[:n_train]])) < self.n_actions and n_attempts == 100:
raise RuntimeError('Unable to train GP on a representative sample of actions')
I = I[:n_train]
self._proba_gp.fit(X[I],self._A[I][:,0])
def train_return_gp(self, returns, use_pct=0.1):
Y = np.zeros(len(self._R))
for i,r in enumerate(self._R):
Y[i] = np.where(r == returns)[0][0]
kernel = 1.0 * RBF(1.0)
self._return_gp = GaussianProcessClassifier(kernel)
X = np.hstack((self._S[:,0,:],self._T[:,None],self._A))
n_train = int(use_pct*X.shape[0])
I = np.arange(X.shape[0])
np.random.shuffle(I)
I = I[:n_train]
self._return_gp.fit(X[I],Y[I])
def _get_splits(self, index_key, t=None, flatten=False, corrected_R=True):
if not(t is None):
index_key += ('_%d' % t)
inds = self._inds[index_key]
S = self._S[inds][:,0,:] if flatten else self._S[inds]
A = self._A[inds][:,0] if flatten else self._A[inds]
P = self._P[inds][:,0] if flatten else self._P[inds]
R = self._R[inds] if corrected_R else self._R_raw[inds]
return S, A, R, self._T[inds], P
def all_sets(self, t=None, flatten=False, corrected_R=True):
return self._get_splits('all', t=t, flatten=flatten, corrected_R=corrected_R)
def training_splits(self, t=None, flatten=False, corrected_R=True):
return self._get_splits('train', t=t, flatten=flatten, corrected_R=corrected_R)
def testing_splits(self, t=None, flatten=False, corrected_R=True):
return self._get_splits('test', t=t, flatten=flatten, corrected_R=corrected_R)
def optimization_splits(self, t=None, flatten=False, corrected_R=True):
return self._get_splits('opt', t=t, flatten=flatten, corrected_R=corrected_R)
def safety_splits(self, t=None, flatten=False, corrected_R=True):
return self._get_splits('saf', t=t, flatten=flatten, corrected_R=corrected_R)
def get_simulator(self, use_classification=False):
# Train the predictor for reference probabilities if it hasn't been defined yet
if self._proba_gp is None:
self.train_proba_gp()
# Train the predictor for returns if it hasn't been defined yet
returns = np.unique(self._R)
if self._return_gp is None:
self.train_return_gp(returns)
def predict(S, A, T):
if S.ndim == 1:
X = np.hstack((S[None,:],np.array([[T]])))
P = self._proba_gp.predict(X).astype(int)
X = np.hstack((X, np.array([[A]])))
R = returns[self._return_gp.predict(X).astype(int)]
return S, A, R[0], T, P[0]
else:
X = np.hstack((S[:,0,:],T[:,None]))
P = self._proba_gp.predict(X).astype(int)[:,None]
X = np.hstack((X, A))
R = returns[self._return_gp.predict(X).astype(int)]
return S, A, R, T, P
return predict
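# Sketch of how the returned simulator might be used (hypothetical shapes,
# mirroring the dataset tuples):
#   sim = bandit_dataset.get_simulator()
#   S, A, R, T, P = sim(S, A, T)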
|
{"hexsha": "7778588d8cae9e8d700aada4a53c61caf0b1d776", "size": 11707, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/datasets/dataset.py", "max_stars_repo_name": "sgiguere/RobinHood-NeurIPS-2019-", "max_stars_repo_head_hexsha": "4bc3283b1cba13b1addf07f3fccf667f4c8f4a08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-09T07:32:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T12:21:28.000Z", "max_issues_repo_path": "Python/datasets/dataset.py", "max_issues_repo_name": "sgiguere/RobinHood-NeurIPS-2019-", "max_issues_repo_head_hexsha": "4bc3283b1cba13b1addf07f3fccf667f4c8f4a08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/datasets/dataset.py", "max_forks_repo_name": "sgiguere/RobinHood-NeurIPS-2019-", "max_forks_repo_head_hexsha": "4bc3283b1cba13b1addf07f3fccf667f4c8f4a08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-09T12:35:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T13:54:19.000Z", "avg_line_length": 35.9110429448, "max_line_length": 152, "alphanum_fraction": 0.6895874263, "include": true, "reason": "import numpy", "num_tokens": 3661}
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import plawt
# Simple data to display in various forms
x = np.linspace(0, 2 * np.pi, 400)
y = np.sin(x ** 2)
plt.close('all')
f, axarr = plt.subplots(2, 2)
f.suptitle('Matplotlib: Grid of subplots')
axarr[0, 0].plot(x, y)
axarr[0, 0].set_title('Axis [0,0]')
axarr[0, 0].text(0, 0.8, 'Text here', fontsize=8)
axarr[0, 1].plot(x, y)
axarr[0, 1].set_title('Axis [0,1]')
axarr[0, 1].text(0, 0.8, 'Text here')
axarr[0, 1].text(0, -1.0, 'Text here too')
axarr[1, 0].plot(x, y ** 2)
axarr[1, 0].set_title('Axis [1,0]')
axarr[1, 1].plot(x, y ** 2)
axarr[1, 1].set_title('Axis [1,1]')
f.subplots_adjust(hspace=0.3)
plt.savefig('matplotlibsubplotgrid.png')
##########
plawt.plot({
0: {'x': x, 'y': y},
'nrows':2, 'ncols': 2,
'title': 'Plawt: Grid of subplots',
'subtitle': 'Axis [0,0]',
'hspace': 0.3,
'text': {'x':0, 'y':0.8, 's': 'Text here', 'fontsize': 8},
'filename': 'plawtsubplotgrid.png'
}, {
0: {'x': x, 'y': y},
'subtitle': 'Axis [0,1]',
'text': [{'x':0, 'y':0.8, 's': 'Text here'},
{'x':0, 'y':-1.0, 's':'Text here too'}],
}, {
0: {'x': x, 'y': y**2},
'subtitle': 'Axis [1,0]'
}, {
0: {'x': x, 'y': y**2},
'subtitle': 'Axis [1,1]'
})
|
{"hexsha": "337c43ab08a86e08bf7dcca140778c381449db75", "size": 1228, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/subplotgridtest.py", "max_stars_repo_name": "mef51/plawt", "max_stars_repo_head_hexsha": "7d5dbcd64d97499eaf7896d2f6e50826d54d2e6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-01-27T04:01:39.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-03T13:59:24.000Z", "max_issues_repo_path": "examples/subplotgridtest.py", "max_issues_repo_name": "mef51/plawt", "max_issues_repo_head_hexsha": "7d5dbcd64d97499eaf7896d2f6e50826d54d2e6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-03-21T18:39:41.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-03T00:39:24.000Z", "max_forks_repo_path": "examples/subplotgridtest.py", "max_forks_repo_name": "mef51/plawt", "max_forks_repo_head_hexsha": "7d5dbcd64d97499eaf7896d2f6e50826d54d2e6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-01-30T08:06:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-15T14:49:15.000Z", "avg_line_length": 24.56, "max_line_length": 59, "alphanum_fraction": 0.5602605863, "include": true, "reason": "import numpy", "num_tokens": 540}
|
\section{Development}
When trying to follow a traditional software engineering approach in Haskell,
one soon runs into several dead ends: due to the different paradigm and style,
trying to apply some methods feels forced or unnatural. Traditionally, the
Haskell approach to formalizing a piece of code involves only mathematical
validation, using pure Mathematics for a formal design through equations that
can then be verified by a theorem prover such as Agda or Coq. Furthermore, the
problems only increase when we try to use UML: this methodology was clearly not
designed for anything other than Object-Oriented programming, and it is not
possible to create traditional diagrams for Haskell without running into
unforgivable simplifications, inaccuracies regarding the system or simply
nonsensical diagrams.\\
However, due to the academic character of this thesis, it is necessary to
provide some formal specification of the system using the methodologies learned
for software development, so we will try to embed the appropriate mathematical
concepts (as long as they are manageable and within the scope of the thesis),
as well as specification systems more familiar to Haskell projects, such as
type specifications. Please bear in mind that this is not usually the case in
Haskell projects, where the recommended guidelines only include formal
specification through mathematical definitions and theorem proving.\\
\subsection{Use Cases}
To start designing the library, we first have to formally specify the use cases
of its possible users. The agents involved in these specifications are simply
the end user and the library itself, which provides functions to the user on
demand.\\
\begin{table}[h]
\centering
\rowcolors{2}{gray90}{white}
\begin{tabular}{
!{\color{azulUC3M}\vline} p{4cm}
!{\color{azulUC3M}\vline} p{6cm}
!{\color{azulUC3M}\vline}}
\arrayrulecolor{azulUC3M}
\rowcolor{azulUC3M}
\multicolumn{1}{l !{\color{white}\vline}}
{\color{white}{\texttt{ID}}}
& \multicolumn{1}{l}
{\color{white}{\texttt{UC-XX}}} \\
\textit{Title} & \\
\textit{Actor} & \\
\textit{Preconditions} & \\
\textit{Description} & \\
\hline
\end{tabular}
\caption{Use Case template}
\label{uc-ex}
\end{table}
All the use cases that will be covered are specified in individual tables
following the template in Table \ref{uc-ex}. Each use case receives a unique
identifier of the format \texttt{UC-XX}, where \texttt{XX} is a two-digit
number. This unique identifier will be used later in different matrices to
trace requirements. The complete set of use cases for the library is stated
in the following pages.
\newpage
\begin{uc3m-table}{UC-01}{Use Case \texttt{UC-01}}
\textit{Title} & \textbf{Solve a search problem} \\
\textit{Actor} & User \\
\textit{Preconditions} &
The user has a compatible representation of the problem programmed in
Haskell, and the package \texttt{agis} is already installed in their system.
\\
\textit{Description} &
The user imports the module containing the algorithm that they want to use,
and includes the functionality in their code. Then, the user can run the code
to obtain the solution found.\\
\end{uc3m-table}
\begin{uc3m-table}{UC-02}{Use Case \texttt{UC-02}}
\textit{Title} & \textbf{Solve a search problem and get statistics} \\
\textit{Actor} & User \\
\textit{Preconditions} &
The user has a compatible representation of the problem programmed in
Haskell, and the package \texttt{agis} is already installed in their system.
\\
\textit{Description} &
The user imports the module containing the monadic version of the algorithm
that they want to use, and includes the functionality in their code. Then, the
user can run the code to obtain the solution found and several search
statistics.\\
\end{uc3m-table}
\begin{uc3m-table}{UC-03}{Use Case \texttt{UC-03}}
\textit{Title} & \textbf{Design a new search algorithm} \\
\textit{Actor} & User \\
\textit{Preconditions} &
The package \texttt{agis} is already installed in the user's system. \\
\textit{Description} &
The user can import the module containing several functions that they can use
to build their algorithm, as well as use a monadic version of those
functions to gain a better understanding of the algorithm.\\
\end{uc3m-table}
\begin{uc3m-table}{UC-04}{Use Case \texttt{UC-04}}
\textit{Title} & \textbf{Test a new algorithm} \\
\textit{Actor} & User \\
\textit{Preconditions} &
The package \texttt{agis} is already installed in the user's system, and the
user has already implemented their algorithm using the library types and
functions. \\
\textit{Description} &
The user can import several toy problems that the library offers to test the
algorithm and check its behavior or performance.\\
\end{uc3m-table}
\begin{uc3m-table}{UC-05}{Use Case \texttt{UC-05}}
\textit{Title} & \textbf{Compare a new algorithm} \\
\textit{Actor} & User \\
\textit{Preconditions} &
The package \texttt{agis} is already installed in the user's system, and the
user has already implemented their algorithm using the library types and
functions.\\
\textit{Description} &
The user can import more algorithms from the library and trivially apply one
or another to the same problem space, comparing their performance side by
side.\\
\end{uc3m-table}
\newpage
\subsection{Requirements}
Once all the use cases have been defined for the library, we can specify all
its functional and non-functional requirements. These requirements will be
formalized in a table similar to the one used for the use cases, which can be
seen in Table \ref{r-ex}.
\begin{table}[h]
\centering
\rowcolors{2}{gray90}{white}
\begin{tabular}{
!{\color{azulUC3M}\vline} p{4cm}
!{\color{azulUC3M}\vline} p{6cm}
!{\color{azulUC3M}\vline}}
\arrayrulecolor{azulUC3M}
\rowcolor{azulUC3M}
\multicolumn{1}{l !{\color{white}\vline}}
{\color{white}{\texttt{ID}}}
& \multicolumn{1}{l}
{\color{white}{\texttt{FR-XX || NFR-XX}}} \\
\textit{Title} & \\
\textit{Description} & \\
\textit{Priority} & \\
\textit{Use-case(s)} & \\
\hline
\end{tabular}
\caption{Requirement template}
\label{r-ex}
\end{table}
Every functional requirement (which specifies a functionality that has to be
offered by the library) will be tagged with a unique identifier of the format
\texttt{FR-XX}, where \texttt{XX} represents a two-digit number. On the other
hand, all the non-functional requirements (associated with preconditions or
other context necessary for the library to work correctly) will be identified
with tags of the form \texttt{NFR-XX}, where once again \texttt{XX} stands for
a two-digit number.
\newpage
\subsubsection{Functional Requirements}
\begin{uc3m-table}{FR-01}{Functional Requirement \texttt{FR-01}}
\textit{Title} & \textbf{Set of data types and classes} \\
\textit{Description} &
All end-user types have to be well documented and exposed to the user, due to
Haskell's strong typing system.\\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-01, UC-02, UC-03, UC-04, UC-05} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-02}{Functional Requirement \texttt{FR-02}}
\textit{Title} & \textbf{Pure general search method} \\
\textit{Description} &
Following the general search algorithm mentioned in \cite{rusell-2003-aima},
offer a function with similar capabilities while remaining purely functional.
As the one in the book, its behavior should depend on the order of the nodes
in the open list. Due to this open list of nodes, it will use non-linear
memory. \\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-03}{Functional Requirement \texttt{FR-03}}
\textit{Title} & \textbf{Pure linear-memory search method} \\
\textit{Description} &
Offer a method that is able to perform search using linear memory in a purely
functional way.\\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-04}{Functional Requirement \texttt{FR-04}}
\textit{Title} & \textbf{Pure data structure interface} \\
\textit{Description} &
A data structure interface to hold the nodes open list in general search has
to be provided. This interface has to be defined as a Haskell class and be
well documented, as well as exported for the user to implement their own data
structures.\\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-05}{Functional Requirement \texttt{FR-05}}
\textit{Title} & \textbf{Library of pure data structures} \\
\textit{Description} &
Using the aforementioned data structure interface, the library should also
provide a set of curated, purely functional data structures as the ones
exposed in \cite{okasaki-1999-purely}. The exact set of data structures is
left to decide once the implementation starts, choosing the more convenient
ones for both the library and the user. \\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-06}{Functional Requirement \texttt{FR-06}}
\textit{Title} & \textbf{Library of toy problems} \\
\textit{Description} &
The library should include a set of already implemented problems that enables
the users to solve them by using algorithms that use the library's data types
(whether the ones provided by the library or ones implemented by themselves).
\\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-04} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-07}{Functional Requirement \texttt{FR-07}}
\textit{Title} & \textbf{Pure Breadth-First Search} \\
\textit{Description} &
Offer a function able to perform a Breadth-First Search that returns the list
of all solutions found that way in the problem space. Breadth-First Search
should be implemented using general search (\texttt{FR-02}) and a First-In,
First-Out queue (\texttt{FR-05}). \\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-08}{Functional Requirement \texttt{FR-08}}
\textit{Title} & \textbf{Pure Depth-First Search} \\
\textit{Description} &
Offer a function able to perform a Depth-First Search that returns the list
of all solutions found that way in the problem space. Depth-First Search
should be implemented using linear-memory search (\texttt{FR-03}) or a
Last-In, First-Out stack (\texttt{FR-05}).\\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-09}{Functional Requirement \texttt{FR-09}}
\textit{Title} & \textbf{Pure Iterative Depth-First Search} \\
\textit{Description} &
Offer a function able to perform an Iterative Depth-First Search that returns
the list of all solutions found that way in the problem space. Iterative
Depth-First Search should be implemented using linear-memory search
iteratively (\texttt{FR-03}) or a Last-In, First-Out depth-bounded stack
(\texttt{FR-05}). \\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-10}{Functional Requirement \texttt{FR-10}}
\textit{Title} & \textbf{Pure Uniform-Cost Search} \\
\textit{Description} &
Offer a function able to perform a Uniform-Cost Search that returns
the list of all solutions found that way in the problem space. Uniform-Cost
Search should be implemented using linear-memory search (\texttt{FR-03}).\\
\textit{Priority} & High \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-11}{Functional Requirement \texttt{FR-11}}
\textit{Title} & \textbf{Pure Greedy Search} \\
\textit{Description} &
Offer a function able to perform a Greedy Search that returns the list of all
solutions found that way in the problem space. Greedy Search should be
implemented using linear-memory search (\texttt{FR-03})
expanding the nodes with a minimal-heuristic order.\\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-12}{Functional Requirement \texttt{FR-12}}
\textit{Title} & \textbf{Pure A* Search} \\
\textit{Description} &
Offer a function able to perform an A* Search that returns
the list of all solutions found that way in the problem space. A*
Search should be implemented using general search (\texttt{FR-02})
using a priority queue to expand the nodes (\texttt{FR-05}). \\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-13}{Functional Requirement \texttt{FR-13}}
\textit{Title} & \textbf{Pure Iterative-Deepening A* Search} \\
\textit{Description} &
Offer a function able to perform an IDA* Search that returns
the list of all solutions found that way in the problem space. IDA*
Search should be implemented using linear-memory search (\texttt{FR-03})
and expanding the nodes in cost-heuristic search order.\\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-01} \\
\end{uc3m-table}
%%%%%%%%%%%%%%%%%%%%%%%%%%% MONADIC FRs %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{uc3m-table}{FR-14}{Functional Requirement \texttt{FR-14}}
\textit{Title} & \textbf{Search monad} \\
\textit{Description} &
Design a monad that is able to collect run-time statistics in searches. The
search monad should be able to keep these logs:
\begin{enumerate}
\item Number of expanded nodes.
\item Number of enqueued nodes (if the search uses a data structure).
\item Maximum length of the queue (if the search uses a data structure).
\end{enumerate}\\
\textit{Priority} & Medium \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-15}{Functional Requirement \texttt{FR-15}}
\textit{Title} & \textbf{Monadic general search method} \\
\textit{Description} &
Following the general search algorithm mentioned in \cite{rusell-2003-aima},
offer a function with similar capabilities, using a monadic functional style:
using the search monad (\texttt{FR-14}) to collect run-time statistics of the
search. This search should use a data structure (\texttt{FR-18}) to modify
the behavior of the search.\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-16}{Functional Requirement \texttt{FR-16}}
\textit{Title} & \textbf{Monadic linear-memory search method} \\
\textit{Description} &
Offer a method that is able to perform a search while collecting run-time
statistics and using linear memory to do so. This method has to be visible
for the user to include in their own algorithms as well. \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-17}{Functional Requirement \texttt{FR-17}}
\textit{Title} & \textbf{Monadic data structure interface} \\
\textit{Description} &
Adapt the data structure interface defined for \texttt{FR-04} to handle
monadic algorithms. That way, the user can also implement the search monad
into their algorithms no matter if they use data structures.\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-18}{Functional Requirement \texttt{FR-18}}
\textit{Title} & \textbf{Library of monadic data structures} \\
\textit{Description} &
Adapt the data structures offered for pure algorithms (\texttt{FR-05}) to the
search monad. \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-03} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-19}{Functional Requirement \texttt{FR-19}}
\textit{Title} & \textbf{Monadic Breadth-First Search} \\
\textit{Description} &
Offer a function able to perform a Breadth-First Search that returns the
solution wrapped in the search monad with the statistics. Breadth-First Search
should be implemented using general search (\texttt{FR-15}) and a First-In,
First-Out queue (\texttt{FR-18}). \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-20}{Functional Requirement \texttt{FR-20}}
\textit{Title} & \textbf{Monadic Depth-First Search} \\
\textit{Description} &
Offer a function able to perform a Depth-First Search that returns the
solution wrapped in the search monad with the statistics. Depth-First Search
should be implemented using linear-memory search (\texttt{FR-16}) or a
Last-In, First-Out stack (\texttt{FR-18}).\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-21}{Functional Requirement \texttt{FR-21}}
\textit{Title} & \textbf{Monadic Iterative Depth-First Search} \\
\textit{Description} &
Offer a function able to perform an Iterative Depth-First Search that returns
the solution wrapped in the search monad with the statistics. Iterative
Depth-First Search should be implemented using linear-memory search
iteratively (\texttt{FR-16}) or a Last-In, First-Out depth-bounded stack
(\texttt{FR-18}). \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-22}{Functional Requirement \texttt{FR-22}}
\textit{Title} & \textbf{Monadic Uniform-Cost Search} \\
\textit{Description} &
Offer a function able to perform a Uniform-Cost Search that returns
the solution wrapped in the search monad with the statistics. Uniform-Cost
Search should be implemented using linear-memory search (\texttt{FR-16}).\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-23}{Functional Requirement \texttt{FR-23}}
\textit{Title} & \textbf{Monadic Greedy Search} \\
\textit{Description} &
Offer a function able to perform a Greedy Search that returns the solution
wrapped in the search monad with the statistics. Greedy Search should be
implemented using linear-memory search (\texttt{FR-16}) expanding the nodes
with a minimal-heuristic order.\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-24}{Functional Requirement \texttt{FR-24}}
\textit{Title} & \textbf{Monadic A* Search} \\
\textit{Description} &
Offer a function able to perform an A* Search that returns
the solution wrapped in the search monad with the statistics. A*
Search should be implemented using general search (\texttt{FR-15})
using a priority queue to expand the nodes (\texttt{FR-18}). \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-25}{Functional Requirement \texttt{FR-25}}
\textit{Title} & \textbf{Monadic Iterative-Deepening A* Search} \\
\textit{Description} &
Offer a function able to perform an IDA* Search that returns
the solution wrapped in the search monad with the statistics. IDA*
Search should be implemented using linear-memory search (\texttt{FR-16})
and expanding the nodes in cost-heuristic search order.\\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-02} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-26}{Functional Requirement \texttt{FR-26}}
\textit{Title} & \textbf{Pure Benchmark Interface} \\
\textit{Description} &
Offer a simple and clean interface to benchmark pure algorithms in batch and
obtain averaged timings of the results. \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-04, UC-05} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-27}{Functional Requirement \texttt{FR-27}}
\textit{Title} & \textbf{Monadic Benchmark Interface} \\
\textit{Description} &
Offer a simple and clean interface to benchmark monadic algorithms in batch
and obtain averaged timings of the results. \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-04, UC-05} \\
\end{uc3m-table}
\begin{uc3m-table}{FR-28}{Functional Requirement \texttt{FR-28}}
\textit{Title} & \textbf{Benchmarks Suite} \\
\textit{Description} &
Offer a set of benchmarks to test each algorithm, using the search domains
already provided by the library. \\
\textit{Priority} & Low \\
\textit{Use-case(s)} & \texttt{UC-04, UC-05} \\
\end{uc3m-table}
\newpage
\subsubsection{Non-Functional Requirements}
\begin{uc3m-table}{NFR-01}{Non-Functional Requirement \texttt{NFR-01}}
\textit{Title} & \textbf{Operating System} \\
\textit{Description} &
The library can be used on any operating system that is able to run GHC and
the Haskell platform, namely: Ubuntu, Arch Linux, FreeBSD, Gentoo Linux
(x86-64 and x86), Fedora, Debian and NixOS.\\
\textit{Priority} & High \\
\end{uc3m-table}
\begin{uc3m-table}{NFR-02}{Non-Functional Requirement \texttt{NFR-02}}
\textit{Title} & \textbf{Haskell version} \\
\textit{Description} &
The Haskell platform has to be installed in the system; it includes GHC as
well as the packaging, testing and documentation tools needed by the system.
The GHC version included must be 8.0 or higher.\\
\textit{Priority} & High \\
\end{uc3m-table}
\begin{uc3m-table}{NFR-03}{Non-Functional Requirement \texttt{NFR-03}}
\textit{Title} & \textbf{Cabal version} \\
\textit{Description} &
Cabal is a packaging tool, used for building and installing packages and
dependencies in Haskell. The Cabal version has to be 1.24.0.0 or later.\\
\textit{Priority} & High \\
\end{uc3m-table}
\begin{uc3m-table}{NFR-04}{Non-Functional Requirement \texttt{NFR-04}}
\textit{Title} & \textbf{Dependencies} \\
\textit{Description} &
All the dependencies needed for the library to properly work must be
installed in the system. These dependencies are controlled by Cabal and
require no further intervention by the user if Cabal is used for installing.
The complete list of dependencies can be found in the package configuration
file (\texttt{.cabal}).\\
\textit{Priority} & High \\
\end{uc3m-table}
\newpage
\subsubsection{Use Cases to Requirements Traceability Matrix}
To better understand the relationship between use cases and requirements, a
traceability matrix is provided. In this matrix, we can see which requirements
are supposed to satisfy which use cases. Please notice that only functional
requirements are considered for the traceability matrix (since non-functional
requirements are stated as preconditions for the whole system to work at all).\\
\begin{table}[!htbp]
\centering
\rowcolors{2}{gray90}{white}
\begin{tabular}{*{6}{| c} |}
\hline
& \texttt{UC-01} & \texttt{UC-02} & \texttt{UC-03} & \texttt{UC-04}
& \texttt{UC-05} \\
\hline
\texttt{FR-01} & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark \\
\texttt{FR-02} & & & \checkmark & & \\
\texttt{FR-03} & & & \checkmark & & \\
\texttt{FR-04} & & & \checkmark & & \\
\texttt{FR-05} & & & \checkmark & & \\
\texttt{FR-06} & & & & \checkmark & \\
\texttt{FR-07} & \checkmark & & & & \\
\texttt{FR-08} & \checkmark & & & & \\
\texttt{FR-09} & \checkmark & & & & \\
\texttt{FR-10} & \checkmark & & & & \\
\texttt{FR-11} & \checkmark & & & & \\
\texttt{FR-12} & \checkmark & & & & \\
\texttt{FR-13} & \checkmark & & & & \\
\texttt{FR-14} & & \checkmark & & & \\
\texttt{FR-15} & & & \checkmark & & \\
\texttt{FR-16} & & & \checkmark & & \\
\texttt{FR-17} & & & \checkmark & & \\
\texttt{FR-18} & & & \checkmark & & \\
\texttt{FR-19} & & \checkmark & & & \\
\texttt{FR-20} & & \checkmark & & & \\
\texttt{FR-21} & & \checkmark & & & \\
\texttt{FR-22} & & \checkmark & & & \\
\texttt{FR-23} & & \checkmark & & & \\
\texttt{FR-24} & & \checkmark & & & \\
\texttt{FR-25} & & \checkmark & & & \\
\texttt{FR-26} & & & & \checkmark & \checkmark \\
\texttt{FR-27} & & & & \checkmark & \checkmark \\
\texttt{FR-28} & & & & \checkmark & \checkmark \\
\hline
\end{tabular}
\caption{Use Case - Requirement traceability matrix}
\label{mat:uc-r}
\end{table}
It is possible to see in Table \ref{mat:uc-r} that there is full coverage
between requirements and use cases, so the formulation of the latter is
correct. The next step is to define a set of modules that correctly satisfies
the requirements. After designing these modules, a second traceability matrix
is needed to verify that all modules in the library are associated with
requirements.
\newpage
\subsubsection{Type Signature Graph}
To provide a visual representation of the library's design, one usually
defines a class diagram that maps all the interactions among the classes in a
given project. However, this artifact is strictly endemic to the
Object-Oriented Programming paradigm and cannot be applied to this project.
Given the size of the project, and the undeniable help of a visual
representation like the class diagram, this thesis includes a \textit{type
signature graph} instead. This representation presents the flow of defined
types and arguments among modules, showing the functions contained in each of
them. With this model we try to combine the insight provided by a graphical
representation with the formality implied by type signature design, widely
used as a model in Haskell development. \\
\begin{sidewaysfigure}
\centering
\includegraphics[width=0.9\textwidth]{img/type-graph-pure.png}
\vspace{1cm}
\caption{Type signature graph of the \texttt{Search.Pure} library}
\label{graph-pure}
\end{sidewaysfigure}
\begin{sidewaysfigure}
\centering
\includegraphics[width=0.9\textwidth]{img/type-graph-monadic.png}
\vspace{1cm}
\caption{Type signature graph of the \texttt{Search.Monadic} library}
\label{graph-monadic}
\end{sidewaysfigure}
These graphs (Figures \ref{graph-pure}, \ref{graph-monadic}) show the basic
control and import flow of the library, and are especially useful for
understanding implementation basics and dependencies among functions. As a
special remark, the specific functions provided by the \texttt{ToyProblem}
modules are omitted (due to their lack of interest for the rest of the
diagram), as well as the \texttt{Benchmark} tools. It is also important to
notice that every time the generic type \texttt{a} is mentioned, it has to
satisfy the constraints \texttt{Eq a} and \texttt{Hashable a}, since it is
the type of the states of the problem space. Such constraints were dropped
from the graph for readability reasons.\\
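For illustration, a problem state type only has to satisfy those two
constraints. A minimal sketch (the type \texttt{GridState} and its fields are
made up for this example) using GHC's \texttt{DeriveGeneric} extension could
be:\\
\begin{lstlisting}[style=haskell,
  caption=Hypothetical state type satisfying the library's constraints]
{-# LANGUAGE DeriveGeneric #-}
import GHC.Generics (Generic)
import Data.Hashable (Hashable)
-- A made-up state for a grid path-finding problem
data GridState = GridState { row :: Int, col :: Int }
  deriving (Eq, Generic)
-- The generic default implementation is enough for hashing
instance Hashable GridState
\end{lstlisting}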
We can see how both parts of the library share the same main structure: a
\texttt{Base} module that defines all the main types needed, a
\texttt{General} module that defines the more generalized functions to
perform the search, a set of \texttt{DataStructure} implementations, and two
modules, \texttt{Uninformed} and \texttt{Informed}, that implement a set of
(respectively) uninformed and heuristic algorithms using the algorithms and
data structures defined in the former modules. For example, the Breadth-First
Search algorithm (\texttt{bfs}) is defined using a \texttt{Queue} data
structure and the initial conditions of a \texttt{generalSearch}.\\
Even though both parts of the library look the same, a closer look reveals
that the monad designed to keep track of the search (\texttt{SearchM}) is
returned instead of a list; a fact that is not immediately visible because it
is encapsulated in the \texttt{Algorithm} type. Also, some noticeable
differences, like the creation of new functions to keep better track of the
search statistics, are present in the \texttt{Monadic} part of the library.\\
\newpage
\newcommand*\rot{\rotatebox{90}}
\begin{table}[!htbp]
\centering
\rowcolors{2}{gray90}{white}
\begin{tabular}{*{15}{| c} |}
\hline
& \rot{\texttt{Search.Pure.Base}}
& \rot{\texttt{Search.Pure.General}}
& \rot{\texttt{Search.Pure.DataStructure.*}}
& \rot{\texttt{Search.Pure.Uninformed}}
& \rot{\texttt{Search.Pure.Informed}}
& \rot{\texttt{Search.Pure.ToyProblem.*}}
& \rot{\texttt{Search.Pure.Benchmark}}
& \rot{\texttt{Search.Monadic.Base}}
& \rot{\texttt{Search.Monadic.General}}
& \rot{\texttt{Search.Monadic.DataStructure.*}}
& \rot{\texttt{Search.Monadic.Uninformed}}
& \rot{\texttt{Search.Monadic.Informed}}
& \rot{\texttt{Search.Monadic.ToyProblem.*}}
& \rot{\texttt{Search.Monadic.Benchmark}} \\
\hline
\texttt{FR-01}
& \checkmark & & & & &
& & \checkmark & & & &
& & \\
\texttt{FR-02}
& & \checkmark & & & &
& & & & & &
& & \\
\texttt{FR-03}
& & \checkmark & & & &
& & & & & &
& & \\
\texttt{FR-04}
& \checkmark & & & & &
& & & & & &
& & \\
\texttt{FR-05}
& & & \checkmark & & &
& & & & & &
& & \\
\texttt{FR-06}
& & & & & &
\checkmark & & & & &
& & \checkmark & \\
\texttt{FR-07}
& & & & \checkmark & &
& & & & & &
& & \\
\texttt{FR-08}
& & & & \checkmark & &
& & & & & &
& & \\
\texttt{FR-09}
& & & & \checkmark & &
& & & & & &
& & \\
\texttt{FR-10}
& & & & \checkmark & &
& & & & & &
& & \\
\texttt{FR-11}
& & & & & \checkmark &
& & & & & &
& & \\
\texttt{FR-12}
& & & & & \checkmark &
& & & & & &
& & \\
\texttt{FR-13}
& & & & & \checkmark &
& & & & & &
& & \\
\texttt{FR-14}
& & & & & &
& & \checkmark & & & &
& & \\
\texttt{FR-15}
& & & & & &
& & & \checkmark & & &
& & \\
\texttt{FR-16}
& & & & & &
& & & \checkmark & & &
& & \\
\texttt{FR-17}
& & & & & &
& & \checkmark & & & &
& & \\
\texttt{FR-18}
& & & & & &
& & & & \checkmark & &
& & \\
\texttt{FR-19}
& & & & & &
& & & & & \checkmark &
& & \\
\texttt{FR-20}
& & & & & &
& & & & & \checkmark &
& & \\
\texttt{FR-21}
& & & & & &
& & & & & \checkmark &
& & \\
\texttt{FR-22}
& & & & & &
& & & & & \checkmark &
& & \\
\texttt{FR-23}
& & & & & &
& & & & & &
\checkmark & & \\
\texttt{FR-24}
& & & & & &
& & & & & &
\checkmark & & \\
\texttt{FR-25}
& & & & & &
& & & & & &
\checkmark & & \\
\texttt{FR-26}
& & & & & &
& \checkmark & & & & &
& & \\
\texttt{FR-27}
& & & & & &
& & & & & &
& & \checkmark \\
\texttt{FR-28}
& & & & & &
& \checkmark & & & & &
& & \checkmark \\
\hline
\end{tabular}
\caption{Requirement - Module traceability matrix}
\label{mat:r-m}
\end{table}
\subsubsection{Modules to Requirements Traceability Matrix}
As mentioned in previous sections, it is also important to generate a second
traceability matrix that relates all the library modules with the
requirements they fulfill, to ensure that all the requirements are covered
and all modules have a purpose traceable to the initial use cases.\\
In Table \ref{mat:r-m} it is evident that all the requirements have been
covered in the design and that all modules in the design are actually needed
for the use cases required for the library. Once the design is verified to be
correct, it is possible to use the current design and artifacts to start
implementing the code and developing the library.\\
\newpage
\subsection{Implementation}
After verifying that the current design is correct and fulfills all the
necessary requirements, it is time to start implementing the functionalities in
Haskell. Since the code can be obscure at some points, this section details
the design decisions made during the implementation, along with the mechanics
used to realize them.\\
\subsubsection{General Search}
To perform searches like a Breadth-First Search, the nodes need to be
enqueued in a structure that determines which one is the next to be expanded
in the search. This general behavior is mainly modified by the nature of the
data structure used to store the nodes: depending on it, we can use a queue
(for Breadth-First Search), a stack (for Depth-First Search) or more complex
data structures such as a priority queue (for Uniform-Cost Search or A*
Search, for instance). This general search is described in
\cite{rusell-2003-aima} when search methods are first explained, and that
algorithm is the main inspiration for the final implementation of the
\texttt{generalSearch} method.\\
\begin{lstlisting}[style=haskell,
caption=Pure \texttt{generalSearch} implementation, label=pure:general]
generalSearch :: (DataStructure ds, Eq a, Hashable a)
=> ProblemSpace a -- ^ 'ProblemSpace' to be solved
-> Cost a -- ^ 'Cost' function to use
-> Heuristic a -- ^ 'Heuristic' function to use
-> ds a -- ^ 'DataStructure' that manages the node expansion
-> [Node a] -- ^ Returns the list of all final nodes (solutions)
generalSearch problem g h nodes
| isEmpty nodes = []
| getGoalF problem (getState n) = n : generalSearch problem g h nodes'
| otherwise = generalSearch problem g h ds'
where (nodes', n) = next nodes
expanded = expand n g h (getActions problem)
ds' = addList expanded nodes'
\end{lstlisting}
We can see in Listing \ref{pure:general} that the implementation is
straightforward: to find all the possible paths to a solution, the function
checks whether the node is final and appends it to the list of solutions, or
expands it and adds the resulting nodes (if any) to the data structure. This
could keep going until the data structure is completely empty but, thanks to
lazy evaluation, it only expands as many nodes as strictly necessary to find
the solutions the function is asked for.\\
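For instance, assuming the \texttt{bfs} algorithm of Listing \ref{pure:uninf}
and a problem value in scope, a caller can ask for just the first solution
and, thanks to laziness, only pay for the nodes needed to reach it (the
function name below is illustrative):\\
\begin{lstlisting}[style=haskell,
  caption=Lazily consuming the results of a search (illustrative)]
-- Forces only the work needed to produce the first solution, if any
firstSolution :: (Eq a, Hashable a) => ProblemSpace a -> [Node a]
firstSolution problem = take 1 (bfs problem)
\end{lstlisting}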
This way of relying on the data structure makes this implementation extremely
versatile, but also makes it extremely hard to prove formally: a possible way
to do so would be to prove that all the nodes are expanded in a given order
(i.e., that a Breadth-First Search expands the nodes in a FIFO order), but
that task depends on the data structure's implementation, so it is not
possible to prove the correctness of this implementation for all possible
inputs. To make up for this, the library includes all possible unit and
integration tests to check the correct behavior of both the search and the
default data structures provided with it.\\
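As a sketch of what such a test can look like, the following QuickCheck
property checks that a queue hands elements back in FIFO order through
\texttt{next}; the property name and the empty-queue constructor
\texttt{emptyQueue} are assumptions made for this example, and the actual
test suite may phrase it differently:\\
\begin{lstlisting}[style=haskell,
  caption=Sketch of a FIFO property test for the queue structure]
-- Drains a data structure into a list, in expansion order
drain :: DataStructure ds => ds a -> [a]
drain s | isEmpty s = []
        | otherwise = let (s', x) = next s in x : drain s'
-- Enqueueing a list into an empty queue and draining it
-- should preserve the original order
prop_queueFIFO :: [Int] -> Bool
prop_queueFIFO xs = drain (addList xs emptyQueue) == xs
\end{lstlisting}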
\subsubsection{Linear-Memory Search}
With the \texttt{generalSearch} method we could virtually mimic any
algorithm's node expansion order by adding those functionalities to the data
structure: a stack-like structure will expand nodes in a DFS fashion, adding
a depth limit will result in an IDFS-like behavior, and sorting the nodes in
a priority queue would result in an A* Search. However, stating that all
these behaviors would be valid implementations of the aforementioned
algorithms is a great underestimation of those designs: there are other
aspects of an algorithm to analyze beyond just the node expansion order
\cite{korf-2014-correct}.\\
An especially problematic aspect for our \texttt{generalSearch} method is the
memory usage: enqueueing the nodes in the data structure makes it use an
amount of memory that grows exponentially with the branching factor of the
search, and the fact that objects in Haskell are immutable only makes this
worse. This is acceptable when performing an A* Search (there is no other way
for us to expand the nodes in order than to sort all the pending nodes in a
queue), but trying to use a stack as the data structure to perform a
Depth-First Search will also result in exponential memory usage (all the
nodes get enqueued and passed along the structure). This is not acceptable,
and we need to implement a different method for it.\\
Fortunately, implementing this linear-memory method is as easy as relying on
one of Haskell's most natural mechanisms: recursive calls in a tree shape.
However, instead of a plain recursive descent, we can take advantage of one
of the most widespread abstractions of the language: \texttt{map}, which
applies a given function to each of the elements of a list. The main
advantage is that this abstraction lends itself to concurrency, so it may
result in a performance improvement under certain conditions. In our case, we
can define the method \texttt{depthSearch}, whose pure implementation is
shown in Listing \ref{pure:depth}.\\
\begin{lstlisting}[style=haskell,
caption=Pure \texttt{depthSearch} implementation, label=pure:depth]
depthSearch :: (Eq a, Hashable a)
=> ProblemSpace a -- ^ 'ProblemSpace' to be solved
-> Cost a -- ^ 'Cost' function to use
-> Heuristic a -- ^ 'Heuristic' function to use
-> NodeEvaluation a -- ^ 'NodeEvaluation' to sort the expanded nodes
-> Node a -- ^ Current 'Node' to be expanded
-> [Node a] -- ^ Returns the list of all final nodes (solutions)
depthSearch problem g h f node
| getGoalF problem (getState node) = return node
| otherwise = concatMap (depthSearch problem g h f) sorted
where sorted = sortBy (\n n' -> compare (f n) (f n')) expanded
expanded = expand node g h (getActions problem)
\end{lstlisting}
We can see that this implementation includes some interesting mechanics. The
function performs three basic tasks: check whether a given node contains a
final state and keep it, expand non-final nodes, and flatten the resulting
list. When flattening the list, the nodes that result in an empty list (that
is, that cannot be expanded) are pruned from the solutions list. By doing
this we do not rely on a sequential implementation and we can take advantage
of the paradigm: this mapping over nodes can actually be done concurrently
and can increase the performance compared to a simple recursive call. Also
notice that the function accepts a \texttt{NodeEvaluation} function to sort
the nodes. Although this is not explicitly used in any of the predefined
algorithms, it can be useful for a user who wants to control the order in
which nodes are expanded.\\
However, it is important to note that, because each node includes the
information of its own path (the key part of solving the $k$ path-finding
problem), the size of the nodes slightly increases as the search deepens. For
that reason, the memory usage is not strictly linear, but it still provides a
huge advantage over the previous method discussed. Since the extra memory is
not due to the number of allocated nodes but to their increasing size, this
solution will be accepted as good enough for this purpose.\\
This method can be easily modified to perform another useful and widespread
type of linear-memory search: a bounded search (with any bound given by the
user) that only expands the nodes whose \texttt{NodeEvaluation} is within
that bound. This function can be read in Listing \ref{pure:limit}.\\
\begin{lstlisting}[style=haskell,
caption=Pure \texttt{limitedDepthSearch} implementation, label=pure:limit]
limitedDepthSearch :: (Eq a, Hashable a)
=> ProblemSpace a -- ^ 'ProblemSpace' to be solved
-> Cost a -- ^ 'Cost' function to use
-> Heuristic a -- ^ 'Heuristic' function to use
-> NodeEvaluation a -- ^ 'NodeEvaluation' to sort the expanded nodes
-> Double -- ^ Limit to be imposed to the 'NodeEvaluation'
-> Node a -- ^ Current 'Node' to be expanded
-> [Node a] -- ^ Returns the list of all final nodes (solutions)
limitedDepthSearch problem g h f l node
| getGoalF problem (getState node) = return node
| otherwise = concatMap (limitedDepthSearch problem g h f l) sorted
where sorted = sortBy (\n n' -> compare (f n) (f n')) expanded
expanded = filter ((<l) . f) $ expand node g h (getActions problem)
\end{lstlisting}
With this function, we can implement different search algorithms such as
Iterative Depth-First Search or Iterative Deepening A* Search by performing a
bounded search and increasing the bound over and over.\\
\subsubsection{Branch and Bound Search}
The last well-known general search algorithm that we can provide is a search
in branch-and-bound fashion: perform a depth-first search and use the cost of
the first solution found as the current cost bound; then keep expanding
nodes, but limit the search by the bound found with the previous solution.
Each time a new solution is found, update the bound and keep searching until
there are no more nodes to expand. This algorithm also uses linear space to
perform the search, and a pseudocode for it can be found in
\cite{zhang-1995-bnb} along with all the necessary explanations.\\
However, this pseudocode is explicitly imperative and seems hard to recreate
using a purely functional approach. A naive solution would be to combine the
previously mentioned algorithms: perform a \texttt{depthSearch} and store the
cost, then perform a \texttt{limitedDepthSearch} with the current bound to
try to find a better one, and repeat this procedure until no new solution is
found. However, this will not use linear space: instead, it will perform $n$
searches that will, indeed, use linear memory each. This solution is
definitely not acceptable.\\
The best solution is to use a fold, and to use its accumulator to hold both
the list of solutions (in a Last In, First Out order) and the current best
cost. Using this fold and recursively calling the function on the nodes, we
can replicate the exact behavior of the algorithm in a purely functional way
in a single search. The code of this function is shown in Listing
\ref{pure:bnb}.\\
\begin{lstlisting}[style=haskell,
caption=Pure \texttt{depthBNB} implementation, label=pure:bnb]
depthBNB :: (Eq a, Hashable a)
=> ProblemSpace a -- ^ 'ProblemSpace' to be solved
-> Cost a -- ^ 'Cost' function to use
-> Heuristic a -- ^ 'Heuristic' function to use
-> NodeEvaluation a -- ^ 'NodeEvaluation' to sort and bound expanded nodes
-> Node a -- ^ Current 'Node' to be expanded
-> (Double, [Node a]) -- ^ The current bound and intermediate solutions
-- found (in ascending cost order)
-> (Double, [Node a]) -- ^ The final bound and all solutions found (in
-- ascending cost order)
depthBNB problem g h f n (l, sol) = foldl bnbStep (l, sol) sorted
where sorted = sortBy (\n n' -> compare (f n) (f n')) expanded
expanded = expand n g h (getActions problem)
bnbStep (bound, solutions) n
| f n >= bound = (bound, solutions)
| getGoalF problem (getState n) = (f n, n:solutions)
| otherwise = depthBNB problem g h f n (bound, solutions)
\end{lstlisting}
\subsubsection{Uninformed Algorithms}
Using the previously developed methods, we are able to create a set of
uninformed, purely functional algorithms ready to be used. These algorithms
are just partially-applied general functions on which some initial conditions
are imposed so that they behave as the algorithm is supposed to. Apart from
the convenience that this implies, another main reason to design the search
algorithms this way is that those general search methods are available for
users to build their own search algorithms: just like these partially-applied
functions result in one algorithm or another, the users of the framework have
absolute freedom to use them to create their own algorithms (an illustrative
sketch is given at the end of this section).\\
\begin{lstlisting}[style=haskell,
caption=Pure uninformed search algorithms, label=pure:uninf]
-- | 'bfs' runs a Breadth-First Search
bfs :: (Eq a, Hashable a) => Algorithm a
bfs problem = generalSearch problem noCost noHeuristic (startQueue $
getInitial problem)
-- | 'dfs' runs a Depth-First Search
dfs :: (Eq a, Hashable a) => Algorithm a
dfs problem = depthSearch problem noCost noHeuristic noSorting initial
where initial = newNode (getInitial problem)
-- | 'idfs' runs an Iterative Deepening Depth-First Search. The first argument,
-- a pair of 'Int's (@step@, @inf@), represents the main parameters of the
-- search: each new iteration the depth test is incremented by adding @step@ as
-- long as the new depth is lower than @inf@.
idfs :: (Eq a, Hashable a) => (Int, Int) -> Algorithm a
idfs (step, inf) p = stepIDFS 0
where stepIDFS d = if d < inf
then limitedDepthSearch p noCost noHeuristic depth (fromIntegral d) i
++ stepIDFS (d + step)
else []
i = newNode $ getInitial p
depth = fromIntegral . length . getPath
-- | 'ucs' runs a Uniform-Cost Search with a given cost function
ucs :: (Eq a, Hashable a) => Cost a -> Algorithm a
ucs g problem = generalSearch problem g noHeuristic sortedCost
where sortedCost = startPriorityQueue (getInitial problem) getCost
\end{lstlisting}
In Listing \ref{pure:uninf} we can see some of the best-known brute-force
search algorithms. Breadth-First Search is implemented using a
\texttt{generalSearch} call with a queue data structure (First In, First
Out). On the other hand, the Depth-First Search algorithm uses the simplest
instantiation of the \texttt{depthSearch} general method, starting at the
initial node of the problem space. Iterative Deepening Depth-First Search is
implemented by performing a \texttt{limitedDepthSearch} over and over,
increasing the depth bound by a given step until that bound is bigger than
some limit given by the user. All these algorithms have been implemented
following the description provided in \cite{rusell-2003-aima}. The last
algorithm in the set is Uniform-Cost Search, which performs a breadth-first,
cost-based search in the problem space. To do so, \texttt{generalSearch} is
used with a priority queue that sorts the nodes in ascending cost order. This
algorithm is usually called Dijkstra's, but this implementation is based on
the description given in \cite{rusell-2003-aima} and, due to the nature of
the framework and the language, it follows all the criteria exposed in
\cite{felner-2011-dijkstra} to be named Uniform-Cost Search: it works on
implicit graphs (as most \texttt{ProblemSpace} objects are exactly that),
whereas Dijkstra's would require all the nodes to be placed in the $Q$ list
up front.\\
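As an illustration of the freedom mentioned above, a user could combine the
general methods into a new algorithm in a couple of lines. The following
sketch (the function name \texttt{greedyDepth} is made up for this example)
defines a linear-memory search that expands the children of each node in
ascending heuristic order:\\
\begin{lstlisting}[style=haskell,
  caption=Illustrative user-defined algorithm built from the general methods]
-- A hypothetical greedy, linear-memory search
greedyDepth :: (Eq a, Hashable a) => Heuristic a -> Algorithm a
greedyDepth h problem = depthSearch problem noCost h getHeuristic initial
  where initial = newNode (getInitial problem)
\end{lstlisting}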
\subsubsection{Informed Algorithms}
In a similar way, we define a set of informed algorithms. The code for these
algorithms can be found in Listing \ref{pure:inf}.
\begin{lstlisting}[style=haskell, caption=Pure informed search algorithms,
label=pure:inf]
-- | 'hillClimbing' runs a Hill Climbing Heuristic Search.
hillClimbing :: (Eq a, Hashable a) => Cost a -> Heuristic a -> Algorithm a
hillClimbing g h problem = generalSearch problem g h sortedH
where sortedH = startPriorityQueue (getInitial problem) getHeuristic
-- | 'aStar' runs an A* Search.
aStar :: (Eq a, Hashable a) => Cost a -> Heuristic a -> Algorithm a
aStar g h problem = generalSearch problem g h sortedAStar
where sortedAStar = startPriorityQueue (getInitial problem) aStarF
aStarF n = getCost n + getHeuristic n
-- | 'idAStar' runs an Iterative-Deepening A* Search.
idAStar :: (Eq a, Hashable a) =>
(Double, Double) -> Cost a -> Heuristic a -> Algorithm a
idAStar (step, inf) g h p = stepIDAStar 0
where stepIDAStar l = if l < inf
then limitedDepthSearch p g h aStarF l initial
++ stepIDAStar (l + step)
else []
aStarF n = getCost n + getHeuristic n
initial = newNode (getInitial p)
-- | 'beam' runs a Beam Search of a given beam width.
beam :: (Eq a, Hashable a) =>
Int -> Cost a -> Heuristic a -> Algorithm a
beam w g h problem = generalSearch problem g h stack
where stack = startBeamStack (getInitial problem) w getHeuristic
-- | 'dfBNB' performs a Depth-First Branch & Bound Search. Due to the nature of
-- this algorithm, it does not return the list of all solutions in the problem
-- space: Instead, it returns all the solutions that it has found in ascending
-- cost order.
dfBNB :: (Eq a, Hashable a) => Cost a -> Heuristic a -> Algorithm a
dfBNB g h problem = snd $ depthBNB problem g h aStarF initial (inf, [])
where aStarF n = getCost n + getHeuristic n
initial = (newNode . getInitial) problem
inf = 1/0
\end{lstlisting}
The first of them is a greedy heuristic search algorithm, better known under
the name of Hill Climbing. This algorithm performs a \texttt{generalSearch}
with a priority queue that sorts the nodes in ascending order of heuristic
value. Arguably the most popular informed search algorithm, A* Search, is
implemented in a similar way: it runs a \texttt{generalSearch} using a
priority queue as well, but that priority queue uses the function $f(n) =
g(n) + h(n)$ to sort the nodes in ascending order, where $g$ is the cost
function and $h$ is the heuristic function \cite{dechter-1985-astar}. A more
lightweight but similar algorithm is Iterative-Deepening A*, which performs a
depth search using the aforementioned function under a certain bound that
increases on each new iteration. These searches use linear memory to find the
solution: where IDS performs a search within a certain depth bound, IDA*
performs a search within a certain $f$-spectrum. These algorithms follow the
specifications covered in \cite{rusell-2003-aima}.\\
Another algorithm offered is Beam Search, which uses a data structure called
\texttt{BeamStack} in the library: it only enqueues the best $k$ nodes of a
given expansion, according to a \texttt{NodeEvaluation} function. In this
case, $k$ is the width of the beam, which is decided by the user. The last
algorithm specified is Depth-First Branch and Bound, which just uses the
aforementioned \texttt{depthBNB} general method with $f$ as the evaluation
function to perform the Branch and Bound, as specified in
\cite{zhang-1995-bnb}.\\
\subsubsection{The Search Monad}
These are basically all the algorithms provided by the library: a user can
import them, model their problem as a \texttt{ProblemSpace}, define all the
necessary functions and get the list of solutions. However, if the user is
interested in a more academic or educational point of view, these functions
do not provide some important information: How many nodes are being expanded?
How many nodes were enqueued when we first found a solution?\\
Solving these questions using purely functional reasoning is a real
challenge. The easiest way to do it would be a global variable, outside the
scope of the recursion, that stores all the necessary statistics; however,
since this modifies the state of the machine, it is an illegal operation in
Haskell. If we try to use a simple counter we will not always obtain the
correct results: in a depth-first recursion, the counter will only be able to
count the expanded nodes along the solution's path. An accumulator would have
a performance cost, and most of the code would have to be rewritten to carry
it.\\
For those reasons, the best solution found for this problem was to use a
monad. A monad is a model that is able to ``bypass'' the explicit data flow
imposed by the pure functional paradigm. This model lets us embed a context
in a computation, which is helpful in different situations like error
handling, logging or (as in this specific case) counting operations and
execution traces \cite{wadler-1993-monad}. To do so, the \texttt{SearchM}
monad is included in the library. In this section, its design will be
explained as well as proven correct against the monad laws.\\
\begin{lstlisting}[style=haskell, caption=\texttt{Statistics} implementation,
label=monad:stat]
data Statistics = Statistics
{ nodesExpanded :: Integer -- ^ Number of nodes expanded through the whole
-- search
, nodesEnqueued :: Integer -- ^ Number of nodes that have been enqueued
-- through the whole search
, maxQueueLength :: Integer -- ^ Maximum number of nodes that have been
-- enqueued at the same time
} deriving Eq
mergeStats :: Statistics -> Statistics -> Statistics
mergeStats (Statistics exp enq maxL) (Statistics exp' enq' maxL') =
Statistics (exp + exp') (enq + enq') (max maxL maxL')
\end{lstlisting}
In Listing \ref{monad:stat} we can see the definition of a \texttt{Statistics}
object, which is a record that gathers information about the current search
being performed: the total number of expanded nodes, the total number of
nodes that were added to the data structure, and the maximum size of the data
structure throughout the execution. Also, the method \texttt{mergeStats} is
defined to combine two different statistics logs: it sums the expanded and
enqueued node counts and takes the maximum of the queue lengths.
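Note that \texttt{Statistics 0 0 0} acts as an identity for
\texttt{mergeStats} (this value plays the role of \texttt{s\_id} in the
proofs below), so the record forms a monoid. A possible instance, not
necessarily present in the library and written with the modern
\texttt{Semigroup}/\texttt{Monoid} hierarchy, would be:\\
\begin{lstlisting}[style=haskell,
  caption=Possible \texttt{Monoid} instance for \texttt{Statistics} (sketch)]
instance Semigroup Statistics where
  (<>) = mergeStats
instance Monoid Statistics where
  mempty = Statistics 0 0 0 -- identity: sums against 0 and max against 0
\end{lstlisting}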
\begin{lstlisting}[style=haskell, caption=\texttt{SearchM} implementation,
label=monad:searchm]
data SearchM a = SearchM { getNode :: a -- ^ Node with solution
, getStats :: Statistics -- ^ Complete search
-- statistics
}
instance Functor SearchM where
fmap f (SearchM n stats) = SearchM (f n) stats
instance Applicative SearchM where
pure n = SearchM n (Statistics 0 0 0)
SearchM f s <*> SearchM n s' = SearchM (f n) (mergeStats s s')
instance Monad SearchM where
return = pure
SearchM n s >>= f = let SearchM n' s' = f n in SearchM n' (mergeStats s s')
\end{lstlisting}
Using the \texttt{Statistics} record, making the monad is a much more
accessible task. In Listing \ref{monad:searchm} we can see how we define
\texttt{SearchM} as a wrapper for a \texttt{Node} object that carries all the
search statistics. After defining the object, it has to implement all the
classes necessary to be a monad: all monads are also functors (indeed
endofunctors, since they map a category to itself) with application and two
natural transformations \cite{street-1972-monads}. For that reason, we have
to define \texttt{fmap} (a generalized version of the \texttt{map} function
on lists) to make it a functor, and the functions \texttt{pure} (to embed
pure expressions in the functor) and \texttt{<*>} (to sequence computations
and combine their results) to define its applicative structure.\\
However, by including it as a monad (especially in a library, as a resource
available to the end user), we need to ensure that it behaves as such. The
way to do so is to formally prove that the monad fulfills the monad laws
\cite{wadler-1993-monad}. To prove this, we are going to use the same
notation used in Wadler's work: the bind operator, defined in Haskell as
\texttt{>{}>=}, will be written in this thesis as $\star$, and lambda clauses
will be introduced with the Greek letter $\lambda$. However, because they
offer a simpler approach and better intuition about the expected behavior,
the formulations of the laws will be the ones provided in
\cite{lipovaca-2011-learn}.
\begin{itemize}
\item \textbf{Left identity}: this law states that putting a pure value in
context and then binding it to a function has to be equivalent to applying
the function to the variable.
$$
unit\ x \star f = f(x)
$$
Which, in Haskell, is equivalent to the expression \texttt{return x >{}>= f ==
f x}, for a variable \texttt{x :: a} and a function \texttt{f :: a -> m b}.
The formal proof for this law is covered in Listing \ref{monad:leftid}.
\begin{lstlisting}[style = haskell, caption = Left identity formal proof for
\texttt{SearchM}, label = monad:leftid]
return x >>= f == f x
-- { definition of return, where s_id :: Statistics with the identity value }
SearchM x s_id >>= f == f x
-- { definition of >>= }
let SearchM x' s = f x in SearchM x' (mergeStats s_id s) == f x
-- { definition of f }
SearchM x' (mergeStats s_id s) == SearchM x' s
-- { applying identity of s_id }
SearchM x' s == SearchM x' s
-- { equality }
True
\end{lstlisting}
\item \textbf{Right identity}: this law states that binding a monadic value to
an empty context is no different from the initial monadic value itself.
$$
m \star (\lambda x \rightarrow unit\ x) = m
$$
This statement can be written in Haskell as
\texttt{SearchM x s >{}>= (\symbol{92}x -> return x) == SearchM x s}, for a
variable \texttt{x :: a}. The formal proof for this law is covered in Listing
\ref{monad:rightid}.
\begin{lstlisting}[style = haskell, caption = Right identity formal proof for
\texttt{SearchM}, label = monad:rightid]
SearchM x s >>= (\n -> return n) == SearchM x s
-- { definition of return, where s_id :: Statistics with identity value }
SearchM x s >>= (\n -> SearchM n s_id) == SearchM x s
-- { definition of >>= }
SearchM x (mergeStats s_id s) == SearchM x s
-- { applying identity of s_id }
SearchM x s == SearchM x s
-- { equality }
True
\end{lstlisting}
\item \textbf{Associativity}: This law states that the binding of different
monadic operations is independent of the nesting in which it occurs.
$$
(m \star f) \star g = m \star (\lambda x \rightarrow f(x) \star g)
$$
For a variable \texttt{x :: a}, and functions \texttt{f :: a -> m b} and
\texttt{g :: b -> m c}, the formal proof for this law is covered in Listing
\ref{monad:assoc}.
\begin{lstlisting}[style = haskell, caption = Associativity formal proof for
\texttt{SearchM}, label = monad:assoc]
(SearchM x s >>= f) >>= g == SearchM x s >>= (\n -> f n >>= g)
-- { left side - definition of >>= (1); let f x = SearchM x' s' }
SearchM x' (mergeStats s s') >>= g
-- { left side - definition of >>= (2); let g x' = SearchM x'' s'' }
SearchM x'' (mergeStats (mergeStats s s') s'')
-- { right side - definition of >>= inside the lambda (3) }
SearchM x s >>= (\n -> SearchM x'' (mergeStats s' s''))
-- { right side - definition of the outer >>= (4) }
SearchM x'' (mergeStats s (mergeStats s' s''))
-- { associativity of mergeStats }
SearchM x'' (mergeStats (mergeStats s s') s'') ==
  SearchM x'' (mergeStats (mergeStats s s') s'')
-- { equality }
True
\end{lstlisting}
In this proof, the associativity of \texttt{mergeStats} is used as an axiom.
It can be seen to hold in Listing \ref{monad:stat}, where the implementation
of this function is built from three associative operations: two sums and one
maximum.\\
\end{itemize}
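For completeness, a short equational sketch of that axiom (abbreviating the
three fields of the record as \texttt{e}, \texttt{q} and \texttt{l}) is the
following:\\
\begin{lstlisting}[style=haskell,
  caption=Sketch of the associativity of \texttt{mergeStats}]
mergeStats (mergeStats (Statistics e q l) (Statistics e' q' l'))
           (Statistics e'' q'' l'')
-- { definition of mergeStats, applied twice }
  == Statistics ((e + e') + e'') ((q + q') + q'') (max (max l l') l'')
-- { associativity of (+) and max }
  == Statistics (e + (e' + e'')) (q + (q' + q'')) (max l (max l' l''))
-- { definition of mergeStats, applied twice backwards }
  == mergeStats (Statistics e q l)
                (mergeStats (Statistics e' q' l') (Statistics e'' q'' l''))
\end{lstlisting}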
\subsubsection{Adapting the Library to the Search Monad}
Once the monad has been correctly implemented, the library can be replicated
using it. However, the monad encapsulates context in the computations in
order to keep track of the search statistics. For that reason, we cannot rely
on lazy evaluation as before: if we ask for the head of the list of solutions
together with the statistics, the statistics cover all the computations to be
performed over the whole list, which is an operation that may be endless. For
that reason, it was decided to drop the idea of returning a list of solutions
in the monadic part of the library: instead, the first solution found (if
any) is returned, as in a traditional search library.\\
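Assuming the monadic algorithms are exported under the same names as their
pure counterparts, a caller can unwrap both the solution and the statistics
as in the following illustrative snippet (the function name \texttt{report}
is made up for this example):\\
\begin{lstlisting}[style=haskell,
  caption=Illustrative use of a monadic algorithm]
-- Returns the first solution found (if any) and the number of
-- nodes expanded to reach it
report :: (Eq a, Hashable a) => ProblemSpace a -> (Maybe (Node a), Integer)
report problem = (getNode result, nodesExpanded (getStats result))
  where result = bfs problem -- the monadic version of bfs
\end{lstlisting}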
While the previous sections have covered the code in the \texttt{Search.Pure}
part of the library, this one covers the slight modifications applied to the
code in the \texttt{Search.Monadic} modules to hold the search statistics.
These modifications consist of wrapping the recursive calls inside
do-notation blocks, alongside some logging functions that update the state
embedded inside the monad.\\
These logging functions have a simple mechanism: they create a dummy
\texttt{SearchM} object that will be bound (\texttt{>{}>=}) to the next
object inside the do-block (do-blocks are indeed just syntactic sugar for a
series of operations bound together \cite{haskell-2010}). Taking into account
that we have defined the bind operation to replace the object inside the
monad and to merge both objects' statistics, this produces the desired
outcome.\\
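The library's exact definitions are not reproduced here, but under the bind
operation defined above a plausible shape for these helpers is the following
sketch, where the statistics deltas shown are assumptions
(\texttt{logLength} would similarly record the current size of the structure
as a candidate for \texttt{maxQueueLength}):\\
\begin{lstlisting}[style=haskell,
  caption=Possible shape of the logging helpers (sketch)]
-- Each helper wraps a unit value together with a statistics delta,
-- which the bind operation then merges into the running totals
logExpanded :: SearchM ()
logExpanded = SearchM () (Statistics 1 0 0)
logEnqueued :: Int -> SearchM ()
logEnqueued n = SearchM () (Statistics 0 (fromIntegral n) 0)
\end{lstlisting}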
Taking a closer look at the rewritten \texttt{generalSearch} function in
Listing \ref{monad:general}, we can see that the main control flow of the
function is the same: check whether there are no more nodes to expand,
whether the node is final, or whether the node has to be expanded again.
However, the function now has a do-block that uses the logging functions to
keep track of the statistics as it works. We can see how adapting the library
to the monad is an affordable task compared to rewriting the functions using
accumulators, although there are some aspects that require attention to work
properly.\\
\begin{lstlisting}[style=haskell,
caption=Monadic \texttt{generalSearch} implementation, label=monad:general]
generalSearch :: (DataStructure ds, Eq a, Hashable a)
=> ProblemSpace a -- ^ 'ProblemSpace' to be solved
-> Cost a -- ^ 'Cost' function to use
-> Heuristic a -- ^ 'Heuristic' function to use
-> ds a -- ^ 'DataStructure' that manages the node expansion
-> SearchM (Maybe (Node a)) -- ^ Returns the solution obtained
generalSearch problem g h nodes
| isEmpty nodes = do
logExpanded
return Nothing
| getGoalF problem (getState n) = do
logExpanded
return (Just n)
| otherwise = do
logExpanded
logEnqueued (length expanded)
logLength ds'
generalSearch problem g h ds'
where (nodes', n) = next nodes
expanded = expand n g h (getActions problem)
ds' = addList expanded nodes'
\end{lstlisting}
\newpage
%%% Local Variables:
%%% TeX-master: "tfg"
%%% End:
|
{"hexsha": "fd8ad20b5db79530fd413c7e80afaf406137fd0c", "size": 66787, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis/4-development.tex", "max_stars_repo_name": "DiegoVicen/bachelor-thesis", "max_stars_repo_head_hexsha": "feb1657ef4082402434d5e6519ec57eac85ac7a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-25T13:07:49.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-02T18:49:06.000Z", "max_issues_repo_path": "thesis/4-development.tex", "max_issues_repo_name": "DiegoVicen/bachelor-thesis", "max_issues_repo_head_hexsha": "feb1657ef4082402434d5e6519ec57eac85ac7a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thesis/4-development.tex", "max_forks_repo_name": "DiegoVicen/bachelor-thesis", "max_forks_repo_head_hexsha": "feb1657ef4082402434d5e6519ec57eac85ac7a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8023826209, "max_line_length": 87, "alphanum_fraction": 0.6571638193, "num_tokens": 17258}
|
#!/usr/bin/env python3
# coding: utf-8
import argparse
SD_FACTOR1 = 2.5
SD_FACTOR2 = 4
def main(args):
import os
import numpy as np
import allel
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def read_vcfs(vcf_list, fields):
callset = {f: np.array([],dtype='float32') for f in fields }
for vcf in vcf_list:
current_callset = allel.read_vcf(vcf, fields=fields)
for f in fields:
callset[f] = np.append(callset[f], current_callset[f])
return callset
# Helper functions
def bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
return gauss(x,mu1,sigma1,A1)+gauss(x,mu2,sigma2,A2)
def gauss(x,mu,sigma,A):
return A*np.exp(-(x-mu)**2/2/sigma**2)
def plot_dist_and_cutoff(field, cutoff, plot_prefix,title):
out = plt.hist(callset['variants/{}'.format(field)], bins=100, label=field)
plt.axvline(cutoff, color='black', linestyle=':', label='Cut-off')
plt.title(title)
plt.savefig('{}.{}.png'.format(plot_prefix,field))
plt.close()
def get_cutoff_stddev(field, sd_factor ,minus=False):
if minus:
cutoff = np.round(np.nanmean(callset['variants/{}'.format(field)]) - \
np.sqrt(np.nanmean( (np.nanmean(callset['variants/{}'.format(field)]) - \
callset['variants/{}'.format(field)])**2)) * sd_factor)
else:
cutoff = np.round(np.nanmean(callset['variants/{}'.format(field)]) + \
np.sqrt(np.nanmean( (np.nanmean(callset['variants/{}'.format(field)]) - \
callset['variants/{}'.format(field)])**2)) * sd_factor)
print(field + ' ' + str(cutoff))
return cutoff
callset = read_vcfs(args.vcf, fields=['variants/QD', 'variants/FS', \
'variants/MQ', 'variants/MQRankSum', \
'variants/ReadPosRankSum'])
# QD
    # Histogram the QD values and fit a two-Gaussian mixture to the counts
    out = plt.hist(callset['variants/QD'], bins=100, label='QD')
    x = out[1]
    y = out[0]
    # Convert bin edges to bin centers
    x = [x[i] + (x[i + 1] - x[i]) / 2 for i in range(0, len(x) - 1)]
    # No initial guess (p0) is given, so the fit relies on curve_fit's
    # default starting parameters
    params, cov = curve_fit(bimodal, x, y)
plt.plot(x,bimodal(x,*params),color='red',lw=3,label='Fit')
mean_1 = params[0]
sd_1 = abs(params[1])
mean_2 = params[3]
sd_2 = abs(params[4])
print("mean_1: {}, sd_1: {}, mean_2: {}, sd_2: {}".format(mean_1, sd_1, mean_2, sd_2))
if mean_1 >= mean_2:
cutoff = mean_1 - SD_FACTOR1 * sd_1
else:
cutoff = mean_2 - SD_FACTOR1 * sd_2
cutoff = np.floor(cutoff)
plt.axvline(cutoff, color='black', linestyle=':', label='Cut-off')
plt.title('QD < {QD:.1f}'.format(QD=cutoff))
plt.savefig('{}.QD.png'.format(args.plot))
plt.close()
QD = cutoff
# FS
FS = get_cutoff_stddev('FS', SD_FACTOR1)
plot_dist_and_cutoff('FS', FS, args.plot, title='FS > {FS:.1f}'.format(FS=FS))
# MQ
MQ=50.0
# MQRankSum
MQRankSum = np.nanquantile(callset['variants/MQRankSum'], 0.1)
plot_dist_and_cutoff('MQRankSum', MQRankSum, args.plot, title='MQRankSum < {MQRankSum:.1f}'.format(MQRankSum=MQRankSum))
# ReadPosRankSum
ReadPosRankSum = get_cutoff_stddev('ReadPosRankSum', SD_FACTOR2, minus=True)
plot_dist_and_cutoff('ReadPosRankSum', ReadPosRankSum, args.plot, title='ReadPosRankSum < {ReadPosRankSum:.1f}'.format(ReadPosRankSum=ReadPosRankSum))
    # Create the filter expression string (avoid shadowing the builtin 'filter')
    filter_expr = "QD < {QD:.1f} || FS > {FS:.1f} || MQ < {MQ:.1f} || MQRankSum < {MQRankSum:.1f} || ReadPosRankSum < {ReadPosRankSum:.1f}".format(QD=QD, FS=FS, MQ=MQ, MQRankSum=MQRankSum, ReadPosRankSum=ReadPosRankSum)
    with open(args.filter_file, 'w') as out_file:
        out_file.write(filter_expr)
    print(filter_expr)
# Standard boilerplate to call the main() function to begin
# the program.
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Estimate filtering parameters from VCF file")
parser.add_argument(
"-vcf",
"--vcf",
help="VCF file. All variants (PASS or not PASS), are used! Multiple --vcf arguments are possible",
required=True, action="append")
parser.add_argument(
"-f",
"--filter_file",
help="Output file with the filter line",
required=True)
parser.add_argument(
"-p",
"--plot",
help="Plot file prefix",
default=None,
required=True
)
args = parser.parse_args()
main(args)
|
{"hexsha": "d9e235b01dce536bc14fa34e7c4794e09dfa533d", "size": 4696, "ext": "py", "lang": "Python", "max_stars_repo_path": "workflow/scripts/estimate_parental_filtering_params.py", "max_stars_repo_name": "ibebio/vc-gatk4-snakemake", "max_stars_repo_head_hexsha": "154074c72c847fb251cf5f8fd878b42053c07c92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-18T13:00:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-18T13:00:21.000Z", "max_issues_repo_path": "workflow/scripts/estimate_parental_filtering_params.py", "max_issues_repo_name": "ibebio/vc-gatk4-snakemake", "max_issues_repo_head_hexsha": "154074c72c847fb251cf5f8fd878b42053c07c92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "workflow/scripts/estimate_parental_filtering_params.py", "max_forks_repo_name": "ibebio/vc-gatk4-snakemake", "max_forks_repo_head_hexsha": "154074c72c847fb251cf5f8fd878b42053c07c92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0289855072, "max_line_length": 214, "alphanum_fraction": 0.5779386712, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1312}
|
import torch
from torch.optim import Optimizer
from typing import Callable, Union, List
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['get_lr', 'change_lr', 'plot_schedule', 'save_optimizer', 'load_optimizer']
def get_lr(optim: Optimizer) -> List[float]:
return [param_group['lr'] for param_group in optim.param_groups]
# UNTESTED
def change_lr(optim: Optimizer, lrs: Union[float, List[float]]):
"""
Change the learning rate of an optimizer
:param optim: optimizer
:param lrs: target learning rate
"""
if isinstance(lrs, list):
assert len(lrs) == len(optim.param_groups)
else:
lrs = [lrs for _ in range(len(optim.param_groups))]
for param_group, lr in zip(optim.param_groups, lrs):
param_group['lr'] = lr
def plot_schedule(schedule_fn: Callable[[int], float], iterations: int=30):
"""
Plot the learning rate schedule function
:param schedule_fn: a function that returns a learning rate given an iteration
:param iterations: maximum number of iterations (or epochs)
:return:
"""
iterations = np.arange(iterations)
lrs = np.array(list(map(schedule_fn, iterations)))
plt.plot(iterations, lrs)
plt.xlabel("Iterations")
plt.ylabel("Learning Rate")
plt.show()
# UNTESTED
def save_optimizer(optimizer: Optimizer, path: str):
"""
Save optimizer state for resuming training
:param optimizer:
:param path:
"""
torch.save(optimizer.state_dict(), path)
print("Optimizer state saved.")
# UNTESTED
def load_optimizer(optimizer: Optimizer, path: str):
"""
Load optimizer state for resuming training
:param optimizer:
:param path:
"""
optimizer.load_state_dict(torch.load(path))
print("Optimizer state loaded.")
|
{"hexsha": "04009bf2a126efedb10850acff4f499e18c2d40f", "size": 1794, "ext": "py", "lang": "Python", "max_stars_repo_path": "nntoolbox/optim/utils.py", "max_stars_repo_name": "nhatsmrt/nn-toolbox", "max_stars_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-07-11T15:57:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-08T13:52:45.000Z", "max_issues_repo_path": "nntoolbox/optim/utils.py", "max_issues_repo_name": "nhatsmrt/nn-toolbox", "max_issues_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-18T22:21:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T22:21:57.000Z", "max_forks_repo_path": "nntoolbox/optim/utils.py", "max_forks_repo_name": "nhatsmrt/nn-toolbox", "max_forks_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-07T10:07:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-07T10:07:09.000Z", "avg_line_length": 25.2676056338, "max_line_length": 86, "alphanum_fraction": 0.6845039019, "include": true, "reason": "import numpy", "num_tokens": 428}
|
# To display plots in the interactive terminal
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'ipympl')
"""
# Actividad en clases: Series de Fourier
Objetivos:
- Componer señales periodicas en base a sinusoides
- Visualizar señales con matplotlib
- Escuchar las señales con IPython
"""
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
from utils import Audio
def create_tone(freq: float=220., Fs: int=44100, duration: float=0.5, volume: float=0.8) -> np.ndarray:
time = np.arange(0, duration, step=1/Fs)
waveform = np.cos(2.0*np.pi*freq*time)
return volume*waveform/np.amax(np.absolute(waveform))
def plot_signal(signal: np.ndarray, Fs: int=44100) -> None:
fig, ax = plt.subplots(figsize=(6, 3), tight_layout=True)
time = np.linspace(0, len(signal)/Fs, num=len(signal))
ax.plot(time, signal)
#ax.set_xlim([0, 0.05])
ax.set_ylim([-1, 1])
ax.set_ylabel('Amplitud')
ax.set_xlabel('Segundos')
def listen_signal(signal: np.ndarray, Fs: int=44100) -> None:
display(Audio(signal, rate=Fs))
signal = create_tone(freq=440)
plot_signal(signal)
listen_signal(signal)
"""
Genere tonos puros con distinta frecuencia, visualice y escuche las señales
Implemente una función que genere una señal triangular en base a su serie de Fourier. La función debe recibir como argumentos
- la frecuencia fundamental
- la frecuencia de muestreo
- el volumen
- el número de armónicos
Visualize y escuche la señal generada. Discuta con sus compañeros sobre los efectos de modificar los distintos parámetros
"""
"""
- Bonus: Songs and sound effects
"""
do, re, mi, fa, sol, la, si = 261.63, 293.66, 329.63, 349.23, 392.00, 440.00, 493.88
song = [do, re, mi, fa, sol, la, si]
listen_signal(np.concatenate([create_tone(freq=freq) for freq in song]))
|
{"hexsha": "a93385068d32710faa1ef462b518bf1eaac50ed5", "size": 1890, "ext": "py", "lang": "Python", "max_stars_repo_path": "class-activities/unit1/sinewaves.py", "max_stars_repo_name": "phuijse/UACH-INFO183", "max_stars_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-08-27T23:53:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T23:31:05.000Z", "max_issues_repo_path": "class-activities/unit1/sinewaves.py", "max_issues_repo_name": "phuijse/UACH-INFO183", "max_issues_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "class-activities/unit1/sinewaves.py", "max_forks_repo_name": "phuijse/UACH-INFO183", "max_forks_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-01-04T17:43:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-07T16:07:18.000Z", "avg_line_length": 30.4838709677, "max_line_length": 125, "alphanum_fraction": 0.7317460317, "include": true, "reason": "import numpy", "num_tokens": 549}
|
"""Basic definitions for the transforms module."""
import numpy as np
import torch
from torch import nn
import nflows.utils.typechecks as check
class InverseNotAvailable(Exception):
"""Exception to be thrown when a transform does not have an inverse."""
pass
class InputOutsideDomain(Exception):
"""Exception to be thrown when the input to a transform is not within its domain."""
pass
class Transform(nn.Module):
"""Base class for all transform objects."""
def forward(self, inputs, context=None):
raise NotImplementedError()
def inverse(self, inputs, context=None):
raise InverseNotAvailable()
class CompositeTransform(Transform):
"""Composes several transforms into one, in the order they are given."""
def __init__(self, transforms):
"""Constructor.
Args:
transforms: an iterable of `Transform` objects.
"""
super().__init__()
self._transforms = nn.ModuleList(transforms)
@staticmethod
def _cascade(inputs, funcs, context):
batch_size = inputs.shape[0]
outputs = inputs
total_logabsdet = inputs.new_zeros(batch_size)
for func in funcs:
outputs, logabsdet = func(outputs, context)
total_logabsdet += logabsdet
return outputs, total_logabsdet
def forward(self, inputs, context=None):
funcs = self._transforms
return self._cascade(inputs, funcs, context)
def inverse(self, inputs, context=None):
funcs = (transform.inverse for transform in self._transforms[::-1])
return self._cascade(inputs, funcs, context)
class MultiscaleCompositeTransform(Transform):
"""A multiscale composite transform as described in the RealNVP paper.
Splits the outputs along the given dimension after every transform, outputs one half, and
passes the other half to further transforms. No splitting is done before the last transform.
Note: Inputs could be of arbitrary shape, but outputs will always be flattened.
Reference:
> L. Dinh et al., Density estimation using Real NVP, ICLR 2017.
"""
def __init__(self, num_transforms, split_dim=1):
"""Constructor.
Args:
num_transforms: int, total number of transforms to be added.
split_dim: dimension along which to split.
"""
if not check.is_positive_int(split_dim):
raise TypeError("Split dimension must be a positive integer.")
super().__init__()
self._transforms = nn.ModuleList()
self._output_shapes = []
self._num_transforms = num_transforms
self._split_dim = split_dim
def add_transform(self, transform, transform_output_shape):
"""Add a transform. Must be called exactly `num_transforms` times.
Parameters:
transform: the `Transform` object to be added.
transform_output_shape: tuple, shape of transform's outputs, excl. the first batch
dimension.
Returns:
Input shape for the next transform, or None if adding the last transform.
"""
assert len(self._transforms) <= self._num_transforms
if len(self._transforms) == self._num_transforms:
raise RuntimeError(
"Adding more than {} transforms is not allowed.".format(
self._num_transforms
)
)
if (self._split_dim - 1) >= len(transform_output_shape):
raise ValueError("No split_dim in output shape")
if transform_output_shape[self._split_dim - 1] < 2:
raise ValueError(
"Size of dimension {} must be at least 2.".format(self._split_dim)
)
self._transforms.append(transform)
if len(self._transforms) != self._num_transforms: # Unless last transform.
output_shape = list(transform_output_shape)
output_shape[self._split_dim - 1] = (
output_shape[self._split_dim - 1] + 1
) // 2
output_shape = tuple(output_shape)
hidden_shape = list(transform_output_shape)
hidden_shape[self._split_dim - 1] = hidden_shape[self._split_dim - 1] // 2
hidden_shape = tuple(hidden_shape)
else:
# No splitting for last transform.
output_shape = transform_output_shape
hidden_shape = None
self._output_shapes.append(output_shape)
return hidden_shape
def forward(self, inputs, context=None):
if self._split_dim >= inputs.dim():
raise ValueError("No split_dim in inputs.")
if self._num_transforms != len(self._transforms):
raise RuntimeError(
"Expecting exactly {} transform(s) "
"to be added.".format(self._num_transforms)
)
batch_size = inputs.shape[0]
def cascade():
hiddens = inputs
for i, transform in enumerate(self._transforms[:-1]):
transform_outputs, logabsdet = transform(hiddens, context)
outputs, hiddens = torch.chunk(
transform_outputs, chunks=2, dim=self._split_dim
)
assert outputs.shape[1:] == self._output_shapes[i]
yield outputs, logabsdet
# Don't do the splitting for the last transform.
outputs, logabsdet = self._transforms[-1](hiddens, context)
yield outputs, logabsdet
all_outputs = []
total_logabsdet = inputs.new_zeros(batch_size)
for outputs, logabsdet in cascade():
all_outputs.append(outputs.reshape(batch_size, -1))
total_logabsdet += logabsdet
all_outputs = torch.cat(all_outputs, dim=-1)
return all_outputs, total_logabsdet
def inverse(self, inputs, context=None):
if inputs.dim() != 2:
raise ValueError("Expecting NxD inputs")
if self._num_transforms != len(self._transforms):
raise RuntimeError(
"Expecting exactly {} transform(s) "
"to be added.".format(self._num_transforms)
)
batch_size = inputs.shape[0]
rev_inv_transforms = [transform.inverse for transform in self._transforms[::-1]]
split_indices = np.cumsum([np.prod(shape) for shape in self._output_shapes])
split_indices = np.insert(split_indices, 0, 0)
split_inputs = []
for i in range(len(self._output_shapes)):
flat_input = inputs[:, split_indices[i] : split_indices[i + 1]]
split_inputs.append(flat_input.view(-1, *self._output_shapes[i]))
rev_split_inputs = split_inputs[::-1]
total_logabsdet = inputs.new_zeros(batch_size)
# We don't do the splitting for the last (here first) transform.
hiddens, logabsdet = rev_inv_transforms[0](rev_split_inputs[0], context)
total_logabsdet += logabsdet
for inv_transform, input_chunk in zip(
rev_inv_transforms[1:], rev_split_inputs[1:]
):
tmp_concat_inputs = torch.cat([input_chunk, hiddens], dim=self._split_dim)
hiddens, logabsdet = inv_transform(tmp_concat_inputs, context)
total_logabsdet += logabsdet
outputs = hiddens
return outputs, total_logabsdet
class InverseTransform(Transform):
"""Creates a transform that is the inverse of a given transform."""
def __init__(self, transform):
"""Constructor.
Args:
transform: An object of type `Transform`.
"""
super().__init__()
self._transform = transform
def forward(self, inputs, context=None):
return self._transform.inverse(inputs, context)
def inverse(self, inputs, context=None):
return self._transform(inputs, context)
class LogTransform(Transform):
    """Elementwise natural-log transform: forward maps x to log(x)."""
    def __init__(self):
        super().__init__()
    def forward(self, inputs, context=None):
        # The forward map is only defined for strictly positive inputs.
        if torch.any(inputs <= 0):
            raise ValueError("LogTransform requires strictly positive inputs.")
        outputs = torch.log(inputs)
        # d/dx log(x) = 1/x, so log|det J| = -sum(log x) over non-batch dims.
        logabsdet = -outputs.reshape(inputs.shape[0], -1).sum(dim=-1)
        return outputs, logabsdet
    def inverse(self, inputs, context=None):
        outputs = torch.exp(inputs)
        # The inverse is exp(x), whose log|det J| is sum(x) over non-batch dims.
        logabsdet = inputs.reshape(inputs.shape[0], -1).sum(dim=-1)
        return outputs, logabsdet
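if __name__ == "__main__":
    # Minimal sanity check (an illustrative sketch, not part of the original
    # module): composing LogTransform with InverseTransform(LogTransform())
    # should round-trip positive inputs, with log-abs-det terms that cancel.
    x = torch.rand(4, 3) + 0.1
    composite = CompositeTransform([LogTransform(), InverseTransform(LogTransform())])
    y, logabsdet = composite(x)
    assert torch.allclose(y, x, atol=1e-6)
    assert torch.allclose(logabsdet, torch.zeros(4), atol=1e-5)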
|
{"hexsha": "860b3494d05c106795e3d0b83d66052581c998ca", "size": 8253, "ext": "py", "lang": "Python", "max_stars_repo_path": "nflows/transforms/base.py", "max_stars_repo_name": "mshakerinava/nflows", "max_stars_repo_head_hexsha": "d86cb1478ff36ffd3e005e980d92a3b0bbffbf02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nflows/transforms/base.py", "max_issues_repo_name": "mshakerinava/nflows", "max_issues_repo_head_hexsha": "d86cb1478ff36ffd3e005e980d92a3b0bbffbf02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nflows/transforms/base.py", "max_forks_repo_name": "mshakerinava/nflows", "max_forks_repo_head_hexsha": "d86cb1478ff36ffd3e005e980d92a3b0bbffbf02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6857142857, "max_line_length": 96, "alphanum_fraction": 0.6304374167, "include": true, "reason": "import numpy", "num_tokens": 1726}
|
[STATEMENT]
lemma infer_v_pair2I:
fixes v\<^sub>1::v and v\<^sub>2::v
assumes "\<Theta>; \<B>; \<Gamma> \<turnstile> v\<^sub>1 \<Rightarrow> \<tau>\<^sub>1" and "\<Theta>; \<B>; \<Gamma> \<turnstile> v\<^sub>2 \<Rightarrow> \<tau>\<^sub>2"
shows "\<exists>\<tau>. \<Theta>; \<B>; \<Gamma> \<turnstile> V_pair v\<^sub>1 v\<^sub>2 \<Rightarrow> \<tau> \<and> b_of \<tau> = B_pair (b_of \<tau>\<^sub>1) (b_of \<tau>\<^sub>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
obtain z1 and b1 and c1 and z2 and b2 and c2 where zbc: "\<tau>\<^sub>1 = (\<lbrace> z1 : b1 | c1 \<rbrace>) \<and> \<tau>\<^sub>2 = (\<lbrace> z2 : b2 | c2 \<rbrace>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>z1 b1 c1 z2 b2 c2. \<tau>\<^sub>1 = \<lbrace> z1 : b1 | c1 \<rbrace> \<and> \<tau>\<^sub>2 = \<lbrace> z2 : b2 | c2 \<rbrace> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using \<tau>.exhaust
[PROOF STATE]
proof (prove)
using this:
(\<And>x1 x2 x3. ?y = \<lbrace> x1 : x2 | x3 \<rbrace> \<Longrightarrow> ?P) \<Longrightarrow> ?P
goal (1 subgoal):
1. (\<And>z1 b1 c1 z2 b2 c2. \<tau>\<^sub>1 = \<lbrace> z1 : b1 | c1 \<rbrace> \<and> \<tau>\<^sub>2 = \<lbrace> z2 : b2 | c2 \<rbrace> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by meson
[PROOF STATE]
proof (state)
this:
\<tau>\<^sub>1 = \<lbrace> z1 : b1 | c1 \<rbrace> \<and> \<tau>\<^sub>2 = \<lbrace> z2 : b2 | c2 \<rbrace>
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
obtain z::x where "atom z \<sharp> ( v\<^sub>1, v\<^sub>2,\<Theta>, \<B>,\<Gamma>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>z. atom z \<sharp> (v\<^sub>1, v\<^sub>2, \<Theta>, \<B>, \<Gamma>) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using obtain_fresh
[PROOF STATE]
proof (prove)
using this:
(\<And>a. atom a \<sharp> ?x \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>z. atom z \<sharp> (v\<^sub>1, v\<^sub>2, \<Theta>, \<B>, \<Gamma>) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2, \<Theta>, \<B>, \<Gamma>)
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
hence "atom z \<sharp> ( v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>,\<Gamma>)"
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2, \<Theta>, \<B>, \<Gamma>)
goal (1 subgoal):
1. atom z \<sharp> (v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
[PROOF STEP]
using fresh_prodN
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2, \<Theta>, \<B>, \<Gamma>)
?a \<sharp> (?x, ?y) = (?a \<sharp> ?x \<and> ?a \<sharp> ?y)
?x \<sharp> (?a, ?b, ?c) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c)
?x \<sharp> (?a, ?b, ?c, ?d) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d)
?x \<sharp> (?a, ?b, ?c, ?d, ?e) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g \<and> ?x \<sharp> ?h)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g \<and> ?x \<sharp> ?h \<and> ?x \<sharp> ?i)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g \<and> ?x \<sharp> ?h \<and> ?x \<sharp> ?i \<and> ?x \<sharp> ?j)
?x \<sharp> (?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l) = (?x \<sharp> ?a \<and> ?x \<sharp> ?b \<and> ?x \<sharp> ?c \<and> ?x \<sharp> ?d \<and> ?x \<sharp> ?e \<and> ?x \<sharp> ?f \<and> ?x \<sharp> ?g \<and> ?x \<sharp> ?h \<and> ?x \<sharp> ?i \<and> ?x \<sharp> ?j \<and> ?x \<sharp> ?k \<and> ?x \<sharp> ?l)
goal (1 subgoal):
1. atom z \<sharp> (v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
hence " \<Theta>; \<B>; \<Gamma> \<turnstile> V_pair v\<^sub>1 v\<^sub>2 \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | CE_val (V_var z) == CE_val (V_pair v\<^sub>1 v\<^sub>2) \<rbrace>"
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
using assms infer_v_pairI zbc
[PROOF STATE]
proof (prove)
using this:
atom z \<sharp> (v\<^sub>1, v\<^sub>2) \<and> atom z \<sharp> (\<Theta>, \<B>, \<Gamma>)
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v\<^sub>1 \<Rightarrow> \<tau>\<^sub>1
\<Theta> ; \<B> ; \<Gamma> \<turnstile> v\<^sub>2 \<Rightarrow> \<tau>\<^sub>2
\<lbrakk>atom ?z \<sharp> (?v1.0, ?v2.0); atom ?z \<sharp> (?\<Theta>, ?\<B>, ?\<Gamma>); ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v1.0 \<Rightarrow> ?t1.0; ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> ?v2.0 \<Rightarrow> ?t2.0\<rbrakk> \<Longrightarrow> ?\<Theta> ; ?\<B> ; ?\<Gamma> \<turnstile> [ ?v1.0 , ?v2.0 ]\<^sup>v \<Rightarrow> \<lbrace> ?z : [ b_of ?t1.0 , b_of ?t2.0 ]\<^sup>b | [ [ ?z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ ?v1.0 , ?v2.0 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<tau>\<^sub>1 = \<lbrace> z1 : b1 | c1 \<rbrace> \<and> \<tau>\<^sub>2 = \<lbrace> z2 : b2 | c2 \<rbrace>
goal (1 subgoal):
1. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
obtain \<tau> where "\<tau> = (\<lbrace> z : B_pair b1 b2 | CE_val (V_var z) == CE_val (V_pair v\<^sub>1 v\<^sub>2) \<rbrace>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>\<tau>. \<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
hence "b_of \<tau> = B_pair (b_of \<tau>\<^sub>1) (b_of \<tau>\<^sub>2)"
[PROOF STATE]
proof (prove)
using this:
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
goal (1 subgoal):
1. b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
using b_of.simps zbc
[PROOF STATE]
proof (prove)
using this:
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
b_of \<lbrace> ?z : ?b | ?c \<rbrace> = ?b
\<tau>\<^sub>1 = \<lbrace> z1 : b1 | c1 \<rbrace> \<and> \<tau>\<^sub>2 = \<lbrace> z2 : b2 | c2 \<rbrace>
goal (1 subgoal):
1. b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
by presburger
[PROOF STATE]
proof (state)
this:
b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
using b_of.simps
[PROOF STATE]
proof (prove)
using this:
\<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<lbrace> z : [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
\<tau> = \<lbrace> z : [ b1 , b2 ]\<^sup>b | [ [ z ]\<^sup>v ]\<^sup>c\<^sup>e == [ [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v ]\<^sup>c\<^sup>e \<rbrace>
b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
b_of \<lbrace> ?z : ?b | ?c \<rbrace> = ?b
goal (1 subgoal):
1. \<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<exists>\<tau>. \<Theta> ; \<B> ; \<Gamma> \<turnstile> [ v\<^sub>1 , v\<^sub>2 ]\<^sup>v \<Rightarrow> \<tau> \<and> b_of \<tau> = [ b_of \<tau>\<^sub>1 , b_of \<tau>\<^sub>2 ]\<^sup>b
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6191, "file": "MiniSail_TypingL", "length": 25}
|
[STATEMENT]
lemma fbd_ifbd_inv2_iff: "((bd\<^sub>\<F> \<circ> bd\<^sup>-\<^sub>\<F>) \<phi> = \<phi>) = (Sup_pres \<phi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((bd\<^sub>\<F> \<circ> bd\<^sup>-\<^sub>\<F>) \<phi> = \<phi>) = Sup_pres \<phi>
[PROOF STEP]
using fbd_ifbd_inv2 fbd_ifbd_inv2_inv
[PROOF STATE]
proof (prove)
using this:
Sup_pres ?\<phi> \<Longrightarrow> (bd\<^sub>\<F> \<circ> bd\<^sup>-\<^sub>\<F>) ?\<phi> = ?\<phi>
(bd\<^sub>\<F> \<circ> bd\<^sup>-\<^sub>\<F>) ?\<phi> = ?\<phi> \<Longrightarrow> Sup_pres ?\<phi>
goal (1 subgoal):
1. ((bd\<^sub>\<F> \<circ> bd\<^sup>-\<^sub>\<F>) \<phi> = \<phi>) = Sup_pres \<phi>
[PROOF STEP]
by force
|
{"llama_tokens": 306, "file": "Transformer_Semantics_Kleisli_Transformers", "length": 2}
|
from vg.compat import v1 as vg
def find_rigid_transform(a, b, compute_scale=False, fail_in_degenerate_cases=True):
"""
Args:
a: a Nx3 array of vertex locations
b: a Nx3 array of vertex locations
a and b are in correspondence -- we find a transformation such that the first
point in a will be moved to the location of the first point in b, etc.
Returns: (R,T) such that a.dot(R) + T ~= b
R is a 3x3 rotation matrix
T is a 1x3 translation vector
Based on Arun et al, "Least-squares fitting of two 3-D point sets," 1987.
See also Eggert et al, "Estimating 3-D rigid body transformations: a
comparison of four major algorithms," 1997.
    If compute_scale is True, instead computes and returns (s, R, T) such that s*(a.dot(R)) + T ~= b.
    In noisy cases, when there is a reflection, this algorithm can fail. In those cases the
    right thing to do is to try a less noise-sensitive algorithm like RANSAC. But if you want
    a result anyway, even knowing that it might not be right, set fail_in_degenerate_cases=False.
"""
import numpy as np
k = vg.shape.check(locals(), "a", (-1, 3))
vg.shape.check(locals(), "b", (k, 3))
a = a.T
b = b.T
a_mean = np.mean(a, axis=1)
b_mean = np.mean(b, axis=1)
a_centered = a - a_mean.reshape(-1, 1)
b_centered = b - b_mean.reshape(-1, 1)
c = a_centered.dot(b_centered.T)
u, s, v = np.linalg.svd(c, full_matrices=False)
v = v.T
R = v.dot(u.T)
if np.linalg.det(R) < 0:
if (
np.any(s == 0) or not fail_in_degenerate_cases
): # This is only valid in the noiseless case; see the paper
v[:, 2] = -v[:, 2]
R = v.dot(u.T)
else:
raise ValueError(
"find_rigid_transform found a reflection that it cannot recover from. Try RANSAC or something..."
)
if compute_scale:
scale = np.sum(s) / (np.linalg.norm(a_centered) ** 2)
T = (b_mean - scale * (R.dot(a_mean))).reshape(1, 3)
return scale, R.T, T
else:
T = (b_mean - R.dot(a_mean)).reshape(1, 3)
return R.T, T
def find_rigid_rotation(a, b, allow_scaling=False):
"""
Args:
a: a Nx3 array of vertex locations
b: a Nx3 array of vertex locations
Returns: R such that a.dot(R) ~= b
See link: http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
"""
import numpy as np
k = vg.shape.check(locals(), "a", (-1, 3))
vg.shape.check(locals(), "b", (k, 3))
a = a.T
b = b.T
if a.size == 3:
cx = np.cross(a.ravel(), b.ravel())
a = np.hstack([a.reshape(-1, 1), cx.reshape(-1, 1)])
b = np.hstack([b.reshape(-1, 1), cx.reshape(-1, 1)])
c = a.dot(b.T)
u, _, v = np.linalg.svd(c, full_matrices=False)
v = v.T
R = v.dot(u.T)
if np.linalg.det(R) < 0:
v[:, 2] = -v[:, 2]
R = v.dot(u.T)
if allow_scaling:
scalefactor = np.linalg.norm(b) / np.linalg.norm(a)
R = R * scalefactor
return R.T
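if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): recover a known
    # rotation and translation from noiseless synthetic correspondences.
    import numpy as np
    rng = np.random.default_rng(0)
    a = rng.normal(size=(50, 3))
    theta = 0.3
    R_true = np.array([
        [np.cos(theta), -np.sin(theta), 0.0],
        [np.sin(theta), np.cos(theta), 0.0],
        [0.0, 0.0, 1.0],
    ])
    T_true = np.array([[1.0, -2.0, 0.5]])
    b = a.dot(R_true) + T_true
    R, T = find_rigid_transform(a, b)
    assert np.allclose(R, R_true, atol=1e-6)
    assert np.allclose(T, T_true, atol=1e-6)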
|
{"hexsha": "81bf573d53978fb85b030717097383f3ec4aafb9", "size": 3082, "ext": "py", "lang": "Python", "max_stars_repo_path": "entente/rigid_transform.py", "max_stars_repo_name": "metabolize/entente", "max_stars_repo_head_hexsha": "c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-05-09T17:11:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T20:27:39.000Z", "max_issues_repo_path": "entente/rigid_transform.py", "max_issues_repo_name": "metabolize/entente", "max_issues_repo_head_hexsha": "c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 94, "max_issues_repo_issues_event_min_datetime": "2018-10-02T15:45:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-14T14:20:38.000Z", "max_forks_repo_path": "entente/rigid_transform.py", "max_forks_repo_name": "metabolize/entente", "max_forks_repo_head_hexsha": "c1b16bb7c7fb83b31db4e8ddaf65f1504374fe7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-01-21T00:59:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T20:26:28.000Z", "avg_line_length": 30.2156862745, "max_line_length": 113, "alphanum_fraction": 0.5833874108, "include": true, "reason": "import numpy", "num_tokens": 914}
|
import unittest
import numpy as np
try:
import bokeh
from openmdao.visualization.meta_model_viewer.meta_model_visualization import MetaModelVisualization
except ImportError:
bokeh = None
import openmdao.api as om
@unittest.skipUnless(bokeh, "Bokeh is required")
class UnstructuredMetaModelCompTests(unittest.TestCase):
def test_missing_training_data_in_parameter(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train = np.linspace(0, 10, 20)
y_train = np.linspace(0, 20, 20)
# Inputs
interp.add_input('simple_x', 0., training_data=x_train)
interp.add_input('sin_x', 0.)
        # Outputs
interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
with self.assertRaises(Exception) as context:
viz = MetaModelVisualization(interp)
msg = "No training data present for one or more parameters"
self.assertTrue(msg in str(context.exception))
def test_single_input_parameter(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train = np.linspace(0, 10, 20)
y_train = np.linspace(0, 20, 20)
# Inputs
interp.add_input('simple_x', 0., training_data=x_train)
        # Outputs
interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
with self.assertRaises(Exception) as context:
viz = MetaModelVisualization(interp)
msg = 'Must have more than one input value'
self.assertTrue(msg in str(context.exception))
def test_training_point_array_width(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train = np.linspace(0, 10, 20)
y_train = np.linspace(0, 20, 20)
# Inputs
interp.add_input('x', 0., training_data=x_train)
interp.add_input('y', 0., training_data=x_train)
        # Outputs
interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
viz = MetaModelVisualization(interp)
training_points_output = viz._unstructured_training_points()
self.assertTrue(training_points_output.shape[1] == 2)
def test_training_point_array_for_nan_values(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train = np.linspace(0, 10, 20)
y_train = np.linspace(0, 20, 20)
# Inputs
interp.add_input('x', 0., training_data=x_train)
interp.add_input('y', 0., training_data=x_train)
        # Outputs
interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
viz = MetaModelVisualization(interp)
training_points_output = viz._unstructured_training_points()
for i in range(0, 2):
self.assertFalse(np.any(np.isnan(training_points_output[:, i])))
def test_make_predictions(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train = np.linspace(0, 10, 20)
y_train = np.linspace(10, 20, 20)
# Inputs
interp.add_input('simple_x', 0., training_data=x_train)
interp.add_input('sin_x', 0., training_data=x_train)
        # Outputs
interp.add_output('cos_x', 0., training_data=.5*np.cos(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
viz = MetaModelVisualization(interp)
resolution = 50
data = dict({'simple_x': np.array([np.random.rand(resolution**2, 1)]),
'sin_x': np.array([np.random.rand(resolution**2, 1)])})
pred_array = viz._make_predictions(data)
self.assertTrue(pred_array.shape == (resolution**2, 1))
def test_working_response_surface(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train1 = np.linspace(0, 10, 20)
x_train2 = np.linspace(0, 20, 20)
x_train3 = np.linspace(0, 30, 20)
x_train4 = np.linspace(0, 40, 20)
y_train = np.linspace(10, 20, 20)
# Inputs
interp.add_input('input_1', 0., training_data=x_train1)
interp.add_input('input_2', 0., training_data=x_train2)
interp.add_input('input_3', 0., training_data=x_train3)
interp.add_input('input_4', 0., training_data=x_train4)
# Outputs
interp.add_output('output_1', 0., training_data=.5 * np.cos(y_train))
interp.add_output('output_2', 0., training_data=.5 * np.sin(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem()
prob.model.add_subsystem('interp', interp)
prob.setup()
prob.final_setup()
def test_not_top_level_prob(self):
# Model
interp = om.MetaModelUnStructuredComp()
# Training Data
x_train1 = np.linspace(0, 10, 20)
x_train2 = np.linspace(0, 20, 20)
x_train3 = np.linspace(0, 30, 20)
x_train4 = np.linspace(0, 40, 20)
y_train = np.linspace(10, 20, 20)
# Inputs
interp.add_input('input_1', 0., training_data=x_train1)
interp.add_input('input_2', 0., training_data=x_train2)
interp.add_input('input_3', 0., training_data=x_train3)
interp.add_input('input_4', 0., training_data=x_train4)
# Outputs
interp.add_output('output_1', 0., training_data=.5 * np.cos(y_train))
interp.add_output('output_2', 0., training_data=.5 * np.sin(y_train))
# Surrogate Model
interp.options['default_surrogate'] = om.ResponseSurface()
prob = om.Problem(model=interp)
prob.setup()
prob.final_setup()
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "55b1f90f88a32f8fd39727176d800bcf076dccc7", "size": 6696, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmdao/visualization/meta_model_viewer/tests/test_unstruct.py", "max_stars_repo_name": "bollwyvl/OpenMDAO", "max_stars_repo_head_hexsha": "4d7a31b2bb39674e2be0d6a13cbe22de3f5353af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openmdao/visualization/meta_model_viewer/tests/test_unstruct.py", "max_issues_repo_name": "bollwyvl/OpenMDAO", "max_issues_repo_head_hexsha": "4d7a31b2bb39674e2be0d6a13cbe22de3f5353af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openmdao/visualization/meta_model_viewer/tests/test_unstruct.py", "max_forks_repo_name": "bollwyvl/OpenMDAO", "max_forks_repo_head_hexsha": "4d7a31b2bb39674e2be0d6a13cbe22de3f5353af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1621621622, "max_line_length": 104, "alphanum_fraction": 0.6252986858, "include": true, "reason": "import numpy", "num_tokens": 1640}
|
# PART 5 – ASSESSMENT OF LOCAL METAL LOSS
# Determine the assessment applicability
# @doc DesignCodeCriteria
# @doc MaterialToughness
# @doc CyclicService
# @doc Part5ComponentType
print("Begin -- Assessment Applicability and Component Type Checks\n")
creep_range = CreepRangeTemperature("Carbon Steel (UTS ≤ 414MPa (60 ksi))"; design_temperature=100.0, units="nmm-mm-mpa")
design = DesignCodeCriteria("ASME B&PV Code, Section VIII, Division 1")
toughness = MaterialToughness("Certain")
cyclic = CyclicService(100, "Meets Part 14")
x = Part5ComponentType("Cylindrical Vessel", vessel_orientation="horizontal", material="Carbon and Low Alloy Steels", D=24.75,Lss=120.0,H=0.0, NPS=3.0, design_temperature=500.0, units="lbs-in-psi")
part5_applicability = Part5AsessmentApplicability(x,design,toughness,cyclic,creep_range)
# For all assessments - determine the inspection data grid
M1 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
M2 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.100, 0.220, 0.280, 0.250, 0.240, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
M3 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.215, 0.255, 0.215, 0.145, 0.275, 0.170, 0.240, 0.250, 0.250, 0.280, 0.290, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
M4 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.170, 0.270, 0.190, 0.190, 0.285, 0.250, 0.225, 0.275, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
M5 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
M6 = [0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300]
CTPGrid = hcat(M6,M5,M4,M3,M2,M1) # build in descending order
CTPGrid = rotl90(CTPGrid) # rotate to correct orientation
# Level 1 fit for service
annex2c_tmin_category = "Straight Pipes Subject To Internal Pressure" # ["Cylindrical Shell","Spherical Shell","Hemispherical Head","Elliptical Head","Torispherical Head","Conical Shell","Toriconical Head","Conical Transition","Nozzles Connections in Shells",
# "Junction Reinforcement Requirements at Conical Transitions","Tubesheets","Flat head to cylinder connections","Bolted Flanges","Straight Pipes Subject To Internal Pressure","Boiler Tubes","Pipe Bends Subject To Internal Pressure",
# "MAWP for External Pressure","Branch Connections","API 650 Storage Tanks"]
equipment_group = "piping" # "vessel", "tank"
flaw_location = "external" # "External","Internal"
metal_loss_categorization = "LTA" # "LTA" or "Groove-Like Flaw"
units = "lbs-in-psi" # "lbs-in-psi" or "nmm-mm-mpa"
tnom = .3 # nominal or furnished thickness of the component adjusted for mill undertolerance as applicable.
trd = .3 # uniform thickness away from the local metal loss location established by thickness measurements at the time of the assessment.
FCAml = 0.0 # Future Corrosion Allowance applied to the region of metal loss.
FCA = 0.0 # Future Corrosion Allowance applied to the region away from the metal loss (see Annex 2C, paragraph 2C.2.8).
LOSS = 0.0 #the amount of uniform metal loss away from the local metal loss location at the time of the assessment.
Do = 3.5 # Outside Diameter
D = Do - 2*(tnom)
P = 1480.0 # internal design pressure.
S = 20000.0 # allowable stress.
E = 1.0 # weld joint efficiency or quality factor from the original construction code, if unknown use 0.7.
MA = 0.0 # mechanical allowances (thread or groove depth); for threaded components, the nominal thread depth (dimension h of ASME B.1.20.1) shall apply.
Yb31 = 0.4 # coefficient from ASME B31 Piping codes used for determining the pipe wall thickness, the coefficient can be determined from the following table that is valid for tmin < Do / 6 Annex 2C .
t = trd # thickness of the shell or pipe adjusted for mill tolerance, LOSS and FCA , or cylinder thickness at a conical transition for a junction reinforcement calculation adjusted for mill tolerance, LOSS and FCA , as applicable.
tsl = 0.0 # supplemental thickness for mechanical loads other than pressure that result in longitudinal stress; this thickness is usually obtained from the results of a weight case in a stress analysis of the piping system (see paragraph 2C.2.7).
spacings = 0.5 # spacing determined by visual inspection to adequately categorize the corrosion; may add to CTP_Grid function for plotting purposes
# Flaw dimensions
s = 6.0 # longitudinal extent or length of the region of local metal loss based on future corroded thickness,
c = 2.0 # circumferential extent or length of the region of local metal loss (see Figure 5.2 and Figure 5.10), based on future corroded thickness, tc .
Ec = 1.0 # circumferential weld joint efficiency. note if damage on weld see # 2C.2.5 Treatment of Weld and Riveted Joint Efficiency, and Ligament Efficiency
El = 1.0 # longitudinal weld joint efficiency. note if damage on weld see # 2C.2.5 Treatment of Weld and Riveted Joint Efficiency, and Ligament Efficiency
RSFa = 0.9 # remaining strength factor - consult API 579 is go lower than 0.9
# For all assessments determine far enough from structural discontinuity
# Flaw-To-Major Structural Discontinuity Spacing
L1msd = [12.0] # distance to the nearest major structural discontinuity.
L2msd = [12.0] # distance to the nearest major structural discontinuity.
L3msd = [12.0] # distance to the nearest major structural discontinuity.
L4msd = [12.0] # distance to the nearest major structural discontinuity.
L5msd = [12.0] # distance to the nearest major structural discontinuity.
Lmsd = minimum([L1msd,L2msd,L3msd,L4msd,L5msd])
if (Lmsd[1] >= (1.8*(sqrt(D*(trd - LOSS - FCA)))))
print("Satisfied - Flaw is located far enough from structural discontinuity\n")
lmsd_satisfied = 1
else
print("Not satisfied - Flaw is too close to a structural discontinuity - Conduct a level 3 assessment\n")
lmsd_satisfied = 0
end
# Groove Like Flaw dimensions
gl = .05 # length of the Groove-Like Flaw based on future corroded condition.
gw = .4 # width of the Groove-Like Flaw based on future corroded condition.
gr = 0.1 # radius at the base of a Groove-Like Flaw based on future corroded condition.
β = 40.0 # see (Figure 5.4) :: orientation of the groove-like flaw with respect to the longitudinal axis or a parameter to compute an effective fracture toughness for a groove being evaluated as a crack-like flaw, as applicable.
# Perform level 1 assessment
if (part5_applicability[1] == 1 && lmsd_satisfied == 1) # begin level 1 assessment
    part_5_lta_output = Part5LTALevel1(annex2c_tmin_category; equipment_group=equipment_group, flaw_location=flaw_location, metal_loss_categorization=metal_loss_categorization, units=units, tnom=tnom,
    trd=trd, FCA=FCA, FCAml=FCAml, LOSS=LOSS, Do=Do, D=D, P=P, S=S, E=E, MA=MA, Yb31=Yb31, tsl=tsl, spacings=spacings, s=s, c=c, El=El, Ec=Ec, RSFa=RSFa, gl=gl, gw=gw, gr=gr, β=β)
else
    # Any failed applicability or spacing check sends the flaw to a higher assessment level.
    print("Level 1 Criteria Not Met - Perform Level 2 or 3 as applicable")
end
#=
# Level 2 Assessment
# STEP 1 – Determine the CTP (see paragraph 5.3.3.2).
# Conducted at the top of the script
# STEP 2 – Determine the wall thickness to be used in the assessment using Equation (5.3) or Equation (5.4), as applicable.
tc = trd - LOSS - FCA # wall thickness away from the damaged area adjusted for LOSS and FCA , as applicable. # eq (5.3)
tc = trd - FCA # wall thickness away from the damaged area adjusted for LOSS and FCA , as applicable. # eq (5.4)
# STEP 3 – Determine the minimum measured thickness, tmm , and the flaw dimensions s and c (see paragraph 5.3.3.2.b).
tmm = CTP_Grid(CTPGrid) # minimum measured thickness determined at the time of the inspection.
# STEP 4 – Determine the remaining thickness ratio, Rt , using Equation (5.5) and the longitudinal flaw length parameter, λ , using Equation (5.6).
Rt = (tmm-FCA) / tc # remaining thickness ratio. # (5.5)
lambda = (1.285*s)/(sqrt(D*tc)) # longitudinal flaw length parameter eq (5.6)
# STEP 5 – Check the limiting flaw size criteria in paragraph 5.4.2.2.e. If all of these requirements are satisfied, then proceed to STEP 6; otherwise, the flaw is not acceptable per the Level 2 Assessment procedure.
x = FlawSizeLimitCriteria("piping","lbs-in-psi")
flaw_size_accept = FlawSizeLevel1Acceptance(x,"piping")
RSFa = .9 # allowable remaining strength factor (see Part 2).
if (Rt < RSFa)
Q = round(1.123*((((1-Rt)/(1-Rt/RSFa))^2-1)^.5),digits=2) # factor used to determine the length for thickness averaging based on an allowable Remaining Strength Factor (see Part 2) and the remaining thickness ratio, Rt (see Table 4.8).
elseif (Rt >= RSFa)
Q = 50.0
end
L = Q*(sqrt(Dml*tc)) # length for thickness averaging along the shell.
# If visual inspection or NDE methods are utilized to quantify the metal loss, an alternative spacing can be used as long as the metal loss on the component can be adequately characterized.
if (flaw_location == "Internal")
Ls = minimum([L,(2*trd)]) # recommended spacing of thickness readings
print("Recommended spacing of thickness readings = ",Ls)
elseif (flaw_location == "External")
print("Can Determine alterante spacing providing component can be adequately characterized - Using Spacing = ", spacings)
end
# Piping
if (part5_applicability[1] == 1) # begin level 1 assessment
print("Begin part 5 - Level 1 assessment - applicability criteria has been met")
end # end piping level 1
=#
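# Worked check (an illustrative sketch, not part of the original script):
# evaluate Eq. (5.5) and Eq. (5.6) by hand for the grid's minimum reading,
# tmm = 0.100, using the inputs defined above.
let tmm = 0.100, tc = trd - LOSS - FCA
    Rt = (tmm - FCA) / tc            # remaining thickness ratio, Eq. (5.5)
    λ = (1.285 * s) / sqrt(D * tc)   # longitudinal flaw length parameter, Eq. (5.6)
    println("Rt = ", round(Rt, digits=3), ", λ = ", round(λ, digits=3))
end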
|
{"hexsha": "297229c5f0a513de7307302825811ccabd213fd8", "size": 10184, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/part5_local_metal_loss_assessment.jl", "max_stars_repo_name": "JuliaTagBot/FitnessForService.jl", "max_stars_repo_head_hexsha": "530fa8c4764967275220561bcf525291c88d7cd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/part5_local_metal_loss_assessment.jl", "max_issues_repo_name": "JuliaTagBot/FitnessForService.jl", "max_issues_repo_head_hexsha": "530fa8c4764967275220561bcf525291c88d7cd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/part5_local_metal_loss_assessment.jl", "max_forks_repo_name": "JuliaTagBot/FitnessForService.jl", "max_forks_repo_head_hexsha": "530fa8c4764967275220561bcf525291c88d7cd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.2269503546, "max_line_length": 263, "alphanum_fraction": 0.7203456402, "num_tokens": 3231}
|
#!/usr/bin/env python3
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Thu Jul 29, 2021 at 02:36 AM +0200
import numpy as np
from .io import read_branches
# Find total number of events (unique events) out of total number of candidates.
def extract_uid(ntp, tree, run_branch='runNumber', event_branch='eventNumber',
conditional=None, run_array=None, event_array=None):
if run_array is None or event_array is None:
run, event = read_branches(ntp, tree, (run_branch, event_branch))
else:
run, event = run_array, event_array
if conditional is not None:
run = run[conditional]
event = event[conditional]
run = np.char.mod('%d', run)
event = np.char.mod('%d', event)
run = np.char.add(run, '-')
ids = np.char.add(run, event)
uid, idx, count = np.unique(ids, return_index=True, return_counts=True)
num_of_evt = ids.size
num_of_ids = uid.size
num_of_dupl_ids = uid[count > 1].size
# num_of_evt_w_dupl_id = np.sum(count[count > 1]) - num_of_dupl_ids
num_of_evt_w_dupl_id = num_of_evt - num_of_ids
return uid, idx, num_of_evt, num_of_ids, \
num_of_dupl_ids, num_of_evt_w_dupl_id
def find_common_uid(ntp1, ntp2, tree1, tree2, **kwargs):
uid1, idx1 = extract_uid(ntp1, tree1, **kwargs)[0:2]
uid2, idx2 = extract_uid(ntp2, tree2, **kwargs)[0:2]
uid_comm, uid_comm_idx1, uid_comm_idx2 = np.intersect1d(
uid1, uid2, assume_unique=True, return_indices=True)
return uid_comm, idx1[uid_comm_idx1], idx2[uid_comm_idx2]
def gen_histo(array, bins=200, scale=1.05, data_range=None, **kwargs):
if data_range is None:
data_min = array.min()
data_max = array.max()
data_min = data_min*scale if data_min < 0 else data_min/scale
data_max = data_max/scale if data_max < 0 else data_max*scale
return np.histogram(array, bins, (data_min, data_max), **kwargs)
return np.histogram(array, bins, data_range, **kwargs)
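if __name__ == '__main__':
    # Minimal demo (an illustrative sketch, not part of the original module):
    # histogram a random array with the auto-scaled range. The uid helpers
    # need a ROOT ntuple, so they are only sketched with hypothetical names:
    #   uid, idx, *stats = extract_uid('sample.root', 'TupleB0/DecayTree')
    counts, edges = gen_histo(np.random.normal(size=10000), bins=50)
    print(counts.sum(), edges[0], edges[-1])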
|
{"hexsha": "25c33303e20e088f5f750d531d12b81bc3065476", "size": 2000, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyTuplingUtils/utils.py", "max_stars_repo_name": "umd-lhcb/pyTuplingUtils", "max_stars_repo_head_hexsha": "dd2efe154f1418a70295eabd8919e16ace2785cc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyTuplingUtils/utils.py", "max_issues_repo_name": "umd-lhcb/pyTuplingUtils", "max_issues_repo_head_hexsha": "dd2efe154f1418a70295eabd8919e16ace2785cc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-04-20T17:25:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-13T21:05:14.000Z", "max_forks_repo_path": "pyTuplingUtils/utils.py", "max_forks_repo_name": "umd-lhcb/pyTuplingUtils", "max_forks_repo_head_hexsha": "dd2efe154f1418a70295eabd8919e16ace2785cc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2580645161, "max_line_length": 80, "alphanum_fraction": 0.6785, "include": true, "reason": "import numpy", "num_tokens": 576}
|
import os
import random
import cv2
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image, ImageFile
def read_image(img_path):
"""Keep reading image until succeed.
This can avoid IOError incurred by heavy IO process."""
got_img = False
if not os.path.exists(img_path):
raise IOError("{} does not exist".format(img_path))
while not got_img:
try:
img = Image.open(img_path).convert('RGB')
got_img = True
except IOError:
print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
pass
return img
class ImageDataset(Dataset):
    def __init__(self, data_root, train_file, transform):
        self.data_root = data_root
        self.transform = transform
        self.train_list = []
        # Each line of the train file is "<relative image path> <label>".
        with open(train_file) as train_file_buf:
            for line in train_file_buf:
                line = line.strip()
                if not line:
                    continue
                image_path, image_label = line.split(' ')
                self.train_list.append((os.path.join(self.data_root, image_path), int(image_label)))
def __len__(self):
return len(self.train_list)
    def __getitem__(self, index):
img_path, label = self.train_list[index]
img = read_image(img_path)
if self.transform is not None:
img = self.transform(img)
return img, label, img_path
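if __name__ == '__main__':
    # Usage sketch (illustrative; 'data/images' and 'train_list.txt' are
    # hypothetical paths, and torchvision is an assumed dependency, not part
    # of the original file).
    from torchvision import transforms
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor()])
    dataset = ImageDataset('data/images', 'train_list.txt', transform)
    img, label, img_path = dataset[0]
    print(img.shape, label, img_path)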
|
{"hexsha": "8e64a17c9a76a4b508c43bbea9a22899e51c662a", "size": 1529, "ext": "py", "lang": "Python", "max_stars_repo_path": "addition_module/DMUE/pretrain/utils/dataset.py", "max_stars_repo_name": "weihaoxie/FaceX-Zoo", "max_stars_repo_head_hexsha": "db0b087e4f4d28152e172d6c8d3767a8870733b4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1329, "max_stars_repo_stars_event_min_datetime": "2021-01-13T07:06:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:23:39.000Z", "max_issues_repo_path": "addition_module/DMUE/pretrain/utils/dataset.py", "max_issues_repo_name": "weihaoxie/FaceX-Zoo", "max_issues_repo_head_hexsha": "db0b087e4f4d28152e172d6c8d3767a8870733b4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 115, "max_issues_repo_issues_event_min_datetime": "2021-01-13T10:42:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T03:57:52.000Z", "max_forks_repo_path": "addition_module/DMUE/pretrain/utils/dataset.py", "max_forks_repo_name": "weihaoxie/FaceX-Zoo", "max_forks_repo_head_hexsha": "db0b087e4f4d28152e172d6c8d3767a8870733b4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 351, "max_forks_repo_forks_event_min_datetime": "2021-01-13T07:21:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T14:11:39.000Z", "avg_line_length": 30.58, "max_line_length": 110, "alphanum_fraction": 0.6200130804, "include": true, "reason": "import numpy", "num_tokens": 322}
|
import group_theory.subgroup data.equiv.basic data.fintype algebra.big_operators
open equiv
variables {α : Type*}
def is_transposition (f : perm α) : Prop :=
∃ x y, f x = y ∧ f y = x ∧ ∀ a, a ≠ x → a ≠ y → f a = a
lemma is_transposition_inv {f : perm α} : is_transposition f →
is_transposition (f⁻¹) :=
λ ⟨x, y, h⟩, ⟨x, y, h.2.1 ▸ equiv.left_inv _ _, h.1 ▸ equiv.left_inv _ _,
λ a hax hay, by conv {to_lhs, rw ← h.2.2 a hax hay};
exact equiv.left_inv _ _⟩
variable [fintype α]
lemma product_of_transpositions (f : perm α) : ∃ s : list (perm α),
f = s.prod ∧ ∀ g ∈ s, is_transposition g := sorry
lemma sign_well_defined_aux : ∀ (n : ℕ) (f : perm α) (m l : list (perm α)),
(∀ g ∈ l, is_transposition g) → l.prod = f → l.length = n →
(∀ g ∈ m, is_transposition g) → m.prod = f → l.length % 2 = m.length % 2
| 0 := λ f m,
match m with
| [] := by simp {contextual := tt}
| (k :: m) := λ l hl hlf hl0, begin
  sorry
end
end
lemma sign_well_defined (f : perm α) :
(∀ l : list (perm α), (∀ g ∈ l, is_transposition g) → l.prod = f → l.length % 2 = 0) ∨
(∀ l : list (perm α), (∀ g ∈ l, is_transposition g) → l.prod = f → l.length % 2 = 1) :=
have ∀ n : ℕ, ∀ l m : list (perm α), (∀ g ∈ l, is_transposition g) → l.prod = f → l.length = n →
(∀ g ∈ m, is_transposition g) → m.prod = f → l.length % 2 = m.length % 2,
begin
  sorry
end
|
{"author": "ChrisHughes24", "repo": "leanstuff", "sha": "9efa85f72efaccd1d540385952a6acc18fce8687", "save_path": "github-repos/lean/ChrisHughes24-leanstuff", "path": "github-repos/lean/ChrisHughes24-leanstuff/leanstuff-9efa85f72efaccd1d540385952a6acc18fce8687/transpostions.lean"}
|
[STATEMENT]
lemma continuous_on_swap_args:
assumes "continuous_on (A\<times>B) (\<lambda>(x,y). d x y)"
shows "continuous_on (B\<times>A) (\<lambda>(x,y). d y x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on (B \<times> A) (\<lambda>(x, y). d y x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. continuous_on (B \<times> A) (\<lambda>(x, y). d y x)
[PROOF STEP]
have "(\<lambda>(x,y). d y x) = (\<lambda>(x,y). d x y) \<circ> prod.swap"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>(x, y). d y x) = (\<lambda>(x, y). d x y) \<circ> prod.swap
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
(\<lambda>(x, y). d y x) = (\<lambda>(x, y). d x y) \<circ> prod.swap
goal (1 subgoal):
1. continuous_on (B \<times> A) (\<lambda>(x, y). d y x)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>(x, y). d y x) = (\<lambda>(x, y). d x y) \<circ> prod.swap
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<lambda>(x, y). d y x) = (\<lambda>(x, y). d x y) \<circ> prod.swap
goal (1 subgoal):
1. continuous_on (B \<times> A) (\<lambda>(x, y). d y x)
[PROOF STEP]
by (metis assms continuous_on_compose continuous_on_swap product_swap)
[PROOF STATE]
proof (state)
this:
continuous_on (B \<times> A) (\<lambda>(x, y). d y x)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 590, "file": null, "length": 7}
|
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
from transformers import AutoTokenizer, MBartTokenizer
from src.envs import build_env
import torch.nn.functional as F
import datasets
import random
import pandas as pd
from datasets import Dataset
import torch
import os
from datasets import load_dataset, load_metric
import io
import numpy as np
import sympy as sp
from src.utils import AttrDict
from src.hf_utils import postprocess_text, create_dataset_train, create_dataset_test
torch.cuda.empty_cache()
def preprocess_function_new(examples):
inputs = [prefix + ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
model_inputs = tokenizer(
inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
print(device)
params = AttrDict({
# environment parameters
'env_name': 'char_sp',
'int_base': 10,
'balanced': False,
'positive': True,
'precision': 10,
'n_variables': 1,
'n_coefficients': 0,
'leaf_probs': '0.75,0,0.25,0',
'max_len': 512,
'max_int': 5,
'max_ops': 15,
'max_ops_G': 15,
'clean_prefix_expr': True,
'rewrite_functions': '',
'tasks': 'prim_fwd',
'operators': 'add:10,sub:3,mul:10,div:5,sqrt:4,pow2:4,pow3:2,pow4:1,pow5:1,ln:4,exp:4,sin:4,cos:4,tan:4,asin:1,acos:1,atan:1,sinh:1,cosh:1,tanh:1,asinh:1,acosh:1,atanh:1',
})
language = 'ro' # SPECIFY LANGUAGE HERE.
env = build_env(params)
path1 = "data/train/ode2_10k.train" # SPECIFY PATH OF TRAINING DATA HERE.
train_dataset = create_dataset_train(path=path1, count=10000, language = language)
path2 = "data/valid/ode2.valid" # SPECIFY PATH OF VALIDATION DATA HERE. WE WILL USE ALL OF VALIDATION DATA, NO NEED TO SPECIFY COUNT.
valid_dataset = create_dataset_test(path=path2, language= language)
"""# Tokenizing the Data"""
Model_Type = 'mbart'
is_source_en = True
if Model_Type == 'mbart':
model_checkpoint = "facebook/mbart-large-en-{}".format(language) # SPECIFY PRE-TRAINED MODEL HERE.
metric = load_metric("sacrebleu")
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
elif Model_Type == 'Marian':
if is_source_en:
model_checkpoint = "Helsinki-NLP/opus-mt-en-{}".format(language)
else:
model_checkpoint = "Helsinki-NLP/opus-mt-{}-en".format(language)
metric = load_metric("sacrebleu")
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=False)
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
    # T5 checkpoints expect a task prefix on the source text.
    prefix = "translate English to Romanian: "
else:
    prefix = ""
"""# Create the Final Data Set"""
datasetM = {'train': train_dataset,
'validation': valid_dataset}
max_input_length = 1024 # Set to 512 if it is Marian-MT
max_target_length = 1024 # Set to 512 if it is Marian-MT
source_lang = "en"
target_lang = language
tokenized_datasets_train = datasetM['train'].map(preprocess_function_new, batched=True, num_proc = 48)
tokenized_datasets_valid = datasetM['validation'].map(preprocess_function_new, batched=True)
"""# Fine-tuning the model"""
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
d = 'prim_ode2_10k' # Saving Folder Name
batch_size = 8
args = Seq2SeqTrainingArguments(
"test-translation_{}".format(d),
evaluation_strategy="epoch",
learning_rate=1e-4,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
weight_decay=0.01,
save_total_limit=3,
num_train_epochs=15,
predict_with_generate=False,
fp16=True,
)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_datasets_train,
eval_dataset=tokenized_datasets_valid,
data_collator=data_collator,
tokenizer=tokenizer
)
trainer.train()
model_name = 'mbart_prim_ode2_10k_en_ro' # SPECIFY MODEL SAVING NAME HERE.
torch.save(model, 'models/{}'.format(model_name))
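# Follow-up sketch (illustrative, not part of the original script; assumes the
# save above succeeded and that the full model round-trips through torch.load).
reloaded = torch.load('models/{}'.format(model_name), map_location=device)
reloaded.eval()
sample = tokenizer("y' = y", return_tensors="pt").to(device)
print(tokenizer.decode(reloaded.generate(**sample)[0], skip_special_tokens=True))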
|
{"hexsha": "94139dc9d92526188c959b6d9d100c21e828b6ff", "size": 4371, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "softsys4ai/differentiable-proving", "max_stars_repo_head_hexsha": "ed9b0c1a2803a3d2f75b60b78ec864c6e57fb8c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-24T18:24:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T22:40:20.000Z", "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "softsys4ai/differentiable-proving", "max_issues_repo_head_hexsha": "ed9b0c1a2803a3d2f75b60b78ec864c6e57fb8c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "softsys4ai/differentiable-proving", "max_forks_repo_head_hexsha": "ed9b0c1a2803a3d2f75b60b78ec864c6e57fb8c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1136363636, "max_line_length": 175, "alphanum_fraction": 0.7286662091, "include": true, "reason": "import numpy,import sympy", "num_tokens": 1200}
|
# -*- coding: utf-8 -*-
"""customer-conversion-prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fwoTRHqz3_T-_RekFYT9qNkeqasMEBQ7
**Predict Customer Conversion (Churn) with Machine Learning**
Importing necessary libraries
"""
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
"""Reading and exploring the dataset"""
df = pd.read_csv("customer-churn-dataset.csv")
df
df.shape
df.columns.values
"""To check for missing values or (NA)"""
df.isna().sum()
"""Dataset Statistics"""
df.describe()
df['Churn'].value_counts()
"""Visualizing the conversion"""
sns.countplot(df['Churn'])
"""Percentage-wise results"""
numRetained = df[df.Churn == 'No'].shape[0]
numChurned = df[df.Churn == 'Yes'].shape[0]
# print the percentage of customers that stayed
print(numRetained/(numRetained + numChurned) * 100,'% of customers stayed with the company')
# print the percentage of customers that left
print(numChurned/(numRetained + numChurned) * 100, '% of customers left the company')
"""Gender-wise visualization of customer conversion"""
sns.countplot(x ='gender', hue='Churn', data=df)
"""Visualization of customer conversion for the internet service"""
sns.countplot(x='InternetService', hue='Churn', data=df)
"""Visualization of Numerical data"""
numericFeatures = ['tenure', 'MonthlyCharges']
fig, ax = plt.subplots(1,2, figsize=(28, 8))
df[df.Churn == "No"][numericFeatures].hist(bins=20, color='blue', alpha=0.5, ax=ax)
df[df.Churn == "Yes"][numericFeatures].hist(bins=20, color='orange', alpha=0.5, ax=ax)
"""Dropping unnecessary columns from the dataset"""
cleanDF = df.drop('customerID', axis=1)
# Convert all the non-numeric columns to numeric
for column in cleanDF.columns:
    # Skip columns that are already numeric; label-encode the rest.
    if np.issubdtype(cleanDF[column].dtype, np.number):
        continue
    cleanDF[column] = LabelEncoder().fit_transform(cleanDF[column])
cleanDF.dtypes
"""Scaling of data"""
x = cleanDF.drop('Churn', axis=1)
y = cleanDF['Churn']
x = StandardScaler().fit_transform(x)
"""Split the data into 80% for training and 20% for testing"""
xtrain, xtest, ytrain, ytest = train_test_split(x,y, test_size=0.2, random_state=42)
"""Creating and Training the Logistic Regression Model"""
model = LogisticRegression()
# Train the model
model.fit(xtrain, ytrain)
"""Creating predictions on the Test data"""
predictions = model.predict(xtest)
# print the predictions
print(predictions)
"""Final scores - precision, recall and f1-score"""
print(classification_report(ytest, predictions))
|
{"hexsha": "ef8cf21938d283f4685b5e2b87cec041a3a25187", "size": 3178, "ext": "py", "lang": "Python", "max_stars_repo_path": "customer-conversion-prediction/customer_conversion_prediction.py", "max_stars_repo_name": "rajgmishra/machine-learning-datasets", "max_stars_repo_head_hexsha": "26df1446dc6140d4ab19503c3108e192733481d7", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "customer-conversion-prediction/customer_conversion_prediction.py", "max_issues_repo_name": "rajgmishra/machine-learning-datasets", "max_issues_repo_head_hexsha": "26df1446dc6140d4ab19503c3108e192733481d7", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "customer-conversion-prediction/customer_conversion_prediction.py", "max_forks_repo_name": "rajgmishra/machine-learning-datasets", "max_forks_repo_head_hexsha": "26df1446dc6140d4ab19503c3108e192733481d7", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3965517241, "max_line_length": 92, "alphanum_fraction": 0.731277533, "include": true, "reason": "import numpy", "num_tokens": 785}
|
module LogSynth
using Counters, Markdown, Random
export SkipListDistribution, AliasTableDistribution
md"""
A `SkipListDistribution` provides an implementation of a multinomial
distribution that has ``O(log(n))`` sample time, but which allows the
underlying probability for any element to be adjusted in ``O(log(n))``
time and which supports appending new values to the distribution at
any time (also in ``O(log(n))`` time).
This is done by keeping a unidirectional skip-list that contains the
cumulative distribution. This skip-list can be searched with a random
value ``u \in [0, \sum_i p_i)`` to find the largest index ``m`` such
that ``\sum_{i=1}^m p_i < u``.
Typically, these probabilities are counts, but that isn't a requirement.
A common use of this is to implement a Pitman-Yor process where each sample
has a probability of producing a previously unseen element and each time
a value is sampled, the probability of producing that sample in the future
is increased.
"""
struct SkipListDistribution{Weight <: AbstractFloat, Index <: Integer}
height::Int32
weight::Vector{Vector{Weight}}
child::Vector{Vector{Index}}
α::Float64
discount::Float64
skipProbability::Float64
end
function SkipListDistribution{Weight, Index}(height; alpha=0.0, discount=0.0, skipProbability=0.25) where {Weight <: AbstractFloat, Index <: Integer}
init(
SkipListDistribution{Weight, Index}(height, Vector{Weight}(), Vector{Index}(),
alpha, discount, skipProbability))
end
function SkipListDistribution(height = 10; alpha=0.0, discount=0.0, skipProbability=0.25)
init(
SkipListDistribution{Float64, Int32}(height, Vector{Float64}(), Vector{Int32}(),
alpha, discount, skipProbability))
end
init(self::SkipListDistribution{Weight, Index}) where {Weight, Index}= begin
for i in 1:self.height
push!(self.weight, Vector{Weight}())
push!(self.child, Vector{Index}())
end
return self
end
md"""
Given a probability as an index, return a vector of indexes into the
skip list
"""
import Base: length
Base.length(dist::SkipListDistribution) = Base.length(dist.weight[end])
function getindexvector(dist::SkipListDistribution{Weight, Index}, i::Integer) where {Weight <: AbstractFloat, Index <: Integer}
1 ≤ i ≤ length(dist) || error("Index out of bounds. Expected i ∈ [1, $(length(dist))]")
    r = zeros(Index, dist.height)
    for level = dist.height:-1:1
k = 1
while k < length(dist.child[level]) && dist.child[level][k+1] <= i
k += 1
end
r[level] = k
end
return r
end
total(dist::SkipListDistribution{Weight, Index}) where {Weight <: AbstractFloat, Index <: Integer} = sum(dist.weight[1])
function getindexvector(dist::SkipListDistribution{Weight, Index}, p::Float64) where {Weight <: AbstractFloat, Index <: Integer}
0 ≤ p ≤ 1 || error("Sample probability must be in [0, 1] but was $p")
p = p * total(dist)
r = zeros(Index, dist.height)
w = 0.0
j = 1
for level = 1:dist.height
while j < length(dist.weight[level]) && w + dist.weight[level][j] ≤ p
w += dist.weight[level][j]
j += 1
end
r[level] = j
j = dist.child[level][j]
end
return r
end
getindex(dist::SkipListDistribution{Weight, Index}, p::Float64) where {Weight <: AbstractFloat, Index <: Integer} =
getindexvector(dist, p)[end]
function inc!(dist::SkipListDistribution{Weight, Index}, p::Float64, Δw::Weight) where {Weight <: AbstractFloat, Index <: Integer}
index = getindexvector(dist, p)
for (level,i) in enumerate(index)
dist.weight[level][i] += Δw
end
return index[end]
end
import Base.push!
function Base.push!(dist::SkipListDistribution{Weight, Index}, w::Weight) where {Weight <: AbstractFloat, Index <: Integer}
if length(dist) == 0
for level in 1:dist.height
push!(dist.child[level], 1)
push!(dist.weight[level], w)
end
else
# add the new entry to a subset of levels with zero weight
child = length(dist) + 1
for level in dist.height:-1:1
push!(dist.weight[level], 0)
push!(dist.child[level], child)
child = length(dist.child[level])
            # continue to the next (coarser) level with probability skipProbability, else stop
rand() < dist.skipProbability || break
end
# and then increment all last entries (including old and new)
for level in 1:dist.height
dist.weight[level][end] += w
end
end
dist
end
function draw_value(rng, dist::SkipListDistribution{Weight, Index})::Index where {Weight, Index}
norm = dist.α + total(dist)
newtable = (dist.α + dist.discount * length(dist)) / norm
u = rand(rng)
if u < newtable
push!(dist, 1 - dist.discount)
return length(dist)
else
        # rescale u from [newtable, 1) back to [0, 1) before indexing
        return inc!(dist, (u - newtable) / (1 - newtable), 1.0)
end
end
md"""
An `AliasTableDistribution` provides an implementation of a multinomial
distribution that is good for O(1) sampling from static distributions.
If the distribution changes often, a `SkipListDistribution` may be a
better option.
"""
struct AliasTableDistribution{T}
u::Vector{Float64}
k::Vector{Int32}
v::Vector{T}
cnt::Counter{T}
AliasTableDistribution{T}(cnt::Counter{T}) where T = begin
u, k, v = build_alias_table(cnt)
new{T}(u, k, v, cnt)
end
end
build_alias_table(cnt::Counter{T}) where T = begin
p = float.(values(cnt))
v = collect(keys(cnt))
p = p ./ sum(p)
n = length(v)
u = p .* n
k = collect(1:length(p))
ϵ = 1.0 / n
z = sortperm(p)
little = [z[i] for i in 1:n if p[z[i]] < ϵ]
big = [z[i] for i in n:-1:1 if p[z[i]] > ϵ]
i = 1
j = 1
while i ≤ length(little) && j ≤ length(big)
deficit = 1 - u[little[i]]
k[little[i]] = big[j]
i += 1
u[big[j]] -= deficit
if u[big[j]] < 1
push!(little, big[j])
j += 1
elseif u[big[j]] == 1
j += 1
end
end
return u, k, v
end
draw_value(rng::AbstractRNG, dist::AliasTableDistribution{T}) where T = begin
n = length(dist.u)
z = n * rand(rng) + 1
i = Int(floor(z))
z = z - i
if z < dist.u[i]
return dist.v[i]
else
return dist.v[dist.k[i]]
end
end
end # module
|
{"hexsha": "031ebe69d13aabceed57db1dffd722daae009d71", "size": 7024, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LogSynth.jl", "max_stars_repo_name": "tdunning/LogSynth.jl", "max_stars_repo_head_hexsha": "f585182b5aa16230fecabdbd50ad77776bfe24a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LogSynth.jl", "max_issues_repo_name": "tdunning/LogSynth.jl", "max_issues_repo_head_hexsha": "f585182b5aa16230fecabdbd50ad77776bfe24a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LogSynth.jl", "max_forks_repo_name": "tdunning/LogSynth.jl", "max_forks_repo_head_hexsha": "f585182b5aa16230fecabdbd50ad77776bfe24a2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5391304348, "max_line_length": 150, "alphanum_fraction": 0.6121867882, "num_tokens": 1907}
|
SUBROUTINE HC_FVLD ( advise, nfcst, flat, flon, fstype, f34kt,
+ f50kt, f64kt, iret)
C************************************************************************
C* HC_FVLD *
C* *
C* This subroutine finds the forecasted latitudes and longitudes, and *
C* the 34, 50 and the 64 knot or 100 kt forecast wind radii. It also *
C* determines the forecast storm type. If they are not found, the *
C* forecast position of the storm and the 34, 50 and 64 kt or 100kt *
C* strings are set to missing, and the forecast storm type is set blank.*
C* *
C* HC_FVLD ( ADVISE, NFCST, FLAT, FLON, FSTYPE, F34KT, F50KT, F64KT, *
C* IRET ) *
C* *
C* Input parameters: *
C* ADVISE CHAR* Advisory wind string *
C* NFCST INTEGER Number of forecast times *
C* *
C* Output parameters: *
C* FLAT(*) REAL Array of forecast latitudes *
C* FLON(*) REAL Array of forecast longitudes *
C* FSTYPE(*) CHAR* Forecast storm type *
C* F34KT(*) CHAR* 34kt string at all fcst times *
C* F50KT(*) CHAR* 50kt string at all fcst times *
C* F64KT(*) CHAR* 64kt string at all fcst times *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* *
C** *
C* Log: *
C* A. Hardy/GSC 5/00 *
C* D. Kidwell/NCEP 7/01 Added fcst storm type and 34kt fcst *
C* radii at 24, 48, 72 hrs; cleaned up *
C* A. Hardy/SAIC 8/01 Added 34kt fcst radii at 12 and 36 hrs *
C* D. Kidwell/NCEP 2/02 Added check for REMNANT LOW *
C* D. Kidwell/NCEP 3/02 Added f50kt & f64kt strings *
C* A. Hardy/NCEP 9/02 Corrected long. check for forecast pos. *
C* A. Hardy/NCEP 10/02 Added space for 'inlat' index checks *
C* D. Kidwell/NCEP 2/03 Added argument nfcst *
C* A. Hardy/NCEP 10/03 Modified to decode JTWC messages *
C* D. Kidwell/NCEP 6/04 Updated comments for JTWC max wind *
C* m.gamazaychikov/SAIC 07/05 Added check to get the correct JTWC lats*
C* m.gamazaychikov/SAIC 04/08 Add code to decode EXTRATROPICAL string *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
INCLUDE 'BRIDGE.PRM'
C*
CHARACTER*(*) advise, fstype (*), f34kt (*), f50kt (*),
+ f64kt (*)
REAL flat(*), flon(*)
C*
CHARACTER advstr*(DCMXBF), t34*50, t50*50, t64*50, dum*1
LOGICAL west
C------------------------------------------------------------------------
iret = 0
itcm = 0
iext = 0
advstr = advise
CALL ST_LSTR( advstr, ilen, iret)
C
C* Find the forecast latitude and longitude, storm type and wind
C* radii at all forecast times.
C
DO i = 1, nfcst
C
CALL ST_NOCC ( advise(:ilen), 'VALID AT:',
+ i, ipos, iret)
IF ( ipos .eq. 0 ) THEN
CALL ST_NOCC ( advise(:ilen), 'VALID',
+ i, ipos, iret)
END IF
ityp = 0
inlat = 0
idiss = 0
idash = 0
IF ( ipos .gt. 0 ) THEN
ifvd = INDEX( advise( ipos:ilen), 'Z')
idash = INDEX( advise( ipos:ilen), '---')
IF ( idash .gt. 0 ) THEN
CALL ST_ALNM ( advise (ipos+ifvd:ipos+ifvd),
+ ityp, ier )
IF ( ityp .eq. 1 ) THEN
idash = 6
ELSE
idash = 5
END IF
END IF
IF ( ifvd .gt. 0 ) THEN
C
C*	     When searching for the latitude and longitude
C*	     designators, the character following each designator
C*	     must be checked in order to skip over the check sum
C*	     position found in the old format JTWC reports.
C
ipos = ipos+ifvd + idash
innn = INDEX( advise( ipos:ipos+6), 'N')
IF ( innn .ne. 0 ) THEN
IF (advise(ipos+innn:ipos+innn)
+ .eq. ' ' ) THEN
inlat = innn
ELSE
CALL ST_ALNM (advise(ipos+7:ipos+7),
+ ityp, ier)
inlat = innn
END IF
ELSE
innn = INDEX(advise(ipos:ipos+6), 'S')
IF ( innn .ne. 0 ) THEN
CALL ST_ALNM (advise(ipos+6:ipos+6),
+ ityp, ier)
IF ( (advise(ipos+6:ipos+6) .eq. ' ' ) .or.
+ (ityp .eq. 1 ) )THEN
inlat = innn
END IF
END IF
END IF
idiss = INDEX( advise( ipos:ipos+6), 'DIS')
ireml = MIN ( ipos+40, ilen )
irem = INDEX (advise ( ipos:ireml ), 'REMNANT LOW')
iext=INDEX(advise ( ipos:ireml+210),'EXTRATROPICAL')
END IF
END IF
C
C* Found either north or south latitude and not the word
C* 'DISSIPATING', so continue. Otherwise, set lat. long.
C* and 34, 50 and 64kt wind radii to missing, and storm type
C* to blank.
C
IF ( ( inlat .ne. 0 ) .and. ( idiss .eq. 0 ) ) THEN
CALL ST_C2R ( advise(ipos:ipos+inlat-2), 1, flat(i),
+ num, iret )
IF ( advise(ipos+inlat:ipos+inlat) .eq. 'S' )
+ flat(i) = -flat(i)
ipos = ipos + inlat
IF ( ityp .eq. 1 ) THEN
ipos = ipos + 2
END IF
C
C* Locate the longitude.
C
west = .true.
iwlon = INDEX( advise( ipos:ipos+14), 'W')
IF ( iwlon .eq. 0 ) west = .false.
ielon = INDEX( advise( ipos:ipos+14), 'E')
IF ( ( ielon .ne. 0 ) .and. ((ielon .lt. iwlon ) .or.
+ ( .not. west ) ) ) THEN
iwlon = ielon
END IF
CALL ST_C2R ( advise(ipos:ipos+iwlon-2), 1,
+ flon(i), num, iret )
IF ( advise(ipos+iwlon-1:ipos+iwlon-1) .eq. 'W' )
+ flon(i) = -flon(i)
ipos = ipos + iwlon
ivalid = INDEX ( advise ( ipos:ilen ), 'VALID' )
IF ( ivalid .eq. 0 ) ivalid = ilen - ipos
C
C* Get the forecast wind radii.
C
CALL HC_QUAD ( advise ( ipos:ipos + ivalid ), t64,
+ t50, t34, dum, fstype ( i ), ier )
ikt = INDEX ( advise ( ipos:ilen ), 'KT...GUSTS' )
IF ( ikt .eq. 0 )
+ ikt = INDEX ( advise ( ipos:ilen ), 'KT, GUSTS' )
IF (ikt .gt. 0 ) THEN
C
C* Determine forecast storm type based on gust speed.
C
CALL ST_NUMB ( advise (ipos+ikt-5:ipos+ikt-2),
+ iwnd, ier )
IF ( iwnd .ge. 64 ) THEN
fstype ( i ) = 'HU'
ELSE IF ( (iwnd .lt. 64 ) .and.
+ ( iwnd .ge. 34 ) ) THEN
fstype ( i ) = 'TS'
ELSE IF ( iwnd .lt. 34 ) THEN
fstype ( i ) = 'TD'
END IF
END IF
f34kt ( i ) = t34
f50kt ( i ) = t50
f64kt ( i ) = t64
IF ( ( fstype ( i ) .eq. 'TD' ) .and. ( irem .gt. 0 ) )
+ fstype ( i ) = 'RL'
IF ( iext .gt. 0 ) fstype ( i ) = 'EX'
ELSE
C
C* Use 'MW' to signify Maximum Wind. This represents either
C* the 64 kt winds (for TPC, and JTWC as of 6/1/04) or
C* 100 kt winds (for JTWC before 6/1/04).
C
flat(i) = -9999.
flon(i) = -9999.
fstype(i) = ' '
f34kt (i) = '34 -9999 -9999 -9999 -9999'
f50kt (i) = '50 -9999 -9999 -9999 -9999'
f64kt (i) = 'MW -9999 -9999 -9999 -9999'
END IF
END DO
C
RETURN
END
|
{"hexsha": "f71abcec4e8d10c92d1ce9a221c804e910017b28", "size": 8278, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/bridge/hc/hcfvld.f", "max_stars_repo_name": "sgdecker/gempak", "max_stars_repo_head_hexsha": "92f9a3a8ee667ec49a9082f44380e27f61ca716b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/bridge/hc/hcfvld.f", "max_issues_repo_name": "sgdecker/gempak", "max_issues_repo_head_hexsha": "92f9a3a8ee667ec49a9082f44380e27f61ca716b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/bridge/hc/hcfvld.f", "max_forks_repo_name": "sgdecker/gempak", "max_forks_repo_head_hexsha": "92f9a3a8ee667ec49a9082f44380e27f61ca716b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 40.9801980198, "max_line_length": 73, "alphanum_fraction": 0.4375453008, "num_tokens": 2554}
|
# import json
# import os
# import cv2
# import numpy as np
# from tqdm import tqdm
# from pycocotools import mask as maskUtils
# parent_path = '/data/zequn/datasets/coco/val2017'
# json_file = '/data/zequn/datasets/coco/annotations/instances_val2017.json'
# with open(json_file) as anno_:
# annotations = json.load(anno_)
# # import pdb; pdb.set_trace()
# def apply_mask(image, segmentation):
# alpha = 0.5
# color = (0, 0.6, 0.6)
# threshold = 0.5
#     mask = maskUtils.decode(segmentation)  # decode the RLE-encoded segmentation
# mask = np.where(mask >= threshold, 1, 0).astype(np.uint8)
#     for c in range(3):  # 3 color channels
#         # where mask == 1 take the blended value, otherwise keep the original pixel
# image[:, :, c] = np.where(mask == 1,
# image[:, :, c] *
# (1 - alpha) + alpha * color[c] * 255,
# image[:, :, c])
# return image
# results = []
# for i in range(len(annotations['annotations'])):
# image_id = annotations['annotations'][i]['image_id']
#     # contains size (image height/width) and counts (the compressed mask); decode it
#     # with mask = maskUtils.decode(encoded_mask), which requires:
#     # from pycocotools import mask as maskUtils
# segmentation = annotations['annotations'][i]['segmentation']
# full_path = os.path.join(parent_path, str(image_id).zfill(12) + '.jpg')
# image = cv2.imread(full_path)
# mask_image = apply_mask(image, segmentation)
# cv2.imshow('demo', mask_image)
# cv2.waitKey(5000)
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import os
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
dataDir = '/data/zequn/datasets/coco'
dataType = 'val2017'
annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataType)
# I holds the image loaded from its path
imgs = os.listdir('/data/zequn/datasets/coco/val2017')
coco = COCO(annFile)  # load the annotations once rather than on every iteration
os.makedirs('vis_coco', exist_ok=True)  # ensure the output directory exists
for i, img in enumerate(imgs):
    if i < 100:
        continue
    if i == 200:
        break
    I = io.imread(dataDir + '/val2017/' + img)
plt.imshow(I)
plt.axis('off')
annIds = coco.getAnnIds(imgIds=int(img.split('.')[0]), iscrowd=None)
# print(annIds)
anns = coco.loadAnns(annIds)
# print(anns)
coco.showAnns(anns)
plt.savefig('vis_coco/{}.png'.format(i+1))
plt.clf()
|
{"hexsha": "397b6234bb915fc75f7c2d9c6af48b824a9eaed4", "size": 2280, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdetection/vis_coco.py", "max_stars_repo_name": "InukKang/Pedestron", "max_stars_repo_head_hexsha": "b592292389f313907c18b38cc9066c3b6f8ad5a4", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 259, "max_stars_repo_stars_event_min_datetime": "2021-02-03T09:50:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:57:24.000Z", "max_issues_repo_path": "mmdetection/vis_coco.py", "max_issues_repo_name": "Gary828/FcaNet", "max_issues_repo_head_hexsha": "bb14e71cf1c81d2135e48a85755feb33ec6b1795", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2021-02-03T14:54:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:53:25.000Z", "max_forks_repo_path": "mmdetection/vis_coco.py", "max_forks_repo_name": "Gary828/FcaNet", "max_forks_repo_head_hexsha": "bb14e71cf1c81d2135e48a85755feb33ec6b1795", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2021-02-03T11:08:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:58:00.000Z", "avg_line_length": 30.8108108108, "max_line_length": 134, "alphanum_fraction": 0.6175438596, "include": true, "reason": "import numpy", "num_tokens": 706}
|
# coding=utf-8
import pandas as pd
import numpy as np
from config.neighborhoods import Neighborhoods
dataFrame = pd.read_csv('data/uber_map.csv')
def rgb(minimum, maximum, value):
minimum, maximum = float(minimum), float(maximum)
r = 255
ratio = (value-minimum)/(maximum - minimum)
bg = 20 + int(max(0, 235*ratio))
b = bg
g = bg
return r, g, b
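# For example (hypothetical values): rgb(0, 10, 5) gives ratio = 0.5 and
# bg = 20 + int(235 * 0.5) = 137, i.e. the colour (255, 137, 137) -- a
# red-to-white gradient where larger values map to lighter shades.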
nb_avgs = []
for nbhood in Neighborhoods:
requests = []
for request in dataFrame[nbhood['name']]:
requests.append(request)
avg = np.mean(requests)
    if nbhood['name'] != "Salinas":
        nb_avgs.append(avg)
        print(nbhood['name'], ":", avg)
minimo = np.min(nb_avgs)
maximo = np.max(nb_avgs)
print ("Minimo:", minimo)
print ("Maximo:", maximo)
index = 0
for nbhood in Neighborhoods:
    print(nbhood['name'])
    if nbhood['name'] != "Salinas":
        print(rgb(minimo, maximo, nb_avgs[index]))
        index += 1
|
{"hexsha": "b4e811ffe9f4f29f5fc937c5f59f7964a28500ec", "size": 929, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/calculate_avgs.py", "max_stars_repo_name": "vandrefonseca/Copia-Uber-Natal", "max_stars_repo_head_hexsha": "09c453d24bc2d817f18508b217875a8351cdc967", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/calculate_avgs.py", "max_issues_repo_name": "vandrefonseca/Copia-Uber-Natal", "max_issues_repo_head_hexsha": "09c453d24bc2d817f18508b217875a8351cdc967", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/calculate_avgs.py", "max_forks_repo_name": "vandrefonseca/Copia-Uber-Natal", "max_forks_repo_head_hexsha": "09c453d24bc2d817f18508b217875a8351cdc967", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8205128205, "max_line_length": 53, "alphanum_fraction": 0.638320775, "include": true, "reason": "import numpy", "num_tokens": 270}
|
struct DeepEnsemble
models::Array
DeepEnsemble(generator, N::Int) = new([generator() for _=1:N])
end
Flux.@functor DeepEnsemble
Flux.trainable(m::DeepEnsemble) = (Flux.trainable(model) for model in m.models)
# Get the mean and variance estimate from each network individually
function individual_forward(m::DeepEnsemble, x)
os = [m(x) for m in m.models]
ndim = Int(size(os[1],1) / 2)
μs = [o[1:ndim, :] for o in os]
σ²s = [softplus.(o[ndim+1:end, :]) .+ 1f-3 for o in os]
μs, σ²s
end
# Compute the ensemble prediction as a uniform mixture of Gaussians:
# mean μ* = mean(μᵢ), variance σ*² = mean(σᵢ² + μᵢ²) - μ*²
function (m::DeepEnsemble)(x)
μs, σ²s = individual_forward(m, x)
t1 = [σ² .+ μ .^2 for (μ, σ²) in zip(μs, σ²s)]
mean(μs), mean(t1) .- mean(μs).^2
end
# Gaussian log-density in (μ, σ²), omitting the constant -log(2π)/2 term (it does not affect gradients)
de_gaussian_logpdf(μ, σ², y) = -log.(σ²) ./ 2f0 .- (y .- μ).^2 ./ (2f0 .* σ²)
# Logpdf of an (x, y) pair according to the full ensemble
Distributions.logpdf(m::DeepEnsemble, x, y) = de_gaussian_logpdf(m(x)..., y)
# Gets the mean negative log pdf for each network
function training_loss(m::DeepEnsemble, x, y, weights = ones(Float32, size(y)...))
μs, σ²s = individual_forward(m, x)
mean([-mean(weights .* de_gaussian_logpdf(μ, σ², y)) for (μ, σ²) in zip(μs, σ²s)])
end
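# A minimal usage sketch (hypothetical generator and shapes; assumes Flux is
# loaded; the generator must output 2*ndim rows, means stacked on raw
# variance parameters):
#
#     gen() = Chain(Dense(4, 64, relu), Dense(64, 2))  # 1-d target: [μ; raw σ²]
#     de = DeepEnsemble(gen, 5)
#     x = rand(Float32, 4, 16)
#     μ, σ² = de(x)
#     loss = training_loss(de, x, rand(Float32, 1, 16))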
struct DeepClassificationEnsemble
models::Array
DeepClassificationEnsemble(generator, N::Int) = new([generator() for _=1:N])
end
Flux.@functor DeepClassificationEnsemble
Flux.trainable(m::DeepClassificationEnsemble) = (Flux.trainable(model) for model in m.models)
# Get the per-network class probability estimates
function individual_forward(m::DeepClassificationEnsemble, x)
    [softmax(m(x)) for m in m.models]
end
# Compute the ensemble prediction as the mean of the per-network class probabilities
function (m::DeepClassificationEnsemble)(x)
ps = individual_forward(m, x)
mean(ps)
end
# Logpdf of an (x, y) pair according to the full ensemble
function Distributions.logpdf(m::DeepClassificationEnsemble, x, y)
log.(sum(m(x) .* y, dims=1) .+ 1f-10)
end
# Mean cross-entropy across the ensemble's networks (the `weights` argument is accepted for API symmetry but is currently unused)
function training_loss(m::DeepClassificationEnsemble, x, y, weights = ones(Float32, size(y)...))
ps = individual_forward(m, x)
mean([Flux.Losses.crossentropy(p, y) for p in ps])
end
|
{"hexsha": "a1bf42c1548edcdb00169566e7c698887aab939d", "size": 2381, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/extras/deep_ensembles.jl", "max_stars_repo_name": "ancorso/Shard.jl", "max_stars_repo_head_hexsha": "77b3b891494ca7f12113d86854641404ac961ad6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-09-20T19:18:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T19:14:27.000Z", "max_issues_repo_path": "src/extras/deep_ensembles.jl", "max_issues_repo_name": "ancorso/Shard.jl", "max_issues_repo_head_hexsha": "77b3b891494ca7f12113d86854641404ac961ad6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/extras/deep_ensembles.jl", "max_forks_repo_name": "ancorso/Shard.jl", "max_forks_repo_head_hexsha": "77b3b891494ca7f12113d86854641404ac961ad6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-10-15T00:55:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T02:49:55.000Z", "avg_line_length": 34.0142857143, "max_line_length": 96, "alphanum_fraction": 0.6728265435, "num_tokens": 710}
|
abstract type Grid{d} end
#
# # backward backward compatibility
nodes(::Type{<:Union{ListOfPoints,ListOfPoints{d}}}, grid::Grid{d}) where d = nodes(grid)
nodes(::Type{<:Matrix}, grid::Grid) = copy(from_LOP(nodes(grid)))
node(::Type{<:Union{Point,Point{d}}}, grid::Grid{d}, i::Int) where d = node(grid,i)
node(::Type{<:Vector}, grid::Grid, i::Int) = Vector(node(grid,i))
import Base
Base.ndims(grid::Grid{d}) where d = d
function Base.show(io::IO, grid::Grid)
print(io, typeof(grid))
end
struct EmptyGrid <: Grid{0}
# this grid does not exist ;-)
end
nodes(grid::EmptyGrid) = nothing
n_nodes(grid::EmptyGrid) = 0
node(grid::EmptyGrid, i::Int) = nothing # fail if i!=1 ?
##########################
# Grid made of one point #
##########################
struct PointGrid{d} <: Grid{d}
point::SVector{d,Float64}
end
function (::Type{<:PointGrid})(point::Vector{Float64})
d = length(point)
PointGrid{d}(SVector{d,Float64}(point...))
end
nodes(grid::PointGrid) = [grid.point]
n_nodes(grid::PointGrid) = 1
node(grid::PointGrid, i::Int) = grid.point # fail if i!=1 ?
#####################
# Unstructured Grid #
#####################
struct UnstructuredGrid{d} <: Grid{d}
nodes::ListOfPoints{d}
end
# Old-convention
function UnstructuredGrid{d}(nodes::Matrix{Float64}) where d
N = size(nodes,1)
@assert d == size(nodes,2)
UnstructuredGrid{d}(reshape(reinterpret(Point{d}, vec(copy(nodes'))), (N,)))
end
nodes(grid::UnstructuredGrid) = grid.nodes
n_nodes(grid::UnstructuredGrid) = length(grid.nodes)
node(grid::UnstructuredGrid, i::Int) = grid.nodes[i] # fail if i!=1 ?
node(::Type{<:Point}, grid::UnstructuredGrid, i::Int) = node(grid,i)
function Product(a::UnstructuredGrid{d1}, b::UnstructuredGrid{d2}) where d1 where d2
A = [Base.product(a.nodes, b.nodes)...]
N = length(A)
d = d1 + d2
nodes = reshape(reinterpret(Point{d},(A)),(N,))
return UnstructuredGrid{d}(nodes)
end
#################
# CartesianGrid #
#################
function mlinspace(min, max, n)
# this now returns an iterator
nodes = map((x,y,z)->range(x,stop=y,length=z), min, max, n)
return Base.product(nodes...)
end
struct CartesianGrid{d} <: Grid{d}
min::Point{d}
max::Point{d}
n::SVector{d,Int}
nodes::ListOfPoints{d}
end
function (::Type{<:CartesianGrid})(min::SVector{d,Float64}, max::SVector{d,Float64}, n::SVector{d,Int64}) where d
A = [mlinspace(min, max, n)...]
N = prod(n)
mm = reshape(reinterpret(Point{d},vec(A)),(N,))
return CartesianGrid{d}(min, max, n, mm)
end
(::Type{<:CartesianGrid})(min::Vector{Float64},max::Vector{Float64},n::Vector{Int64}) = CartesianGrid(SVector(min...), SVector(max...), SVector(n...))
nodes(grid::CartesianGrid{d}) where d = grid.nodes
n_nodes(grid::CartesianGrid{d}) where d = length(grid.nodes)
node(grid::CartesianGrid{d}, i::Int) where d = grid.nodes[i]
nodes(::Type{<:ListOfPoints}, grid::CartesianGrid{d}) where d = grid.nodes
node(::Type{<:Point}, grid::CartesianGrid{d}, i::Int64) where d = node(grid,i)
function Product(a::CartesianGrid{d1}, b::CartesianGrid{d2}) where d1 where d2
return Dolo.CartesianGrid{d1+d2}( [a.min; b.min], [a.max; b.max], [a.n; b.n])
end
################
# Smolyak Grid #
################
struct SmolyakGrid{d} <: Grid{d}
smol_params::BM.SmolyakParams{Float64,Vector{Int}}
nodes::Matrix{Float64}
B_nodes::Matrix{Float64}
end
function SmolyakGrid{d}(min::Point{d}, max::Point{d}, mu::SVector{d,Int64}) where d
sp = BM.SmolyakParams(d, Vector(mu), Vector(min), Vector(max))
nodes = BM.nodes(sp)
B_nodes = BM.evalbase(sp, nodes)
return SmolyakGrid{d}(sp, nodes, B_nodes)
end
SmolyakGrid(min::Point{d}, max::Point{d}, mu::SVector{d,Int64}) where d = SmolyakGrid{d}(min,max,mu)
SmolyakGrid(min::Point{d}, max::Point{d}, mu::Int64) where d = SmolyakGrid{d}(min, max, SVector([mu for i=1:d]...))
SmolyakGrid{d}(min::Point{d}, max::Point{d}, mu::Int64) where d = SmolyakGrid{d}(min, max, SVector([mu for i=1:d]...))
function SmolyakGrid(min::Vector{Float64},max::Vector{Float64},mu::Union{Vector{Int64},Int64})
d = length(min)
mmu = mu isa Int ? ffill(mu,d) : mu
@assert d == length(max) == length(mmu)
SmolyakGrid{d}(SVector(min...), SVector(max...), SVector(mu...))
end
#
function SmolyakGrid{d}(min::Vector{Float64},max::Vector{Float64},mu::Union{Vector{Int64},Int64}) where d
mmu = mu isa Int ? fill(mu,d) : mu
@assert d == length(min) == length(max) == length(mmu)
SmolyakGrid{d}(SVector(min...), SVector(max...), SVector(mu...))
end
nodes(grid::SmolyakGrid{d}) where d = reshape(reinterpret(Point{d}, vec(copy(grid.nodes'))),( size(grid.nodes,1),))
n_nodes(grid::SmolyakGrid) = size(grid.nodes,1)
node(grid::SmolyakGrid{d}, i::Int) where d = Point{d}(grid.nodes[i,:]...)
###############
# Random Grid #
###############
struct RandomGrid{d} <: Grid{d}
min::SVector{d,Float64}
max::SVector{d,Float64}
n::Int64
nodes::ListOfPoints{d}
end
function (::Type{<:Union{RandomGrid,RandomGrid{d}}})(min::Point{d}, max::Point{d}, n::Int) where d
nodes = reshape(reinterpret(Point{d}, vec(rand(d, n))), (n,)) # on [0, 1]
for nn=1:length(n)
nodes[nn] = nodes[nn] .* (max-min) + min
end
RandomGrid(min, max, n, copy(nodes))
end
function RandomGrid(min::Vector{Float64}, max::Vector{Float64}, n::Int)
d = length(min)
@assert d == length(max)
dim_err = DimensionMismatch("min was length $d, max must be also")
length(max) == d || dim_err
all(max .> min) || error("max must be greater than min")
RandomGrid{d}(SVector(min...),SVector(max...),n)
end
function RandomGrid{d}(min::Vector{Float64}, max::Vector{Float64}, n::Int) where d
@assert d == length(min)
RandomGrid(min,max,d)
end
nodes(grid::RandomGrid) = grid.nodes
nodes(::Type{<:ListOfPoints}, grid::RandomGrid) = nodes(grid)
n_nodes(grid::RandomGrid) = length(grid.nodes)
node(grid::RandomGrid,i::Int) = grid.nodes[i]
|
{"hexsha": "88c5d179a68e2c8fdb2eeaccc5571864f8860174", "size": 5969, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/numeric/grids.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Dolo.jl-9d24351c-2990-5e1b-a277-04c4b809c898", "max_stars_repo_head_hexsha": "f507e1b812d8980d87ccac33bfd56677c8d70ec0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/numeric/grids.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Dolo.jl-9d24351c-2990-5e1b-a277-04c4b809c898", "max_issues_repo_head_hexsha": "f507e1b812d8980d87ccac33bfd56677c8d70ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/numeric/grids.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Dolo.jl-9d24351c-2990-5e1b-a277-04c4b809c898", "max_forks_repo_head_hexsha": "f507e1b812d8980d87ccac33bfd56677c8d70ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0885416667, "max_line_length": 150, "alphanum_fraction": 0.6371251466, "num_tokens": 1901}
|
'''
@lanhuage: python
@Descripttion:
@version: beta
@Author: xiaoshuyui
@Date: 2020-05-07 08:55:29
@LastEditors: xiaoshuyui
@LastEditTime: 2020-05-07 10:58:40
'''
'''
this file define a class to save the result of the mask of parse
the mask will be save as a gray image using different color to represent different
object
'''
import numpy as np
import logging
from PIL import Image,ImageDraw
import random
class label_mask_writer:
def __init__(
self,
label_num_dict,
save_file_path,
image_height,
image_width):
self.label_num_dict = label_num_dict
self.save_file_path = save_file_path
self.image_height = image_height
self.image_width = image_width
self.labels = []
self.shapes = []
def save_mask_image(self, shapes):
print("==================>")
print(shapes)
print("<==================")
for shape in shapes:
self.add_mask_label(shape['label'])
self.add_shape_points(shape['points'])
image = self.get_mask_image()
image.save(self.save_file_path, 'PNG')
def add_mask_label(self, label):
self.labels.append(label)
def add_shape_points(self, shape_points):
self.shapes.append(shape_points)
def get_mask_image(self):
'''
convert label and shapes to gray image mask
:return: gray image mask
'''
assert len(self.labels) == len(self.shapes)
mask_bg = Image.new('L',(self.image_width,self.image_height))
mask_draw = ImageDraw.Draw(mask_bg)
if self.labels:
index = 0
print("==================>")
print(self.label_num_dict)
print("<==================")
for label in self.labels:
# color = self.label_num_dict[label]
# if color == 1:
# color = color * 255
vertex = self.shapes[index]
color = random.randint(64,255)
mask_draw.polygon(vertex,fill=color)
index += 1
else:
logging.error('there are no shapes to save !')
return mask_bg
|
{"hexsha": "71d489e54ff6cb3389fc121604a53c731f2d8bd2", "size": 2223, "ext": "py", "lang": "Python", "max_stars_repo_path": "libs/saveMaskImage.py", "max_stars_repo_name": "guchengxi1994/LabelImgTool", "max_stars_repo_head_hexsha": "e7595f6758a3756379c8a1534e778bb57b71730f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/saveMaskImage.py", "max_issues_repo_name": "guchengxi1994/LabelImgTool", "max_issues_repo_head_hexsha": "e7595f6758a3756379c8a1534e778bb57b71730f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/saveMaskImage.py", "max_forks_repo_name": "guchengxi1994/LabelImgTool", "max_forks_repo_head_hexsha": "e7595f6758a3756379c8a1534e778bb57b71730f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8701298701, "max_line_length": 82, "alphanum_fraction": 0.5663517769, "include": true, "reason": "import numpy", "num_tokens": 489}
|
[STATEMENT]
lemma borel_measurable_ereal_prod[measurable (raw)]:
fixes f :: "'c \<Rightarrow> 'a \<Rightarrow> ereal"
assumes "\<And>i. i \<in> S \<Longrightarrow> f i \<in> borel_measurable M"
shows "(\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
?i \<in> S \<Longrightarrow> f ?i \<in> borel_measurable M
goal (1 subgoal):
1. (\<lambda>x. \<Prod>i\<in>S. f i x) \<in> borel_measurable M
[PROOF STEP]
by (induction S rule: infinite_finite_induct) auto
|
{"llama_tokens": 277, "file": null, "length": 2}
|
#!/usr/bin/env python
"""Extract subcatchment and routing information from SWMM input file to GIS.
Reads subcatchment geometries and routing from a SWMM input (.inp) file and
saves them as shapefiles into the same folder as the SWMM input file.
Copyright (C) 2018 Tero Niemi, Aalto University School of Engineering
TODO: Add message to user of NaN values in landuse_data?
TODO: Add LID routing output
TODO: Add junction output shapefile
TODO: Add conduit output shapefile
"""
import os
import sys
import numpy as np
import pandas as pd
import geopandas as gpd
import shapely.wkt
crs = {'init': 'epsg:4326'} # Default Coordinate Reference System
# Check input parameters
if (len(sys.argv) < 2 or len(sys.argv) > 3):
print("Usage:\n"
"""$ python inp2gis.py [PATH TO *.inp FILE] 'EPSG:[XXXX]'""")
sys.exit()
else:
if (not sys.argv[1].lower().endswith('.inp')):
print("Usage:\n"
"""$ python inp2gis.py [PATH TO *.inp FILE] 'EPSG:[XXXX]'""")
sys.exit()
if (len(sys.argv) == 2):
print('Using EPSG:4326 as default Coordinate Reference System.')
elif (len(sys.argv) == 3):
if not (sys.argv[2].lower().startswith('epsg:')):
print("Usage:\n"
"""$ python inp2gis.py [PATH TO *.inp FILE] 'EPSG:[XXXX]'""")
sys.exit()
else:
crs = {'init': sys.argv[2].lower()} # Custom CRS
subcatchment_data = []
landuse_data = []
subarea_data = []
infiltration_data = []
lid_data = []
coordinate_data = []
polygon_data = []
tags_data = []
# Go through the SWMM inp file
with open(sys.argv[1], 'rt', encoding='ISO-8859-1') as inp_file:
# Check for subcatchment information
for line in inp_file:
if '[subcatchments]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';;'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
elif row.startswith(';'): # Save landuse
try:
# TJN 28 Feb 2018 Add .rstrip() here to remove trailing
# newline - this has not been tested properly!
landuse_data.append(int(row.rstrip().split(";")[1]))
except ValueError:
landuse_data.append(row.rstrip().split(";")[1])
else: # Save data
subcatchment_data.append(row.split())
# Check for subarea information
if '[subareas]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
subarea_data.append(row.split())
# Check for infiltration information
if '[infiltration]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
infiltration_data.append(row.split())
# Check for LID usage information
if '[lid_usage]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
lid_data.append(row.split())
# Check for coordinate information
if '[coordinates]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
coordinate_data.append(row.split())
if '[polygons]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
polygon_data.append(row.split())
# Check for tag information
if '[tags]' in line.lower():
for idx, row in enumerate(inp_file):
if row.startswith(';;'): # Skip comment rows
continue
elif row.isspace(): # Stop looking after empty line
break
else: # Save data
tags_data.append(row.rstrip().split(None, 2))
# Create dataframes from data
subcatchment_col_names = ['Name',
'Rgage',
'OutID',
'Area',
'Imperv_pct',
'Width',
'Slope',
'Clength']
if (len(subcatchment_data[0]) == 9):
subcatchment_col_names.append('SPack')
subcatchment_df = pd.DataFrame(subcatchment_data,
columns=subcatchment_col_names)
subcatchment_df[['Area',
'Imperv_pct',
'Width',
'Slope',
'Clength']] = subcatchment_df[['Area',
'Imperv_pct',
'Width',
'Slope',
'Clength']].astype(float)
# Check if landuse was given
if landuse_data:
# Use given landuse data
subcatchment_df = subcatchment_df.assign(
landuse=pd.DataFrame(landuse_data).values)
else:
# Landuse was not given, save NaN values instead
subcatchment_df = subcatchment_df.assign(landuse=np.nan)
subarea_col_names = ['Name',
'Nimp',
'Nperv',
'Simp',
'Sperv',
'Zero_pct',
'RouteTo']
if (len(subarea_data[0]) == 8):
subarea_col_names.append('Routed_pct')
subarea_df = pd.DataFrame(subarea_data, columns=subarea_col_names)
subarea_df[['Nimp',
'Nperv',
'Simp',
'Sperv',
'Zero_pct']] = subarea_df[['Nimp',
'Nperv',
'Simp',
'Sperv',
'Zero_pct']].astype(float)
if (len(infiltration_data[0]) == 6): # Horton infiltration
infiltration_col_names = ['Name',
'MaxRate',
'MinRate',
'Decay',
'DryTime',
'MaxInf']
infiltration_df = pd.DataFrame(infiltration_data,
columns=infiltration_col_names)
infiltration_df[['MaxRate',
'MinRate',
'Decay',
'DryTime',
'MaxInf']] = infiltration_df[['MaxRate',
'MinRate',
'Decay',
'DryTime',
'MaxInf']].astype(float)
elif (len(infiltration_data[0]) == 4): # Green-Ampt infiltration
infiltration_col_names = ['Name',
'Psi',
'Ksat',
'IMD']
infiltration_df = pd.DataFrame(infiltration_data,
columns=infiltration_col_names)
infiltration_df[['Psi',
'Ksat',
'IMD']] = infiltration_df[['Psi',
'Ksat',
'IMD']].astype(float)
if lid_data:
lid_col_names = ['Name',
'LID',
'LIDNumber',
'LIDArea',
'LIDWidth',
'LIDInitSat',
'LIDFromImp',
'LIDToPerv']
if (len(max(lid_data, key=len)) >= 9):
lid_col_names.append('LIDRptFile')
if (len(max(lid_data, key=len)) == 10):
lid_col_names.append('LIDDrainTo')
lid_df = pd.DataFrame(lid_data, columns=lid_col_names)
lid_df[['LIDNumber',
'LIDArea',
'LIDWidth',
'LIDInitSat',
'LIDFromImp',
'LIDToPerv']] = lid_df[['LIDNumber',
'LIDArea',
'LIDWidth',
'LIDInitSat',
'LIDFromImp',
'LIDToPerv']].astype(float)
if tags_data:
tags_col_names = ['Type',
'Name',
'Tag']
tags_df = pd.DataFrame(tags_data, columns=tags_col_names)
tags_df = tags_df.drop('Type', axis=1)
coordinate_col_names = ['Name', 'X', 'Y']
coordinate_df = pd.DataFrame(coordinate_data, columns=coordinate_col_names)
polygon_df = pd.DataFrame(polygon_data, columns=coordinate_col_names)
# Create WKT geometries from subcatchment polygon corner point information
polygon_df['XY'] = polygon_df['X'].map(str) + ' ' + polygon_df['Y'].map(str)
polygon_df = polygon_df.groupby('Name').agg({'XY': lambda x: ','.join(x)})
polygon_df['wktcolumn'] = 'POLYGON((' + polygon_df['XY'].map(str) + '))'
# Check that polygons are closed and report if not
for idx, row in polygon_df.iterrows():
if(row['XY'].split(',')[0] != row['XY'].split(',')[-1]):
print('Error: ' + str(idx) + ' is not a closed polygon.')
polygon_df = polygon_df.drop('XY', axis=1)
polygon_df.reset_index(inplace=True)
# Convert to geodatabase for subcatchments
geometry = polygon_df['wktcolumn'].map(shapely.wkt.loads)
polygon_df = polygon_df.drop('wktcolumn', axis=1)
subcatchment_gdf = gpd.GeoDataFrame(polygon_df, crs=crs, geometry=geometry)
subcatchment_gdf['centroid'] = subcatchment_gdf['geometry'].centroid.map(
lambda p: p.x).map(str) + ' ' + \
subcatchment_gdf['geometry'].centroid.map(
lambda p: p.y).map(str)
# Merge subcatchment dataframes
subcatchment_gdf = subcatchment_gdf.merge(subcatchment_df,
on='Name', how='right')
subcatchment_gdf = subcatchment_gdf.merge(subarea_df,
on='Name', how='left')
subcatchment_gdf = subcatchment_gdf.merge(infiltration_df,
on='Name', how='left')
if lid_data:
subcatchment_gdf = subcatchment_gdf.merge(lid_df,
on='Name', how='left')
if tags_data:
subcatchment_gdf = subcatchment_gdf.merge(tags_df,
on='Name', how='left')
# Create WKT geometries from junction point information
coordinate_df['centroid'] = coordinate_df['X'].map(str) + ' ' + \
coordinate_df['Y'].map(str)
# Create a dictionary of (Name, coordinate) pairs for routing
junction_dict = dict(zip(coordinate_df['Name'], coordinate_df['centroid']))
subcatchment_dict = dict(zip(subcatchment_gdf['Name'],
subcatchment_gdf['centroid']))
coordinate_dict = junction_dict.copy()
coordinate_dict.update(subcatchment_dict)
# Create a WKT polyline of routing between subcatchments
subcatchment_df['wktcolumn'] = 'LINESTRING(' + \
subcatchment_df['Name'].map(
coordinate_dict).map(str) + ',' + \
subcatchment_df['OutID'].map(
coordinate_dict).map(str) + ')'
# Convert to geodatabase for routing
geometry = subcatchment_df['wktcolumn'].map(shapely.wkt.loads)
subcatchment_df = subcatchment_df.drop('wktcolumn', axis=1)
subcatchment_df = subcatchment_df.drop('Rgage', axis=1)
subcatchment_df = subcatchment_df.drop('Area', axis=1)
subcatchment_df = subcatchment_df.drop('Imperv_pct', axis=1)
subcatchment_df = subcatchment_df.drop('Width', axis=1)
subcatchment_df = subcatchment_df.drop('Slope', axis=1)
subcatchment_df = subcatchment_df.drop('Clength', axis=1)
subcatchment_df = subcatchment_df.drop('landuse', axis=1)
if 'SPack' in subcatchment_df.columns:
subcatchment_df = subcatchment_df.drop('SPack', axis=1)
routing_gdf = gpd.GeoDataFrame(subcatchment_df, crs=crs, geometry=geometry)
routing_gdf.rename(index=str, columns={"Name": "from", "OutID": "to"},
inplace=True)
# Save subcatchments as shapefile
subcatchment_gdf = subcatchment_gdf.drop('centroid', axis=1)
subcatchment_gdf.to_file(os.path.splitext(sys.argv[1])[0] +
'_subcatchments.shp', driver='ESRI Shapefile')
print('Saved subcatchments to ' + os.path.splitext(sys.argv[1])[0] +
'_subcatchments.shp')
# Save subcatchment routing as shapefile
routing_gdf.to_file(os.path.splitext(sys.argv[1])[0] +
'_subcatchment_routing.shp', driver='ESRI Shapefile')
print('Saved subcatchment routing to ' + os.path.splitext(sys.argv[1])[0] +
'_subcatchment_routing.shp')
|
{"hexsha": "dd2585f0fe32a6a8fa0a7fbcba7dc2ea9f07c843", "size": 13702, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/inp2gis.py", "max_stars_repo_name": "AaltoUAW/GisToSWMM5", "max_stars_repo_head_hexsha": "b3435006e084a14f6f0f325c0962a4c2a5213559", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2017-11-20T19:14:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T10:00:31.000Z", "max_issues_repo_path": "utils/inp2gis.py", "max_issues_repo_name": "AaltoUAW/GisToSWMM5", "max_issues_repo_head_hexsha": "b3435006e084a14f6f0f325c0962a4c2a5213559", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-04-23T15:48:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-15T13:48:35.000Z", "max_forks_repo_path": "utils/inp2gis.py", "max_forks_repo_name": "AaltoUAW/GisToSWMM5", "max_forks_repo_head_hexsha": "b3435006e084a14f6f0f325c0962a4c2a5213559", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-07-24T18:46:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T14:56:17.000Z", "avg_line_length": 41.5212121212, "max_line_length": 79, "alphanum_fraction": 0.5242300394, "include": true, "reason": "import numpy", "num_tokens": 2992}
|
% ClientServerProtocol.tex: Sedna Client/Server Protocol
% Copyright (C) 2010 ISP RAS
% The Institute for System Programming of the Russian Academy of Sciences
\documentclass[a4paper,12pt]{article}
\usepackage{alltt} % Like verbatim but supports commands inside
\usepackage{theorem}
\newtheorem{note}{Note} % To insert notes
\usepackage{multirow} % Allows inserting tables
\usepackage{ifpdf} % Package for conditionals in TeX
\newcommand{\TocAt}[6]{} % To avoid processing \TocAt by LaTeX
\title{Sedna Native XML Database Client/Server Protocol}
\date{}
% Switch for between PDF and other formats to generate bookmarks,
% pretty table of contents and set document's information in PDF
\ifpdf
\usepackage[colorlinks=true, linkcolor=blue,
citecolor=blue, urlcolor=blue,
pdftex, %%% hyper-references for pdflatex
bookmarks=true, %%% generate bookmarks ...
bookmarksnumbered=true %%% ... with numbers
]{hyperref}
\pdfadjustspacing=1
\hypersetup{
pdfauthor = {Sedna Team},
pdftitle = {Sedna Native XML Database Client/Server Protocol}
}
\else
\usepackage[colorlinks=true, linkcolor=blue,
citecolor=blue, urlcolor=blue]{hyperref}
\fi
% Use citemize environment to produce tightly packed lists
\newenvironment{citemize}
{\begin{itemize}
\setlength{\itemsep}{0pt}
\setlength{\parskip}{0pt}
\setlength{\parsep}{0pt}}
{\end{itemize}}
%===============================================================================
% Sedna Client/Server Protocol: Introduction
%===============================================================================
\begin{document}
\sloppy
\maketitle
\TocAt*{section,subsection,subsubsection}
\TocAt*{subsection,subsubsection}
\tableofcontents
\newpage
\section{Introduction}
This document describes details of the message-based protocol Sedna XML Database
server uses for communication with clients through the TCP/IP sockets. Higher
level application programming interfaces are built over this protocol.
This document describes versions 1.0, 2.0, 3.0 and 4.0 of the protocol. It
consists of three parts: section \ref{sec:MessageStructure} describes the basic
message structure, section \ref{sec:MessageFlow} defines communication protocol
and \ref{sec:MessageFormats}-th section describes the detailed format of each
message.
%===============================================================================
% Sedna Client/Server Protocol: Known Implementations
%===============================================================================
\subsection{Known Implementations}
The known implementations of the protocol include:
\begin{itemize}
\item \href{http://www.xqj.net/sedna/}{XQJ Driver} by Charles Foster
(version 4.0);
\item \href{http://www.cfoster.net/sedna/}{XML:DB API Driver} by Charles Foster
(version 4.0);
\item Java Driver included in distribution (version 2.0);
\item Scheme Driver included in distribution (version 2.0);
\item Terminal (\verb!se_term!) included in distribution (version 3.0).
\end{itemize}
\newpage
%===============================================================================
% Sedna Client/Server Protocol: Message Structure
%===============================================================================
\section{Message Structure}
\label{sec:MessageStructure}
In messages values of the following three data types are used:
\begin{enumerate}
\item\verb!byte! -- one byte;
\item\verb!int! -- four bytes presented in the network byte order (most
significant byte first);
\item\verb!string! -- has the following structure: the first byte identifies the
string format, the next four bytes (int) specify the length of the string in
bytes, and the next 'length' bytes are the string itself. The only supported
string format is a C-string without the trailing null character; it is
identified by a format byte equal to zero.
\end{enumerate}
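For example, under this encoding the three-character C-string \verb!doc!
occupies eight bytes: a format byte of zero, the length 3 as a four-byte
network-order int, and the three characters themselves:
\begin{verbatim}
00 | 00 00 00 03 | 64 6F 63
\end{verbatim}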
The common message structure is as follows:
\begin{citemize}
\item the first four bytes (int) is instruction;
\item the next four bytes (int) is the length of a body in bytes;
\item the next 'length' bytes are the body.
\end{citemize}
The body of the message is determined by the instruction. In general the body of
all messages is a sequence of values of the three types listed above. The
position of a value in the sequence determines its meaning.
In the current version of Sedna the size of the message body must not exceed
10240 bytes.
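As an illustration only, a client-side encoder for this framing might look like
the following Python sketch; here \verb!instruction! stands for whatever
numeric code the particular message uses, and \verb!encode_string! implements
the string layout described above:
\begin{verbatim}
import struct

def encode_string(s):
    # format byte 0 (C-string), 4-byte length, then the bytes
    data = s.encode('utf-8')
    return b'\x00' + struct.pack('>i', len(data)) + data

def encode_message(instruction, body):
    # 4-byte instruction and 4-byte body length, network byte order
    assert len(body) <= 10240
    return struct.pack('>ii', instruction, len(body)) + body
\end{verbatim}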
\newpage
%===============================================================================
% Message Flow: Start-Up
%===============================================================================
\section{Message Flow}
\label{sec:MessageFlow}
\subsection{Start-Up}
To begin a session, a client creates a connection to the server and sends a
startup message. The server launches a new process that is associated with the
session. If launching the new process fails, the server sends the
\verb!se_ErrorResponse! message; otherwise, it sends the
\verb!se_SendSessionParameters! message to the client. The client then sends the
session parameters in the \verb!se_SessionParameters! message. This message
includes the particular protocol version to be used, the name of the user and of
the database the user wants to connect to. The server then uses this information
to determine whether the connection is acceptable. If not, it sends the error
message (\verb!se_ErrorResponse!). If the connection is acceptable the server
then sends an authentication request message, to which the client must reply
with an appropriate authentication response message. In principle the
authentication request/response cycle could require multiple iterations, but the
present authentication method uses exactly one request
(\verb!se_SendAuthParameters!) and response
(\verb!se_AuthenticationParameters!). The authentication cycle ends with the
server either rejecting the connection attempt (\verb!se_AuthenticationFailed!),
or sending \verb!se_AuthenticationOk!.
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_Start-Up!. Does not contain the body.
\item \verb!se_SessionParameters!. The body contains the protocol version, user
name and db name.
\item \verb!se_AuthenticationParameters!. The body contains the password.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_SendSessionParameters!. Does not contain the body.
\item \verb!se_SendAuthParameters!. Does not contain the body.
\item \verb!se_AuthenticationOK!. Does not contain the body.
\item \verb!se_AuthenticationFailed!. Body contains info.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
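The successful start-up exchange can thus be summarized as follows:
\begin{verbatim}
Client                                Server
se_Start-Up                   ----->
                              <-----  se_SendSessionParameters
se_SessionParameters          ----->
                              <-----  se_SendAuthParameters
se_AuthenticationParameters   ----->
                              <-----  se_AuthenticationOK
\end{verbatim}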
%===============================================================================
% Message Flow: Transactions
%===============================================================================
\subsection{Transactions}
After the start-up phase has succeeded and the session has begun, the client can
run zero or more transactions in the session.
Transactions must be run sequentially, that is the client must commit a
transaction before starting a new one.
To begin a transaction the client sends the \verb!se_BeginTransaction! message.
If the transaction begins Ok, the server answers \verb!se_BeginTransactionOk!.
If the transaction fails to begin, the server answers
\verb!se_BeginTransactionFailed!.
To commit the transaction the client sends the \verb!se_CommitTransaction!
message. If the transaction commits Ok, the server answers
\verb!se_CommitTransactionOk!. If the transaction fails to commit, the server
rolls the transaction back and answers \verb!se_CommitTransactionFailed!.
To roll back the transaction the client sends the \verb!se_RollbackTransaction!
message. If the rollback succeeds, the server answers
\verb!se_RollbackTransactionOk!. If the rollback fails, the
server sends \verb!se_RollbackTransactionFailed! and closes the session.
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_BeginTransaction!. Does not contain a body.
\item \verb!se_CommitTransaction!. Does not contain a body.
\item \verb!se_RollbackTransaction!. Does not contain a body.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_BeginTransactionOk!. Does not contain a body.
\item \verb!se_BeginTransactionFailed!. The body contains the error code and
error info.
\item \verb!se_CommitTransactionOk!. Does not contain a body.
\item \verb!se_CommitTransactionFailed!. The body contains the error code and
error info.
\item \verb!se_RollbackTransactionOk!. Does not contain a body.
\item \verb!se_RollbackTransactionFailed!. The body contains the error code and
error info.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
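A typical successful transaction thus proceeds as follows:
\begin{verbatim}
Client                         Server
se_BeginTransaction    ----->
                       <-----  se_BeginTransactionOk
 ... queries and updates ...
se_CommitTransaction   ----->
                       <-----  se_CommitTransactionOk
\end{verbatim}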
%===============================================================================
% Message Flow: Session Options
%===============================================================================
\subsection{Session Options}
Since version 3.0 of the Sedna Client-Server protocol it is possible to set
session options.
There are a number of session options. Session options can be set at any moment
during the session except while the session is in a query evaluation phase
(executing a query or passing result data to the client).
To set one or more options the client must send the \verb!se_SetSessionOptions!
message. If the options were set successfully the server sends the
\verb!se_SetSessionOptionsOk! message to the client. Otherwise the server sends
\verb!se_ErrorResponse! to the client.
To reset options to their default values the client must send
\verb!se_ResetSessionOptions!. If the options were reset successfully the server
sends the \verb!se_ResetSessionOptionsOk! message to the client. Otherwise the
server sends \verb!se_ErrorResponse! to the client.
The possible instructions from client are:
\begin{citemize}
\item \verb!se_SetSessionOptions!. The body contains any number of pairs: option
id followed by option value. Option id is int, option value is string.
\item \verb!se_ResetSessionOptions!. Does not contain a body.
\end{citemize}
The possible instructions from the server are:
\begin{citemize}
\item \verb!se_SetSessionOptionsOk!. Does not contain a body.
\item \verb!se_ResetSessionOptionsOk!. Does not contain a body.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
Possible option ids:
\begin{citemize}
\item \verb!SEDNA_DEBUG_OFF! -- turns off query debug mode. Query debug mode is
off by default. (See the ``Debug features'' section of the Sedna Programmer's
Guide for details.) This option does not take a value (there must be a string of
zero length in the message body);
\item \verb!SEDNA_DEBUG_ON! -- turns on query debug mode. This option does not
take a value (there must be a string of zero length in the message body).
\end{citemize}
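As referenced above, the following sketch shows one way the body of
\verb!se_SetSessionOptions! could be built. The numeric option id and the
string serialization (length-prefixed UTF-8) are assumptions of this sketch;
the real values are defined by the client library.
\begin{verbatim}
# Hedged sketch (Python). SEDNA_DEBUG_ON_ID is a placeholder;
# length-prefixed UTF-8 strings are an assumption of this sketch.
import struct

SEDNA_DEBUG_ON_ID = 1  # hypothetical numeric id

def encode_string(s):
    data = s.encode("utf-8")
    return struct.pack("!i", len(data)) + data

def set_session_options_body(options):
    # options: list of (option id, option value) pairs
    return b"".join(struct.pack("!i", oid) + encode_string(value)
                    for oid, value in options)

# the debug options carry no value, i.e. a zero-length string
body = set_session_options_body([(SEDNA_DEBUG_ON_ID, "")])
\end{verbatim}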
%===============================================================================
% Message Flow: Query Execution
%===============================================================================
\subsection{Query Execution}
Queries are executed via different subprotocols depending on the type and
length of the query. There are three types of queries: queries, updates, and
bulk loads.
If the query is not longer than the maximum message body (10240 bytes), the
client sends the \verb!se_Execute! message containing the query statement to
the server. If the query is longer than the maximum message body, the client
must send the query in parts, each part in the body of an
\verb!se_ExecuteLong! message. After all parts of the query are sent, the
client must send the \verb!se_LongQueryEnd! message (with an empty body) so
that the server knows the whole query has been sent.
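The client-side splitting logic could look like the following sketch. The
10240-byte limit is from the paragraph above; how the result-format byte is
carried across parts is an assumption of this sketch, as are the framing
helpers from Section~\ref{sec:MessageFormats}.
\begin{verbatim}
# Hedged sketch (Python): send a query, splitting it into
# se_ExecuteLong parts when it exceeds the body limit.
MAX_BODY = 10240
SE_EXECUTE, SE_EXECUTE_LONG, SE_LONG_QUERY_END = 300, 301, 302

def send_query(sock, payload):
    # payload: result-format byte followed by the encoded query
    if len(payload) <= MAX_BODY:
        send_message(sock, SE_EXECUTE, payload)
        return
    for i in range(0, len(payload), MAX_BODY):
        send_message(sock, SE_EXECUTE_LONG, payload[i:i + MAX_BODY])
    send_message(sock, SE_LONG_QUERY_END)
\end{verbatim}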
The server analyzes the query text to identify the type of the query and runs
the corresponding subprotocol. The following sections describe these
subprotocols.
\subsubsection{Querying Data}
The client sends a query in the \verb!se_Execute! message, or in
\verb!se_ExecuteLong! messages followed by the \verb!se_LongQueryEnd! message.
The first byte of the message body specifies the format of the result: XML or
SXML \cite{paper:sxml}. Use \verb!0! to get the result of the query in XML and
\verb!1! to get it in SXML.
\begin{note}
Since protocol version 4.0 it is the client's task to indent and pretty-print
the output.
\end{note}
The server processes the query. If the query succeeds the server sends the
\verb!se_QuerySucceeded! message to the client and then sends a number of
messages that contain the first item of the result sequence and query debug
information (if any) to the client in the way described below.
When the client needs to get the next item of the result sequence it sends the
\verb!se_GetNextItem! message. The server then sends the next item of the result
sequence.
The way the server sends items depends on the protocol version:
\textbf{Protocol versions 1.0, 2.0, 3.0:} The server may send each item in
parts. Every part of the item is enveloped in an \verb!se_ItemPart! message.
When the whole item has been sent, the server sends the \verb!se_ItemEnd!
message, or the \verb!se_ResultEnd! message if it was the last item.
\textbf{Protocol version 4.0:} The server sends items in the following way. The
\verb!se_ItemStart! message is sent first; it contains the type and content of
the item being sent. If the content is too long to be sent within one message,
the server may send it in parts, each part enveloped in an \verb!se_ItemPart!
message. When the whole item has been sent, the server sends the
\verb!se_ItemEnd! message, or the \verb!se_ResultEnd! message if it was the
last item.
When the result sequence has ended and the server receives
\verb!se_GetNextItem! from the client, it sends the \verb!se_ResultEnd!
message without sending any \verb!se_ItemPart! message first.
While sending result data the server may also send any number of
\verb!se_DebugInfo! messages containing debug information, if there is any.
\begin{note}
Query debug information is supported in the Sedna Client-Server protocol since
version 2.0.
\end{note}
The client is not required to get all the items of the result sequence. It can
send the next query for execution before all the items of the result sequence
have been received from the server.
If the query fails, the server sends the \verb!se_QueryFailed! message to the
client.
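Under protocol version 4.0, a client-side fetch loop could look like the
following sketch. Message codes are from Section~\ref{sec:MessageFormats};
parsing of the item class, type and URL is omitted, and the item payload is
simply collected as raw bytes.
\begin{verbatim}
# Hedged sketch (Python): drain one query result (protocol 4.0).
SE_GET_NEXT_ITEM = 310
SE_DEBUG_INFO, SE_ITEM_START = 325, 355
SE_ITEM_PART, SE_ITEM_END, SE_RESULT_END = 360, 370, 375

def fetch_items(sock):
    items, current = [], b""
    while True:
        code, body = recv_message(sock)
        if code == SE_DEBUG_INFO:
            continue                    # or log the debug info
        elif code in (SE_ITEM_START, SE_ITEM_PART):
            current += body
        elif code == SE_ITEM_END:
            items.append(current)
            current = b""
            send_message(sock, SE_GET_NEXT_ITEM)
        elif code == SE_RESULT_END:
            if current:
                items.append(current)
            return items
\end{verbatim}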
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_Execute!. The body contains an XQuery query text.
\item \verb!se_ExecuteLong!. The body contains a part of a long XQuery query.
\item \verb!se_LongQueryEnd!. Does not contain a body.
\item \verb!se_GetNextItem!. Does not contain a body.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_QuerySucceeded!. Does not contain a body.
\item \verb!se_QueryFailed!. The body contains the error code and error info.
\item \verb!se_DebugInfo!. The body contains the debug type and debug info.
(Since version 2.0)
\item \verb!se_ItemStart!. The body contains the type of the item being sent
and the first part of the item. (Since version 4.0)
\item \verb!se_ItemPart!. The body contains the part of the item.
\item \verb!se_ItemEnd!. Does not contain a body.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
\subsubsection{Updating Data}
The client sends the \verb!se_Execute! message (or \verb!se_ExecuteLong!
messages followed by the \verb!se_LongQueryEnd! message) that contains an
update statement. The server processes the update. If the update succeeds, the
server sends the \verb!se_UpdateSucceeded! message to the client. If the update
fails, the server sends the \verb!se_UpdateFailed! message.
Before sending the \verb!se_UpdateSucceeded! or \verb!se_UpdateFailed! message
the server may send any number of \verb!se_DebugInfo! messages if there is any
debug information.
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_Execute!. The body contains an update statement.
\item \verb!se_ExecuteLong!. The body contains a part of a long XQuery query.
\item \verb!se_LongQueryEnd!. Does not contain a body.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_UpdateSucceeded!. Does not contain a body.
\item \verb!se_UpdateFailed!. The body contains the error code and error info.
\item \verb!se_DebugInfo!. The body contains the debug type and debug info.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
\subsubsection{Bulk Load}
The client sends the \verb!se_Execute! message that contains a bulk load
statement. The server extracts the name of the file and sends the
\verb!se_BulkLoadFileName! message that contains the name.
Since version 2.0 of the Sedna Client-Server protocol the server can send
multiple \verb!se_BulkLoadFileName! messages if there were multiple file names
in the query. Currently this can happen in the \verb!LOAD MODULE! statement.
The client reads the specified file. If there is no such file, or if access
errors occur, the client sends the \verb!se_BulkLoadError! message to the
server. Otherwise the client transfers the data from the file to the server in
portions. Each portion is sent in an \verb!se_BulkLoadPortion! message.
When the whole file has been sent, the client sends the \verb!se_BulkLoadEnd!
message. The server answers with the \verb!se_BulkLoadSucceeded! or
\verb!se_BulkLoadFailed! message.
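The client side of this subprotocol might look like the following sketch
(codes are from Section~\ref{sec:MessageFormats}; the chunk size and the empty
error body are assumptions):
\begin{verbatim}
# Hedged sketch (Python): client side of the bulk-load
# subprotocol, run after se_BulkLoadFileName has been received.
SE_BULK_LOAD_ERROR, SE_BULK_LOAD_PORTION = 400, 410
SE_BULK_LOAD_END = 420

def bulk_load_file(sock, file_name, chunk_size=8192):
    try:
        f = open(file_name, "rb")
    except OSError:
        send_message(sock, SE_BULK_LOAD_ERROR)  # error body omitted
        return
    with f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            send_message(sock, SE_BULK_LOAD_PORTION, chunk)
    send_message(sock, SE_BULK_LOAD_END)
    code, body = recv_message(sock)  # se_BulkLoadSucceeded/Failed
\end{verbatim}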
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_Execute!. The body contains a query for bulk load.
\item \verb!se_BulkLoadError!. The body contains the error code and error info.
\item \verb!se_BulkLoadPortion!. The body contains a portion of data.
\item \verb!se_BulkLoadEnd!. Does not contain a body.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_BulkLoadFileName!. The body contains the file name.
\item \verb!se_BulkLoadSucceeded!. Does not contain a body.
\item \verb!se_BulkLoadFailed!. The body contains the error code and error info.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
%===============================================================================
% Message Flow: Termination
%===============================================================================
\subsection{Termination}
Termination can be initiated by the client (for example, when it closes the
session) or by the server (for example, in case of an administrator-commanded
database shutdown or some failure).
The normal termination procedure is that the client closes the session after
the transaction commits. In this case the client sends the
\verb!se_CloseConnection! message. The server runs its closing procedure. If no
errors occur, the server sends the \verb!se_CloseConnectionOk! message and
closes the connection.
If the client sends the \verb!se_CloseConnection! message before committing the
ongoing transaction, the server rolls back the transaction, sends the
\verb!se_TransactionRollbackBeforeClose! message and closes the connection.
If errors occur on the server upon receipt of the \verb!se_CloseConnection!
message, the server sends the \verb!se_ErrorResponse! message and closes the
connection.
During an administrator-commanded database shutdown, or when a failure occurs,
the server may disconnect without any client request to do so. Before closing
the connection the server sends the \verb!se_ErrorResponse! message that
contains the error code and error info.
The possible instructions from the client in this phase are:
\begin{citemize}
\item \verb!se_CloseConnection!. Does not contain a body.
\end{citemize}
The possible instructions from the server in this phase are:
\begin{citemize}
\item \verb!se_CloseConnectionOk!. Does not contain a body.
\item \verb!se_TransactionRollbackBeforeClose!. Does not contain a body.
\item \verb!se_ErrorResponse!. Body contains info.
\end{citemize}
%===============================================================================
% Message Flow: Server Error Handling
%===============================================================================
\subsection{Server Error Handling}
In all phases of the client-server interaction an error can occur on the
server. In this case the server answers a client request message by sending the
\verb!se_ErrorResponse! message that contains the error code and the error
info.
\begin{citemize}
\item \verb!se_ErrorResponse!. The body contains the error code and the error
info.
\end{citemize}
\newpage
%===============================================================================
% Message Formats
%===============================================================================
\section{Message Formats}
\label{sec:MessageFormats}
This section describes the detailed format of each message. Each message is
marked to indicate whether it is sent by the client (C) or by the server (S).
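For illustration, the following sketch shows one way a client could frame and
parse these messages. It is not part of the protocol definition: the byte
order (network byte order) and the \verb!sock! object (a connected TCP socket)
are assumptions of this sketch, on which the other sketches in this manual
rely as well.
\begin{verbatim}
# Hedged sketch (Python). Assumption: ints are 4 bytes in
# network byte order; `sock` is a connected TCP socket.
import struct

def send_message(sock, code, body=b""):
    # head: instruction code (int) + body length (int), then body
    sock.sendall(struct.pack("!ii", code, len(body)) + body)

def recv_exact(sock, n):
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("connection closed")
        buf += chunk
    return buf

def recv_message(sock):
    code, length = struct.unpack("!ii", recv_exact(sock, 8))
    return code, recv_exact(sock, length)
\end{verbatim}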
\begin{verbatim}
se_Start-Up (C).
head:
110 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_SessionParameters (C).
head:
120 (int)
body length (int)
body:
major protocol version number (byte);
minor protocol version number (byte);
user name (string);
database name (string);
\end{verbatim}
\begin{verbatim}
se_AuthenticationParameters (C).
head:
130 (int)
body length (int)
body:
password (string)
\end{verbatim}
\begin{verbatim}
se_SendSessionParameters (S).
head:
140 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_SendAuthParameters (S).
head:
150 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_AuthenticationOK (S).
head:
160 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_AuthenticationFailed (S).
head:
170 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_ErrorResponse (S).
head:
100 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_BeginTransaction (C).
head:
210 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_CommitTransaction (C).
head:
220 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_RollbackTransaction (C).
head:
225 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BeginTransactionOk (S).
head:
230 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BeginTransactionFailed (S).
head:
240 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_CommitTransactionOk (S).
head:
250 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_CommitTransactionFailed (S).
head:
260 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_RollbackTransactionOk (S).
head:
255 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_RollbackTransactionFailed (S).
head:
265 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_Execute (C).
head:
300 (int)
body length (int)
body:
result format (byte) + query text (string)
\end{verbatim}
\begin{verbatim}
se_ExecuteLong (C).
head:
301 (int)
body length (int)
body:
result format (byte) + query text (string)
\end{verbatim}
\begin{verbatim}
se_LongQueryEnd (C).
head:
302 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_GetNextItem (C).
head:
310 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_QuerySucceeded (S).
head:
320 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_DebugInfo (S).
head:
325 (int)
body length (int)
body:
debug type (int)
debug info (string)
\end{verbatim}
\begin{verbatim}
se_QueryFailed (S).
head:
330 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_UpdateSucceeded (S).
head:
340 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_UpdateFailed (S).
head:
350 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_ItemStart (S).
head:
355 (int)
body length (int)
body:
item class (byte) - see below possible values of this field
item type (byte) - see below possible values of this field
URL flag (byte) - either 0 or 1, determines if URL field is empty
[URL (string)] - optional, URL of the item
result part (string)
\end{verbatim}
\begin{verbatim}
Item class enumeration (see the se_ItemStart message) is defined as follows:
enum se_item_class {
se_atomic = 1, //item type defines atomic type
se_document = 2,
se_element = 3, //item type defines atomic type
se_attribute = 4, //item type defines atomic type
se_namespace = 5,
se_pi = 6,
se_comment = 7,
se_text = 8
};
\end{verbatim}
\begin{verbatim}
Item type enumeration (see the se_ItemStart message) is defined as follows:
enum se_item_type {
/* Abstract base types */
se_anyType = 0,
se_anySimpleType = 1,
se_anyAtomicType = 2,
/* Built-in simple, non-atomic types */
se_IDREFS = 3,
se_NMTOKENS = 4,
se_ENTITIES = 5,
/* Built-in complex types */
se_untyped = 6,
/* Built-in atomic types (Primitive types) */
se_dateTime = 10,
se_date = 11,
se_time = 12,
se_duration = 13,
se_yearMonthDuration = 14,
se_dayTimeDuration = 15,
se_gYearMonth = 16,
se_gYear = 17,
se_gMonthDay = 18,
se_gDay = 19,
se_gMonth = 20,
se_float = 21,
se_double = 22,
se_decimal = 23,
se_integer = 24,
se_boolean = 25,
se_untypedAtomic = 26,
se_string = 27,
se_base64Binary = 28,
se_hexBinary = 29,
se_anyURI = 30,
se_QName = 31,
se_NOTATION = 32,
/* Types derived from xs:string */
se_normalizedString = 41,
se_token = 42,
se_language = 43,
se_NMTOKEN = 44,
se_Name = 45,
se_NCName = 46,
se_ID = 47,
se_IDREF = 48,
se_ENTITY = 49,
/* Types derived from xs:integer */
se_nonPositiveInteger = 50,
se_negativeInteger = 51,
se_long = 52,
se_int = 53,
se_short = 54,
se_byte = 55,
se_nonNegativeInteger = 56,
se_unsignedLong = 57,
se_unsignedInt = 58,
se_unsignedShort = 59,
se_unsignedByte = 60,
se_positiveInteger = 61
};
\end{verbatim}
\begin{verbatim}
se_ItemPart (S).
head:
360 (int)
body length (int)
body:
result part (string)
\end{verbatim}
\begin{verbatim}
se_ItemEnd (S).
head:
370 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_ResultEnd (S).
head:
375 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BulkLoadError (C).
head:
400 (int)
body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_BulkLoadPortion (C).
head:
410 (int)
body length (int)
body:
data portion (string)
\end{verbatim}
\begin{verbatim}
se_BulkLoadEnd (C).
head:
420 (int)
body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BulkLoadFileName (S).
head:
430 (int)
body length (int)
body:
file name (string)
\end{verbatim}
\begin{verbatim}
se_BulkLoadFromStream (S).
head:
431 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BulkLoadSucceeded (S).
head:
440 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_BulkLoadFailed (S).
head:
450 (int)
        body length (int)
body:
error code (int)
error info (string)
\end{verbatim}
\begin{verbatim}
se_ShowTime (C).
head:
451 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_LastQueryTime (S).
head:
452 (int)
        body length (int)
body:
time (string)
\end{verbatim}
\begin{verbatim}
se_CloseConnection (C).
head:
500 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_CloseConnectionOk (S).
head:
510 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_TransactionRollbackBeforeClose (S).
head:
520 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_SetSessionOptions (C).
head:
530 (int)
body length (int)
body:
any number of pairs: option id (int), option value (string)
\end{verbatim}
\begin{verbatim}
se_SetSessionOptionsOk (S).
head:
540 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_ResetSessionOptions (C).
head:
550 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{verbatim}
se_ResetSessionOptionsOk (S).
head:
560 (int)
        body length = 0 (int)
body:
empty
\end{verbatim}
\begin{thebibliography}{1}
\bibitem{paper:sxml}
Oleg Kiselyov.
``SXML Specification, Revision 3.0'',
\url{http://www.okmij.org/ftp/Scheme/SXML.html}
\end{thebibliography}
\end{document}
|
{"hexsha": "d49460c442762d4e470bebea89b85586f69a81c6", "size": 30119, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/ClientServerProtocol/ClientServerProtocol.tex", "max_stars_repo_name": "TonnyRed/sedna", "max_stars_repo_head_hexsha": "06ff5a13a16f2d820d3cf0ce579df23f03a59eda", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2015-09-06T10:17:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T22:39:25.000Z", "max_issues_repo_path": "doc/ClientServerProtocol/ClientServerProtocol.tex", "max_issues_repo_name": "TonnyRed/sedna", "max_issues_repo_head_hexsha": "06ff5a13a16f2d820d3cf0ce579df23f03a59eda", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-15T22:02:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-17T10:30:14.000Z", "max_forks_repo_path": "doc/ClientServerProtocol/ClientServerProtocol.tex", "max_forks_repo_name": "TonnyRed/sedna", "max_forks_repo_head_hexsha": "06ff5a13a16f2d820d3cf0ce579df23f03a59eda", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2015-07-23T05:48:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T05:01:39.000Z", "avg_line_length": 29.9990039841, "max_line_length": 82, "alphanum_fraction": 0.6875062253, "num_tokens": 7552}
|
# Exercise the package: launch the CircuitscapeUI window, then run the
# Circuitscape test suite from the package's test directory, restoring the
# original working directory afterwards.
using CircuitscapeUI
using Circuitscape
using Distributed
w = run_ui()
oldpwd = pwd()
cd(CircuitscapeUI.TESTPATH)
Circuitscape.runtests()
cd(oldpwd)
|
{"hexsha": "788d222ecedf3635d0d8645a01993a40dd000222", "size": 149, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "build/snoop.jl", "max_stars_repo_name": "JuliaTagBot/CircuitscapeUI.jl", "max_stars_repo_head_hexsha": "87f392d993d95ce787d84460c96ebe22c9499a3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-17T20:21:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-25T06:53:53.000Z", "max_issues_repo_path": "build/snoop.jl", "max_issues_repo_name": "JuliaTagBot/CircuitscapeUI.jl", "max_issues_repo_head_hexsha": "87f392d993d95ce787d84460c96ebe22c9499a3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-11-21T22:49:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-10T11:34:21.000Z", "max_forks_repo_path": "build/snoop.jl", "max_forks_repo_name": "JuliaTagBot/CircuitscapeUI.jl", "max_forks_repo_head_hexsha": "87f392d993d95ce787d84460c96ebe22c9499a3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-29T15:08:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:39:10.000Z", "avg_line_length": 16.5555555556, "max_line_length": 27, "alphanum_fraction": 0.8053691275, "num_tokens": 44}
|
# Copyright (C) 2019-2022, François-Guillaume Fernandez.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Callable, Dict, List
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from .. import functional as F
__all__ = ["ConcatDownsample2d", "ConcatDownsample2dJit", "GlobalAvgPool2d", "BlurPool2d", "SPP", "ZPool"]
class ConcatDownsample2d(nn.Module):
"""Implements a loss-less downsampling operation described in `"YOLO9000: Better, Faster, Stronger"
<https://pjreddie.com/media/files/papers/YOLO9000.pdf>`_ by stacking adjacent information on the channel dimension.
Args:
scale_factor (int): spatial scaling factor
"""
def __init__(self, scale_factor: int) -> None:
super().__init__()
self.scale_factor = scale_factor
def forward(self, x: Tensor) -> Tensor:
return F.concat_downsample2d(x, self.scale_factor)
@torch.jit.script # type: ignore[attr-defined]
class ConcatDownsample2dJit(object):
"""Implements a loss-less downsampling operation described in `"YOLO9000: Better, Faster, Stronger"
<https://pjreddie.com/media/files/papers/YOLO9000.pdf>`_ by stacking adjacent information on the channel dimension.
Args:
scale_factor (int): spatial scaling factor
"""
def __init__(self, scale_factor: int) -> None:
self.scale_factor = scale_factor
def __call__(self, x: Tensor) -> Tensor:
return F.concat_downsample2d(x, self.scale_factor)
class GlobalAvgPool2d(nn.Module):
"""Fast implementation of global average pooling from `"TResNet: High Performance GPU-Dedicated Architecture"
<https://arxiv.org/pdf/2003.13630.pdf>`_
Args:
flatten (bool, optional): whether spatial dimensions should be squeezed
"""
def __init__(self, flatten: bool = False) -> None:
super().__init__()
self.flatten = flatten
def forward(self, x: Tensor) -> Tensor:
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
def extra_repr(self) -> str:
inplace_str = "flatten=True" if self.flatten else ""
return inplace_str
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
class BlurPool2d(nn.Module):
"""Ross Wightman's `implementation
<https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/blur_pool.py>`_ of blur pooling
module as described in `"Making Convolutional Networks Shift-Invariant Again"
<https://arxiv.org/pdf/1904.11486.pdf>`_.
.. image:: https://github.com/frgfm/Holocron/releases/download/v0.1.3/blurpool.png
:align: center
Args:
channels (int): Number of input channels
kernel_size (int, optional): binomial filter size for blurring. currently supports 3 (default) and 5.
stride (int, optional): downsampling filter stride
Returns:
torch.Tensor: the transformed tensor.
"""
def __init__(self, channels: int, kernel_size: int = 3, stride: int = 2) -> None:
super().__init__()
self.channels = channels
        if kernel_size <= 1:
            raise AssertionError("kernel_size should be greater than 1")
self.kernel_size = kernel_size
self.stride = stride
pad_size = [get_padding(kernel_size, stride, dilation=1)] * 4
self.padding = nn.ReflectionPad2d(pad_size) # type: ignore[arg-type]
self._coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.kernel_size - 1)).coeffs) # for torchscript compat
self.kernel: Dict[str, Tensor] = {} # lazy init by device for DataParallel compat
def _create_filter(self, like: Tensor) -> Tensor:
blur_filter = (self._coeffs[:, None] * self._coeffs[None, :]).to(dtype=like.dtype, device=like.device)
return blur_filter[None, None, :, :].repeat(self.channels, 1, 1, 1)
def _apply(self, fn: Callable[[nn.Module], None]) -> None:
# override nn.Module _apply, reset filter cache if used
self.kernel = {}
super()._apply(fn)
def forward(self, input_tensor: Tensor) -> Tensor:
blur_filter = self.kernel.get(str(input_tensor.device), self._create_filter(input_tensor))
return nn.functional.conv2d(
self.padding(input_tensor), blur_filter, stride=self.stride, groups=input_tensor.shape[1]
)
def extra_repr(self) -> str:
return f"{self.channels}, kernel_size={self.kernel_size}, stride={self.stride}"
class SPP(nn.ModuleList):
"""SPP layer from `"Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition"
<https://arxiv.org/pdf/1406.4729.pdf>`_.
Args:
kernel_sizes (list<int>): kernel sizes of each pooling
"""
def __init__(self, kernel_sizes: List[int]) -> None:
super().__init__([nn.MaxPool2d(k_size, stride=1, padding=k_size // 2) for k_size in kernel_sizes])
    def forward(self, x: Tensor) -> Tensor:
feats = [x] + [pool_layer(x) for pool_layer in self]
return torch.cat(feats, dim=1)
class ZPool(nn.Module):
"""Z-pool layer from `"Rotate to Attend: Convolutional Triplet Attention Module"
<https://arxiv.org/pdf/2010.03045.pdf>`_.
Args:
dim: dimension to pool
"""
def __init__(self, dim: int = 1) -> None:
super().__init__()
self.dim = dim
def forward(self, x: Tensor) -> Tensor:
return F.z_pool(x, self.dim)
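# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original module: expected output
# shapes on a dummy batch. Because of the relative import above, run it as a
# module (e.g. with `python -m`) rather than as a standalone script.
if __name__ == "__main__":
    x = torch.rand(2, 3, 32, 32)
    print(ConcatDownsample2d(scale_factor=2)(x).shape)  # (2, 12, 16, 16)
    print(GlobalAvgPool2d(flatten=True)(x).shape)       # (2, 3)
    print(BlurPool2d(channels=3)(x).shape)              # (2, 3, 16, 16)
    print(SPP([5, 9, 13])(x).shape)                     # (2, 12, 32, 32)
    print(ZPool(dim=1)(x).shape)                        # (2, 2, 32, 32)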
|
{"hexsha": "629c1098b4b921842afbea6f318d4e012a4b1f53", "size": 5715, "ext": "py", "lang": "Python", "max_stars_repo_path": "holocron/nn/modules/downsample.py", "max_stars_repo_name": "frgfm/torch-zoo", "max_stars_repo_head_hexsha": "c97beacf3d49eaa34398abf47f378ea6b48a70f3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "holocron/nn/modules/downsample.py", "max_issues_repo_name": "frgfm/torch-zoo", "max_issues_repo_head_hexsha": "c97beacf3d49eaa34398abf47f378ea6b48a70f3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "holocron/nn/modules/downsample.py", "max_forks_repo_name": "frgfm/torch-zoo", "max_forks_repo_head_hexsha": "c97beacf3d49eaa34398abf47f378ea6b48a70f3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1708860759, "max_line_length": 119, "alphanum_fraction": 0.6650918635, "include": true, "reason": "import numpy", "num_tokens": 1479}
|
[STATEMENT]
lemma "\<FF> \<F> \<Longrightarrow> ci"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> \<F> \<Longrightarrow> \<forall>P. contains (\<bullet>P) (\<^bold>\<not> ((\<^bold>\<and>) P\<^sup>c (\<^bold>\<not> P)))
[PROOF STEP]
nitpick
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<FF> \<F> \<Longrightarrow> \<forall>P. contains (\<bullet>P) (\<^bold>\<not> ((\<^bold>\<and>) P\<^sup>c (\<^bold>\<not> P)))
[PROOF STEP]
oops
|
{"llama_tokens": 181, "file": "Topological_Semantics_ex_LFIs", "length": 2}
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Kernel Design
#
# It's easy to make new kernels in GPflow. To demonstrate, we'll have a look at the Brownian motion kernel, whose function is
# $$
# k(x, x') = \sigma^2 \text{min}(x, x')
# $$
# where $\sigma^2$ is a variance parameter.
# %%
import gpflow
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from gpflow.utilities import print_summary, positive
plt.style.use("ggplot")
# %matplotlib inline
# %% [markdown]
# To make this new kernel class, we inherit from the base class `gpflow.kernels.Kernel` and implement the three functions below. **NOTE:** Depending on the kernel to be implemented, other classes can be more adequate. For example, if the kernel to be implemented is isotropic stationary, you can immediately subclass `gpflow.kernels.IsotropicStationary` (at which point you
# only have to override `K_r` or `K_r2`; see the `IsotropicStationary` class docstring). Stationary but anisotropic kernels should subclass `gpflow.kernels.AnisotropicStationary` and override `K_d`.
#
# #### `__init__`
# In this simple example, the constructor takes no argument (though it could, if that was convenient, for example to pass in an initial value for `variance`). It *must* call the constructor of the superclass with appropriate arguments. Brownian motion is only defined in one dimension, and we'll assume that the `active_dims` are `[0]`, for simplicity.
#
# We've added a parameter to the kernel using the `Parameter` class. Using this class lets the parameter be used in computing the kernel function, and it will automatically be recognised for optimization (or MCMC). Here, the variance parameter is initialized at 1, and constrained to be positive.
#
# #### `K`
# This is where you implement the kernel function itself. This takes two arguments, `X` and `X2`. By convention, we make the second argument optional (it defaults to `None`).
#
# Inside `K`, all the computation must be done with TensorFlow - here we've used `tf.minimum`. When GPflow executes the `K` function, `X` and `X2` will be TensorFlow tensors, and parameters such as `self.variance` behave like TensorFlow tensors as well.
#
# #### `K_diag`
# This convenience function allows GPflow to save memory at predict time. It's simply the diagonal of the `K` function, in the case where `X2` is `None`. It must return a one-dimensional vector, so we use TensorFlow's reshape command.
# %%
class Brownian(gpflow.kernels.Kernel):
def __init__(self):
super().__init__(active_dims=[0])
self.variance = gpflow.Parameter(1.0, transform=positive())
def K(self, X, X2=None):
if X2 is None:
X2 = X
return self.variance * tf.minimum(X, tf.transpose(X2)) # this returns a 2D tensor
def K_diag(self, X):
return self.variance * tf.reshape(X, (-1,)) # this returns a 1D tensor
k_brownian = Brownian()
print_summary(k_brownian, fmt="notebook")
# %% [markdown]
# We can now evaluate our new kernel function and draw samples from a Gaussian process with this covariance:
# %%
np.random.seed(23) # for reproducibility
def plotkernelsample(k, ax, xmin=0, xmax=3):
xx = np.linspace(xmin, xmax, 300)[:, None]
K = k(xx)
ax.plot(xx, np.random.multivariate_normal(np.zeros(300), K, 5).T)
ax.set_title("Samples " + k.__class__.__name__)
def plotkernelfunction(k, ax, xmin=0, xmax=3, other=0):
xx = np.linspace(xmin, xmax, 100)[:, None]
ax.plot(xx, k(xx, np.zeros((1, 1)) + other))
ax.set_title(k.__class__.__name__ + " k(x, %.1f)" % other)
f, axes = plt.subplots(1, 2, figsize=(12, 4), sharex=True)
plotkernelfunction(k_brownian, axes[0], other=2.0)
plotkernelsample(k_brownian, axes[1])
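# %% [markdown]
# Before using the kernel in a model, here is a quick shape check (not in the
# original notebook, added for illustration): calling the kernel object
# evaluates the covariance matrix directly.

# %%
X1 = np.linspace(0, 3, 5)[:, None]
X2 = np.linspace(0, 3, 3)[:, None]
print(k_brownian(X1).shape)  # square covariance matrix: (5, 5)
print(k_brownian(X1, X2).shape)  # cross-covariance: (5, 3)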
# %% [markdown]
# ## Using the kernel in a model
#
# Because we've inherited from the `Kernel` base class, this new kernel has all the properties needed to be used in GPflow. It also has some convenience features such as allowing the user to call
#
# `k(X, X2)`
#
# which computes the kernel matrix.
#
# To show that this kernel works, let's use it inside GP regression. We'll see that Brownian motion has quite interesting properties. To add a little flexibility, we'll add a `Constant` kernel to our `Brownian` kernel, and the `GPR` class will handle the noise.
# %%
np.random.seed(42)
X = np.random.rand(5, 1)
Y = np.sin(X * 6) + np.random.randn(*X.shape) * 0.001
k1 = Brownian()
k2 = gpflow.kernels.Constant()
k = k1 + k2
m = gpflow.models.GPR((X, Y), kernel=k)
# m.likelihood.variance.assign(1e-6)
@tf.function
def objective():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(objective, variables=m.trainable_variables)
print_summary(m, fmt="notebook")
xx = np.linspace(0, 1.1, 100).reshape(100, 1)
mean, var = m.predict_y(xx)
plt.plot(X, Y, "kx", mew=2)
(line,) = plt.plot(xx, mean, lw=2)
_ = plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color=line.get_color(),
alpha=0.2,
)
# %% [markdown]
# ## See also
#
# For more details on how to manipulate existing kernels (or the one you just created!), we refer to the [Manipulating kernels](../advanced/kernels.ipynb) notebook.
|
{"hexsha": "e8338c1efebee55c77bff5ef6061c9f7e79772e8", "size": 5502, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/source/notebooks/tailor/kernel_design.pct.py", "max_stars_repo_name": "christabella/GPflow", "max_stars_repo_head_hexsha": "30824d289f8ee3f58d4249238c8b7267e6a0b2fc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/source/notebooks/tailor/kernel_design.pct.py", "max_issues_repo_name": "christabella/GPflow", "max_issues_repo_head_hexsha": "30824d289f8ee3f58d4249238c8b7267e6a0b2fc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/source/notebooks/tailor/kernel_design.pct.py", "max_forks_repo_name": "christabella/GPflow", "max_forks_repo_head_hexsha": "30824d289f8ee3f58d4249238c8b7267e6a0b2fc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4755244755, "max_line_length": 373, "alphanum_fraction": 0.6997455471, "include": true, "reason": "import numpy", "num_tokens": 1519}
|
#import libraries
import math
#import matplotlib
import numpy
#import sympy
import subprocess as sub
#import sys
import time
import keyboard
#define variables
res=""      # last evaluated result
exe=""      # python statement queued for execution
cmd=""      # command string built from key presses
timsl=0.25  # delay between key reads, in seconds
lcmd=""     # record of appended token lengths, used by "delete"
#define redirections
cmred={"quit":"m0","basic":"m1","math":"m2","numpy":"m3","emulationstation":"m0","(":"m0",")":"m0","execute":"m0"}
redba={"m1":"m0","m2":"m0","m3":"m0","m4":"m1"}
rednx={"m1":"m4"}
#define menu
mn="m0"
m0="// / / / //\n// quit / math / matplotlib / numpy //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// sympy / pyquim / pyfis / basic //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// none / none / none / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// execute / delete / startx / retropie //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////"
m1="// / / / //\n// back / 1 / 2 / 3 //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// + / 4 / 5 / 6 //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// - / 7 / 8 / 9 //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// next / / / 0 / * //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////"
m2="// / / / //\n// back / sqrt / pow / log //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// cos / acos / fabs / factorial //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// sin / asin / pi / e //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// tan / atan / none / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////"
m3="// / / / //\n// back / expand / factors / symbols //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// limit / solve / subs / symplify //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// evalf / init_session / sqrt / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// cos / sin / tg / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////"
m4="// / / / //\n// back / ( / ) / , //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// // / % / ** / ; //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// none / none / none / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////\n// / / / //\n// none / none / none / none //\n// / / / //\n///////////////////////////////////////////////////////////////////////////////"
#define key menu
d0={"1":"quit","2":"math","3":"","4":"numpy","q":"","w":"","e":"","r":"basic","a":"","s":"","d":"","f":"","z":"execute","x":"delete","c":"startx","v":"emulationstation"}
d1={"1":"back","2":"1","3":"2","4":"3","q":"+","w":"4","e":"5","r":"6","a":"-","s":"7","d":"8","f":"9","z":"next","x":"/","c":"0","v":"*"}
d2={"1":"back","2":".sqrt(","3":".pow(","4":".log(","q":".cos(","w":".acos(","e":".fabs(","r":".factorial(","a":".sin(","s":".asin(","d":".pi","f":".e","z":".tan(","x":".atan(","c":"","v":""}
d3={"1":"back","2":"","3":".factors(","4":".symbols(","q":".limit(","w":".solve(","e":".subs(","r":".symplify(","a":".evalf(","s":".init_sesion(","d":".sqrt(","f":"","z":".cos(","x":".sin(","c":".tg(","v":""}
d4={"1":"back","2":"(","3":")","4":",","q":"//","w":"%","e":"**","r":";","a":"","s":"","d":"","f":"","z":"","x":"","c":"","v":""}
#menu dictionary
dmn={"m0":m0,"m1":m1,"m2":m2,"m3":m3,"m4":m4}
#menu print
def showmenu(menu, comand, result):
lngcmd=len(comand)
lngres=len(str(result))
if lngcmd<35 and lngcmd%2!=0 or lngcmd==32:
comand=str((int((35-lngcmd)/2)-1)*" ")+comand+str((int((35-lngcmd)/2)-1)*" ")
if lngcmd==32:
comand+=" "
elif lngcmd<35 and lngcmd%2==0 and lngcmd!=34:
comand=str((int((36-lngcmd)/2)-2)*" ")+comand+str((int((35-lngcmd)/2))*" ")
if lngcmd>35 or lngcmd==33 or lngcmd==34 or lngcmd==35:
comand=comand[(lngcmd-33):]
if lngres<35 and lngres%2!=0 or lngres==32:
result=str((int((35-lngres)/2)-1)*" ")+str(result)+str((int((35-lngres)/2)-1)*" ")
if lngres==32:
result+=" "
elif lngres<35 and lngres%2==0 and lngres!=34:
result=str((int((35-lngres)/2)-1)*" ")+str(result)+str(int((35-lngres)/2)*" ")
    if lngres>35 or lngres==33 or lngres==34 or lngres==35:  # was lngcmd: copy-paste bug
result=str(result)[(lngres-33):]
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
print("// / //")
print("// ",comand," / ",result," //")
print("// / //")
print("///////////////////////////////////////////////////////////////////////////////")
print(dmn[menu])
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
print("///////////////////////////////////////////////////////////////////////////////")
#helper for presses
def press(string):
for key in string:
keyboard.press(key)
time.sleep(0.001)
keyboard.release(key)
#main execution
qut=False
while qut!=True:
showmenu(mn,cmd,res)
res=""
key=keyboard.read_key()
# key=input()
# key=str(sys.stdin.read(1))
sub.call("clear")
time.sleep(timsl)
lng=len(cmd)
try:
if mn=="m0":
cmd+=d0[key]
if d0[key]!="delete":
lcmd+=str(len(d0[key]))
elif mn=="m1":
cmd+=d1[key]
lcmd+=str(len(d1[key]))
elif mn=="m2":
cmd+=d2[key]
lcmd+=str(len(d2[key]))
elif mn=="m3":
cmd+=d3[key]
lcmd+=str(len(d3[key]))
elif mn=="m4":
cmd+=d4[key]
lcmd+=str(len(d4[key]))
except:
mn="m0"
lcmd=lcmd.replace("0","")
try:
if mn=="m0":
fnd=cmd[lng:]
mn=cmred[fnd]
except:
mn="m0"
if (cmd.find("emulationstation")>=0):
sub.call(["su", "pi", "-c", "emulationstation"])
sub.call("clear")
if (cmd.find("startx")>=0):
sub.call("lightdm")
keyboard.send('ctrl+alt+F1')
sub.call("clear")
if (cmd.find("back")>=0):
mn=redba[mn]
if (cmd.find("next")>=0):
mn=rednx[mn]
if (cmd.find("execute")>=0):
exe="res="+cmd[:lng]
if (cmd.find("quit")>=0):
qut=True
if (cmd.find("delete")>=0):
try:
cmd=cmd.replace("delete","")
cmd=cmd[:-(int(lcmd[-2]))]
lcmd=lcmd[:-1]
except:
cmd=cmd.replace("delete","")
cmd=cmd.replace("basic","")
cmd=cmd.replace("back","")
cmd=cmd.replace("emulationstation","")
cmd=cmd.replace("execute","")
cmd=cmd.replace("next","")
cmd=cmd.replace("startx","")
try:
exec(exe,globals(),locals())
exe=""
except:
cmd=""
exe=""
#exiting routine
sub.call("clear")
quit()
|
{"hexsha": "b2f009598f616ccf2fb8ce54b79c6785ba40e9b6", "size": 11980, "ext": "py", "lang": "Python", "max_stars_repo_path": "Raspberry/Interface/main.old.py", "max_stars_repo_name": "miguiss27/Calculadora", "max_stars_repo_head_hexsha": "f4f488abd156b22082f425200291d9f27c6dff54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Raspberry/Interface/main.old.py", "max_issues_repo_name": "miguiss27/Calculadora", "max_issues_repo_head_hexsha": "f4f488abd156b22082f425200291d9f27c6dff54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Raspberry/Interface/main.old.py", "max_forks_repo_name": "miguiss27/Calculadora", "max_forks_repo_head_hexsha": "f4f488abd156b22082f425200291d9f27c6dff54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 62.0725388601, "max_line_length": 1299, "alphanum_fraction": 0.2131051753, "include": true, "reason": "import numpy,import sympy", "num_tokens": 2457}
|
import argparse
import os
import numpy as np
from sklearn.model_selection import train_test_split
def read_sentence_data(gold_sent_fh):
gold_scores = [float(line.strip()) for line in open(gold_sent_fh, 'r')]
return gold_scores
def read_data(fname):
data = [line.strip() for line in open(fname, 'r')]
return data
def save_data(fname, data):
f = open(fname, 'w', encoding='utf8')
for line in data:
f.write(line + '\n')
f.close()
def split_data(fname, idxs_first, idxs_second):
data = read_data(fname)
data_first = [data[i] for i in idxs_first]
data_second = [data[i] for i in idxs_second]
new_name_first = fname.replace('dev.', 'dev.set1.')
new_name_second = fname.replace('dev.', 'dev.set2.')
save_data(new_name_first, data_first)
save_data(new_name_second, data_second)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--lp-dir', type=str, required=True, default='data/ro-en/')
args = parser.parse_args()
scores = read_sentence_data(os.path.join(args.lp_dir, 'dev.da'))
scores = np.array(scores)
    min_score = np.amin(scores)
    max_score = np.amax(scores)
    bins = np.linspace(min_score, max_score, 21)
bins = bins[1:] # ignore first bin to create the interval (0, bin[1])
y = np.digitize(scores, bins, right=True)
x = np.arange(len(y))
idxs_first, idxs_second, _, _ = train_test_split(x, y, stratify=y, test_size=0.5)
idxs_first = idxs_first.tolist()
idxs_second = idxs_second.tolist()
print(bins)
print([(y == i).sum() for i in range(20)])
print(len(idxs_first))
print(len(idxs_second))
all_fnames = ['dev.da', 'dev.mt', 'dev.src', 'dev.src-tags', 'dev.hter', 'dev.pe',
'dev.src-mt.alignments', 'dev.tgt-tags']
for fname in all_fnames:
split_data(os.path.join(args.lp_dir, fname), idxs_first, idxs_second)
save_data(os.path.join(args.lp_dir, 'dev.set1.idxs'), list(map(str, idxs_first)))
save_data(os.path.join(args.lp_dir, 'dev.set2.idxs'), list(map(str, idxs_second)))
|
{"hexsha": "9d6793434f7aecd6c6904f1a84a8ed1f979d611b", "size": 2072, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/split_dev_set.py", "max_stars_repo_name": "deep-spin/explainable-qe-shared-task", "max_stars_repo_head_hexsha": "da517a9a76f6dc0c68113e2d6be830f5b57726a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-10-10T17:40:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T08:52:10.000Z", "max_issues_repo_path": "scripts/split_dev_set.py", "max_issues_repo_name": "deep-spin/explainable-qe-shared-task", "max_issues_repo_head_hexsha": "da517a9a76f6dc0c68113e2d6be830f5b57726a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/split_dev_set.py", "max_forks_repo_name": "deep-spin/explainable-qe-shared-task", "max_forks_repo_head_hexsha": "da517a9a76f6dc0c68113e2d6be830f5b57726a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T08:52:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T08:52:12.000Z", "avg_line_length": 31.8769230769, "max_line_length": 86, "alphanum_fraction": 0.6616795367, "include": true, "reason": "import numpy", "num_tokens": 568}
|
import os
import tornado.web
import tornado.ioloop
import codecs
import math
import numpy as np
import os
import sys
import json
import torch
from torch.utils.data import DataLoader
from config import Config
from dataset.classification_dataset import ClassificationDataset
from dataset.collator import ClassificationCollator
from dataset.collator import ClassificationType
from dataset.collator import FastTextCollator
from model.classification.drnn import DRNN
from model.classification.fasttext import FastText
from model.classification.textcnn import TextCNN
from model.classification.textvdcnn import TextVDCNN
from model.classification.textrnn import TextRNN
from model.classification.textrcnn import TextRCNN
from model.classification.transformer import Transformer
from model.classification.dpcnn import DPCNN
from model.classification.attentive_convolution import AttentiveConvNet
from model.classification.region_embedding import RegionEmbedding
from model.model_util import get_optimizer, get_hierar_relations
from predict import Predictor
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello , I love this world~\n")
def post(self):
sentence = self.get_argument('sentence')
        # language flag: '0' selects English, '1' selects Chinese
lan = self.get_argument('language')
print('sentence passed is %s' %sentence)
#sentence = '{"doc_label": ["Computer--MachineLearning--DeepLearning", "Neuro--ComputationalNeuro"],"doc_token": ["I", "love", "deep", "learning"],"doc_keyword": ["deep learning"],"doc_topic": ["AI", "Machine learning"]}'
config = Config(config_file='conf/train.json')
if lan == '0':
config = Config(config_file='conf/train.json')
if lan == '1':
            print('train2.json used')
config = Config(config_file='conf/train2.json')
predictor = Predictor(config)
batch_size = config.eval.batch_size
input_texts = []
predict_probs = []
is_multi = config.task_info.label_type == ClassificationType.MULTI_LABEL
#TODO pass sentence as input_texts
#for line in codecs.open(sys.argv[2], "r", predictor.dataset.CHARSET):
# input_texts.append(line.strip("\n"))
# epoches = math.ceil(len(input_texts)/batch_size)
# for line in iter(sentence, "\n"):
# print('current line is %s' %line)
# input_texts.append(line.strip("\n"))
# epoches = math.ceil(len(input_texts)/batch_size)
input_texts.append(sentence.strip("\n"))
epoches = math.ceil(len(input_texts)/batch_size)
print('input_texts needed to be predicted is %s' %input_texts)
for i in range(epoches):
batch_texts = input_texts[i*batch_size:(i+1)*batch_size]
predict_prob = predictor.predict(batch_texts)
for j in predict_prob:
predict_probs.append(j)
for predict_prob in predict_probs:
if not is_multi:
predict_label_ids = [predict_prob.argmax()]
else:
predict_label_ids = []
predict_label_idx = np.argsort(-predict_prob)
for j in range(0, config.eval.top_k):
if predict_prob[predict_label_idx[j]] > config.eval.threshold:
predict_label_ids.append(predict_label_idx[j])
predict_label_name = [predictor.dataset.id_to_label_map[predict_label_id] \
for predict_label_id in predict_label_ids]
self.write(";".join(predict_label_name) + "\n")
def put(self):
sentence = self.get_argument('sentence')
self.write("hello , UPDATE\n " + sentence)
def delete(self):
self.write("hello , DELETE\n")
if __name__ == "__main__":
settings = {
'debug' : True,
'static_path' : os.path.join(os.path.dirname(__file__) , "static") ,
'template_path' : os.path.join(os.path.dirname(__file__) , "template") ,
}
application = tornado.web.Application([
(r"/" , MainHandler),
] , **settings)
application.listen(8080)
tornado.ioloop.IOLoop.instance().start()
|
{"hexsha": "8716884d212ffb9de36800f52f3795d49682deb1", "size": 4177, "ext": "py", "lang": "Python", "max_stars_repo_path": "api_tornado.py", "max_stars_repo_name": "TechnologyInstitute/NeuralNLP-NeuralClassifier", "max_stars_repo_head_hexsha": "cd2c46c8d99dd8fa537206b5f4444a777c7b5d89", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "api_tornado.py", "max_issues_repo_name": "TechnologyInstitute/NeuralNLP-NeuralClassifier", "max_issues_repo_head_hexsha": "cd2c46c8d99dd8fa537206b5f4444a777c7b5d89", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "api_tornado.py", "max_forks_repo_name": "TechnologyInstitute/NeuralNLP-NeuralClassifier", "max_forks_repo_head_hexsha": "cd2c46c8d99dd8fa537206b5f4444a777c7b5d89", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-03T02:09:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-03T02:09:49.000Z", "avg_line_length": 38.6759259259, "max_line_length": 229, "alphanum_fraction": 0.672252813, "include": true, "reason": "import numpy", "num_tokens": 913}
|
[STATEMENT]
lemma Spy_see_shrK_D [dest!]:
"\<lbrakk>Key (shrK A) \<in> parts (knows Spy evs); evs \<in> otway\<rbrakk> \<Longrightarrow> A \<in> bad"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Key (shrK A) \<in> parts (knows Spy evs); evs \<in> otway\<rbrakk> \<Longrightarrow> A \<in> bad
[PROOF STEP]
by (blast dest: Spy_see_shrK)
|
{"llama_tokens": 159, "file": null, "length": 1}
|
#!/usr/bin/env python
import argparse
import os
import os.path
import shutil
import cv2
import duckietown_utils
import numpy as np
import rosbag
# Example usage: ./sample_images.py path_to_bag /scbb/camera_rectifier/image/compressed 500
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Sample images from ROS Bags.')
parser.add_argument('bag_path', help='path to the ROS Bag')
parser.add_argument('topic', help='the topic for image sampling')
parser.add_argument(
'n_imgs', help='the approximate number of images to extract')
parser.add_argument('--output_dir', default='./sampled_images',
help='output directory for the sampled images')
args = parser.parse_args()
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir, ignore_errors=True)
os.makedirs(args.output_dir)
rb = rosbag.Bag(args.bag_path)
n_imgs = float(args.n_imgs)
p = n_imgs / rb.get_message_count(args.topic)
img_no = 0
for msg in rb.read_messages(args.topic):
        if np.random.rand() < p:
            # Zero-pad the file index so names sort correctly, and flip the
            # ROS image from RGB to BGR (OpenCV's channel order) on write.
            cv2.imwrite(os.path.join(args.output_dir, 'sample_{{:0{}d}}.png'.format(int(np.floor(
                np.log10(n_imgs))) + 1).format(img_no)), duckietown_utils.rgb_from_ros(msg.message)[..., ::-1])
            img_no += 1
|
{"hexsha": "d2d3f3b53ab6cf5e9146b5de3f99880774534f5e", "size": 1357, "ext": "py", "lang": "Python", "max_stars_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/annotation/sample_images.py", "max_stars_repo_name": "johnson880319/Software", "max_stars_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_stars_repo_licenses": ["CC-BY-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/annotation/sample_images.py", "max_issues_repo_name": "johnson880319/Software", "max_issues_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_issues_repo_licenses": ["CC-BY-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/annotation/sample_images.py", "max_forks_repo_name": "johnson880319/Software", "max_forks_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_forks_repo_licenses": ["CC-BY-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7105263158, "max_line_length": 111, "alphanum_fraction": 0.6816507001, "include": true, "reason": "import numpy", "num_tokens": 325}
|
import unittest
import numpy as np
import numpy.testing as np_testing
import import_ipynb  # needed so the Activity8_01 notebook can be imported
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing import image
class Test(unittest.TestCase):
def setUp(self):
import Activity8_01
self.activity = Activity8_01
self.classifier=VGG16()
self.new_image = image.load_img('../Data/Prediction/test_image_1.jpg', target_size=(224, 224))
self.transformed_image = image.img_to_array(self.new_image)
self.transformed_image = np.expand_dims(self.transformed_image, axis=0)
self.transformed_image = preprocess_input(self.transformed_image)
def test_image_size(self):
np_testing.assert_array_equal(self.transformed_image, self.activity.transformed_image)
def test_prediction(self):
y_pred = self.classifier.predict(self.transformed_image)
np_testing.assert_array_equal(y_pred, self.activity.y_pred)
def test_decode_prediction(self):
y_pred = self.classifier.predict(self.transformed_image)
predictions = decode_predictions(y_pred, top=5)
np_testing.assert_array_equal(predictions, decode_predictions(self.activity.y_pred, top=5))
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "679b3440d01906f8cfa6bb4714b2a981aa8293fb", "size": 1411, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter08/Activity8.01/Activity8.01_Unit_test.py", "max_stars_repo_name": "PacktWorkshops/Applied-Deep-Learning-with-Keras", "max_stars_repo_head_hexsha": "d1372a6109e2ee9434ae47df59440577566badaa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-16T13:28:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-16T13:28:41.000Z", "max_issues_repo_path": "Chapter08/Activity8.01/Activity8.01_Unit_test.py", "max_issues_repo_name": "olavomendes/The-Deep-Learning-with-Keras-Workshop", "max_issues_repo_head_hexsha": "d1372a6109e2ee9434ae47df59440577566badaa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-04-30T21:35:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:55:31.000Z", "max_forks_repo_path": "Chapter08/Activity8.01/Activity8.01_Unit_test.py", "max_forks_repo_name": "olavomendes/The-Deep-Learning-with-Keras-Workshop", "max_forks_repo_head_hexsha": "d1372a6109e2ee9434ae47df59440577566badaa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-03T10:22:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-13T06:30:33.000Z", "avg_line_length": 36.1794871795, "max_line_length": 102, "alphanum_fraction": 0.7427356485, "include": true, "reason": "import numpy", "num_tokens": 295}
|
[STATEMENT]
lemma strong_supplementation: "\<not> P x y \<Longrightarrow> (\<exists> z. P z x \<and> \<not> O z y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> P x y \<Longrightarrow> \<exists>z. P z x \<and> \<not> O z y
[PROOF STEP]
nitpick [expect = genuine]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> P x y \<Longrightarrow> \<exists>z. P z x \<and> \<not> O z y
[PROOF STEP]
oops
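(* Strong supplementation: if x is not a part of y, then some part of x
   fails to overlap y. The [expect = genuine] hint tells nitpick that a
   countermodel should exist, i.e. the principle is presumably not
   derivable from the axioms in scope, so the attempt ends with oops. *)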
|
{"llama_tokens": 167, "file": "Mereology_GMM", "length": 2}
|
\documentclass[letterpaper,10pt]{article}
\usepackage[margin=2cm]{geometry}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage[colorlinks]{hyperref}
\newcommand{\panhline}{\begin{center}\rule{\textwidth}{1pt}\end{center}}
\title{\textbf{LectureTitle}}
\author{Authors}
\begin{document}
\maketitle
\panhline
\href{../index.html}{Back to Index}
\panhline
\tableofcontents
\section*{Resources}
\begin{itemize}
\item \href{../../Lectures/LectureFile.pdf}{Lecture}
\end{itemize}
\panhline
\end{document}
|
{"hexsha": "c25cec0ac92a09f03bcc3ae0d16df9704900e3be", "size": 565, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Notes/R3_MleMapVectorMatrixDifferentiation/document.tex", "max_stars_repo_name": "MengwenHe-CMU/17S_10701_MachineLearning", "max_stars_repo_head_hexsha": "613a3087a57a206b83d79855cec359e04cb440f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-04T01:53:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-14T09:17:05.000Z", "max_issues_repo_path": "Notes/R3_MleMapVectorMatrixDifferentiation/document.tex", "max_issues_repo_name": "MengwenHe-CMU/17S_10701_MachineLearning", "max_issues_repo_head_hexsha": "613a3087a57a206b83d79855cec359e04cb440f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notes/R3_MleMapVectorMatrixDifferentiation/document.tex", "max_forks_repo_name": "MengwenHe-CMU/17S_10701_MachineLearning", "max_forks_repo_head_hexsha": "613a3087a57a206b83d79855cec359e04cb440f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-20T15:07:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-20T15:07:29.000Z", "avg_line_length": 14.125, "max_line_length": 72, "alphanum_fraction": 0.7451327434, "num_tokens": 185}
|
import numpy as np

# Ray-transfer (ABCD) matrix calculator: reads the refractive indices,
# surface curvatures, and inter-surface thicknesses of an optical
# system and prints its effective focal length.
print("Separate values with spaces.")
n = list(map(float, input("Refractive Index>> ").split()))
r = list(map(float, input("Curvature>> ").split()))
d = list(map(float, input("Thickness>> ").split()))
n.insert(0, 1.0)  # the ray starts in air (index 1.0)
print(n)


def calculate_matrix():
    """Build the refraction and translation matrices, ordered from the
    last surface back to the first."""
    i = len(r)
    matrix_list = []
    # Refraction at the last surface: [[1, 0], [-(n2 - n1)/(r*n2), n1/n2]]
    l_n = n[i]
    l_n_1 = n[i - 1]
    l_r = r[i - 1]
    last_matrix = np.array([[1, 0], [-(l_n - l_n_1) / (l_r * l_n), l_n_1 / l_n]])
    matrix_list.append(last_matrix)
    i -= 1
    while i >= 1:
        i -= 1
        X = i
        n_n = n[X + 1]
        n_n_1 = n[X]
        n_r = r[X]
        n_d = d[X]
        # Translation across thickness d, then refraction at surface X.
        id_matrix = np.array([[1, n_d], [0, 1]])
        matrix_list.append(id_matrix)
        X_matrix = np.array([[1, 0], [-(n_n - n_n_1) / (n_r * n_n), n_n_1 / n_n]])
        matrix_list.append(X_matrix)
    return matrix_list


# The indices list must be one longer than the curvature and thickness
# lists, and the system needs at least two surfaces.
if len(n) == len(r) + 1 and len(n) == len(d) + 1 and len(n) >= 3:
    All = calculate_matrix()
    # The list is ordered last surface first, so multiplying left to
    # right gives the system matrix M_last @ ... @ T @ M_first.
    seki = All[0] @ All[1]
    N = 2
    while N <= len(All) - 1:
        seki = seki @ All[N]
        N += 1
    # Effective focal length from the C element of the system matrix.
    f = -1 / seki[1, 0]
    print(f)
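
# Worked sanity check (illustrative values, not from the original
# source): a thin biconvex lens in air with n = 1.5, R1 = 0.1,
# R2 = -0.1 and negligible thickness. The lensmaker's equation
#     1/f = (n - 1) * (1/R1 - 1/R2) = 0.5 * (10 + 10) = 10
# predicts f = 0.1, so the inputs
#     Refractive Index>> 1.5 1.0
#     Curvature>> 0.1 -0.1
#     Thickness>> 0.0001 0.0001
# should print a focal length very close to 0.1.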
|
{"hexsha": "d5197d496b4ead9c85488cfee5b89548fc1d4274", "size": 1454, "ext": "py", "lang": "Python", "max_stars_repo_path": "matrix.py", "max_stars_repo_name": "ilikemap2/RayMatrix", "max_stars_repo_head_hexsha": "d894ed1b0bda85aad2c3b155cbcf2a277c30e8be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matrix.py", "max_issues_repo_name": "ilikemap2/RayMatrix", "max_issues_repo_head_hexsha": "d894ed1b0bda85aad2c3b155cbcf2a277c30e8be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matrix.py", "max_forks_repo_name": "ilikemap2/RayMatrix", "max_forks_repo_head_hexsha": "d894ed1b0bda85aad2c3b155cbcf2a277c30e8be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.3866666667, "max_line_length": 75, "alphanum_fraction": 0.4154057772, "include": true, "reason": "import numpy", "num_tokens": 424}
|