content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# ***
# Functions to extract variance components from supported classes
# of statistical models
#
# *** lmer method
# Extracts variance components from a list of fitted lme4 models (one model
# per imputed data set), returning the random-effect covariance matrices,
# the residual variance (when a scale parameter is used), and the average
# intraclass correlation (ICC).
#
# Args:
#   model: list of 'merMod' fits of identical specification (length m).
# Returns:
#   list(vlist = named list of q x q x m arrays of variance components
#                (one per cluster variable, plus a 1 x 1 x m "Residual"
#                array when the model estimates a scale parameter),
#        addp  = named numeric vector of additional parameters (here the
#                ICC averaged across imputations, only for single-cluster
#                models with a random intercept), or NULL).
.getVC.lmer <- function(model){
if(!requireNamespace("lme4", quietly=TRUE)) stop("The 'lme4' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components
vlist <- list()
vc <- lapply(model,lme4::VarCorr)
clus <- names(vc[[1]])  # names of the cluster (grouping) variables
for(vv in clus){
q <- dim(vc[[1]][[vv]])[1]  # number of random effects for this cluster
# stack the m covariance matrices into a q x q x m array
v.cl <- vapply(vc, function(z) z[[vv]], FUN.VALUE=matrix(0,q,q))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
# rename "(Intercept)" to "Intercept" in the dimnames
dimnames(v.cl)[1:2] <- lapply(dimnames(vc[[1]][[vv]]), function(z) sub("^[(]Intercept[)]$","Intercept",z))
vlist[[paste("|",vv,sep="")]] <- v.cl
}
# residual variance (if model uses scale)
usesc <- attr(vc[[1]], "useSc")
if(usesc){
rv <- sapply(vc, function(z) attr(z,"sc")^2)  # sigma^2 per imputation
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
}
# additional parameters
# 1. ICC (only single clustering)
if(usesc & length(clus)==1){
if("(Intercept)"%in%colnames(vc[[1]][[clus]])){
iv <- sapply(vc, function(z) z[[clus]]["(Intercept)","(Intercept)"])
# ICC = intercept variance / (intercept + residual variance),
# then averaged across the m imputations
icc <- iv / (iv + rv[1,1,])
addp <- c(addp, mean(icc))
names(addp) <- paste("ICC|",clus,sep="")
}
}
list(vlist=vlist,addp=addp)
}
# *** nlme method
# Extracts variance components from a list of fitted nlme models (one model
# per imputed data set).  Full covariance matrices are extracted only for
# single-cluster 'lme' fits (via getVarCov); for other cases only the
# per-term variances are read off the VarCorr table.
#
# Args:
#   model: list of 'lme'/'gls'-type fits of identical specification
#          (length m).
# Returns:
#   list(vlist = named list of q x q x m (or 1 x 1 x m) arrays of variance
#                components, plus a 1 x 1 x m "Residual" array when sigma
#                is estimated,
#        addp  = named numeric vector with the average ICC (only for lme
#                fits with a single cluster and a random intercept), or
#                NULL).
.getVC.nlme <- function(model){
if(!requireNamespace("nlme", quietly=TRUE)) stop("The 'nlme' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components (single clustering, limited for multiple clustering)
cls <- class(model[[1]])[1]
clus <- names(model[[1]]$coefficients$random)
vlist <- list()
# single cluster variable: full covariance matrix via getVarCov()
if(cls=="lme" & length(clus)==1){
vc <- lapply(model,nlme::getVarCov)
clus <- attr(vc[[1]],"group.levels")
q <- dim(vc[[1]])[1]
# stack the m covariance matrices into a q x q x m array
v.cl <- vapply(vc, identity, FUN.VALUE=matrix(0,q,q))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
# rename "(Intercept)" to "Intercept" in the dimnames
dimnames(v.cl)[1:2] <- lapply(dimnames(vc[[1]]), function(z) sub("^[(]Intercept[)]$","Intercept",z))
vlist[[paste("|",clus,sep="")]] <- v.cl
}else{
# fall back to the (character) VarCorr table; variances only
vc <- lapply(model,nlme::VarCorr)
# by variable
for(vv in clus){
q <- dim(model[[1]]$coefficients$random[[vv]])[2]
if(length(clus)==1){
rind <- 1:q
}else{
# locate the "<cluster> =" header row, then take the q rows below it
rind <- grep( paste0("^",vv," =$"), rownames(vc[[1]]))
rind <- (rind+1):(rind+q)
}
# ... by term
for(qq in rind){
v.cl <- sapply(vc, function(x) as.numeric(x[qq,1]))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
dimnames(v.cl)[1:2] <- list(sub("^[(]Intercept[)]$", "Intercept", rownames(vc[[1]])[qq]))
vlist[[paste("|",vv,sep="")]] <- v.cl
}
}
}
# residual variance (if estimated)
fixsigma <- attr(model[[1]]$modelStruct,"fixedSigma")
if(!fixsigma){
rv <- sapply(model, function(z) z$sigma^2)  # sigma^2 per imputation
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
}
# additional parameters
# 1. ICC (only lme, single clustering)
if(!fixsigma & cls=="lme" & length(clus)==1){
if("(Intercept)"%in%colnames(vc[[1]])){
iv <- sapply(vc, function(z) z["(Intercept)","(Intercept)"])
# ICC = intercept variance / (intercept + residual variance), averaged
icc <- iv / (iv + rv[1,1,])
addp <- c(addp, mean(icc))
names(addp) <- paste("ICC|",clus,sep="")
}
}
list(vlist=vlist,addp=addp)
}
# *** geeglm method
# Extracts additional parameters from a list of fitted geepack::geeglm
# models (one per imputed data set).  GEEs have no random-effect variance
# components, so 'vlist' stays NULL; instead the scale parameter (gamma)
# and working-correlation parameters (alpha) are averaged across the
# imputations when they are estimated (not fixed).
#
# Args:
#   model: list of 'geeglm' fits of identical specification (length m).
# Returns:
#   list(vlist = NULL,
#        addp  = named numeric vector of averaged "Scale:" and
#                "Correlation:" parameters, or NULL if both are fixed).
.getVC.geeglm <- function(model){
if(!requireNamespace("geepack", quietly=TRUE)) stop("The 'geepack' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components (currently not used)
# vlist <- list()
# additional parameters
# 1. scale parameter (gamma)
isfix <- model[[1]]$geese$model$scale.fix
if(!isfix){
gamma <- sapply(model, function(x) x$geese$gamma)
if(is.null(dim(gamma))){
dim(gamma) <- c(1,m)  # single scale parameter: coerce to a 1 x m matrix
rownames(gamma) <- names(model[[1]]$geese$gamma)
}
addp <- c(addp,rowMeans(gamma))  # average across the m imputations
nms <- gsub("^[(]Intercept[)]$", "Intercept", names(addp))
names(addp) <- paste0("Scale:",nms)
}
# 2. correlation parameters (alpha)
corstr <- model[[1]]$geese$model$corstr
isfix <- corstr%in%c("fixed","userdefined")
if(!isfix){
alpha <- sapply(model, function(x) x$geese$alpha)
if(is.null(dim(alpha))){
dim(alpha) <- c(1,m)  # single correlation parameter: coerce to 1 x m
rownames(alpha) <- names(model[[1]]$geese$alpha)
}
rownames(alpha) <- paste0("Correlation:",rownames(alpha))
addp <- c(addp,rowMeans(alpha))  # average across the m imputations
}
list(vlist=vlist,addp=addp)
}
# *** lm method
# Extracts the residual variance from a list of fitted lm models (one per
# imputed data set).
#
# Args:
#   model: list of 'lm' fits of identical specification (length m).
#   ML: if TRUE, use the ML estimate (divide the residual sum of squares
#       by n); otherwise the unbiased estimate (divide by the residual
#       degrees of freedom).
# Returns:
#   list(vlist = list containing a single 1 x 1 x m "Residual" variance
#                array,
#        addp  = NULL; lm has no additional parameters).
.getVC.lm <- function(model,ML=FALSE){
m <- length(model)
vlist <- addp <- NULL
# residual variance per imputation; vapply guarantees a numeric(1) result
# per model, and resid() is evaluated only once per fit
rv <- vapply(model, function(z){
r <- resid(z)
denom <- if(ML) length(r) else df.residual(z)  # SiG 16-04-2016 (ML option)
sum(r^2)/denom
}, numeric(1))
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
list(vlist=vlist,addp=addp)
}
|
/R/internal-getVC.R
|
no_license
|
stefvanbuuren/mitml
|
R
| false
| false
| 5,067
|
r
|
# ***
# Functions to extract variance components from supported classes
# of statistical models
#
# *** lmer method
# Extracts variance components from a list of fitted lme4 models (one model
# per imputed data set), returning the random-effect covariance matrices,
# the residual variance (when a scale parameter is used), and the average
# intraclass correlation (ICC).
#
# Args:
#   model: list of 'merMod' fits of identical specification (length m).
# Returns:
#   list(vlist = named list of q x q x m arrays of variance components
#                (one per cluster variable, plus a 1 x 1 x m "Residual"
#                array when the model estimates a scale parameter),
#        addp  = named numeric vector of additional parameters (here the
#                ICC averaged across imputations, only for single-cluster
#                models with a random intercept), or NULL).
.getVC.lmer <- function(model){
if(!requireNamespace("lme4", quietly=TRUE)) stop("The 'lme4' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components
vlist <- list()
vc <- lapply(model,lme4::VarCorr)
clus <- names(vc[[1]])  # names of the cluster (grouping) variables
for(vv in clus){
q <- dim(vc[[1]][[vv]])[1]  # number of random effects for this cluster
# stack the m covariance matrices into a q x q x m array
v.cl <- vapply(vc, function(z) z[[vv]], FUN.VALUE=matrix(0,q,q))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
# rename "(Intercept)" to "Intercept" in the dimnames
dimnames(v.cl)[1:2] <- lapply(dimnames(vc[[1]][[vv]]), function(z) sub("^[(]Intercept[)]$","Intercept",z))
vlist[[paste("|",vv,sep="")]] <- v.cl
}
# residual variance (if model uses scale)
usesc <- attr(vc[[1]], "useSc")
if(usesc){
rv <- sapply(vc, function(z) attr(z,"sc")^2)  # sigma^2 per imputation
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
}
# additional parameters
# 1. ICC (only single clustering)
if(usesc & length(clus)==1){
if("(Intercept)"%in%colnames(vc[[1]][[clus]])){
iv <- sapply(vc, function(z) z[[clus]]["(Intercept)","(Intercept)"])
# ICC = intercept variance / (intercept + residual variance),
# then averaged across the m imputations
icc <- iv / (iv + rv[1,1,])
addp <- c(addp, mean(icc))
names(addp) <- paste("ICC|",clus,sep="")
}
}
list(vlist=vlist,addp=addp)
}
# *** nlme method
# Extracts variance components from a list of fitted nlme models (one model
# per imputed data set).  Full covariance matrices are extracted only for
# single-cluster 'lme' fits (via getVarCov); for other cases only the
# per-term variances are read off the VarCorr table.
#
# Args:
#   model: list of 'lme'/'gls'-type fits of identical specification
#          (length m).
# Returns:
#   list(vlist = named list of q x q x m (or 1 x 1 x m) arrays of variance
#                components, plus a 1 x 1 x m "Residual" array when sigma
#                is estimated,
#        addp  = named numeric vector with the average ICC (only for lme
#                fits with a single cluster and a random intercept), or
#                NULL).
.getVC.nlme <- function(model){
if(!requireNamespace("nlme", quietly=TRUE)) stop("The 'nlme' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components (single clustering, limited for multiple clustering)
cls <- class(model[[1]])[1]
clus <- names(model[[1]]$coefficients$random)
vlist <- list()
# single cluster variable: full covariance matrix via getVarCov()
if(cls=="lme" & length(clus)==1){
vc <- lapply(model,nlme::getVarCov)
clus <- attr(vc[[1]],"group.levels")
q <- dim(vc[[1]])[1]
# stack the m covariance matrices into a q x q x m array
v.cl <- vapply(vc, identity, FUN.VALUE=matrix(0,q,q))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
# rename "(Intercept)" to "Intercept" in the dimnames
dimnames(v.cl)[1:2] <- lapply(dimnames(vc[[1]]), function(z) sub("^[(]Intercept[)]$","Intercept",z))
vlist[[paste("|",clus,sep="")]] <- v.cl
}else{
# fall back to the (character) VarCorr table; variances only
vc <- lapply(model,nlme::VarCorr)
# by variable
for(vv in clus){
q <- dim(model[[1]]$coefficients$random[[vv]])[2]
if(length(clus)==1){
rind <- 1:q
}else{
# locate the "<cluster> =" header row, then take the q rows below it
rind <- grep( paste0("^",vv," =$"), rownames(vc[[1]]))
rind <- (rind+1):(rind+q)
}
# ... by term
for(qq in rind){
v.cl <- sapply(vc, function(x) as.numeric(x[qq,1]))
if(is.null(dim(v.cl))) dim(v.cl) <- c(1,1,m)
dimnames(v.cl)[1:2] <- list(sub("^[(]Intercept[)]$", "Intercept", rownames(vc[[1]])[qq]))
vlist[[paste("|",vv,sep="")]] <- v.cl
}
}
}
# residual variance (if estimated)
fixsigma <- attr(model[[1]]$modelStruct,"fixedSigma")
if(!fixsigma){
rv <- sapply(model, function(z) z$sigma^2)  # sigma^2 per imputation
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
}
# additional parameters
# 1. ICC (only lme, single clustering)
if(!fixsigma & cls=="lme" & length(clus)==1){
if("(Intercept)"%in%colnames(vc[[1]])){
iv <- sapply(vc, function(z) z["(Intercept)","(Intercept)"])
# ICC = intercept variance / (intercept + residual variance), averaged
icc <- iv / (iv + rv[1,1,])
addp <- c(addp, mean(icc))
names(addp) <- paste("ICC|",clus,sep="")
}
}
list(vlist=vlist,addp=addp)
}
# *** geeglm method
# Extracts additional parameters from a list of fitted geepack::geeglm
# models (one per imputed data set).  GEEs have no random-effect variance
# components, so 'vlist' stays NULL; instead the scale parameter (gamma)
# and working-correlation parameters (alpha) are averaged across the
# imputations when they are estimated (not fixed).
#
# Args:
#   model: list of 'geeglm' fits of identical specification (length m).
# Returns:
#   list(vlist = NULL,
#        addp  = named numeric vector of averaged "Scale:" and
#                "Correlation:" parameters, or NULL if both are fixed).
.getVC.geeglm <- function(model){
if(!requireNamespace("geepack", quietly=TRUE)) stop("The 'geepack' package must be installed in order to use this function.")
m <- length(model)
vlist <- addp <- NULL
# variance components (currently not used)
# vlist <- list()
# additional parameters
# 1. scale parameter (gamma)
isfix <- model[[1]]$geese$model$scale.fix
if(!isfix){
gamma <- sapply(model, function(x) x$geese$gamma)
if(is.null(dim(gamma))){
dim(gamma) <- c(1,m)  # single scale parameter: coerce to a 1 x m matrix
rownames(gamma) <- names(model[[1]]$geese$gamma)
}
addp <- c(addp,rowMeans(gamma))  # average across the m imputations
nms <- gsub("^[(]Intercept[)]$", "Intercept", names(addp))
names(addp) <- paste0("Scale:",nms)
}
# 2. correlation parameters (alpha)
corstr <- model[[1]]$geese$model$corstr
isfix <- corstr%in%c("fixed","userdefined")
if(!isfix){
alpha <- sapply(model, function(x) x$geese$alpha)
if(is.null(dim(alpha))){
dim(alpha) <- c(1,m)  # single correlation parameter: coerce to 1 x m
rownames(alpha) <- names(model[[1]]$geese$alpha)
}
rownames(alpha) <- paste0("Correlation:",rownames(alpha))
addp <- c(addp,rowMeans(alpha))  # average across the m imputations
}
list(vlist=vlist,addp=addp)
}
# *** lm method
# Extracts the residual variance from a list of fitted lm models (one per
# imputed data set).
#
# Args:
#   model: list of 'lm' fits of identical specification (length m).
#   ML: if TRUE, use the ML estimate (divide the residual sum of squares
#       by n); otherwise the unbiased estimate (divide by the residual
#       degrees of freedom).
# Returns:
#   list(vlist = list containing a single 1 x 1 x m "Residual" variance
#                array,
#        addp  = NULL; lm has no additional parameters).
.getVC.lm <- function(model,ML=FALSE){
m <- length(model)
vlist <- addp <- NULL
# residual variance per imputation; vapply guarantees a numeric(1) result
# per model, and resid() is evaluated only once per fit
rv <- vapply(model, function(z){
r <- resid(z)
denom <- if(ML) length(r) else df.residual(z)  # SiG 16-04-2016 (ML option)
sum(r^2)/denom
}, numeric(1))
dim(rv) <- c(1,1,m)
dimnames(rv) <- list("Residual","Residual",NULL)
vlist <- c(vlist, list(rv))
list(vlist=vlist,addp=addp)
}
|
library(sspse)
### Name: posteriorsize
### Title: Estimating hidden population size using RDS data
### Aliases: posteriorsize
### Keywords: models
### ** Examples
N0 <- 200   # true population size for the simulation
n <- 100    # sample size
K <- 10     # maximum (truncation) degree
# Create probabilities for a Waring distribution
# with scaling parameter 3 and mean 5, but truncated at K=10.
probs <- c(0.33333333,0.19047619,0.11904762,0.07936508,0.05555556,
0.04040404,0.03030303,0.02331002,0.01831502,0.01465201)
probs <- probs / sum(probs)
#
# Create a sample
#
set.seed(1)
# population of N0 degrees drawn from the truncated Waring probabilities
pop<-sample(1:K, size=N0, replace = TRUE, prob = probs)
# sample without replacement, inclusion probability proportional to degree
s<-sample(pop, size=n, replace = FALSE, prob = pop)
# Here interval=1 so that it will run faster. It should be higher in a
# real application.
out <- posteriorsize(s=s,interval=1)
# NOTE(review): pop[s] indexes pop by the sampled degree values, not by
# sampled unit — verify against the package example before relying on it
plot(out, HPD.level=0.9,data=pop[s])
summary(out, HPD.level=0.9)
# Let's look at some MCMC diagnostics
plot(out, HPD.level=0.9,mcmc=TRUE)
|
/data/genthat_extracted_code/sspse/examples/posteriorsize.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 885
|
r
|
library(sspse)
### Name: posteriorsize
### Title: Estimating hidden population size using RDS data
### Aliases: posteriorsize
### Keywords: models
### ** Examples
N0 <- 200   # true population size for the simulation
n <- 100    # sample size
K <- 10     # maximum (truncation) degree
# Create probabilities for a Waring distribution
# with scaling parameter 3 and mean 5, but truncated at K=10.
probs <- c(0.33333333,0.19047619,0.11904762,0.07936508,0.05555556,
0.04040404,0.03030303,0.02331002,0.01831502,0.01465201)
probs <- probs / sum(probs)
#
# Create a sample
#
set.seed(1)
# population of N0 degrees drawn from the truncated Waring probabilities
pop<-sample(1:K, size=N0, replace = TRUE, prob = probs)
# sample without replacement, inclusion probability proportional to degree
s<-sample(pop, size=n, replace = FALSE, prob = pop)
# Here interval=1 so that it will run faster. It should be higher in a
# real application.
out <- posteriorsize(s=s,interval=1)
# NOTE(review): pop[s] indexes pop by the sampled degree values, not by
# sampled unit — verify against the package example before relying on it
plot(out, HPD.level=0.9,data=pop[s])
summary(out, HPD.level=0.9)
# Let's look at some MCMC diagnostics
plot(out, HPD.level=0.9,mcmc=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_prism.R
\name{add_prism}
\alias{add_prism}
\title{Add a prism of nodes to the graph}
\usage{
add_prism(graph, n, type = NULL, label = TRUE, rel = NULL, nodes = NULL)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph} that is created using
\code{create_graph}.}
\item{n}{the number of nodes describing the shape
of the prism. For example, the triangular prism has
\code{n} equal to 3 and it is composed of 6 nodes
and 9 edges. For any n-gonal prism, the graph will
be generated with 2\code{n} nodes and 3\code{n}
edges.}
\item{type}{an optional string that describes the
entity type for the nodes to be added.}
\item{label}{either a vector object of length
\code{n} that provides optional labels for the new
nodes, or, a boolean value where setting to
\code{TRUE} ascribes node IDs to the label and
\code{FALSE} yields a blank label.}
\item{rel}{an optional string for providing a
relationship label to all new edges created in the
node prism.}
\item{nodes}{an optional vector of node IDs of
length \code{n} for the newly created nodes. If
nothing is provided, node IDs will be assigned as
monotonically increasing integers.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
With a graph object of class
\code{dgr_graph}, add a node prism to the graph.
}
\examples{
# Create a new graph and add a prism
graph <-
create_graph() \%>\%
add_prism(3, "prism")
# Get node information from this graph
node_info(graph)
#> node label type deg indeg outdeg loops
#> 1 1 1 prism 3 1 2 0
#> 2 2 2 prism 3 1 2 0
#> 3 3 3 prism 3 1 2 0
#> 4 4 4 prism 3 2 1 0
#> 5 5 5 prism 3 2 1 0
#> 6 6 6 prism 3 2 1 0
}
|
/man/add_prism.Rd
|
no_license
|
timelyportfolio/DiagrammeR
|
R
| false
| true
| 1,853
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_prism.R
\name{add_prism}
\alias{add_prism}
\title{Add a prism of nodes to the graph}
\usage{
add_prism(graph, n, type = NULL, label = TRUE, rel = NULL, nodes = NULL)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph} that is created using
\code{create_graph}.}
\item{n}{the number of nodes describing the shape
of the prism. For example, the triangular prism has
\code{n} equal to 3 and it is composed of 6 nodes
and 9 edges. For any n-gonal prism, the graph will
be generated with 2\code{n} nodes and 3\code{n}
edges.}
\item{type}{an optional string that describes the
entity type for the nodes to be added.}
\item{label}{either a vector object of length
\code{n} that provides optional labels for the new
nodes, or, a boolean value where setting to
\code{TRUE} ascribes node IDs to the label and
\code{FALSE} yields a blank label.}
\item{rel}{an optional string for providing a
relationship label to all new edges created in the
node prism.}
\item{nodes}{an optional vector of node IDs of
length \code{n} for the newly created nodes. If
nothing is provided, node IDs will be assigned as
monotonically increasing integers.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
With a graph object of class
\code{dgr_graph}, add a node prism to the graph.
}
\examples{
# Create a new graph and add a prism
graph <-
create_graph() \%>\%
add_prism(3, "prism")
# Get node information from this graph
node_info(graph)
#> node label type deg indeg outdeg loops
#> 1 1 1 prism 3 1 2 0
#> 2 2 2 prism 3 1 2 0
#> 3 3 3 prism 3 1 2 0
#> 4 4 4 prism 3 2 1 0
#> 5 5 5 prism 3 2 1 0
#> 6 6 6 prism 3 2 1 0
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iam_operations.R
\name{list_user_tags}
\alias{list_user_tags}
\title{Lists the tags that are attached to the specified user}
\usage{
list_user_tags(UserName, Marker = NULL, MaxItems = NULL)
}
\arguments{
\item{UserName}{[required] The name of the IAM user whose tags you want to see.
This parameter accepts (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-}
\item{Marker}{Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the \code{Marker} element in the response to indicate where the next call should start.}
\item{MaxItems}{(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the \code{IsTruncated} response element is \code{true}.
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the \code{IsTruncated} response element returns \code{true}, and \code{Marker} contains a value to include in the subsequent call that tells the service where to continue from.}
}
\description{
Lists the tags that are attached to the specified user. The returned list of tags is sorted by tag key. For more information about tagging, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html}{Tagging IAM Identities} in the \emph{IAM User Guide}.
}
\section{Accepted Parameters}{
\preformatted{list_user_tags(
UserName = "string",
Marker = "string",
MaxItems = 123
)
}
}
\examples{
# The following example shows how to list the tags attached to a user.
\donttest{list_user_tags(
UserName = "anika"
)}
}
|
/service/paws.iam/man/list_user_tags.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 2,010
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iam_operations.R
\name{list_user_tags}
\alias{list_user_tags}
\title{Lists the tags that are attached to the specified user}
\usage{
list_user_tags(UserName, Marker = NULL, MaxItems = NULL)
}
\arguments{
\item{UserName}{[required] The name of the IAM user whose tags you want to see.
This parameter accepts (through its \href{http://wikipedia.org/wiki/regex}{regex pattern}) a string of characters that consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: =,.@-}
\item{Marker}{Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the \code{Marker} element in the response to indicate where the next call should start.}
\item{MaxItems}{(Optional) Use this only when paginating results to indicate the maximum number of items that you want in the response. If additional items exist beyond the maximum that you specify, the \code{IsTruncated} response element is \code{true}.
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when more results are available. In that case, the \code{IsTruncated} response element returns \code{true}, and \code{Marker} contains a value to include in the subsequent call that tells the service where to continue from.}
}
\description{
Lists the tags that are attached to the specified user. The returned list of tags is sorted by tag key. For more information about tagging, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html}{Tagging IAM Identities} in the \emph{IAM User Guide}.
}
\section{Accepted Parameters}{
\preformatted{list_user_tags(
UserName = "string",
Marker = "string",
MaxItems = 123
)
}
}
\examples{
# The following example shows how to list the tags attached to a user.
\donttest{list_user_tags(
UserName = "anika"
)}
}
|
# Loading the required libraries (dplyr supplies the %>% pipe used below)
library(dplyr)
# Reading the data after setting the directory to working directory
x <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# The data required for analysis was selected (1 and 2 Feb 2007 only).
data <- x[x$Date %in% "2/2/2007" | x$Date %in% "1/2/2007",]
# The data was of factor class, it was converted to numeric class in order to perform the analysis.
data[,3:9] <- sapply(data[,3:9], function(f) as.numeric(as.character(f)))
# A new variable 'datetime' was made which stores the date as well as the time as a single value.
data$datetime <- apply(data[,1:2], 1, paste, collapse=" ") %>% strptime(format = "%d/%m/%Y %H:%M:%S")
# The required data was subsetted again after mutating the data frame.
data <- data[,3:10]
# A timeplot of Global_active_power is made to get the required graph.
with(data, plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
# The plot was then saved by copying the on-screen device to a PNG file.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
/plot2.R
|
no_license
|
priyank1574q/Exploratory_project_1
|
R
| false
| false
| 1,089
|
r
|
# Loading the required libraries (dplyr supplies the %>% pipe used below)
library(dplyr)
# Reading the data after setting the directory to working directory
x <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# The data required for analysis was selected (1 and 2 Feb 2007 only).
data <- x[x$Date %in% "2/2/2007" | x$Date %in% "1/2/2007",]
# The data was of factor class, it was converted to numeric class in order to perform the analysis.
data[,3:9] <- sapply(data[,3:9], function(f) as.numeric(as.character(f)))
# A new variable 'datetime' was made which stores the date as well as the time as a single value.
data$datetime <- apply(data[,1:2], 1, paste, collapse=" ") %>% strptime(format = "%d/%m/%Y %H:%M:%S")
# The required data was subsetted again after mutating the data frame.
data <- data[,3:10]
# A timeplot of Global_active_power is made to get the required graph.
with(data, plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
# The plot was then saved by copying the on-screen device to a PNG file.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Construct a cache-aware matrix wrapper: a list of four accessor
## functions that close over the matrix `x` and its lazily cached
## inverse.  Intended for use together with cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replacing the stored matrix invalidates any cached inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix:
## computed with solve() and stored on the first call, served from the
## cache (with a "getting cached data" message) on subsequent calls.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
Testing My Functions
> source("ProgrammingAssignment2/cachematrix.R")
> my_matrix <- makeCacheMatrix(matrix(1:4, 2, 2))
> my_matrix$get()
[,1] [,2]
[1,] 1 3
[2,] 2 4
> my_matrix$getInverse()
NULL
> cacheSolve(my_matrix)
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> cacheSolve(my_matrix)
getting cached data
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> my_matrix$getInverse()
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> my_matrix$set(matrix(c(2, 2, 1, 4), 2, 2))
> my_matrix$get()
[,1] [,2]
[1,] 2 1
[2,] 2 4
> my_matrix$getInverse()
NULL
> cacheSolve(my_matrix)
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
> cacheSolve(my_matrix)
getting cached data
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
> my_matrix$getInverse()
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
|
/cachematrix.R
|
no_license
|
Ahmad-Noor/ProgrammingAssignment2
|
R
| false
| false
| 2,275
|
r
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Construct a cache-aware matrix wrapper: a list of four accessor
## functions that close over the matrix `x` and its lazily cached
## inverse.  Intended for use together with cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replacing the stored matrix invalidates any cached inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix:
## computed with solve() and stored on the first call, served from the
## cache (with a "getting cached data" message) on subsequent calls.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
Testing My Functions
> source("ProgrammingAssignment2/cachematrix.R")
> my_matrix <- makeCacheMatrix(matrix(1:4, 2, 2))
> my_matrix$get()
[,1] [,2]
[1,] 1 3
[2,] 2 4
> my_matrix$getInverse()
NULL
> cacheSolve(my_matrix)
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> cacheSolve(my_matrix)
getting cached data
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> my_matrix$getInverse()
[,1] [,2]
[1,] -2 1.5
[2,] 1 -0.5
> my_matrix$set(matrix(c(2, 2, 1, 4), 2, 2))
> my_matrix$get()
[,1] [,2]
[1,] 2 1
[2,] 2 4
> my_matrix$getInverse()
NULL
> cacheSolve(my_matrix)
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
> cacheSolve(my_matrix)
getting cached data
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
> my_matrix$getInverse()
[,1] [,2]
[1,] 0.6666667 -0.1666667
[2,] -0.3333333 0.3333333
|
## Convert longitudes from the (-180, 180] convention to [0, 360):
## negative values get 360 added, all other values pass through.
wrap360 <- function(lon) {
  neg <- !is.na(lon) & lon < 0
  lon[neg] <- lon[neg] + 360
  lon
}
|
/R/wrap360.R
|
no_license
|
raorben/seabird_tracking_atlas
|
R
| false
| false
| 81
|
r
|
## Convert longitudes from the (-180, 180] convention to [0, 360):
## negative values get 360 added, all other values pass through.
wrap360 <- function(lon) {
  neg <- !is.na(lon) & lon < 0
  lon[neg] <- lon[neg] + 360
  lon
}
|
#1.
#Answer the below questions:
# a. What are the assumptions of ANOVA, test it out?
#Ans.
1.The categories are independent of each other.
2.The response variable is normally distributed.
3.The variances of the response data are identical.
#b. Why ANOVA test? Is there any other way to answer the above question?
A t-test is a special case of ANOVA that can be used when we have only two populations
whose means we want to compare. However, the chance of error increases if multiple t-tests
are used to compare more than two population means concurrently, which is why ANOVA is used instead.
|
/Assignment_12.2.R
|
no_license
|
Hemant-424/Data_Analytics_Assignment_11.2
|
R
| false
| false
| 609
|
r
|
#1.
#Answer the below questions:
# a. What are the assumptions of ANOVA, test it out?
#Ans.
1.The categories are independent of each other.
2.The response variable is normally distributed.
3.The variances of the response data are identical.
#b. Why ANOVA test? Is there any other way to answer the above question?
A t-test is a special case of ANOVA that can be used when we have only two populations
whose means we want to compare. However, the chance of error increases if multiple t-tests
are used to compare more than two population means concurrently, which is why ANOVA is used instead.
|
library(ILS)
### Name: plot.lab.qcdata
### Title: Plot method for 'lab.qcdata' objects
### Aliases: plot.lab.qcdata
### ** Examples
library(ILS)
# load the Glucose example data set shipped with the ILS package
data(Glucose)
# build a 'lab.qcdata' quality-control data object from the raw data
Glucose.qcdata <- lab.qcdata(Glucose)
str(Glucose.qcdata)
# plot() dispatches to plot.lab.qcdata()
plot(Glucose.qcdata)
|
/data/genthat_extracted_code/ILS/examples/plot.lab.qcdata.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 245
|
r
|
library(ILS)
### Name: plot.lab.qcdata
### Title: Plot method for 'lab.qcdata' objects
### Aliases: plot.lab.qcdata
### ** Examples
library(ILS)
# load the Glucose example data set shipped with the ILS package
data(Glucose)
# build a 'lab.qcdata' quality-control data object from the raw data
Glucose.qcdata <- lab.qcdata(Glucose)
str(Glucose.qcdata)
# plot() dispatches to plot.lab.qcdata()
plot(Glucose.qcdata)
|
library(cmsaf)
### Name: read_ncvar
### Title: Read NetCDF variable.
### Aliases: read_ncvar
### ** Examples
## Create an example NetCDF file with a similar structure
## as used by CM SAF. The file is created with the ncdf4 package.
## Alternatively example data can be freely downloaded here:
## <https://wui.cmsaf.eu/>
library(ncdf4)
## create some (non-realistic) example data
lon <- seq(5,15,0.5)
lat <- seq(45,55,0.5)
time <- seq(as.Date('2000-01-01'), as.Date('2010-12-31'), 'month')
origin <- as.Date('1983-01-01 00:00:00')
## convert dates to hours since the CM SAF origin (1983-01-01)
time <- as.numeric(difftime(time,origin,units='hour'))
data <- array(250:350,dim=c(21,21,132))
## create example NetCDF: dimensions lon, lat, and unlimited time
x <- ncdim_def(name='lon',units='degrees_east',vals=lon)
y <- ncdim_def(name='lat',units='degrees_north',vals=lat)
t <- ncdim_def(name='time',units='hours since 1983-01-01 00:00:00',
vals=time,unlim=TRUE)
## variable SIS (short integers, missing value -1)
var1 <- ncvar_def('SIS','W m-2',list(x,y,t),-1,prec='short')
vars <- list(var1)
ncnew <- nc_create('CMSAF_example_file.nc',vars)
ncvar_put(ncnew,var1,data)
ncatt_put(ncnew,'lon','standard_name','longitude',prec='text')
ncatt_put(ncnew,'lat','standard_name','latitude',prec='text')
nc_close(ncnew)
## Load the data of variable 'SIS' of the example file into R.
## To access the data use e.g., my.data$SIS
my.data <- read_ncvar('SIS','CMSAF_example_file.nc')
|
/data/genthat_extracted_code/cmsaf/examples/read_ncvar.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,375
|
r
|
library(cmsaf)
### Name: read_ncvar
### Title: Read NetCDF variable.
### Aliases: read_ncvar
### ** Examples
## Create an example NetCDF file with a similar structure
## as used by CM SAF. The file is created with the ncdf4 package.
## Alternatively example data can be freely downloaded here:
## <https://wui.cmsaf.eu/>
library(ncdf4)
## create some (non-realistic) example data
lon <- seq(5,15,0.5)
lat <- seq(45,55,0.5)
time <- seq(as.Date('2000-01-01'), as.Date('2010-12-31'), 'month')
origin <- as.Date('1983-01-01 00:00:00')
## convert dates to hours since the CM SAF origin (1983-01-01)
time <- as.numeric(difftime(time,origin,units='hour'))
data <- array(250:350,dim=c(21,21,132))
## create example NetCDF: dimensions lon, lat, and unlimited time
x <- ncdim_def(name='lon',units='degrees_east',vals=lon)
y <- ncdim_def(name='lat',units='degrees_north',vals=lat)
t <- ncdim_def(name='time',units='hours since 1983-01-01 00:00:00',
vals=time,unlim=TRUE)
## variable SIS (short integers, missing value -1)
var1 <- ncvar_def('SIS','W m-2',list(x,y,t),-1,prec='short')
vars <- list(var1)
ncnew <- nc_create('CMSAF_example_file.nc',vars)
ncvar_put(ncnew,var1,data)
ncatt_put(ncnew,'lon','standard_name','longitude',prec='text')
ncatt_put(ncnew,'lat','standard_name','latitude',prec='text')
nc_close(ncnew)
## Load the data of variable 'SIS' of the example file into R.
## To access the data use e.g., my.data$SIS
my.data <- read_ncvar('SIS','CMSAF_example_file.nc')
|
##############################
### Load required packages ###
##############################
rm(list=ls())     # NOTE(review): clearing the global workspace inside a script is discouraged
options(warn=-1)  # NOTE(review): globally suppressing warnings can hide real problems
# Install (if absent) and attach every package named in `neededVector`.
#
# Args:
#   neededVector: character vector of package names; may be empty.
# Side effects: installs missing packages from the default repository and
#   attaches each listed package to the search path.  Errors (rather than
#   failing silently, as the previous double-require version did) when a
#   package still cannot be loaded after installation.
installIfAbsentAndLoad <- function(neededVector) {
  for (thispackage in neededVector) {
    if (!require(thispackage, character.only = TRUE)) {
      install.packages(thispackage)
      # library() stops with an error if loading still fails,
      # unlike require(), which only warns and returns FALSE
      library(thispackage, character.only = TRUE)
    }
  }
  invisible(NULL)
}
########packages########
needed <- c("rpart", #this is the recursive partitioning package
"rattle", #the fancyRpartPlot and asRules functions at
#the end of this script are in the rattle package
"ISLR","glmnet")
installIfAbsentAndLoad(needed)
# drop rows with missing Salary and remove the factor columns (League,
# Division, NewLeague by position 14, 15, 20)
Hitters <- na.omit(Hitters)[,-c(14,15,20)]
######################
###partition data into training, validate and test subsets (60/20/20)###
set.seed(527)
n <- nrow(Hitters)
trainrows <- sample(n, 0.6* n)
validaterows <- sample(setdiff(seq_len(n), trainrows), 0.2* n)
testrows <- setdiff(setdiff(seq_len(n), trainrows), validaterows)
train <- Hitters[trainrows,]
validate <- Hitters[validaterows,]  # NOTE(review): validate set is created but never used below
test <- Hitters[testrows,]
##############Generate Tree####
# grow a deliberately overfit tree (cp=0, minsplit=2) to prune later
# NOTE(review): parms=list(split="gini") applies to classification trees;
# it is presumably ignored for method="anova" — confirm in ?rpart
rpart<-rpart(Salary ~ .,data=train, method="anova",
parms=list(split="gini"),
control=rpart.control(usesurrogate=0,
maxsurrogate=0,cp=0, minsplit=2,
minbucket=1))
print(rpart)
printcp(rpart)
summary(rpart)
##############Plots####
plot(rpart)
text(rpart, all=TRUE, use.n=TRUE)
title("Training Set's Regression Tree")
fancyRpartPlot(rpart, main="Fancy Plot")
#rules
asRules(rpart)
##############Pruning####
rpart$cptable
plotcp(rpart)
# choose the cp with the smallest cross-validated error and prune to it
xerr<-rpart$cptable[,"xerror"]
minxerr<-which(xerr==min(xerr))
mincp<-rpart$cptable[minxerr,"CP"]
rpart.prune<-prune(rpart,cp=mincp)
printcp(rpart.prune)
fancyRpartPlot(rpart.prune, main="Pruned Tree")
##############MSE####
# test-set MSE for the unpruned and pruned trees
predTest <- predict(rpart, newdata=test, type="vector")
tree.mse <- mean((test$Salary - predTest)^2)
predTest.p <- predict(rpart.prune, newdata=test, type="vector")
tree.prune.mse <- mean((test$Salary - predTest.p)^2)
#compare with lasso
x <- as.matrix(Hitters)[,-17]
# lambda chosen by cross-validation on the training rows
cv.out <- cv.glmnet(x[trainrows,],train$Salary,alpha = 1)
bestlam <- cv.out$lambda.min
lasso <- glmnet(x[trainrows,],Hitters$Salary[trainrows],alpha = 1)
pred <- predict(lasso,s=bestlam,newx=x[testrows,])
lasso.mse = mean((pred-Hitters$Salary[testrows])^2)
cbind(lasso.mse,tree.prune.mse,tree.mse)
|
/ML/seminar/mycode/RegressionTree.R
|
no_license
|
myytchYY/TYmyytch.github.io
|
R
| false
| false
| 2,542
|
r
|
##############################
### Load required packages ###
##############################
# NOTE(review): this entire block is a verbatim duplicate of the script
# earlier in the file (dataset-extraction artifact). Issues flagged there
# apply here too: rm(list=ls())/options(warn=-1) are risky globals,
# character.only = T should use TRUE, the `rpart` object shadows rpart(),
# and split="gini" is ignored for method="anova".
rm(list=ls())
options(warn=-1)
installIfAbsentAndLoad <- function(neededVector) {
  if(length(neededVector) > 0) {
    for(thispackage in neededVector) {
      if(! require(thispackage, character.only = T)) {
        install.packages(thispackage)}
      require(thispackage, character.only = T)
    }
  }
}
########packages########
needed <- c("rpart", #this is the recursive partitioning package
            "rattle", #the fancyRpartPlot and asRules functions at
            #the end of this script are in the rattle package
            "ISLR","glmnet")
installIfAbsentAndLoad(needed)
Hitters <- na.omit(Hitters)[,-c(14,15,20)]
######################
###partition data into training, validate and test subsets (60/20/20)###
set.seed(527)
n <- nrow(Hitters)
trainrows <- sample(n, 0.6* n)
validaterows <- sample(setdiff(seq_len(n), trainrows), 0.2* n)
testrows <- setdiff(setdiff(seq_len(n), trainrows), validaterows)
train <- Hitters[trainrows,]
validate <- Hitters[validaterows,]
test <- Hitters[testrows,]
##############Generate Tree####
rpart<-rpart(Salary ~ .,data=train, method="anova",
             parms=list(split="gini"),
             control=rpart.control(usesurrogate=0,
                                   maxsurrogate=0,cp=0, minsplit=2,
                                   minbucket=1))
print(rpart)
printcp(rpart)
summary(rpart)
##############Plots####
plot(rpart)
text(rpart, all=TRUE, use.n=TRUE)
title("Training Set's Regression Tree")
fancyRpartPlot(rpart, main="Fancy Plot")
#rules
asRules(rpart)
##############Pruning####
rpart$cptable
plotcp(rpart)
xerr<-rpart$cptable[,"xerror"]
minxerr<-which(xerr==min(xerr))
mincp<-rpart$cptable[minxerr,"CP"]
rpart.prune<-prune(rpart,cp=mincp)
printcp(rpart.prune)
fancyRpartPlot(rpart.prune, main="Pruned Tree")
##############MSE####
predTest <- predict(rpart, newdata=test, type="vector")
tree.mse <- mean((test$Salary - predTest)^2)
predTest.p <- predict(rpart.prune, newdata=test, type="vector")
tree.prune.mse <- mean((test$Salary - predTest.p)^2)
#compare with lasso
x <- as.matrix(Hitters)[,-17]
cv.out <- cv.glmnet(x[trainrows,],train$Salary,alpha = 1)
bestlam <- cv.out$lambda.min
lasso <- glmnet(x[trainrows,],Hitters$Salary[trainrows],alpha = 1)
pred <- predict(lasso,s=bestlam,newx=x[testrows,])
lasso.mse = mean((pred-Hitters$Salary[testrows])^2)
cbind(lasso.mse,tree.prune.mse,tree.mse)
|
fa22d2ea28c349ace929ac0596034c8f biu.mv.xl_ao.bb-b003-p020-IPF01-c05.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-002.qdimacs 2866 3728
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF01-c05.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-002/biu.mv.xl_ao.bb-b003-p020-IPF01-c05.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-002.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 167
|
r
|
fa22d2ea28c349ace929ac0596034c8f biu.mv.xl_ao.bb-b003-p020-IPF01-c05.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-002.qdimacs 2866 3728
|
###### 1. FP & PE: two tip-level isolation scores in one pass
# Compute, from an edge-by-tip clade matrix:
#   1. Fair Proportion (FP): each edge's length divided equally among the
#      tips it subtends, summed per tip.
#   2. Pendant Edge (PE): the length of each tip's own terminal edge
#      (assumed to occupy the first ncol(clade.matrix) entries of
#      edge.length, matching readCAIC's output ordering).
# Returns a list of two one-column matrices (FP then PE), both with
# rownames set to tip.label.
FP_PE <- function(clade.matrix, edge.length, tip.label) {
  ## --- Fair Proportion ---
  # Number of tips under each edge, spread across that edge's row.
  tips.per.edge <- rowSums(clade.matrix) * clade.matrix
  # Each edge's length, spread across the tips it covers.
  spread.length <- edge.length * clade.matrix
  share <- spread.length / tips.per.edge
  share[is.na(share)] <- 0  # 0/0 cells: the edge does not cover that tip
  fp <- as.matrix(colSums(share))
  rownames(fp) <- tip.label

  ## --- Pendant Edge ---
  pe <- as.matrix(edge.length[1:dim(clade.matrix)[2]])
  rownames(pe) <- tip.label

  list(fp, pe)
}
####### 2. read CAIC fast scipt - stores a tree in useful format
# Parse a single Newick string into a clade-matrix representation:
#   [[1]] edge-by-tip incidence matrix M (2*ntips-1 rows x ntips cols)
#   [[2]] numeric vector of edge lengths, one per row of M
#   [[3]] character vector of tip labels
# The parser walks the parenthesis structure left to right, matching each
# ")" with its nearest unmatched "(" to recover which tips every internal
# edge subtends. Statement order is load-bearing throughout -- do not
# reorder.
readCAIC<-function(file) {
tree <- file
# Split on structural characters; keep only tokens carrying a ":<length>".
tpc <- unlist(strsplit(tree, "[\\(\\),;]"))
tpc=tpc[grep(":",tpc)]
# find the tip and edge labels
# Tip tokens look like "name:len"; internal-edge tokens are bare ":len".
tiplabels=tpc[grep(".:",tpc)]
edgelabels=tpc[-grep(".:",tpc)]
# Append a zero-length entry for the root edge.
edgelabels=c(edgelabels,":0")
# locate the clusters and edges
tree <- unlist(strsplit(tree, NULL))
x=which(tree=="(")
y=which(tree==")")
v=which(tree==":")
#these are the locations of the tips
# A ":" immediately after ")" belongs to an internal edge, not a tip.
w=setdiff(v,y+1)
##Pass through string from left to right locating the paired parenthesis, thus
## allowing for easy assigment of edge to subtending tips
#initialize objects (M is the actual clade matrix, while E is vector with associated edge weights)
j=2
k=length(w)+1
M=matrix(0,length(w)*2-1,length(w))
E=as.vector(matrix(0,1,length(w)*2-1))
# Sentinel "(" position past the last ")" so the root pairs up too.
x=c(x,y[length(y)]+1)
# Main Pass
# Invariant: y[1] is the leftmost unmatched ")"; x[j] is its matching "(".
while (length(x)>1)
{
if (x[j+1]<y[1])
{j=j+1
} else {
# Mark every tip position strictly between the matched parentheses.
M[k,which(x[j]<w & w<y[1])]=1
E[k]=strsplit(edgelabels[k-length(w)],"[:]")[[1]][2]
k=k+1
y=y[-1]
x=x[-j]
j=j-1}
}
# Assign branch lengths and finished tip names to the tips
for (i in 1:length(w))
{M[i,i]=1
tmp=strsplit(tiplabels[i],"[:]")[[1]]
E[i]=tmp[2]
tiplabels[i]=tmp[1]}
# E is character until here; the final as.numeric() converts it. The
# assignment's value is returned (invisibly assigned, visibly returned).
M=list(M,as.numeric(E),tiplabels)
}
##############################################################
# Read a file of Newick trees (one per line, ";"-terminated), compute the
# Fair Proportion and Pendant Edge scores per tip for every tree via
# readCAIC()/FP_PE(), write both score matrices to disk, and return them.
#
# Args:
#   filepath1: input text file of Newick strings.
#   filepath2: output path for the FP score matrix (comma-separated,
#     row names, no header).
#   filepath3: output path for the PE score matrix.
#     NOTE(review): the default "out_FP.txt" collides with filepath2 and
#     was probably meant to be "out_PE.txt"; changing the default would
#     alter behaviour for existing callers, so it is only flagged here.
#   as.phylo: appears unused in this function -- confirm it can be dropped.
# Returns: list(Fair_Proportion = FP matrix, Equal_Split = PE matrix),
#   tips in alphabetical row order, one column per tree.
compute_fp_ep <- function(filepath1, filepath2 = "out_FP.txt", filepath3 = "out_FP.txt", as.phylo = TRUE){
  # NOTE(review): caper is attached but not obviously used below -- verify.
  library("caper")
  #trees are stored as a .txt set of newick strings, one tree per line, line end with ";"
  AllTrees=scan(filepath1, what="",sep="\n",quiet=TRUE,skip=0,comment.char="#")
  AllTrees=unlist(strsplit(AllTrees,"[;]"))
  ##get number of tips
  # Assumes every tree has the same tip set as the first one.
  tpc <- unlist(strsplit(AllTrees[1], "[\\(\\),;]"))
  tpc=tpc[grep(":",tpc)]
  tiplabels=tpc[grep(".:",tpc)]
  nn=length(tiplabels)
  # pre-allocate score matrices
  SM_FP=matrix(0,nn,length(AllTrees))
  SM_PE=matrix(0,nn,length(AllTrees))
  ###loop through all the trees
  for (ii in 1:length(AllTrees))
  {
    p=AllTrees[ii]
    B=readCAIC(p)
    #Actually compute the scores
    IS=FP_PE(B[[1]],B[[2]],B[[3]])
    FP=IS[[1]]
    PE=IS[[2]]
    # sort them properly and fill the two matrices
    # Sorting by tip name makes columns comparable across trees whose
    # Newick strings list tips in different orders.
    fpp=as.matrix(FP[order(rownames(FP)),])
    SM_FP[,ii]=fpp
    pe=as.matrix(PE[order(rownames(PE)),])
    SM_PE[,ii]=pe
  }
  ###end of loop
  ### write the data
  # fpp/pe from the last loop iteration supply the (sorted) row names.
  names=rownames(fpp)
  rownames(SM_FP)=names
  write.table(SM_FP,filepath2,col.names=FALSE,row.names=TRUE,quote=FALSE, sep = ",")
  names=rownames(pe)
  rownames(SM_PE)=names
  write.table(SM_PE,filepath3,col.names=FALSE,row.names=TRUE,quote=FALSE, sep = ",")
  return(list(Fair_Proportion = SM_FP, Equal_Split = SM_PE))
}
|
/Code/R/ed_arne_functions.R
|
no_license
|
gvdr/2013_Priebe_Strain
|
R
| false
| false
| 3,185
|
r
|
###### 1. FP & PE script combined for even more speed
# NOTE(review): this block (FP_PE, readCAIC, compute_fp_ep) is a verbatim
# duplicate of the trio earlier in the file (dataset-extraction artifact).
# Issues flagged there apply here too, in particular compute_fp_ep's
# filepath3 default "out_FP.txt" colliding with filepath2.
FP_PE<- function (clade.matrix,edge.length,tip.label) {
# FP part
cset=rowSums(clade.matrix)*clade.matrix
lambda=edge.length*clade.matrix
tmp=lambda/cset
rm(lambda)
rm(cset)
tmp[is.na(tmp)]=0
FPP=as.matrix(colSums(tmp))
rownames(FPP)=tip.label
#PE part
PE=as.matrix(edge.length[1:dim(clade.matrix)[2]])
rownames(PE)=tip.label
IS=vector("list",2) #store two sets of isolation scores
IS[[1]]=FPP
IS[[2]]=PE
IS
}
####### 2. read CAIC fast scipt - stores a tree in useful format
readCAIC<-function(file) {
tree <- file
tpc <- unlist(strsplit(tree, "[\\(\\),;]"))
tpc=tpc[grep(":",tpc)]
# find the tip and edge labels
tiplabels=tpc[grep(".:",tpc)]
edgelabels=tpc[-grep(".:",tpc)]
edgelabels=c(edgelabels,":0")
# locate the clusters and edges
tree <- unlist(strsplit(tree, NULL))
x=which(tree=="(")
y=which(tree==")")
v=which(tree==":")
#these are the locations of the tips
w=setdiff(v,y+1)
##Pass through string from left to right locating the paired parenthesis, thus
## allowing for easy assigment of edge to subtending tips
#initialize objects (M is the actual clade matrix, while E is vector with associated edge weights)
j=2
k=length(w)+1
M=matrix(0,length(w)*2-1,length(w))
E=as.vector(matrix(0,1,length(w)*2-1))
x=c(x,y[length(y)]+1)
# Main Pass
while (length(x)>1)
{
if (x[j+1]<y[1])
{j=j+1
} else {
M[k,which(x[j]<w & w<y[1])]=1
E[k]=strsplit(edgelabels[k-length(w)],"[:]")[[1]][2]
k=k+1
y=y[-1]
x=x[-j]
j=j-1}
}
# Assign branch lengths and finished tip names to the tips
for (i in 1:length(w))
{M[i,i]=1
tmp=strsplit(tiplabels[i],"[:]")[[1]]
E[i]=tmp[2]
tiplabels[i]=tmp[1]}
M=list(M,as.numeric(E),tiplabels)
}
##############################################################
compute_fp_ep <- function(filepath1, filepath2 = "out_FP.txt", filepath3 = "out_FP.txt", as.phylo = TRUE){
  library("caper")
  #trees are stored as a .txt set of newick strings, one tree per line, line end with ";"
  AllTrees=scan(filepath1, what="",sep="\n",quiet=TRUE,skip=0,comment.char="#")
  AllTrees=unlist(strsplit(AllTrees,"[;]"))
  ##get number of tips
  tpc <- unlist(strsplit(AllTrees[1], "[\\(\\),;]"))
  tpc=tpc[grep(":",tpc)]
  tiplabels=tpc[grep(".:",tpc)]
  nn=length(tiplabels)
  # pre-allocate score matrices
  SM_FP=matrix(0,nn,length(AllTrees))
  SM_PE=matrix(0,nn,length(AllTrees))
  ###loop through all the trees
  for (ii in 1:length(AllTrees))
  {
    p=AllTrees[ii]
    B=readCAIC(p)
    #Actually compute the scores
    IS=FP_PE(B[[1]],B[[2]],B[[3]])
    FP=IS[[1]]
    PE=IS[[2]]
    # sort them properly and fill the two matrices
    fpp=as.matrix(FP[order(rownames(FP)),])
    SM_FP[,ii]=fpp
    pe=as.matrix(PE[order(rownames(PE)),])
    SM_PE[,ii]=pe
  }
  ###end of loop
  ### write the data
  names=rownames(fpp)
  rownames(SM_FP)=names
  write.table(SM_FP,filepath2,col.names=FALSE,row.names=TRUE,quote=FALSE, sep = ",")
  names=rownames(pe)
  rownames(SM_PE)=names
  write.table(SM_PE,filepath3,col.names=FALSE,row.names=TRUE,quote=FALSE, sep = ",")
  return(list(Fair_Proportion = SM_FP, Equal_Split = SM_PE))
}
|
## module load conda_R/3.6.x # devel
## ----Libraries ------------------
library(tidyverse)
library(ggplot2)
library(Matrix)
library(Rmisc)
library(ggforce)
library(rjson)
library(cowplot)
library(RColorBrewer)
library(grid)
library(readbitmap)
library(Seurat)
library(SummarizedExperiment)
library(rtracklayer)
## Function for plotting
## geom_spatial: a custom ggplot2 layer that draws a pre-rendered grid grob
## (here, the low-res tissue image) behind the spot data.
## Required aesthetics: grob (a grid grob), x, y (viewport centre, npc units).
## NOTE(review): uses the unexported ggplot2:::ggname(), so this can break
## across ggplot2 versions -- pin/verify the ggplot2 version in use.
geom_spatial <- function(mapping = NULL,
                         data = NULL,
                         stat = "identity",
                         position = "identity",
                         na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = FALSE,
                         ...) {
  # Ad-hoc Geom subclass: delegates setup to the parent Geom and draws the
  # supplied grob inside a viewport centred at (x, y).
  GeomCustom <- ggproto(
    "GeomCustom",
    Geom,
    setup_data = function(self, data, params) {
      data <- ggproto_parent(Geom, self)$setup_data(data, params)
      data
    },
    draw_group = function(data, panel_scales, coord) {
      vp <- grid::viewport(x=data$x, y=data$y)
      g <- grid::editGrob(data$grob[[1]], vp=vp)
      ggplot2:::ggname("geom_spatial", g)
    },
    required_aes = c("grob","x","y")
  )
  layer(
    geom = GeomCustom,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
## get sample names
sample_names <- read.delim("lenas.txt", as.is=TRUE, header=FALSE)$V1
sample_names
## 10x output path
path = "/dcs04/lieber/lcolladotor/with10x_LIBD001/HumanPilot/10X/"
## output
# One per-sample path for each SpaceRanger artefact used below.
image_paths <- paste0(path, sample_names, "/tissue_lowres_image.png")
scalefactor_paths <- paste0(path, sample_names, "/scalefactors_json.json")
tissue_paths <- paste0(path, sample_names, "/tissue_positions_list.txt")
cluster_paths <- paste0(path, sample_names, "/", sample_names, "_analysis__clustering_graphclust_clusters.csv")
matrix_paths <- paste0(path, sample_names, "/", sample_names, "_filtered_feature_bc_matrix.h5")
all(file.exists(c(image_paths, scalefactor_paths, tissue_paths, cluster_paths, matrix_paths)))
# TRUE
## get annotation
# Feature table from one sample is reused for all (assumes identical
# feature ordering across samples -- standard for a single reference).
map = read.delim("../10X/151675/151675_raw_feature_bc_matrix__features.tsv.gz",
    as.is=TRUE, header=FALSE,col.names=c("EnsemblID", "Symbol", "Type"))
## get GTF, this seems like what they used
gtf = import("/dcl01/ajaffe/data/lab/singleCell/refdata-cellranger-GRCh38-3.0.0/genes/genes.gtf")
gtf = gtf[gtf$type == "gene"]
names(gtf) = gtf$gene_id
gtf = gtf[map$EnsemblID]
# NOTE(review): prefixing only the first 25 seqlevels with "chr" assumes a
# fixed seqlevel ordering in the GTF -- confirm against the reference used.
seqlevels(gtf)[1:25] = paste0("chr", seqlevels(gtf)[1:25])
mcols(gtf) = mcols(gtf)[,c(5:9)]
## ------------------------------------------------------------------------
images_cl <- lapply(image_paths, read.bitmap)
dims = t(sapply(images_cl, dim))
colnames(dims) = c("height", "width", "channel")
dims = as.data.frame(dims)
## ------------------------------------------------------------------------
grobs <- lapply(images_cl, rasterGrob, width=unit(1,"npc"), height=unit(1,"npc"))
images_tibble <- tibble(sample=sample_names, grob=grobs)
images_tibble$height = dims$height
images_tibble$width = dims$width
images_tibble
## ------------------------------------------------------------------------
scales <- lapply(scalefactor_paths, function(x) fromJSON(file=x))
## ------------------------------------------------------------------------
clusters = lapply(cluster_paths, read.csv)
head(clusters[[1]])
## ------------------------------------------------------------------------
# Per-sample barcode/spot metadata joined with graph-based cluster labels.
bcs <- list()
for (i in 1:length(sample_names)) {
   bcs[[i]] <- read.csv(tissue_paths[i],col.names=c("barcode","tissue","row","col","imagerow","imagecol"), header = FALSE)
   bcs[[i]]$sample_name <- sample_names[i]
   bcs[[i]]$imagerow <- bcs[[i]]$imagerow * scales[[i]]$tissue_lowres_scalef # scale tissue coordinates for lowres image
   bcs[[i]]$imagecol <- bcs[[i]]$imagecol * scales[[i]]$tissue_lowres_scalef
   bcs[[i]]$tissue <- as.factor(bcs[[i]]$tissue)
   bcs[[i]] <- merge(bcs[[i]], clusters[[i]], by.x = "barcode", by.y = "Barcode", all = TRUE)
   bcs[[i]]$height <- images_tibble$height[i]
   bcs[[i]]$width <- images_tibble$width[i]
}
names(bcs) <- sample_names
head(bcs[[1]])
## ------------------------------------------------------------------------
## keep in regular genomics formatting
umiList <- lapply(matrix_paths, Read10X_h5)
names(umiList) = sample_names
sapply(umiList, dim)
# Build one SummarizedExperiment per sample: UMI matrix + gene ranges +
# spot metadata, with per-spot UMI and gene totals.
rseList = mapply(function(u, bc) {
	rownames(bc) = bc$barcode
	bc = bc[colnames(u),c(1,7,2:6,8:ncol(bc))]
	rse = SummarizedExperiment(
		assays = list('umis' = u),
		rowRanges = gtf, colData = bc)
	rse$sum_umi = colSums(u)
	rse$sum_gene = colSums(u > 0)
	return(rse)
}, umiList, bcs)
## add images
for(i in seq(along=rseList)) {
	metadata(rseList[[i]])$image = images_tibble[i,]
}
## save out
save(rseList, geom_spatial,
	file = "Human_DLPFC_Visium_processedData_rseList.rda")
## ------------------------------------------------------------------------
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
## ---- Just tissue per sample ------------------------------------
# Tissue image only. NOTE(review): fill=sum_umi is mapped but no point
# layer is drawn here, so the fill aesthetic is unused; also theme_set()
# inside a plot chain sets the session theme as a side effect -- probably
# theme_bw() alone was intended.
plots_tissue = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_umi)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_tissue.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_tissue))
dev.off()
#### by sample
pdf("tissue_by_sample.pdf")
for(i in seq(along=plots_tissue)) {
	print(plots_tissue[[i]])
}
dev.off()
## ---- Just tissue with blank spots per sample ------------------------------------
# Tissue image with unfilled spot outlines overlaid.
plots_tissueSpots = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_tissue_spotted.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_tissueSpots))
dev.off()
#### by sample
pdf("tissueSpotted_by_sample.pdf")
for(i in seq(along=plots_tissueSpots)) {
	print(plots_tissueSpots[[i]])
}
dev.off()
## ---- UMIs per sample ------------------------------------
# Spots coloured by total UMI count per spot.
plots_umis = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_umi)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Total UMI")+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_umi.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_umis))
dev.off()
#### by sample
pdf("umi_by_sample.pdf")
for(i in seq(along=plots_umis)) {
	print(plots_umis[[i]])
}
dev.off()
## ---- gene counts by sample ----------------------------------
# Spots coloured by number of detected genes per spot.
plots_genes = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_gene)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Total Gene")+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_gene.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_genes))
dev.off()
#### by sample
pdf("geneCount_by_sample.pdf")
for(i in seq(along=plots_genes)) {
	print(plots_genes[[i]])
}
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
# Spots coloured by graph-based cluster assignment (12-colour manual scale).
plots_clusters = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold",
			"#a65628", "#999999", "black", "grey", "white", "purple"))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Cluster")+
		guides(fill = guide_legend(override.aes = list(size=3)))+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_cluster.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_clusters))
dev.off()
#### by sample
pdf("cluster_by_sample.pdf")
for(i in seq(along=plots_clusters)) {
	print(plots_clusters[[i]])
}
dev.off()
#############################
## Layer and other markers
# Marker-gene table plus hand-picked cell-type markers; symbols are mapped
# to Ensembl IDs via an archived Ensembl mart (Feb 2014).
geneTab = read.csv("../mouse_layer_marker_info_cleaned.csv",as.is=TRUE)
library(biomaRt)
ensembl = useMart("ENSEMBL_MART_ENSEMBL",
	dataset="hsapiens_gene_ensembl", host="feb2014.archive.ensembl.org")
sym = getBM(attributes = c("ensembl_gene_id","hgnc_symbol","entrezgene"),
	mart=ensembl)
geneTab$hgnc_symbol = sym$hgnc_symbol[match(geneTab$HumanEnsID, sym$ensembl_gene_id)]
# Manual patch for a symbol missing from the archived mart.
geneTab$hgnc_symbol[geneTab$HumanEnsID == "ENSG00000275700"] = "AATF"
symbol = c("SLC17A7", "BDNF", "MBP", "MOBP", "GFAP", "MOG", "SNAP25", "GAD2", "CAMK2A",
	"AQP4", "CD74", "FOXP2", "PDGFRA", "DLG4", geneTab$hgnc_symbol)
type = c("Excit", "Interest", "OLIGO", "OLIGO", "ASTRO", "OLIGO", "Neuron", "Inhib",
	"Neuron", "MICRO", "ASTRO", "NEURON", "OPC", "PSD95",paste("Layer", geneTab$Label))
dir.create("pdfs_grid")
dir.create("pdfs_single")
map = rowData(rseList[[1]])
ens = map$gene_id[match(symbol, map$gene_name)]
# One grid PDF and one per-sample PDF per marker gene, coloured by UMI.
for(j in seq(along=symbol)) {
# for(j in 1:2) {
	cat(".")
	g = symbol[j]
	label = type[j]
	e = ens[j]
	plots = lapply(rseList, function(rse) {
		d = as.data.frame(colData(rse))
		d$UMI = assays(rse)$umis[e,]
		ggplot(d, aes(x=imagecol,y=imagerow,fill=UMI)) +
			geom_spatial(data=metadata(rse)$image,
				aes(grob=grob), x=0.5, y=0.5) +
			geom_point(shape = 21, size = 1.25, stroke = 0.25)+
			coord_cartesian(expand=FALSE)+
			scale_fill_gradientn(colours = myPalette(100))+
			xlim(0,max(rse$width)) +
			ylim(max(rse$height),0) +
			xlab("") + ylab("") +
			labs(fill = "UMI")+
			ggtitle(paste(unique(rse$sample_name), g, label, sep=" - ")) +
			theme_set(theme_bw(base_size = 10))+
			theme(panel.grid.major = element_blank(),
				panel.grid.minor = element_blank(),
				panel.background = element_blank(),
				axis.line = element_line(colour = "black"),
				axis.text = element_blank(),
				axis.ticks = element_blank())
	})
	pdf(paste0("pdfs_grid/", g,"_",
		gsub(" ", "", gsub("/","-", label)), ".pdf"),height=24, width=36)
	print(plot_grid(plotlist = plots))
	dev.off()
	#### by sample
	pdf(paste0("pdfs_single/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_bySample.pdf"))
	for(i in seq(along=plots)) {
		print(plots[[i]])
	}
	dev.off()
}
## gene by cluster
# Spots expressing the marker (UMI > 3), coloured by cluster.
# NOTE(review): labs(fill = "UMI") mislabels this legend -- the fill is
# factor(Cluster), so "Cluster" was probably intended. Loop is limited to
# the first two genes (the full-range loop is commented out).
# for(j in seq(along=symbol)) {
for(j in 1:2) {
	cat(".")
	g = symbol[j]
	label = type[j]
	e = ens[j]
	plots = lapply(rseList, function(rse) {
		d = as.data.frame(colData(rse))
		d$UMI = assays(rse)$umis[e,]
		d = d[d$UMI > 3,]
		ggplot(d, aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
			geom_spatial(data=metadata(rse)$image,
				aes(grob=grob), x=0.5, y=0.5) +
			geom_point(shape = 21, size = 1.25, stroke = 0.25)+
			coord_cartesian(expand=FALSE)+
			scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold",
				"#a65628", "#999999", "black", "grey", "white", "purple"))+
			xlim(0,max(rse$width)) +
			ylim(max(rse$height),0) +
			xlab("") + ylab("") +
			labs(fill = "UMI")+
			ggtitle(paste(unique(rse$sample_name), g, label, sep=" - ")) +
			theme_set(theme_bw(base_size = 10))+
			theme(panel.grid.major = element_blank(),
				panel.grid.minor = element_blank(),
				panel.background = element_blank(),
				axis.line = element_line(colour = "black"),
				axis.text = element_blank(),
				axis.ticks = element_blank())
	})
	pdf(paste0("pdfs_grid/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_sparse.pdf"),height=24, width=36)
	print(plot_grid(plotlist = plots))
	dev.off()
	#### by sample
	pdf(paste0("pdfs_single/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_bySample_sparse.pdf"))
	for(i in seq(along=plots)) {
		print(plots[[i]])
	}
	dev.off()
}
|
/Analysis/Layer_Notebook.R
|
no_license
|
LieberInstitute/HumanPilot
|
R
| false
| false
| 14,067
|
r
|
## module load conda_R/3.6.x # devel
## ----Libraries ------------------
library(tidyverse)
library(ggplot2)
library(Matrix)
library(Rmisc)
library(ggforce)
library(rjson)
library(cowplot)
library(RColorBrewer)
library(grid)
library(readbitmap)
library(Seurat)
library(SummarizedExperiment)
library(rtracklayer)
## Function for plotting
# NOTE(review): verbatim duplicate of the geom_spatial defined earlier in
# this file (dataset-extraction artifact). It draws a pre-rendered grid
# grob behind spot data and relies on the unexported ggplot2:::ggname().
geom_spatial <- function(mapping = NULL,
                         data = NULL,
                         stat = "identity",
                         position = "identity",
                         na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = FALSE,
                         ...) {
  GeomCustom <- ggproto(
    "GeomCustom",
    Geom,
    setup_data = function(self, data, params) {
      data <- ggproto_parent(Geom, self)$setup_data(data, params)
      data
    },
    draw_group = function(data, panel_scales, coord) {
      vp <- grid::viewport(x=data$x, y=data$y)
      g <- grid::editGrob(data$grob[[1]], vp=vp)
      ggplot2:::ggname("geom_spatial", g)
    },
    required_aes = c("grob","x","y")
  )
  layer(
    geom = GeomCustom,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
## get sample names
# NOTE(review): this block duplicates the Visium data-assembly section
# earlier in the file verbatim (dataset-extraction artifact).
sample_names <- read.delim("lenas.txt", as.is=TRUE, header=FALSE)$V1
sample_names
## 10x output path
path = "/dcs04/lieber/lcolladotor/with10x_LIBD001/HumanPilot/10X/"
## output
image_paths <- paste0(path, sample_names, "/tissue_lowres_image.png")
scalefactor_paths <- paste0(path, sample_names, "/scalefactors_json.json")
tissue_paths <- paste0(path, sample_names, "/tissue_positions_list.txt")
cluster_paths <- paste0(path, sample_names, "/", sample_names, "_analysis__clustering_graphclust_clusters.csv")
matrix_paths <- paste0(path, sample_names, "/", sample_names, "_filtered_feature_bc_matrix.h5")
all(file.exists(c(image_paths, scalefactor_paths, tissue_paths, cluster_paths, matrix_paths)))
# TRUE
## get annotation
map = read.delim("../10X/151675/151675_raw_feature_bc_matrix__features.tsv.gz",
    as.is=TRUE, header=FALSE,col.names=c("EnsemblID", "Symbol", "Type"))
## get GTF, this seems like what they used
gtf = import("/dcl01/ajaffe/data/lab/singleCell/refdata-cellranger-GRCh38-3.0.0/genes/genes.gtf")
gtf = gtf[gtf$type == "gene"]
names(gtf) = gtf$gene_id
gtf = gtf[map$EnsemblID]
seqlevels(gtf)[1:25] = paste0("chr", seqlevels(gtf)[1:25])
mcols(gtf) = mcols(gtf)[,c(5:9)]
## ------------------------------------------------------------------------
images_cl <- lapply(image_paths, read.bitmap)
dims = t(sapply(images_cl, dim))
colnames(dims) = c("height", "width", "channel")
dims = as.data.frame(dims)
## ------------------------------------------------------------------------
grobs <- lapply(images_cl, rasterGrob, width=unit(1,"npc"), height=unit(1,"npc"))
images_tibble <- tibble(sample=sample_names, grob=grobs)
images_tibble$height = dims$height
images_tibble$width = dims$width
images_tibble
## ------------------------------------------------------------------------
scales <- lapply(scalefactor_paths, function(x) fromJSON(file=x))
## ------------------------------------------------------------------------
clusters = lapply(cluster_paths, read.csv)
head(clusters[[1]])
## ------------------------------------------------------------------------
bcs <- list()
for (i in 1:length(sample_names)) {
   bcs[[i]] <- read.csv(tissue_paths[i],col.names=c("barcode","tissue","row","col","imagerow","imagecol"), header = FALSE)
   bcs[[i]]$sample_name <- sample_names[i]
   bcs[[i]]$imagerow <- bcs[[i]]$imagerow * scales[[i]]$tissue_lowres_scalef # scale tissue coordinates for lowres image
   bcs[[i]]$imagecol <- bcs[[i]]$imagecol * scales[[i]]$tissue_lowres_scalef
   bcs[[i]]$tissue <- as.factor(bcs[[i]]$tissue)
   bcs[[i]] <- merge(bcs[[i]], clusters[[i]], by.x = "barcode", by.y = "Barcode", all = TRUE)
   bcs[[i]]$height <- images_tibble$height[i]
   bcs[[i]]$width <- images_tibble$width[i]
}
names(bcs) <- sample_names
head(bcs[[1]])
## ------------------------------------------------------------------------
## keep in regular genomics formatting
umiList <- lapply(matrix_paths, Read10X_h5)
names(umiList) = sample_names
sapply(umiList, dim)
rseList = mapply(function(u, bc) {
	rownames(bc) = bc$barcode
	bc = bc[colnames(u),c(1,7,2:6,8:ncol(bc))]
	rse = SummarizedExperiment(
		assays = list('umis' = u),
		rowRanges = gtf, colData = bc)
	rse$sum_umi = colSums(u)
	rse$sum_gene = colSums(u > 0)
	return(rse)
}, umiList, bcs)
## add images
for(i in seq(along=rseList)) {
	metadata(rseList[[i]])$image = images_tibble[i,]
}
## save out
save(rseList, geom_spatial,
	file = "Human_DLPFC_Visium_processedData_rseList.rda")
## ------------------------------------------------------------------------
# NOTE(review): this block duplicates the QC-plot sections earlier in the
# file verbatim (dataset-extraction artifact); the notes there (unused
# fill aesthetic in plots_tissue, theme_set() side effect) apply here too.
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
## ---- Just tissue per sample ------------------------------------
plots_tissue = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_umi)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_tissue.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_tissue))
dev.off()
#### by sample
pdf("tissue_by_sample.pdf")
for(i in seq(along=plots_tissue)) {
	print(plots_tissue[[i]])
}
dev.off()
## ---- Just tissue with blank spots per sample ------------------------------------
plots_tissueSpots = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_tissue_spotted.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_tissueSpots))
dev.off()
#### by sample
pdf("tissueSpotted_by_sample.pdf")
for(i in seq(along=plots_tissueSpots)) {
	print(plots_tissueSpots[[i]])
}
dev.off()
## ---- UMIs per sample ------------------------------------
plots_umis = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_umi)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Total UMI")+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_umi.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_umis))
dev.off()
#### by sample
pdf("umi_by_sample.pdf")
for(i in seq(along=plots_umis)) {
	print(plots_umis[[i]])
}
dev.off()
## ---- gene counts by sample ----------------------------------
plots_genes = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=sum_gene)) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_gradientn(colours = myPalette(100))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Total Gene")+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_gene.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_genes))
dev.off()
#### by sample
pdf("geneCount_by_sample.pdf")
for(i in seq(along=plots_genes)) {
	print(plots_genes[[i]])
}
dev.off()
## ---- fig.width = 16, fig.height = 8-------------------------------------
plots_clusters = lapply(rseList, function(rse) {
	d = as.data.frame(colData(rse))
	ggplot(d, aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
		geom_spatial(data=metadata(rse)$image,
			aes(grob=grob), x=0.5, y=0.5) +
		geom_point(shape = 21, size = 1.25, stroke = 0.25)+
		coord_cartesian(expand=FALSE)+
		scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold",
			"#a65628", "#999999", "black", "grey", "white", "purple"))+
		xlim(0,max(rse$width)) +
		ylim(max(rse$height),0) +
		xlab("") + ylab("") +
		labs(fill = "Cluster")+
		guides(fill = guide_legend(override.aes = list(size=3)))+
		ggtitle(unique(rse$sample_name)) +
		theme_set(theme_bw(base_size = 10))+
		theme(panel.grid.major = element_blank(),
			panel.grid.minor = element_blank(),
			panel.background = element_blank(),
			axis.line = element_line(colour = "black"),
			axis.text = element_blank(),
			axis.ticks = element_blank())
})
pdf("example_cluster.pdf",height=24, width=36)
print(plot_grid(plotlist = plots_clusters))
dev.off()
#### by sample
pdf("cluster_by_sample.pdf")
for(i in seq(along=plots_clusters)) {
	print(plots_clusters[[i]])
}
dev.off()
#############################
## Layer and other markers
## Read the curated mouse layer-marker table and attach human gene symbols
## via a biomaRt query against the archived (Feb 2014) Ensembl release.
geneTab <- read.csv("../mouse_layer_marker_info_cleaned.csv", as.is = TRUE)
library(biomaRt)
ensembl <- useMart("ENSEMBL_MART_ENSEMBL",
                   dataset = "hsapiens_gene_ensembl", host = "feb2014.archive.ensembl.org")
sym <- getBM(attributes = c("ensembl_gene_id", "hgnc_symbol", "entrezgene"),
             mart = ensembl)
geneTab$hgnc_symbol <- sym$hgnc_symbol[match(geneTab$HumanEnsID, sym$ensembl_gene_id)]
## Manually set the symbol for one Ensembl ID.
geneTab$hgnc_symbol[geneTab$HumanEnsID == "ENSG00000275700"] <- "AATF"
## Genes of interest: a panel of cell-type markers plus the layer markers,
## with a parallel vector of labels for each gene.
symbol <- c("SLC17A7", "BDNF", "MBP", "MOBP", "GFAP", "MOG", "SNAP25", "GAD2", "CAMK2A",
            "AQP4", "CD74", "FOXP2", "PDGFRA", "DLG4", geneTab$hgnc_symbol)
type <- c("Excit", "Interest", "OLIGO", "OLIGO", "ASTRO", "OLIGO", "Neuron", "Inhib",
          "Neuron", "MICRO", "ASTRO", "NEURON", "OPC", "PSD95", paste("Layer", geneTab$Label))
## Output directories for the per-gene PDFs.
dir.create("pdfs_grid")
dir.create("pdfs_single")
## Map gene symbols to the Ensembl IDs used as assay rownames.
map <- rowData(rseList[[1]])
ens <- map$gene_id[match(symbol, map$gene_name)]
## For every marker gene: one grid PDF (all samples on one page) and one
## per-sample PDF, showing UMI counts of that gene on the spatial coordinates.
for (j in seq_along(symbol)) {
# for(j in 1:2) {
	cat(".")   # progress indicator
	g <- symbol[j]
	label <- type[j]
	e <- ens[j]
	plots <- lapply(rseList, function(rse) {
		d <- as.data.frame(colData(rse))
		d$UMI <- assays(rse)$umis[e,]
		ggplot(d, aes(x=imagecol,y=imagerow,fill=UMI)) +
			geom_spatial(data=metadata(rse)$image,
				aes(grob=grob), x=0.5, y=0.5) +
			geom_point(shape = 21, size = 1.25, stroke = 0.25)+
			coord_cartesian(expand=FALSE)+
			scale_fill_gradientn(colours = myPalette(100))+
			xlim(0,max(rse$width)) +
			ylim(max(rse$height),0) +   # reversed y so the tissue image is upright
			xlab("") + ylab("") +
			labs(fill = "UMI")+
			ggtitle(paste(unique(rse$sample_name), g, label, sep=" - ")) +
			## BUG FIX: theme_set() mutates the global theme and returns the
			## *old* theme; add the theme object itself instead.
			theme_bw(base_size = 10) +
			theme(panel.grid.major = element_blank(),
				panel.grid.minor = element_blank(),
				panel.background = element_blank(),
				axis.line = element_line(colour = "black"),
				axis.text = element_blank(),
				axis.ticks = element_blank())
	})
	pdf(paste0("pdfs_grid/", g,"_",
		gsub(" ", "", gsub("/","-", label)), ".pdf"),height=24, width=36)
	print(plot_grid(plotlist = plots))
	dev.off()
	#### by sample: one page per sample
	pdf(paste0("pdfs_single/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_bySample.pdf"))
	for (p in plots) {
		print(p)
	}
	dev.off()
}
## gene by cluster: spots with appreciable expression of the gene (UMI > 3),
## colored by their cluster assignment.
# for (j in seq_along(symbol)) {
for (j in 1:2) {
	cat(".")   # progress indicator
	g <- symbol[j]
	label <- type[j]
	e <- ens[j]
	plots <- lapply(rseList, function(rse) {
		d <- as.data.frame(colData(rse))
		d$UMI <- assays(rse)$umis[e,]
		d <- d[d$UMI > 3,]   # keep only spots expressing this gene
		ggplot(d, aes(x=imagecol,y=imagerow,fill=factor(Cluster))) +
			geom_spatial(data=metadata(rse)$image,
				aes(grob=grob), x=0.5, y=0.5) +
			geom_point(shape = 21, size = 1.25, stroke = 0.25)+
			coord_cartesian(expand=FALSE)+
			scale_fill_manual(values = c("#b2df8a","#e41a1c","#377eb8","#4daf4a","#ff7f00","gold",
				"#a65628", "#999999", "black", "grey", "white", "purple"))+
			xlim(0,max(rse$width)) +
			ylim(max(rse$height),0) +   # reversed y so the tissue image is upright
			xlab("") + ylab("") +
			## BUG FIX: the fill aesthetic here is the cluster factor, not the
			## UMI count, so the legend must be titled "Cluster".
			labs(fill = "Cluster")+
			ggtitle(paste(unique(rse$sample_name), g, label, sep=" - ")) +
			## BUG FIX: theme_set() mutates the global theme and returns the
			## *old* theme; add the theme object itself instead.
			theme_bw(base_size = 10) +
			theme(panel.grid.major = element_blank(),
				panel.grid.minor = element_blank(),
				panel.background = element_blank(),
				axis.line = element_line(colour = "black"),
				axis.text = element_blank(),
				axis.ticks = element_blank())
	})
	pdf(paste0("pdfs_grid/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_sparse.pdf"),height=24, width=36)
	print(plot_grid(plotlist = plots))
	dev.off()
	#### by sample: one page per sample
	pdf(paste0("pdfs_single/", g,"_",
		gsub(" ", "", gsub("/","-", label)), "_bySample_sparse.pdf"))
	for (p in plots) {
		print(p)
	}
	dev.off()
}
|
## Getting full dataset
## (the raw file is ';'-separated and encodes missing values as '?')
data_full <- read.csv("./exdata-data-household_power_consumption/household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
                      nrows=2075259, check.names=FALSE, stringsAsFactors=FALSE, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates: combine Date and Time into a single POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3: the three sub-metering series over time
with(data, {
    ## BUG FIX: this panel plots the sub-metering columns, so the y-axis
    ## label must say "Energy sub metering", not "Global Active Power".
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file (dev.copy copies the on-screen plot to a 480x480 PNG)
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/plot3.R
|
no_license
|
anshultiwari1/Exploratory-Data-Analysis
|
R
| false
| false
| 978
|
r
|
## Getting full dataset
## (the raw file is ';'-separated and encodes missing values as '?')
data_full <- read.csv("./exdata-data-household_power_consumption/household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
                      nrows=2075259, check.names=FALSE, stringsAsFactors=FALSE, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates: combine Date and Time into a single POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3: the three sub-metering series over time
with(data, {
    ## BUG FIX: this panel plots the sub-metering columns, so the y-axis
    ## label must say "Energy sub metering", not "Global Active Power".
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file (dev.copy copies the on-screen plot to a 480x480 PNG)
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
## Example code extracted from the 'stanplot.brmsfit' help page of the
## 'brms' package. Lines prefixed with "##D" are the body of a \dontrun{}
## block and are intentionally left commented out (the brm() call fits a
## full model, which is slow and needs a working sampler backend).
library(brms)
### Name: stanplot.brmsfit
### Title: MCMC Plots Implemented in 'bayesplot'
### Aliases: stanplot.brmsfit stanplot
### ** Examples
## Not run:
##D model <- brm(count ~ log_Age_c + log_Base4_c * Trt
##D + (1|patient) + (1|visit),
##D data = epilepsy, family = "poisson")
##D
##D # plot posterior intervals
##D stanplot(model)
##D
##D # only show population-level effects in the plots
##D stanplot(model, pars = "^b_")
##D
##D # show histograms of the posterior distributions
##D stanplot(model, type = "hist")
##D
##D # plot some diagnostics of the sampler
##D stanplot(model, type = "neff")
##D stanplot(model, type = "rhat")
##D
##D # plot some diagnostics specific to the NUTS sampler
##D stanplot(model, type = "nuts_acceptance")
##D stanplot(model, type = "nuts_divergence")
## End(Not run)
|
/data/genthat_extracted_code/brms/examples/stanplot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 863
|
r
|
## Example code extracted from the 'stanplot.brmsfit' help page of the
## 'brms' package. Lines prefixed with "##D" are the body of a \dontrun{}
## block and are intentionally left commented out (the brm() call fits a
## full model, which is slow and needs a working sampler backend).
library(brms)
### Name: stanplot.brmsfit
### Title: MCMC Plots Implemented in 'bayesplot'
### Aliases: stanplot.brmsfit stanplot
### ** Examples
## Not run:
##D model <- brm(count ~ log_Age_c + log_Base4_c * Trt
##D + (1|patient) + (1|visit),
##D data = epilepsy, family = "poisson")
##D
##D # plot posterior intervals
##D stanplot(model)
##D
##D # only show population-level effects in the plots
##D stanplot(model, pars = "^b_")
##D
##D # show histograms of the posterior distributions
##D stanplot(model, type = "hist")
##D
##D # plot some diagnostics of the sampler
##D stanplot(model, type = "neff")
##D stanplot(model, type = "rhat")
##D
##D # plot some diagnostics specific to the NUTS sampler
##D stanplot(model, type = "nuts_acceptance")
##D stanplot(model, type = "nuts_divergence")
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEsts.R
\name{getEsts}
\alias{getEsts}
\title{Estimate the complexity of a library or sample based on unique fragments
using Daley & Smith's implementation of Good & Toulmin's rational function
approximation to solve the missing species problem.}
\usage{
getEsts(xx, withCI = FALSE, ...)
}
\arguments{
\item{xx}{The fragments or sample of fragments}
\item{withCI}{Have preseq compute 95% confidence intervals for plotting?}
\item{...}{Other arguments to pass on to preseq}
}
\value{
A data frame with results
}
\description{
Estimate the complexity of a library or sample based on unique fragments
using Daley & Smith's implementation of Good & Toulmin's rational function
approximation to solve the missing species problem.
}
|
/man/getEsts.Rd
|
no_license
|
danielhgu/ATACseeker
|
R
| false
| true
| 810
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEsts.R
\name{getEsts}
\alias{getEsts}
\title{Estimate the complexity of a library or sample based on unique fragments
using Daley & Smith's implementation of Good & Toulmin's rational function
approximation to solve the missing species problem.}
\usage{
getEsts(xx, withCI = FALSE, ...)
}
\arguments{
\item{xx}{The fragments or sample of fragments}
\item{withCI}{Have preseq compute 95% confidence intervals for plotting?}
\item{...}{Other arguments to pass on to preseq}
}
\value{
A data frame with results
}
\description{
Estimate the complexity of a library or sample based on unique fragments
using Daley & Smith's implementation of Good & Toulmin's rational function
approximation to solve the missing species problem.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-helpers.R
\name{adj_ranges}
\alias{adj_ranges}
\title{adjust ranges if necessary}
\usage{
adj_ranges(gr_e, gr_j, tx_plot, ex_use, gr_base = NULL)
}
\arguments{
\item{gr_e}{\code{GenomicRanges} for exons}
\item{gr_j}{\code{GenomicRanges} for junctions}
\item{tx_plot}{output from \code{find_annotations}}
\item{ex_use}{see \code{splicegrahm} documentation}
\item{gr_base}{\code{GenomicRanges} to use for adjusting gr_e, gr_j,
if NULL, then just adjust based on \code{gr_e}
(default = NULL)}
}
\description{
adjust ranges if necessary
}
\author{
Patrick Kimes
}
\keyword{internal}
|
/man/adj_ranges.Rd
|
permissive
|
pkimes/spliceclust
|
R
| false
| true
| 667
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-helpers.R
\name{adj_ranges}
\alias{adj_ranges}
\title{adjust ranges if necessary}
\usage{
adj_ranges(gr_e, gr_j, tx_plot, ex_use, gr_base = NULL)
}
\arguments{
\item{gr_e}{\code{GenomicRanges} for exons}
\item{gr_j}{\code{GenomicRanges} for junctions}
\item{tx_plot}{output from \code{find_annotations}}
\item{ex_use}{see \code{splicegrahm} documentation}
\item{gr_base}{\code{GenomicRanges} to use for adjusting gr_e, gr_j,
if NULL, then just adjust based on \code{gr_e}
(default = NULL)}
}
\description{
adjust ranges if necessary
}
\author{
Patrick Kimes
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CellDMC.R
\name{CellDMC}
\alias{CellDMC}
\title{A method that allows the identification of differentially methylated
cell-types and the estimated change of each cell-type}
\usage{
CellDMC(beta.m, pheno.v, frac.m, adjPMethod = "fdr", adjPThresh = 0.05,
cov.mod = NULL, sort = FALSE, mc.cores = 1)
}
\arguments{
\item{beta.m}{A beta value matrix with rows labeling the CpGs and columns labeling
samples.}
\item{pheno.v}{A vector of phenotype. CellDMC can handle both of binary and
continuous/ordinal phenotypes. \code{NA} is not allowed in
\code{pheno.v}.}
\item{frac.m}{A matrix contains fractions of each cell-type. Each row labels a sample,
with the same order of the columns in beta.m. Each column labels a
cell-type. Column names, which are the names of cell-types, are required. The
rowSums of frac.m should be 1 or close to 1.}
\item{adjPMethod}{The method will be used to adjust p values. The method can be any of method
accepted by \code{\link{p.adjust}}.}
\item{adjPThresh}{A numeric value, default as 0.05. This is used to call significant DMCTs.
Adjusted p values less than this threshold will be reported as DMCTs (-1 or
1) in the 'dmct' matrix in the returned list.}
\item{cov.mod}{A design matrix from \code{model.matrix}, which contains other covariates to
be adjusted. For example, input
\code{model.matrix(~ gender, data = pheno.df)} to adjust gender. Do not put
cell-type fraction here!}
\item{sort}{Default as \code{FALSE}. If \code{TRUE}, the data.frame in coe list will be
sorted based on p value of each CpG. The order of rows in 'dmct' will not
change since the orders of each cell-type are different.}
\item{mc.cores}{The number of cores to use, i.e. at most how many threads will
run simultaneously. The default is 1, which means no parallelization.}
}
\value{
A list with the following two items.
dmct
A matrix giving whether the input CpGs are DMCTs and DMCs. The first column
gives whether a CpG is DMC or not. If the CpG is called as DMC, the value
will be 1, otherwise it is 0. The following columns give DMCTs for each
cell-types. If a CpG is a DMCT, the value will be 1 (hypermethylated for case
compared to control) or -1 (hypomethylated for case compared to control).
Otherwise, the value is 0 (non-DMCT). The rows of this matrix are ordered as
the same as input \code{beta.m}.
coe
This list contains several dataframes, which correspond to each cell-type in
\code{frac.m}. Each dataframe contains all CpGs in input \code{beta.m}.
All dataframes contain estimated DNAm changes (\code{Estimate}),
standard error (\code{SE}), estimated t statistics (\code{t}),
raw P values (\code{p}), and multiple hypothesis corrected P
values (\code{adjP}).
}
\description{
An outstanding challenge of Epigenome-Wide Association Studies performed in
complex tissues is the identification of the specific cell-type(s)
responsible for the observed differential methylation. CellDMC is a novel
statistical algorithm, which is able to identify not only differentially
methylated positions, but also the specific cell-type(s) driving the
methylation change.
}
\examples{
data(centEpiFibIC.m)
data(DummyBeta.m)
out.l <- epidish(DummyBeta.m, centEpiFibIC.m, method = 'RPC')
frac.m <- out.l$estF
pheno.v <- rep(c(0, 1), each = 5)
celldmc.o <- CellDMC(DummyBeta.m, pheno.v, frac.m)
# Pls note this is a faked beta value matrix.
}
\references{
Zheng SC, Breeze CE, Beck S, Teschendorff AE.
\emph{Identification of differentially methylated cell-types in
Epigenome-Wide Association Studies.}
Nat Methods (2018) 15: 1059-1066
doi:\href{https://doi.org/10.1038/s41592-018-0213-x}{10.1038/s41592-018-0213-x}.
}
|
/man/CellDMC.Rd
|
no_license
|
RogerZou0108/EpiDISH
|
R
| false
| true
| 3,740
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CellDMC.R
\name{CellDMC}
\alias{CellDMC}
\title{A method that allows the identification of differentially methylated
cell-types and the estimated change of each cell-type}
\usage{
CellDMC(beta.m, pheno.v, frac.m, adjPMethod = "fdr", adjPThresh = 0.05,
cov.mod = NULL, sort = FALSE, mc.cores = 1)
}
\arguments{
\item{beta.m}{A beta value matrix with rows labeling the CpGs and columns labeling
samples.}
\item{pheno.v}{A vector of phenotype. CellDMC can handle both of binary and
continuous/ordinal phenotypes. \code{NA} is not allowed in
\code{pheno.v}.}
\item{frac.m}{A matrix contains fractions of each cell-type. Each row labels a sample,
with the same order of the columns in beta.m. Each column labels a
cell-type. Column names, which are the names of cell-types, are required. The
rowSums of frac.m should be 1 or close to 1.}
\item{adjPMethod}{The method will be used to adjust p values. The method can be any of method
accepted by \code{\link{p.adjust}}.}
\item{adjPThresh}{A numeric value, default as 0.05. This is used to call significant DMCTs.
Adjusted p values less than this threshold will be reported as DMCTs (-1 or
1) in the 'dmct' matrix in the returned list.}
\item{cov.mod}{A design matrix from \code{model.matrix}, which contains other covariates to
be adjusted. For example, input
\code{model.matrix(~ gender, data = pheno.df)} to adjust gender. Do not put
cell-type fraction here!}
\item{sort}{Default as \code{FALSE}. If \code{TRUE}, the data.frame in coe list will be
sorted based on p value of each CpG. The order of rows in 'dmct' will not
change since the orders of each cell-type are different.}
\item{mc.cores}{The number of cores to use, i.e. at most how many threads will
run simultaneously. The default is 1, which means no parallelization.}
}
\value{
A list with the following two items.
dmct
A matrix giving whether the input CpGs are DMCTs and DMCs. The first column
gives whether a CpG is DMC or not. If the CpG is called as DMC, the value
will be 1, otherwise it is 0. The following columns give DMCTs for each
cell-types. If a CpG is a DMCT, the value will be 1 (hypermethylated for case
compared to control) or -1 (hypomethylated for case compared to control).
Otherwise, the value is 0 (non-DMCT). The rows of this matrix are ordered as
the same as input \code{beta.m}.
coe
This list contains several dataframes, which correspond to each cell-type in
\code{frac.m}. Each dataframe contains all CpGs in input \code{beta.m}.
All dataframes contain estimated DNAm changes (\code{Estimate}),
standard error (\code{SE}), estimated t statistics (\code{t}),
raw P values (\code{p}), and multiple hypothesis corrected P
values (\code{adjP}).
}
\description{
An outstanding challenge of Epigenome-Wide Association Studies performed in
complex tissues is the identification of the specific cell-type(s)
responsible for the observed differential methylation. CellDMC is a novel
statistical algorithm, which is able to identify not only differentially
methylated positions, but also the specific cell-type(s) driving the
methylation change.
}
\examples{
data(centEpiFibIC.m)
data(DummyBeta.m)
out.l <- epidish(DummyBeta.m, centEpiFibIC.m, method = 'RPC')
frac.m <- out.l$estF
pheno.v <- rep(c(0, 1), each = 5)
celldmc.o <- CellDMC(DummyBeta.m, pheno.v, frac.m)
# Pls note this is a faked beta value matrix.
}
\references{
Zheng SC, Breeze CE, Beck S, Teschendorff AE.
\emph{Identification of differentially methylated cell-types in
Epigenome-Wide Association Studies.}
Nat Methods (2018) 15: 1059-1066
doi:\href{https://doi.org/10.1038/s41592-018-0213-x}{10.1038/s41592-018-0213-x}.
}
|
\docType{methods}
\name{head}
\alias{head}
\alias{head,DXGTable-method}
\title{Get the First Part of a GTable}
\arguments{
\item{x}{A GTable handler}
\item{n}{An integer: if positive, the max number of rows
starting from the beginning; if negative, all but the
last "|n|" rows.}
}
\value{
data frame of rows from the GTable
}
\description{
Returns the first part of the referenced GTable. By
default, returns the first 6 rows.
}
|
/src/R/dxR/man/head-methods.Rd
|
permissive
|
dnanexus/dx-toolkit
|
R
| false
| false
| 446
|
rd
|
\docType{methods}
\name{head}
\alias{head}
\alias{head,DXGTable-method}
\title{Get the First Part of a GTable}
\arguments{
\item{x}{A GTable handler}
\item{n}{An integer: if positive, the max number of rows
starting from the beginning; if negative, all but the
last "|n|" rows.}
}
\value{
data frame of rows from the GTable
}
\description{
Returns the first part of the referenced GTable. By
default, returns the first 6 rows.
}
|
\name{select}
\alias{select}
\alias{select.dataset}
\alias{select.default}
\title{Selecting Variables}
\description{
Select one or more expressions evaluated in the context of a data object.
}
\usage{
select(`_data`, ...)
\method{select}{dataset}(`_data`, ...)
\method{select}{default}(`_data`, ...)
}
\arguments{
\item{_data}{a data object.}
\item{...}{expressions to evaluate in the context of the data object.}
}
\details{
The \code{select} function evaluates a set of expressions in the context
of a data object, returning the results as a data object with the results
as variables.
The default implementation of the method coerces its \code{_data} argument
to a dataset, then uses \code{scope} to evaluate the expressions in the
context of the converted dataset. For the result, named arguments get these
names as variable names; unnamed arguments get the deparsed expressions as
names.
The expressions must evaluate to data objects with the same number of rows
as \code{_data}.
If the \code{_data} object has keys, then these get propagated to the result.
}
\value{
A data object with the evaluated expressions as variables.
}
\seealso{
\code{\link{transform.dataset}}, \code{\link{scope}}.
}
\examples{
# evaluate expressions, take given names
select(iris,
Sepal.Area = Sepal.Length * Sepal.Width,
Petal.Area = Petal.Length * Petal.Width)
# propagate keys, take names from arguments
x <- as.dataset(mtcars)
keys(x) <- keyset(name = rownames(mtcars))
select(x, cyl, 2 * cyl, three = 3 * cyl)
}
|
/man/select.Rd
|
permissive
|
patperry/r-frame
|
R
| false
| false
| 1,519
|
rd
|
\name{select}
\alias{select}
\alias{select.dataset}
\alias{select.default}
\title{Selecting Variables}
\description{
Select one or more expressions evaluated in the context of a data object.
}
\usage{
select(`_data`, ...)
\method{select}{dataset}(`_data`, ...)
\method{select}{default}(`_data`, ...)
}
\arguments{
\item{_data}{a data object.}
\item{...}{expressions to evaluate in the context of the data object.}
}
\details{
The \code{select} function evaluates a set of expressions in the context
of a data object, returning the results as a data object with the results
as variables.
The default implementation of the method coerces its \code{_data} argument
to a dataset, then uses \code{scope} to evaluate the expressions in the
context of the converted dataset. For the result, named arguments get these
names as variable names; unnamed arguments get the deparsed expressions as
names.
The expressions must evaluate to data objects with the same number of rows
as \code{_data}.
If the \code{_data} object has keys, then these get propagated to the result.
}
\value{
A data object with the evaluated expressions as variables.
}
\seealso{
\code{\link{transform.dataset}}, \code{\link{scope}}.
}
\examples{
# evaluate expressions, take given names
select(iris,
Sepal.Area = Sepal.Length * Sepal.Width,
Petal.Area = Petal.Length * Petal.Width)
# propagate keys, take names from arguments
x <- as.dataset(mtcars)
keys(x) <- keyset(name = rownames(mtcars))
select(x, cyl, 2 * cyl, three = 3 * cyl)
}
|
#' @name growthModels
#'
#' @title Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions.
#'
#' @description Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. Use \code{growthFunShow()} to see the equations for each growth function.
#'
#' @param type A string (in \code{growthFunShow}) that indicates the type of growth function to show.
#' @param param A string (for von Bertalanffy, Gompertz, and logistic) or numeric (for Richards) that indicates the specific parameterization of the growth function. See details.
#' @param simple A logical that indicates whether the function will accept all parameter values in the first parameter argument (\code{=FALSE}; DEFAULT) or whether all individual parameters must be specified in separate arguments (\code{=TRUE}).
#' @param msg A logical that indicates whether a message about the growth function and parameter definitions should be output (\code{=TRUE}) or not (\code{=FALSE}; DEFAULT).
#' @param plot A logical that indicates whether the growth function expression should be shown as an equation in a simple plot.
#' @param \dots Not implemented.
#'
#' @return The functions ending in \code{xxxFuns} return a function that can be used to predict fish size given a vector of ages and values for the growth function parameters and, in some parameterizations, values for constants. The result should be saved to an object that is then the function name. When the resulting function is used, the parameters are ordered as shown when the definitions of the parameters are printed after the function is called (if \code{msg=TRUE}). If \code{simple=FALSE} (DEFAULT), then the values for all parameters may be included as a vector in the first parameter argument (but in the same order). Similarly, the values for all constants may be included as a vector in the first constant argument (i.e., \code{t1}). If \code{simple=TRUE}, then all parameters and constants must be declared individually. The resulting function is somewhat easier to read when \code{simple=TRUE}, but is less general for some applications.
#'
#' An expression of the equation for each growth function may be created with \code{growthFunShow}. In this function \code{type=} is used to select the major function type (e.g., von Bertalanffy, Gompertz, Richards, Logistic, Schnute) and \code{param=} is used to select a specific parameterization of that growth function. If \code{plot=TRUE}, then a simple graphic will be created with the equation using \code{\link{plotmath}} for a pretty format.
#'
#' @note Take note of the following for parameterizations (i.e., \code{param}) of each growth function:
#' \itemize{
#' \item von Bertalanffy
#' \itemize{
#' \item The \sQuote{Original} and \sQuote{vonBertalanffy} are synonymous as are \sQuote{Typical}, \sQuote{Traditional}, and \sQuote{BevertonHolt}.
#' }
#' \item Gompertz
#' \itemize{
#' \item The \sQuote{Ricker2} and \sQuote{QuinnDeriso1} are synonymous, as are \sQuote{Ricker3} and \sQuote{QuinnDeriso2}.
#' \item The parameterizations and parameters for the Gompertz function are varied and confusing in the literature. I have attempted to use a uniform set of parameters in these functions, but that makes a direct comparison to the literature difficult. Common sources for Gompertz models are listed in the references below. I make some comments here to aid comparisons to the literature.
#' \item Within FSA, L0 is the mean length at age 0, Linf is the mean asymptotic length, ti is the age at the inflection point, gi is the instantaneous growth rate at the inflection point, t* is a dimensionless parameter related to time/age, and a is a dimensionless parameter related to growth.
#' \item In the Quinn and Deriso (1999) functions (the \sQuote{QuinnDerisoX} functions), the a parameter here is equal to lambda/K there and the gi parameter here is equal to the K parameter there. Also note that their Y is L here.
#' \item In the Ricker (1979)[p. 705] functions (the \sQuote{RickerX} functions), the a parameter here is equal to k there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. In the Ricker (1979) functions as presented in Campana and Jones (1992), the a parameter here is equal to k parameter there and the gi parameter here is equal to the G parameter there. Also note that their X is L here.
#' \item The function in Ricker (1975)[p. 232] is the same as \sQuote{Ricker2} where the a parameter here is equal to G there and the gi parameter here is equal to the g parameter there. Also note that their w is L here.
#' \item The function in Quist et al. (2012)[p. 714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there.
#' \item The function in Katsanevakis and Maravelias (2008) is the same as \sQuote{Ricker1} where the gi parameter here is equal to k2 parameter there and the ti parameter here is equal to the t2 parameter there.
#' }
#' \item Richards
#' \itemize{
#' \item Within FSA, Linf is the mean asymptotic length, ti is the age at the inflection point, k is related to growth (slope at the inflection point), b is related to the vertical position of the inflection point, and L0 is the mean length at age-0.
#' \item The parameterizations (1-6) correspond to functions/equations 1, 4, 5, 6, 7, and 8, respectively, in Tjorve and Tjorve (2010). Note that their A, S, k, d, and B are Linf, a, k, b, and L0, respectively, here (in FSA).
#' }
#' \item logistic
#' \itemize{
#' \item Within FSA, L0 is the mean length at age 0, Linf is the mean asymptotic length, ti is the age at the inflection point, and gninf is the instantaneous growth rate at negative infinity.
#' }
#' }
#'
#' @author Derek H. Ogle, \email{derek@@derekogle.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}.
#'
#' @section IFAR Chapter: 12-Individual Growth.
#'
#' @seealso See \code{\link{Schnute}} for an implementation of the Schnute (1981) model.
#'
#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL.
#'
#' Campana, S.E. and C.M. Jones. 1992. Analysis of otolith microstructure data. Pages 73-100 In D.K. Stevenson and S.E. Campana, editors. Otolith microstructure examination and analysis. Canadian Special Publication of Fisheries and Aquatic Sciences 117. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/141734.pdf.]
#'
#' Fabens, A. 1965. Properties and fitting of the von Bertalanffy growth curve. Growth 29:265-289.
#'
#' Francis, R.I.C.C. 1988. Are growth parameters estimated from tagging and age-length data comparable? Canadian Journal of Fisheries and Aquatic Sciences, 45:936-942.
#'
#' Gallucci, V.F. and T.J. Quinn II. 1979. Reparameterizing, fitting, and testing a simple growth model. Transactions of the American Fisheries Society, 108:14-25.
#'
#' Garcia-Berthou, E., G. Carmona-Catot, R. Merciai, and D.H. Ogle. A technical note on seasonal growth models. Reviews in Fish Biology and Fisheries 22:635-640. [Was (is?) from https://www.researchgate.net/publication/257658359_A_technical_note_on_seasonal_growth_models.]
#'
#' Gompertz, B. 1825. On the nature of the function expressive of the law of human mortality, and on a new method of determining the value of life contingencies. Philosophical Transactions of the Royal Society of London. 115:513-583.
#'
#' Haddon, M., C. Mundy, and D. Tarbath. 2008. Using an inverse-logistic model to describe growth increments of blacklip abalone (\emph{Haliotis rubra}) in Tasmania. Fishery Bulletin 106:58-71. [Was (is?) from http://aquaticcommons.org/8857/1/haddon_Fish_Bull_2008.pdf.]
#'
#' Karkach, A. S. 2006. Trajectories and models of individual growth. Demographic Research 15:347-400. [Was (is?) from http://www.demographic-research.org/volumes/vol15/12/15-12.pdf.]
#'
#' Katsanevakis, S. and C.D. Maravelias. 2008. Modelling fish growth: multi-model inference as a better alternative to a priori using von Bertalanffy equation. Fish and Fisheries 9:178-187.
#'
#' Mooij, W.M., J.M. Van Rooij, and S. Wijnhoven. 1999. Analysis and comparison of fish growth from small samples of length-at-age data: Detection of sexual dimorphism in Eurasian perch as an example. Transactions of the American Fisheries Society 128:483-490.
#'
#' Polacheck, T., J.P. Eveson, and G.M. Laslett. 2004. Increase in growth rates of southern bluefin tuna (\emph{Thunnus maccoyii}) over four decades: 1960 to 2000. Canadian Journal of Fisheries and Aquatic Sciences, 61:307-322.
#'
#' Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford University Press, New York, New York. 542 pages.
#'
#' Quist, M.C., M.A. Pegg, and D.R. DeVries. 2012. Age and Growth. Chapter 15 in A.V. Zale, D.L Parrish, and T.M. Sutton, Editors Fisheries Techniques, Third Edition. American Fisheries Society, Bethesda, MD.
#'
#' Richards, F. J. 1959. A flexible growth function for empirical use. Journal of Experimental Biology 10:290-300.
#'
#' Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]
#'
#' Ricker, W.E. 1979. Growth rates and models. Pages 677-743 In W.S. Hoar, D.J. Randall, and J.R. Brett, editors. Fish Physiology, Vol. 8: Bioenergetics and Growth. Academic Press, NY, NY. [Was (is?) from https://books.google.com/books?id=CB1qu2VbKwQC&pg=PA705&lpg=PA705&dq=Gompertz+fish&source=bl&ots=y34lhFP4IU&sig=EM_DGEQMPGIn_DlgTcGIi_wbItE&hl=en&sa=X&ei=QmM4VZK6EpDAgwTt24CABw&ved=0CE8Q6AEwBw#v=onepage&q=Gompertz\%20fish&f=false.]
#'
#' Schnute, J. 1981. A versatile growth model with statistically stable parameters. Canadian Journal of Fisheries and Aquatic Sciences, 38:1128-1140.
#'
#' Somers, I. F. 1988. On a seasonally oscillating growth function. Fishbyte 6(1):8-11. [Was (is?) from http://www.worldfishcenter.org/Naga/na_2914.pdf.]
#'
#' Tjorve, E. and K. M. C. Tjorve. 2010. A unified approach to the Richards-model family for use in growth analyses: Why we need only two model forms. Journal of Theoretical Biology 267:417-425. [Was (is?) from https://www.researchgate.net/profile/Even_Tjorve/publication/46218377_A_unified_approach_to_the_Richards-model_family_for_use_in_growth_analyses_why_we_need_only_two_model_forms/links/54ba83b80cf29e0cb04bd24e.pdf.]
#'
#' Troynikov, V. S., R. W. Day, and A. M. Leorke. Estimation of seasonal growth parameters using a stochastic Gompertz model for tagging data. Journal of Shellfish Research 17:833-838. [Was (is?) from https://www.researchgate.net/profile/Robert_Day2/publication/249340562_Estimation_of_seasonal_growth_parameters_using_a_stochastic_gompertz_model_for_tagging_data/links/54200fa30cf203f155c2a08a.pdf.]
#'
#' Vaughan, D. S. and T. E. Helser. 1990. Status of the red drum stock of the Atlantic coast: Stock assessment report for 1989. NOAA Technical Memorandum NMFS-SEFC-263, 117 p. [Was (is?) from http://docs.lib.noaa.gov/noaa_documents/NMFS/SEFSC/TM_NMFS_SEFSC/NMFS_SEFSC_TM_263.pdf.]
#'
#' Wang, Y.-G. 1998. An improved Fabens method for estimation of growth parameters in the von Bertalanffy model with individual asymptotes. Canadian Journal of Fisheries and Aquatic Sciences 55:397-400.
#'
#' Weisberg, S., G.R. Spangler, and L. S. Richmond. 2010. Mixed effects models for fish growth. Canadian Journal of Fisheries And Aquatic Sciences 67:269-277.
#'
#' Winsor, C.P. 1932. The Gompertz curve as a growth curve. Proceedings of the National Academy of Sciences. 18:1-8. [Was (is?) from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1076153/pdf/pnas01729-0009.pdf.]
#'
#' @keywords manip hplot
#'
#' @examples
#' ###########################################################
#' ## Simple Examples -- Von B
#' ( vb1 <- vbFuns() )
#' ages <- 0:20
#' plot(vb1(ages,Linf=20,K=0.3,t0=-0.2)~ages,type="b",pch=19)
#' ( vb2 <- vbFuns("Francis") )
#' plot(vb2(ages,L1=10,L2=19,L3=20,t1=2,t3=18)~ages,type="b",pch=19)
#' ( vb2c <- vbFuns("Francis",simple=TRUE) ) # compare to vb2
#'
#' ## Simple Examples -- Gompertz
#' ( gomp1 <- GompertzFuns() )
#' plot(gomp1(ages,Linf=800,gi=0.5,ti=5)~ages,type="b",pch=19)
#' ( gomp2 <- GompertzFuns("Ricker2") )
#' plot(gomp2(ages,L0=2,a=6,gi=0.5)~ages,type="b",pch=19)
#' ( gomp2c <- GompertzFuns("Ricker2",simple=TRUE) ) # compare to gomp2
#' ( gompT <- GompertzFuns("Troynikov1"))
#'
#' ## Simple Examples -- Richards
#' ( rich1 <- RichardsFuns() )
#' plot(rich1(ages,Linf=800,k=0.5,a=1,b=6)~ages,type="b",pch=19)
#' ( rich2 <- RichardsFuns(2) )
#' plot(rich2(ages,Linf=800,k=0.5,ti=3,b=6)~ages,type="b",pch=19)
#' ( rich3 <- RichardsFuns(3) )
#' plot(rich3(ages,Linf=800,k=0.5,ti=3,b=0.15)~ages,type="b",pch=19)
#' ( rich4 <- RichardsFuns(4) )
#' plot(rich4(ages,Linf=800,k=0.5,ti=3,b=0.95)~ages,type="b",pch=19)
#' lines(rich4(ages,Linf=800,k=0.5,ti=3,b=1.5)~ages,type="b",pch=19,col="blue")
#' ( rich5 <- RichardsFuns(5) )
#' plot(rich5(ages,Linf=800,k=0.5,L0=50,b=1.5)~ages,type="b",pch=19)
#' ( rich6 <- RichardsFuns(6) )
#' plot(rich6(ages,Linf=800,k=0.5,ti=3,Lninf=50,b=1.5)~ages,type="b",pch=19)
#' ( rich2c <- RichardsFuns(2,simple=TRUE) ) # compare to rich2
#'
#' ## Simple Examples -- Logistic
#' ( log1 <- logisticFuns() )
#' plot(log1(ages,Linf=800,gninf=0.5,ti=5)~ages,type="b",pch=19)
#' ( log2 <- logisticFuns("CJ2") )
#' plot(log2(ages,Linf=800,gninf=0.5,a=10)~ages,type="b",pch=19)
#' ( log2c <- logisticFuns("CJ2",simple=TRUE) ) # compare to log2
#' ( log3 <- logisticFuns("Karkach") )
#' plot(log3(ages,L0=10,Linf=800,gninf=0.5)~ages,type="b",pch=19)
#' ( log4 <- logisticFuns("Haddon") )
#'
#'
#' ###########################################################
#' ## Examples of fitting
#' ## After the last example a plot is constructed with three
#' ## or four lines on top of each other illustrating that the
#' ## parameterizations all produce the same fitted values.
#' ## However, observe the correlations in the summary() results.
#'
#' ## Von B
#' data(SpotVA1)
#' # Fitting the typical parameterization of the von B function
#' fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1,start=vbStarts(tl~age,data=SpotVA1))
#' summary(fit1,correlation=TRUE)
#' plot(tl~age,data=SpotVA1,pch=19)
#' curve(vb1(x,Linf=coef(fit1)),from=0,to=5,col="red",lwd=10,add=TRUE)
#'
#' # Fitting the Francis parameterization of the von B function
#' fit2 <- nls(tl~vb2c(age,L1,L2,L3,t1=0,t3=5),data=SpotVA1,
#' start=vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5)))
#' summary(fit2,correlation=TRUE)
#' curve(vb2c(x,L1=coef(fit2)[1],L2=coef(fit2)[2],L3=coef(fit2)[3],t1=0,t3=5),
#' from=0,to=5,col="blue",lwd=5,add=TRUE)
#'
#' # Fitting the Schnute parameterization of the von B function
#' vb3 <- vbFuns("Schnute")
#' fit3 <- nls(tl~vb3(age,L1,L3,K,t1=0,t3=4),data=SpotVA1,
#' start=vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,4)))
#' summary(fit3,correlation=TRUE)
#' curve(vb3(x,L1=coef(fit3),t1=c(0,4)),from=0,to=5,col="green",lwd=2,add=TRUE)
#'
#' ## Gompertz
#' # Make some fake data using the original parameterization
#' gompO <- GompertzFuns("original")
#' # setup ages, sample sizes (general reduction in numbers with
#' # increasing age), and additive SD to model
#' t <- 1:15
#' n <- c(10,40,35,25,12,10,10,8,6,5,3,3,3,2,2)
#' sd <- 15
#' # expand ages
#' ages <- rep(t,n)
#' # get lengths from gompertz and a random error for individuals
#' lens <- gompO(ages,Linf=450,a=1,gi=0.3)+rnorm(length(ages),0,sd)
#' # put together as a data.frame
#' df <- data.frame(age=ages,len=round(lens,0))
#'
#' # Fit first Ricker parameterization
#' fit1 <- nls(len~gomp1(age,Linf,gi,ti),data=df,start=list(Linf=500,gi=0.3,ti=3))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(gomp1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit third Ricker parameterization
#' fit2 <- nls(len~gomp2(age,L0,a,gi),data=df,start=list(L0=30,a=3,gi=0.3))
#' summary(fit2,correlation=TRUE)
#' curve(gomp2(x,L0=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE)
#'
#' # Fit third Quinn and Deriso parameterization (using simple=TRUE model)
#' gomp3 <- GompertzFuns("QD3",simple=TRUE)
#' fit3 <- nls(len~gomp3(age,Linf,gi,t0),data=df,start=list(Linf=500,gi=0.3,t0=0))
#' summary(fit3,correlation=TRUE)
#' curve(gomp3(x,Linf=coef(fit3)[1],gi=coef(fit3)[2],t0=coef(fit3)[3]),
#' from=0,to=15,col="green",lwd=2,add=TRUE)
#'
#' ## Richards
#' # Fit first Richards parameterization
#' fit1 <- nls(len~rich1(age,Linf,k,a,b),data=df,start=list(Linf=450,k=0.25,a=0.65,b=3))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(rich1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit second Richards parameterization
#' fit2 <- nls(len~rich2(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=3))
#' summary(fit2,correlation=TRUE)
#' curve(rich2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=7,add=TRUE)
#'
#' # Fit third Richards parameterization
#' fit3 <- nls(len~rich3(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=-0.3))
#' summary(fit3,correlation=TRUE)
#' curve(rich3(x,Linf=coef(fit3)),from=0,to=15,col="green",lwd=4,add=TRUE)
#'
#' # Fit fourth Richards parameterization
#' fit4 <- nls(len~rich4(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=0.7))
#' summary(fit4,correlation=TRUE)
#' curve(rich4(x,Linf=coef(fit4)),from=0,to=15,col="black",lwd=1,add=TRUE)
#'
#' ## Logistic
#' # Fit first Campana-Jones parameterization
#' fit1 <- nls(len~log1(age,Linf,gninf,ti),data=df,start=list(Linf=450,gninf=0.45,ti=4))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(log1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit second Campana-Jones parameterization
#' fit2 <- nls(len~log2(age,Linf,gninf,a),data=df,start=list(Linf=450,gninf=0.45,a=7))
#' summary(fit2,correlation=TRUE)
#' curve(log2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE)
#'
#' # Fit Karkach parameterization (using simple=TRUE model)
#' log3 <- logisticFuns("Karkach",simple=TRUE)
#' fit3 <- nls(len~log3(age,Linf,L0,gninf),data=df,start=list(Linf=450,L0=30,gninf=0.45))
#' summary(fit3,correlation=TRUE)
#' curve(log3(x,Linf=coef(fit3)[1],L0=coef(fit3)[2],gninf=coef(fit3)[3]),
#' from=0,to=15,col="green",lwd=2,add=TRUE)
#'
#'
#' #############################################################################
#' ## Create expressions of the models
#' #############################################################################
#' # Typical von Bertalanffy ... Show as a stand-alone plot
#' growthFunShow("vonBertalanffy","Typical",plot=TRUE)
#' # Get and save the expression
#' ( tmp <- growthFunShow("vonBertalanffy","Typical") )
#' # Use expression as title on a plot
#' lens <- vb1(ages,Linf=20,K=0.3,t0=-0.2)
#' plot(lens~ages,type="b",pch=19,main=tmp)
#' # Put expression in the main plot
#' text(10,5,tmp)
#' # Put multiple expressions on a plot
#' op <- par(mar=c(0.1,0.1,0.1,0.1))
#' plot(0,type="n",xlab="",ylab="",xlim=c(0,1),ylim=c(0,3),xaxt="n",yaxt="n")
#' text(0,2.5,"Original:",pos=4)
#' text(0.5,2.5,growthFunShow("vonBertalanffy","Original"))
#' text(0,1.5,"Typical:",pos=4)
#' text(0.5,1.5,growthFunShow("vonBertalanffy","Typical"))
#' text(0,0.5,"Francis:",pos=4)
#' text(0.5,0.5,growthFunShow("vonBertalanffy","Francis"))
#' par(op)
NULL
#' @rdname growthModels
#' @export
vbFuns <- function(param=c("Typical","typical","Traditional","traditional","BevertonHolt",
                           "Original","original","vonBertalanffy",
                           "GQ","GallucciQuinn","Mooij","Weisberg",
                           "Schnute","Francis","Laslett","Polacheck",
                           "Somers","Somers2",
                           "Fabens","Fabens2","Wang","Wang2","Wang3"),
                   simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen parameterization of the von Bertalanffy
  ## growth function. Each parameterization is defined twice below -- a
  ## 'flexible' version in which all parameters may be given in the first
  ## parameter argument (handy when supplying coef() results) and a 'simple'
  ## (S-prefixed) version in which each parameter must be given separately
  ## (returned when simple=TRUE). When msg=TRUE a description of the chosen
  ## parameterization is printed before the function is returned.
  Typical <- typical <- Traditional <- traditional <- BevertonHolt <- function(t,Linf,K=NULL,t0=NULL) {
    if (length(Linf)==3) { K <- Linf[[2]]
                           t0 <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*(1-exp(-K*(t-t0)))
  }
  STypical <- Stypical <- STraditional <- Straditional <- SBevertonHolt <- function(t,Linf,K,t0) {
    Linf*(1-exp(-K*(t-t0)))
  }
  Original <- original <- vonBertalanffy <- function(t,Linf,L0=NULL,K=NULL) {
    if (length(Linf)==3) { L0 <- Linf[[2]]
                           K <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf-(Linf-L0)*exp(-K*t)
  }
  SOriginal <- Soriginal <- SvonBertalanffy <- function(t,Linf,L0,K) {
    Linf-(Linf-L0)*exp(-K*t)
  }
  GQ <- GallucciQuinn <- function(t,omega,K=NULL,t0=NULL) {
    if (length(omega)==3) { K <- omega[[2]]
                            t0 <- omega[[3]]
                            omega <- omega[[1]] }
    (omega/K)*(1-exp(-K*(t-t0)))
  }
  SGQ <- SGallucciQuinn <- function(t,omega,K,t0) {
    (omega/K)*(1-exp(-K*(t-t0)))
  }
  Mooij <- function(t,Linf,L0=NULL,omega=NULL) {
    if (length(Linf)==3) { L0 <- Linf[[2]]
                           omega <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf-(Linf-L0)*exp(-(omega/Linf)*t)
  }
  SMooij <- function(t,Linf,L0,omega) {
    Linf-(Linf-L0)*exp(-(omega/Linf)*t)
  }
  Weisberg <- function(t,Linf,t50=NULL,t0=NULL) {
    if (length(Linf)==3) { t50 <- Linf[[2]]
                           t0 <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))
  }
  SWeisberg <- function(t,Linf,t50,t0) {
    Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))
  }
  Schnute <- function(t,L1,L3=NULL,K=NULL,t1,t3=NULL) {
    if (length(L1)==3) { L3 <- L1[[2]]; K <- L1[[3]]; L1 <- L1[[1]] }
    if (length(t1)==2) { t3 <- t1[[2]]; t1 <- t1[[1]] }
    L1+(L3-L1)*((1-exp(-K*(t-t1)))/(1-exp(-K*(t3-t1))))
  }
  SSchnute <- function(t,L1,L3,K,t1,t3) {
    L1+(L3-L1)*((1-exp(-K*(t-t1)))/(1-exp(-K*(t3-t1))))
  }
  Francis <- function(t,L1,L2=NULL,L3=NULL,t1,t3=NULL) {
    if (length(L1)==3) { L2 <- L1[[2]]; L3 <- L1[[3]]; L1 <- L1[[1]] }
    if (length(t1)==2) { t3 <- t1[[2]]; t1 <- t1[[1]] }
    r <- (L3-L2)/(L2-L1)
    L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
  }
  SFrancis <- function(t,L1,L2,L3,t1,t3) {
    r <- (L3-L2)/(L2-L1)
    L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
  }
  Laslett <- Polacheck <- function(t,Linf,K1,K2,t0,a,b) {
    if (length(Linf)==6) { K1 <- Linf[[2]]; K2 <- Linf[[3]]
                           t0 <- Linf[[4]]; a <- Linf[[5]]
                           b <- Linf[[6]]; Linf <- Linf[[1]] }
    Linf*(1-exp(-K2*(t-t0))*((1+exp(-b*(t-t0-a)))/(1+exp(a*b)))^(-(K2-K1)/b))
  }
  SLaslett <- SPolacheck <- function(t,Linf,K1,K2,t0,a,b) {
    Linf*(1-exp(-K2*(t-t0))*((1+exp(-b*(t-t0-a)))/(1+exp(a*b)))^(-(K2-K1)/b))
  }
  Somers <- function(t,Linf,K,t0,C,ts) {
    if (length(Linf)==5) { K <- Linf[[2]]; t0 <- Linf[[3]]
                           C <- Linf[[4]]; ts <- Linf[[5]]
                           Linf <- Linf[[1]] }
    St <- (C*K)/(2*pi)*sin(2*pi*(t-ts))
    Sto <- (C*K)/(2*pi)*sin(2*pi*(t0-ts))
    Linf*(1-exp(-K*(t-t0)-St+Sto))
  }
  SSomers <- function(t,Linf,K,t0,C,ts) {
    Linf*(1-exp(-K*(t-t0)-(C*K)/(2*pi)*sin(2*pi*(t-ts))+(C*K)/(2*pi)*sin(2*pi*(t0-ts))))
  }
  Somers2 <- function(t,Linf,K,t0,C,WP) {
    if (length(Linf)==5) { K <- Linf[[2]]; t0 <- Linf[[3]]
                           C <- Linf[[4]]; WP <- Linf[[5]]
                           Linf <- Linf[[1]] }
    Rt <- (C*K)/(2*pi)*sin(2*pi*(t-WP+0.5))
    Rto <- (C*K)/(2*pi)*sin(2*pi*(t0-WP+0.5))
    Linf*(1-exp(-K*(t-t0)-Rt+Rto))
  }
  SSomers2 <- function(t,Linf,K,t0,C,WP) {
    Linf*(1-exp(-K*(t-t0)-(C*K)/(2*pi)*sin(2*pi*(t-WP+0.5))+(C*K)/(2*pi)*sin(2*pi*(t0-WP+0.5))))
  }
  ## Tag-return parameterizations model length at (or increment to) recapture
  ## from length-at-marking (Lm) and time-at-large (dt).
  Fabens <- function(Lm,dt,Linf,K) {
    if (length(Linf)==2) { K <- Linf[[2]]; Linf <- Linf[[1]] }
    Lm+(Linf-Lm)*(1-exp(-K*dt))
  }
  SFabens <- function(Lm,dt,Linf,K) {
    Lm+(Linf-Lm)*(1-exp(-K*dt))
  }
  Fabens2 <- function(Lm,dt,Linf,K) {
    if (length(Linf)==2) { K <- Linf[[2]]; Linf <- Linf[[1]] }
    (Linf-Lm)*(1-exp(-K*dt))
  }
  SFabens2 <- function(Lm,dt,Linf,K) {
    (Linf-Lm)*(1-exp(-K*dt))
  }
  Wang <- function(Lm,dt,Linf,K,b) {
    if (length(Linf)==3) { b <- Linf[[3]]; K <- Linf[[2]]
                           Linf <- Linf[[1]] }
    (Linf+b*(Lm-mean(Lm))-Lm)*(1-exp(-K*dt))
  }
  SWang <- function(Lm,dt,Linf,K,b) {
    (Linf+b*(Lm-mean(Lm))-Lm)*(1-exp(-K*dt))
  }
  Wang2 <- function(Lm,dt,K,a,b) {
    if (length(K)==3) { b <- K[[3]]; a <- K[[2]]; K <- K[[1]] }
    (a+b*Lm)*(1-exp(-K*dt))
  }
  SWang2 <- function(Lm,dt,K,a,b) {
    (a+b*Lm)*(1-exp(-K*dt))
  }
  Wang3 <- function(Lm,dt,K,a,b) {
    if (length(K)==3) { b <- K[[3]]; a <- K[[2]]; K <- K[[1]] }
    Lm+(a+b*Lm)*(1-exp(-K*dt))
  }
  SWang3 <- function(Lm,dt,K,a,b) {
    Lm+(a+b*Lm)*(1-exp(-K*dt))
  }
  ## Main function -- select the parameterization and optionally describe it
  param <- match.arg(param)
  if (msg) {
    switch(param,
           Typical=,typical=,Traditional=,traditional=,BevertonHolt= {
             message("You have chosen the 'Typical'/'typical', 'Traditional'/'traditional', or 'BevertonHolt' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-exp(-K*(t-t0)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           K = exponential rate of approach to Linf\n",
                     "          t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
           },
           Original=,original=,vonBertalanffy={
             message("You have chosen the 'Original'/'original' or 'vonBertalanffy' parameterization.\n\n",
                     "  E[L|t] = Linf-(Linf-L0)*exp(-K*t)\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          L0 = the mean length at age-0 (i.e., hatching or birth)\n",
                     "           K = exponential rate of approach to Linf\n\n")
           },
           Francis={
             message("You have chosen the 'Francis' parameterization.\n\n",
                     "  E[L|t] = L1+(L3-L1)*[(1-r^(2*[(t-t1)/(t3-t1)]))/(1-r^2)]\n\n",
                     "  where r = [(L3-L2)/(L2-L1)] and\n\n",
                     "        L1 = the mean length at the first (small) reference age\n",
                     "        L2 = the mean length at the intermediate reference age\n",
                     "        L3 = the mean length at the third (large) reference age\n\n",
                     "You must also supply the constant values (i.e., they are NOT model parameters) for\n",
                     "        t1 = the first (usually a younger) reference age\n",
                     "        t3 = the third (usually an older) reference age\n\n")
           },
           GQ=,GallucciQuinn={
             message("You have chosen the 'GQ' or 'GallucciQuinn' parameterization.\n\n",
                     "  E[L|t] = [omega/K]*(1-exp(-K*(t-t0)))\n\n",
                     "  where omega = growth rate near t0\n",
                     "            K = exponential rate of approach to Linf\n",
                     "           t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
           },
           Mooij={
             message("You have chosen the 'Mooij' parameterization.\n\n",
                     "  E[L|t] = Linf-(Linf-L0)*exp(-(omega/Linf)*t)\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          L0 = the mean length at age-0 (i.e., hatching or birth)\n",
                     "       omega = growth rate near L0\n\n")
           },
           Weisberg= {
             message("You have chosen the 'Weisberg' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "         t50 = age when half of Linf is reached\n",
                     "          t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
           },
           Schnute={
             ## NOTE: described with L3/t3 to match the actual argument names of
             ## the returned function (previously mislabeled as L2/t2).
             message("You have chosen the 'Schnute' parameterization.\n\n",
                     "  E[L|t] = L1+(L3-L1)*[(1-exp(-K*(t-t1)))/(1-exp(-K*(t3-t1)))]\n\n",
                     "  where L1 = the mean length at the youngest age in the sample\n",
                     "        L3 = the mean length at the oldest age in the sample\n",
                     "         K = exponential rate of approach to Linf\n\n",
                     "  You must also supply the constant values (i.e., they are NOT model parameters) for\n",
                     "        t1 = the youngest age in the sample\n",
                     "        t3 = the oldest age in the sample\n\n")
           },
           Laslett=,Polacheck={
             message("You have chosen the 'Laslett/Polacheck' 'double' parameterization.\n\n",
                     "  E[L|t] = Linf*[1-exp(-K2*(t-t0))((1+exp(-b(t-t0-a)))/(1+exp(ab)))^(-(K2-K1)/b)]\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          t0 = the theoretical age when length = 0 (a modeling artifact)\n",
                     "          K1 = the first (younger ages) exponential rate of approach to Linf\n",
                     "          K2 = the second (older ages) exponential rate of approach to Linf\n",
                     "           b = governs the rate of transition from K1 to K2\n",
                     "           a = the central age of the transition from K1 to K2\n\n")
           },
           Somers={
             message("You have chosen the 'Somers Seasonal' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-exp(-K*(t-t0)-St+St0))\n\n",
                     "  where St = (CK/2*pi)*sin(2*pi*(t-ts)) and\n",
                     "        St0 = (CK/2*pi)*sin(2*pi*(t0-ts)) and\n\n",
                     "  and Linf = asymptotic mean length\n",
                     "         K = exponential rate of approach to Linf\n",
                     "        t0 = the theoretical age when length = 0 (a modeling artifact)\n",
                     "         C = proportional growth depression at 'winter peak'\n",
                     "        ts = time from t=0 until the first growth oscillation begins.\n\n")
           },
           Somers2={
             message("You have chosen the modified 'Somers2 Seasonal' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-exp(-K*(t-t0)-Rt+Rt0))\n\n",
                     "  where Rt = (CK/2*pi)*sin(2*pi*(t-WP+0.5)) and\n",
                     "        Rt0 = (CK/2*pi)*sin(2*pi*(t0-WP+0.5)) and\n\n",
                     "  and Linf = asymptotic mean length\n",
                     "         K = exponential rate of approach to Linf\n",
                     "        t0 = the theoretical age when length = 0 (a modeling artifact)\n",
                     "         C = proportional growth depression at 'winter peak'\n",
                     "        WP = the 'winter peak' (point of slowest growth).\n\n")
           },
           Fabens={
             message("You have chosen the 'Fabens' parameterization for tag-return data.\n\n",
                     "  E[Lr|Lm,dt] = Lm + (Linf-Lm)*(1-exp(-K*dt))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           K = exponential rate of approach to Linf\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n\n")
           },
           Fabens2={
             message("You have chosen the 'Fabens2' parameterization for tag-return data.\n\n",
                     "  E[Lr|Lm,dt] = (Linf-Lm)*(1-exp(-K*dt))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           K = exponential rate of approach to Linf\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n\n")
           },
           Wang={
             message("You have chosen the 'Wang' parameterization for tag-return data.\n\n",
                     "  E[Lr-Lm|Lm,dt] = (Linf+b(Lm-E(Lm))-Lm)*(1-exp(-K*dt))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           K = exponential rate of approach to Linf\n",
                     "           b = parameter\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n\n",
                     "  and with E(Lm) = expectation (i.e., mean) of Lm.\n\n")
           },
           Wang2={
             message("You have chosen the 'Wang2' parameterization for tag-return data.\n\n",
                     "  E[Lr-Lm|Lm,dt] = (a+bLm)*(1-exp(-K*dt))\n\n",
                     "  where K = exponential rate of approach to Linf\n",
                     "       a, b = parameters\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n\n")
           },
           Wang3={
             message("You have chosen the 'Wang3' parameterization for tag-return data.\n\n",
                     "  E[Lr|Lm,dt] = Lm+(a+bLm)*(1-exp(-K*dt))\n\n",
                     "  where K = exponential rate of approach to Linf\n",
                     "       a, b = parameters\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n\n")
           }
    )
  }
  ## Return the 'simple' (S-prefixed) version when requested
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @rdname growthModels
#' @export
GompertzFuns <- function(param=c("Ricker1","Ricker2","Ricker3",
                                 "QuinnDeriso1","QuinnDeriso2","QuinnDeriso3",
                                 "QD1","QD2","QD3",
                                 "Original","original",
                                 "Troynikov1","Troynikov2"),
                         simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen parameterization of the Gompertz growth
  ## function. Each parameterization has a 'flexible' version (all parameters
  ## may be given in the first parameter argument) and a 'simple' (S-prefixed)
  ## version (each parameter given separately; returned when simple=TRUE).
  Original <- original <- function(t,Linf,a=NULL,gi=NULL) {
    if (length(Linf)==3) { a <- Linf[[2]]
                           gi <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-exp(a-gi*t))
  }
  SOriginal <- Soriginal <- function(t,Linf,a,gi) {
    Linf*exp(-exp(a-gi*t))
  }
  Ricker1 <- function(t,Linf,gi=NULL,ti=NULL) {
    if (length(Linf)==3) { gi <- Linf[[2]]
                           ti <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-exp(-gi*(t-ti)))
  }
  SRicker1 <- function(t,Linf,gi,ti) {
    Linf*exp(-exp(-gi*(t-ti)))
  }
  QD1 <- QuinnDeriso1 <- Ricker2 <- function(t,L0,a=NULL,gi=NULL) {
    if (length(L0)==3) { a <- L0[[2]]
                         gi <- L0[[3]]
                         L0 <- L0[[1]] }
    L0*exp(a*(1-exp(-gi*t)))
  }
  SQD1 <- SQuinnDeriso1 <- SRicker2 <- function(t,L0,a,gi) {
    L0*exp(a*(1-exp(-gi*t)))
  }
  QD2 <- QuinnDeriso2 <- Ricker3 <- function(t,Linf,a=NULL,gi=NULL) {
    if (length(Linf)==3) { a <- Linf[[2]]
                           gi <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-a*exp(-gi*t))
  }
  SQD2 <- SQuinnDeriso2 <- SRicker3 <- function(t,Linf,a,gi) {
    Linf*exp(-a*exp(-gi*t))
  }
  QD3 <- QuinnDeriso3 <- function(t,Linf,gi=NULL,t0=NULL) {
    if (length(Linf)==3) { gi <- Linf[[2]]
                           t0 <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-(1/gi)*exp(-gi*(t-t0)))
  }
  SQD3 <- SQuinnDeriso3 <- function(t,Linf,gi,t0) {
    Linf*exp(-(1/gi)*exp(-gi*(t-t0)))
  }
  ## Tag-return parameterizations (Troynikov) use length-at-marking (Lm) and
  ## time-at-large (dt).
  Troynikov1 <- function(Lm,dt,Linf,gi=NULL) {
    if (length(Linf)==2) { gi=Linf[2]
                           Linf=Linf[1] }
    Linf*((Lm/Linf)^exp(-gi*dt))-Lm
  }
  STroynikov1 <- function(Lm,dt,Linf,gi) {
    Linf*((Lm/Linf)^exp(-gi*dt))-Lm
  }
  Troynikov2 <- function(Lm,dt,Linf,gi=NULL) {
    if (length(Linf)==2) { gi=Linf[2]
                           Linf=Linf[1] }
    Linf*((Lm/Linf)^exp(-gi*dt))
  }
  STroynikov2 <- function(Lm,dt,Linf,gi) {
    Linf*((Lm/Linf)^exp(-gi*dt))
  }
  ## Main function
  param <- match.arg(param)
  comcat <- "parameterization of the Gompertz function.\n\n"
  if (msg) {
    switch(param,
           Original=,original= {
             message("You have chosen the 'Original'/'original' ",comcat,
                     "  E[L|t] = Linf*exp(-exp(a-gi*t))\n\n",
                     "where Linf = asymptotic mean length\n",
                     "        gi = decrease in growth rate at the inflection point\n",
                     "         a = an undefined parameter\n\n")
           },
           Ricker1= {
             message("You have chosen the 'Ricker1' ",comcat,
                     "  E[L|t] = Linf*exp(-exp(-gi*(t-ti)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n",
                     "          ti = time at the inflection point\n\n")
           },
           Ricker2=,QD1=,QuinnDeriso1= {
             message("You have chosen the 'Ricker2'/'QuinnDeriso1'/'QD1' ",comcat,
                     "  E[L|t] = L0*exp(a*(1-exp(-gi*t)))\n\n",
                     "  where L0 = the mean length at time/age 0\n",
                     "        gi = instantaneous growth rate at the inflection point\n",
                     "         a = dimensionless parameter related to growth\n\n")
           },
           Ricker3=,QD2=,QuinnDeriso2= {
             message("You have chosen the 'Ricker3'/'QuinnDeriso2'/'QD2' ",comcat,
                     "  E[L|t] = Linf*exp(-a*exp(-gi*t))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n",
                     "           a = dimensionless parameter related to growth\n\n")
           },
           ## FIX: was 'QuinnDeriso3==', which made this an unnamed switch
           ## alternative (a comparison expression) and errored for msg=TRUE.
           QD3=,QuinnDeriso3= {
             message("You have chosen the 'QuinnDeriso3'/'QD3' ",comcat,
                     "  E[L|t] = Linf*exp(-(1/gi)*exp(-gi*(t-t0)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n",
                     "          t0 = a dimensionless parameter related to time/age\n\n")
           },
           Troynikov1= {
             message("You have chosen the 'Troynikov1' ",comcat,
                     "  E[Lr-Lm|dt] = Linf*((Lm/Linf)^exp(-gi*dt))-Lm\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n")
           },
           Troynikov2= {
             message("You have chosen the 'Troynikov2' ",comcat,
                     "  E[Lr|dt] = Linf*((Lm/Linf)^exp(-gi*dt))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n",
                     "                   dt = time between marking and recapture.\n")
           }
    )
  }
  ## Return the 'simple' (S-prefixed) version when requested
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @rdname growthModels
#' @export
RichardsFuns <- function(param=1,simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen (1-6) parameterization of the Richards
  ## growth function. Each parameterization has a 'flexible' version (all
  ## parameters may be given in the first parameter argument) and a 'simple'
  ## (S-prefixed) version (each parameter given separately; simple=TRUE).
  Richards1 <- function(t,Linf,k=NULL,a=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           a <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1-a*exp(-k*t))^b
  }
  SRichards1 <- function(t,Linf,k,a,b) {
    Linf*(1-a*exp(-k*t))^b
  }
  Richards2 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1-(1/b)*exp(-k*(t-ti)))^b
  }
  SRichards2 <- function(t,Linf,k,ti,b) {
    Linf*(1-(1/b)*exp(-k*(t-ti)))^b
  }
  Richards3 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf/((1+b*exp(-k*(t-ti)))^(1/b))
  }
  SRichards3 <- function(t,Linf,k,ti,b) {
    Linf/((1+b*exp(-k*(t-ti)))^(1/b))
  }
  Richards4 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  SRichards4 <- function(t,Linf,k,ti,b) {
    Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  Richards5 <- function(t,Linf,k=NULL,L0=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           L0 <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))
  }
  SRichards5 <- function(t,Linf,k,L0,b) {
    Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))
  }
  Richards6 <- function(t,Linf,k=NULL,ti=NULL,Lninf=NULL,b=NULL) {
    ## FIX: previously unpacked Lninf from Linf[[3]] (same as ti) and b from
    ## Linf[[4]]; the five values are Linf, k, ti, Lninf, b in order.
    if (length(Linf)==5) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           Lninf <- Linf[[4]]
                           b <- Linf[[5]]
                           Linf <- Linf[[1]] }
    Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  SRichards6 <- function(t,Linf,k,ti,Lninf,b) {
    Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  ## Main function
  if (!param %in% 1:6) stop("'param' must be in 1:6.")
  param <- paste0("Richards",param)
  if (msg) {
    switch(param,
           Richards1= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-a*exp(-k*t))^b\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "           a = a dimensionless shape parameter\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           },
           Richards2= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Linf*(1-(1/b)*exp(-k*(t-ti)))^b\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "          ti = time/age at the inflection point\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           },
           Richards3= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Linf/((1+b*exp(-k*(t-ti)))^(1/b))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "          ti = time/age at the inflection point\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           },
           Richards4= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "          ti = time/age at the inflection point\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           },
           Richards5= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "          L0 = mean length at t=0\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           },
           Richards6= {
             message("You have chosen the '",param,"' parameterization.\n\n",
                     "  E[L|t] = Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))\n\n",
                     "  where Linf = upper asymptotic mean length\n",
                     "           k = a constant that controls the slope at the inflection point\n",
                     "       Lninf = lower asymptotic mean length\n",
                     "          ti = time/age at the inflection point\n",
                     "           b = a constant that controls the y- value of the inflection point\n\n")
           }
    )
  }
  ## Return the 'simple' (S-prefixed) version when requested
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @rdname growthModels
#' @export
logisticFuns <- function(param=c("CJ1","CJ2","Karkach","Haddon","CampanaJones1","CampanaJones2"),
                         simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen parameterization of the logistic growth
  ## function. Each parameterization has a 'flexible' version (all parameters
  ## may be given in the first parameter argument) and a 'simple' (S-prefixed)
  ## version (each parameter given separately; returned when simple=TRUE).
  CJ1 <- CampanaJones1 <- function(t,Linf,gninf=NULL,ti=NULL) {
    if (length(Linf)==3) { gninf <- Linf[[2]]
                           ti <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf/(1+exp(-gninf*(t-ti)))
  }
  SCJ1 <- SCampanaJones1 <- function(t,Linf,gninf,ti) {
    Linf/(1+exp(-gninf*(t-ti)))
  }
  CJ2 <- CampanaJones2 <- function(t,Linf,gninf=NULL,a=NULL) {
    if (length(Linf)==3) { gninf <- Linf[[2]]
                           a <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf/(1+a*exp(-gninf*t))
  }
  SCJ2 <- SCampanaJones2 <- function(t,Linf,gninf,a) {
    Linf/(1+a*exp(-gninf*t))
  }
  Karkach <- function(t,Linf,L0=NULL,gninf=NULL) {
    if (length(Linf)==3) { L0 <- Linf[[2]]
                           gninf <- Linf[[3]]
                           Linf <- Linf[[1]] }
    L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))
  }
  SKarkach <- function(t,Linf,L0,gninf) {
    L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))
  }
  ## Haddon 'inverse logistic' models the growth increment for tag-return data
  ## from length-at-marking (Lm).
  Haddon <- function(Lm,dLmax,L50=NULL,L95=NULL) {
    if (length(dLmax)==3) { L50=dLmax[2]
                            L95=dLmax[3]
                            dLmax=dLmax[1] }
    dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))
  }
  SHaddon <- function(Lm,dLmax,L50,L95) {
    dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))
  }
  ## Main function
  param <- match.arg(param)
  comcat <- "parameterization of the logistic growth function.\n\n"
  if (msg) {
    switch(param,
           CJ1=,CampanaJones1= {
             message("You have chosen the 'CampanaJones1'/'CJ1' ",comcat,
                     "  E[L|t] = Linf/(1+exp(-gninf*(t-ti)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "       gninf = instantaneous growth rate at t=-infinity\n",
                     "          ti = time at the inflection point\n\n")
           },
           CJ2=,CampanaJones2= {
             message("You have chosen the 'CampanaJones2'/'CJ2' ",comcat,
                     "  E[L|t] = Linf/(1+a*exp(-gninf*t))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "       gninf = instantaneous growth rate at t=-infinity\n",
                     "           a = a dimensionless parameter related to growth\n\n")
           },
           Karkach= {
             message("You have chosen the 'Karkach' ",comcat,
                     "  E[L|t] = L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          L0 = mean length at time/age 0\n",
                     "       gninf = instantaneous growth rate at t=-infinity\n\n")
           },
           Haddon= {
             message("You have chosen the 'Haddon Inverse' ",comcat,
                     "  E[Lr-Lm|dt] = dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))\n\n",
                     "  where dLmax = maximum growth increment during the study\n",
                     "          L50 = length at marking to produces a growth increment of 0.5*dLmax\n",
                     "          L95 = length at marking to produces a growth increment of 0.95*dLmax\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n")
           }
    )
  }
  ## Return the 'simple' (S-prefixed) version when requested
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @title The four-parameter growth function from Schnute (1981).
#'
#' @description The four-parameter growth function from Schnute (1981). Use \code{SchnuteModels()} to see the equations for each growth function.
#'
#' @param t A numeric vector of ages over which to model growth.
#' @param case A string that indicates the case of the Schnute growth function to use.
#' @param t1 The (young) age that corresponds to \code{L1}. Set to minimum value in \code{t} by default.
#' @param t3 The (old) age that corresponds to \code{L3}. Set to maximum value in \code{t} by default.
#' @param L1 The mean size/length at \code{t1}.
#' @param L3 The mean size/length at \code{t3}.
#' @param a A dimensionless parameter that is related to the time/age at the inflection point.
#' @param b A dimensionless parameter that is related to size/length at the inflection point.
#'
#' @return \code{Schnute} returns a predicted size given the case of the function and the provided parameter values.
#'
#' \code{SchnuteModels} returns a graphic that uses \code{\link{plotmath}} to show the growth function equation in a pretty format.
#'
#' @author Derek H. Ogle, \email{derek@@derekogle.com}
#'
#' @section IFAR Chapter: None specifically, but 12-Individual Growth is related.
#'
#' @seealso See \code{\link{vbFuns}}, \code{\link{GompertzFuns}}, \code{\link{RichardsFuns}}, and \code{\link{logisticFuns}} for similar functionality for other models.
#'
#' @references Schnute, J. 1981. A versatile growth model with statistically stable parameters. Canadian Journal of Fisheries and Aquatic Sciences 38:1128-1140.
#'
#' @keywords manip
#'
#' @examples
#' ## See the formulae
#' growthFunShow("Schnute",1,plot=TRUE)
#' growthFunShow("Schnute",2,plot=TRUE)
#' growthFunShow("Schnute",3,plot=TRUE)
#' growthFunShow("Schnute",4,plot=TRUE)
#'
#' ## Simple examples
#' ages <- 1:15
#' s1 <- Schnute(ages,case=1,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s2 <- Schnute(ages,case=2,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s3 <- Schnute(ages,case=3,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s4 <- Schnute(ages,case=4,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#'
#' plot(s1~ages,type="l",lwd=2)
#' lines(s2~ages,lwd=2,col="red")
#' lines(s3~ages,lwd=2,col="blue")
#' lines(s4~ages,lwd=2,col="green")
#'
#' @rdname Schnute
#' @export
Schnute <- function(t,case=1,t1=NULL,t3=NULL,L1=NULL,L3=NULL,a=NULL,b=NULL) {
  ## Predicted size at age(s) t from the chosen case (1-4) of the Schnute
  ## (1981) four-parameter growth function. t1/t3 default to the range of t;
  ## L1/L3 are the mean sizes at t1/t3; a and b are dimensionless shape
  ## parameters (a is used only by cases 1-2, b only by cases 1 and 3).
  ## check case
  case <- as.character(case)
  if (!case %in% c("1","2","3","4")) stop("'case' must be 1, 2, 3, or 4.",call.=FALSE)
  ## needed to get around global binding issue
  b <- b
  ## check that required parameters were supplied (informative errors rather
  ## than the cryptic failures / silent numeric(0) that NULLs would produce)
  if (is.null(L1) || is.null(L3)) stop("Both 'L1' and 'L3' must be provided.",call.=FALSE)
  if (case %in% c("1","2") && is.null(a)) stop("'a' must be provided for case 1 or 2.",call.=FALSE)
  if (case %in% c("1","3") && is.null(b)) stop("'b' must be provided for case 1 or 3.",call.=FALSE)
  ## check t1 and t3
  if (length(t)==1) {
    if (is.null(t1)) stop("Must provide a 't1' if 't' is only one value.",call.=FALSE)
    if (is.null(t3)) stop("Must provide a 't3' if 't' is only one value.",call.=FALSE)
  } else {
    if (is.null(t1)) t1 <- min(t,na.rm=TRUE)
    if (is.null(t3)) t3 <- max(t,na.rm=TRUE)
  }
  if (t1==t3) stop("'t1' cannot equal 't3'.",call.=FALSE)
  if (t1>t3) {
    warning("'t1' was greater than 't3'; values reversed.",call.=FALSE)
    tmp <- t3
    t3 <- t1
    t1 <- tmp
  }
  ## check L1 and L3
  if (L1>L3) stop ("'L1' cannot be greater than 'L3'",call.=FALSE)
  ## Compute values based on case
  switch(case,
         "1"={ val <- ((L1^b)+((L3^b)-(L1^b))*((1-exp(-a*(t-t1)))/(1-exp(-a*(t3-t1)))))^(1/b) },
         "2"={ val <- L1*exp(log(L3/L1)*((1-exp(-a*(t-t1)))/(1-exp(-a*(t3-t1))))) },
         "3"={ val <- ((L1^b)+((L3^b)-(L1^b))*((t-t1)/(t3-t1)))^(1/b) },
         "4"={ val <- L1*exp(log(L3/L1)*((t-t1)/(t3-t1))) }
  )
  val
}
#' @rdname growthModels
#' @export
growthFunShow <- function(type=c("vonBertalanffy","Gompertz","Richards","Logistic","Schnute"),
                          param=NULL,plot=FALSE,...) {
  ## Retrieve the plotmath expression for the requested growth function family
  ## and parameterization, optionally displaying it on an otherwise empty plot.
  type <- match.arg(type)
  expr <- switch(type,
                 vonBertalanffy=iSGF_VB(param),
                 Gompertz=iSGF_GOMP(param),
                 Richards=iSGF_RICHARDS(param),
                 Logistic=iSGF_LOGISTIC(param),
                 Schnute=iSGF_SCHNUTE(param))
  if (plot) {
    ## Center the expression in a bare plotting region, restoring par() after
    op <- graphics::par(mar=c(0.1,0.1,0.1,0.1))
    graphics::plot(0,type="n",ylim=c(0,1),xlim=c(0,1),xaxt="n",yaxt="n",
                   xlab="",ylab="",bty="n",...)
    graphics::text(0.5,0.5,expr,...)
    graphics::par(op)
  }
  expr
}
################################################################################
## Internal functions for growth model expressions
################################################################################
iSGF_VB <- function(param=c("Original","original","vonBertalanffy",
                            "Typical","typical","Traditional","traditional","BevertonHolt",
                            "GallucciQuinn","GQ","Mooij","Weisberg",
                            "Schnute","Francis","Laslett","Polacheck",
                            "Somers","Somers2",
                            "Fabens","Fabens2","Wang","Wang2","Wang3")) {
  ## Internal helper: return a plotmath expression for the chosen
  ## parameterization of the von Bertalanffy growth function.
  ## Called by growthFunShow(); synonymous names share a switch branch.
  if(!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  switch(param,
         Typical=,typical=,Traditional=,traditional=,BevertonHolt= {
           expr <- expression(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])},")"))
         },
         Original=,original=,vonBertalanffy= {
           expr <- expression(E(L[t])==L[infinity]~-~(L[infinity]-L[0])*~e^{-Kt})
         },
         GallucciQuinn=,GQ= {
           expr <- expression(E(L[t])==frac(omega,K)*~bgroup("(",1-e^{-K*(t~-~t[0])},")"))
         },
         Mooij= {
           expr <- expression(E(L[t])==L[infinity]~-~(L[infinity]-L[0])*~e^{-frac(omega,L[infinity])*~t})
         },
         Weisberg= {
           expr <- expression(E(L[t])==L[infinity]*bgroup("(",1-e^{-frac(log(2),(t[50]~-~t[0]))*(t~-~t[0])},")"))
         },
         Schnute= {
           expr <- expression(E(L[t])==L[1]+(L[3]-L[1])*~frac(1-e^{-K*(~t~-~t[1])},1-e^{-K*(~t[3]~-~t[1])}))
         },
         Francis= {
           expr <- expression(atop(E(L[t])==L[1]+(L[3]-L[1])*~frac(1-r^{2*frac(t-t[1],t[3]-t[1])},1-r^{2}),
                                   plain("where" )~r==frac(L[3]-L[2],L[2]-L[1])))
         },
         Laslett= {
           expr <- expression(plain("Not Yet Implemented"))
         },
         Polacheck= {
           expr <- expression(plain("Not Yet Implemented"))
         },
         Somers= {
           expr <- expression(atop(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])-S(t)+S(t[0])},")"),
                                   plain("where" )~S(t)==bgroup("(",frac(C*K,2*~pi),")")*~sin(2*pi*(t-t[s]))))
         },
         Somers2= {
           expr <- expression(atop(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])-R(t)+R(t[0])},")"),
                                   plain("where" )~R(t)==bgroup("(",frac(C*K,2*~pi),")")*~sin(2*pi*(t-WP+0.5))))
         },
         Fabens= {
           expr <- expression(E(L[r]-L[m])==(L[infinity]-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Fabens2= {
           expr <- expression(E(L[r])==L[m]+(L[infinity]-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang= {
           ## fix: former expression showed 'L[t]-L[t]' (always zero); the Wang
           ## model uses the deviation from mean length at marking, matching
           ## the corresponding code (Linf+b*(Lm-mean(Lm))-Lm)*(1-exp(-K*dt))
           expr <- expression(E(L[r]-L[m])==(L[infinity]+beta*(L[m]-bar(L)[m])-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang2= {
           expr <- expression(E(L[r]-L[m])==(alpha+beta*L[t])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang3= {
           expr <- expression(E(L[r])==L[m]+(alpha+beta*L[t])*bgroup("(",1-e^{-K*Delta*t},")"))
         })
  expr
}
iSGF_GOMP <- function(param=c("Original","original","Ricker1","Ricker2","Ricker3",
                              "QuinnDeriso1","QuinnDeriso2","QuinnDeriso3","QD1","QD2","QD3",
                              "Troynikov1","Troynikov2")) {
  ## Internal helper: return a plotmath expression for the chosen
  ## parameterization of the Gompertz growth function.
  ## Synonymous parameterizations fall through to a shared branch.
  if(!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  switch(param,
         Original=,original=
           expression(E(L[t])==L[infinity]*~e^{-e^{a-g[i]*t}}),
         Ricker1=
           expression(E(L[t])==L[infinity]*~e^{-e^{-g[i]*(t-t[i])}}),
         Ricker2=,QuinnDeriso1=,QD1=
           expression(E(L[t])==L[0]*~e^{a*bgroup("(",1-e^{-g[i]*t},")")}),
         Ricker3=,QuinnDeriso2=,QD2=
           expression(E(L[t])==L[infinity]*~e^{-a*~e^{-g[i]*t}}),
         QuinnDeriso3=,QD3=
           expression(E(L[t])==L[infinity]*~e^{-~frac(1,g[i])*~e^{-g[i]*~(~t~-~t^{plain("*")})}}),
         Troynikov1=
           expression(E(L[r]-L[m])==L[infinity]*~bgroup("(",frac(L[m],L[infinity]),")")^{e^{-g[i]*Delta*t}}-L[m]),
         Troynikov2=
           expression(E(L[r])==L[infinity]*~bgroup("(",frac(L[m],L[infinity]),")")^{e^{-g[i]*Delta*t}}))
}
iSGF_RICHARDS <- function(param=1:6) {
  ## Internal helper: return a plotmath expression for the chosen
  ## parameterization (1-6) of the Richards growth function.
  ## Parameterizations correspond to Tjorve and Tjorve (2010).
  if (!is.numeric(param)) stop("'param' must be numeric when type='Richards'.",call.=FALSE)
  ## require a single value -- a vector here (e.g., the bare default 1:6)
  ## previously produced an obscure "condition has length > 1" failure
  if (length(param)!=1 || !param %in% 1:6)
    stop("'param' must be from 1-6 when type='Richards'.",call.=FALSE)
  if(param==1){
    expr <- expression(E(L[t])==L[infinity]*~bgroup("(",1-a*e^{-kt},")")^{b})
  } else if (param==2) {
    expr <- expression(E(L[t])==L[infinity]*~bgroup("(",1-frac(1,b)*~e^{-k*(t-t[i])},")")^{~b})
  } else if (param==3) {
    expr <- expression(E(L[t])==frac(L[infinity],bgroup("(",1+b*e^{-k*(t-t[i])},")")^{~frac(1,b)}))
  } else if (param==4) {
    expr <- expression(E(L[t])==L[infinity]*~bgroup("(",1+(b-1)*~e^{-k*(t-t[i])},")")^{~frac(1,1-b)})
  } else if (param==5) {
    expr <- expression(E(L[t])==L[infinity]*~bgroup("[",bgroup("(",1+bgroup("(",frac(L[0],L[infinity]),")")^{1-b}-1,")")*~e^{-k*t},"]")^{~frac(1,1-b)})
  } else {
    expr <- expression(E(L[t])==L[-infinity]+(L[infinity]-L[-infinity])*~bgroup("(",1+(b-1)*~e^{-k*(t-t[i])},")")^{~frac(1,1-b)})
  }
  expr
}
iSGF_LOGISTIC <- function(param=c("CJ1","CJ2","Karkach","Haddon","CampanaJones1","CampanaJones2")) {
  ## Internal helper: return a plotmath expression for the chosen
  ## parameterization of the logistic growth function.
  ## "CJ" abbreviations share a branch with the full Campana-Jones names.
  if(!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  switch(param,
         CJ1=,CampanaJones1=
           expression(E(L[t])==frac(L[infinity],1+g[-infinity]*(t-t[i]))),
         CJ2=,CampanaJones2=
           expression(E(L[t])==frac(L[infinity],1+~ae^{-g[-infinity]*t})),
         Karkach=
           expression(E(L[t])==frac(L[0]*L[infinity],L[0]+(L[infinity]-L[0])*e^{-g[-infinity]*t})),
         Haddon=
           expression(E(L[r]-L[m])==frac(Delta*L[max],1+e^{log(19)*frac(L[m]~-~L[50],L[95]~-~L[50])})))
}
iSGF_SCHNUTE <- function(case=1:4) {
  ## Internal helper: return a plotmath expression for the chosen case (1-4)
  ## of the Schnute (1981) growth model.
  if (!is.numeric(case)) stop("'case' must be numeric when type='Schnute'.",call.=FALSE)
  ## require a single value -- a vector here (e.g., the bare default 1:4)
  ## previously produced an obscure "condition has length > 1" failure
  if (length(case)!=1 || !case %in% 1:4)
    stop("'case' must be from 1-4 when type='Schnute'.",call.=FALSE)
  if(case==1){
    expr <- expression(E(L[t])==bgroup("[",L[1]^{b}+(L[3]^{b}-L[1]^{b})*~frac(1-e^{-a*(~t~-~t[1])},1-e^{-a*(~t[3]~-~t[1])}),"]")^{~frac(1,b)})
  } else if (case==2) {
    expr <- expression(E(L[t])==L[1]*e^{log~bgroup("(",frac(L[3],L[1]),")")*~frac(1-e^{-a*(~t~-~t[1])},1-e^{-a*(~t[3]~-~t[1])})})
  } else if (case==3) {
    expr <- expression(E(L[t])==bgroup("[",L[1]^{b}+(L[3]^{b}-L[1]^{b})*~frac(~t~-~t[1],~t[3]~-~t[1]),"]")^{~frac(1,b)})
  } else {
    expr <- expression(E(L[t])==L[1]*e^{log~bgroup("(",frac(L[3],L[1]),")")*~frac(~t~-~t[1],~t[3]~-~t[1])})
  }
  expr
}
|
/FSA/R/growthModels.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 60,150
|
r
|
#' @name growthModels
#'
#' @title Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions.
#'
#' @description Creates a function for a specific parameterizations of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. Use \code{growthFunShow()} to see the equations for each growth function.
#'
#' @param type A string (in \code{growthFunShow}) that indicates the type of growth function to show.
#' @param param A string (for von Bertalanffy, Gompertz, and logistic) or numeric (for Richards) that indicates the specific parameterization of the growth function See details.
#' @param simple A logical that indicates whether the function will accept all parameter values in the first parameter argument (\code{=FALSE}; DEFAULT) or whether all individual parameters must be specified in separate arguments (\code{=TRUE}).
#' @param msg A logical that indicates whether a message about the growth function and parameter definitions should be output (\code{=TRUE}) or not (\code{=FALSE}; DEFAULT).
#' @param plot A logical that indicates whether the growth function expression should be shown as an equation in a simple plot.
#' @param \dots Not implemented.
#'
#' @return The functions ending in \code{xxxFuns} return a function that can be used to predict fish size given a vector of ages and values for the growth function parameters and, in some parameterizations, values for constants. The result should be saved to an object that is then the function name. When the resulting function is used, the parameters are ordered as shown when the definitions of the parameters are printed after the function is called (if \code{msg=TRUE}). If \code{simple=FALSE} (DEFAULT), then the values for all parameters may be included as a vector in the first parameter argument (but in the same order). Similarly, the values for all constants may be included as a vector in the first constant argument (i.e., \code{t1}). If \code{simple=TRUE}, then all parameters and constants must be declared individually. The resulting function is somewhat easier to read when \code{simple=TRUE}, but is less general for some applications.
#'
#' An expression of the equation for each growth function may be created with \code{growthFunShow}. In this function \code{type=} is used to select the major function type (e.g., von Bertalanffy, Gompertz, Richards, Logistic, Schnute) and \code{param=} is used to select a specific parameterization of that growth function. If \code{plot=TRUE}, then a simple graphic will be created with the equation using \code{\link{plotmath}} for a pretty format.
#'
#' @note Take note of the following for parameterizations (i.e., \code{param}) of each growth function:
#' \itemize{
#' \item von Bertalanffy
#' \itemize{
#' \item The \sQuote{Original} and \sQuote{vonBertalanffy} are synonymous as are \sQuote{Typical}, \sQuote{Traditional}, and \sQuote{BevertonHolt}.
#' }
#' \item Gompertz
#' \itemize{
#' \item The \sQuote{Ricker2} and \sQuote{QuinnDeriso1} are synonymous, as are \sQuote{Ricker3} and \sQuote{QuinnDeriso2}.
#' \item The parameterizations and parameters for the Gompertz function are varied and confusing in the literature. I have attempted to use a uniform set of parameters in these functions, but that makes a direct comparison to the literature difficult. Common sources for Gompertz models are listed in the references below. I make some comments here to aid comparisons to the literature.
#' \item Within FSA, L0 is the mean length at age 0, Linf is the mean asymptotic length, ti is the age at the inflection point, gi is the instantaneous growth rate at the inflection point, t* is a dimensionless parameter related to time/age, and a is a dimensionless parameter related to growth.
#' \item In the Quinn and Deriso (1999) functions (the \sQuote{QuinnDerisoX} functions), the a parameter here is equal to lambda/K there and the gi parameter here is equal to the K parameter there. Also note that their Y is L here.
#' \item In the Ricker (1979)[p. 705] functions (the \sQuote{RickerX} functions), the a parameter here is equal to k there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. In the Ricker (1979) functions as presented in Campana and Jones (1992), the a parameter here is equal to the k parameter there and the gi parameter here is equal to the G parameter there. Also note that their X is L here.
#' \item The function in Ricker (1975)[p. 232] is the same as \sQuote{Ricker2} where the a parameter here is equal to G there and the gi parameter here is equal to the g parameter there. Also note that their w is L here.
#' \item The function in Quist et al. (2012)[p. 714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there.
#' \item The function in Katsanevakis and Maravelias (2008) is the same as \sQuote{Ricker1} where the gi parameter here is equal to k2 parameter there and the ti parameter here is equal to the t2 parameter there.
#' }
#' \item Richards
#' \itemize{
#' \item Within FSA, Linf is the mean asymptotic length, ti is the age at the inflection point, k is related to growth (slope at the inflection point), b is related to the vertical position of the inflection point, and L0 is the mean length at age-0.
#' \item The parameterizations (1-6) correspond to functions/equations 1, 4, 5, 6, 7, and 8, respectively, in Tjorve and Tjorve (2010). Note that their A, S, k, d, and B are Linf, a, k, b, and L0, respectively, here (in FSA).
#' }
#' \item logistic
#' \itemize{
#' \item Within FSA, L0 is the mean length at age 0, Linf is the mean asymptotic length, ti is the age at the inflection point, and gninf is the instantaneous growth rate at negative infinity.
#' }
#' }
#'
#' @author Derek H. Ogle, \email{derek@@derekogle.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}.
#'
#' @section IFAR Chapter: 12-Individual Growth.
#'
#' @seealso See \code{\link{Schnute}} for an implementation of the Schnute (1981) model.
#'
#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL.
#'
#' Campana, S.E. and C.M. Jones. 1992. Analysis of otolith microstructure data. Pages 73-100 In D.K. Stevenson and S.E. Campana, editors. Otolith microstructure examination and analysis. Canadian Special Publication of Fisheries and Aquatic Sciences 117. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/141734.pdf.]
#'
#' Fabens, A. 1965. Properties and fitting of the von Bertalanffy growth curve. Growth 29:265-289.
#'
#' Francis, R.I.C.C. 1988. Are growth parameters estimated from tagging and age-length data comparable? Canadian Journal of Fisheries and Aquatic Sciences, 45:936-942.
#'
#' Gallucci, V.F. and T.J. Quinn II. 1979. Reparameterizing, fitting, and testing a simple growth model. Transactions of the American Fisheries Society, 108:14-25.
#'
#' Garcia-Berthou, E., G. Carmona-Catot, R. Merciai, and D.H. Ogle. A technical note on seasonal growth models. Reviews in Fish Biology and Fisheries 22:635-640. [Was (is?) from https://www.researchgate.net/publication/257658359_A_technical_note_on_seasonal_growth_models.]
#'
#' Gompertz, B. 1825. On the nature of the function expressive of the law of human mortality, and on a new method of determining the value of life contingencies. Philosophical Transactions of the Royal Society of London. 115:513-583.
#'
#' Haddon, M., C. Mundy, and D. Tarbath. 2008. Using an inverse-logistic model to describe growth increments of blacklip abalone (\emph{Haliotis rubra}) in Tasmania. Fishery Bulletin 106:58-71. [Was (is?) from http://aquaticcommons.org/8857/1/haddon_Fish_Bull_2008.pdf.]
#'
#' Karkach, A. S. 2006. Trajectories and models of individual growth. Demographic Research 15:347-400. [Was (is?) from http://www.demographic-research.org/volumes/vol15/12/15-12.pdf.]
#'
#' Katsanevakis, S. and C.D. Maravelias. 2008. Modelling fish growth: multi-model inference as a better alternative to a priori using von Bertalanffy equation. Fish and Fisheries 9:178-187.
#'
#' Mooij, W.M., J.M. Van Rooij, and S. Wijnhoven. 1999. Analysis and comparison of fish growth from small samples of length-at-age data: Detection of sexual dimorphism in Eurasian perch as an example. Transactions of the American Fisheries Society 128:483-490.
#'
#' Polacheck, T., J.P. Eveson, and G.M. Laslett. 2004. Increase in growth rates of southern bluefin tuna (\emph{Thunnus maccoyii}) over four decades: 1960 to 2000. Canadian Journal of Fisheries and Aquatic Sciences, 61:307-322.
#'
#' Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford University Press, New York, New York. 542 pages.
#'
#' Quist, M.C., M.A. Pegg, and D.R. DeVries. 2012. Age and Growth. Chapter 15 in A.V. Zale, D.L Parrish, and T.M. Sutton, Editors Fisheries Techniques, Third Edition. American Fisheries Society, Bethesda, MD.
#'
#' Richards, F. J. 1959. A flexible growth function for empirical use. Journal of Experimental Biology 10:290-300.
#'
#' Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]
#'
#' Ricker, W.E. 1979. Growth rates and models. Pages 677-743 In W.S. Hoar, D.J. Randall, and J.R. Brett, editors. Fish Physiology, Vol. 8: Bioenergetics and Growth. Academic Press, NY, NY. [Was (is?) from https://books.google.com/books?id=CB1qu2VbKwQC&pg=PA705&lpg=PA705&dq=Gompertz+fish&source=bl&ots=y34lhFP4IU&sig=EM_DGEQMPGIn_DlgTcGIi_wbItE&hl=en&sa=X&ei=QmM4VZK6EpDAgwTt24CABw&ved=0CE8Q6AEwBw#v=onepage&q=Gompertz\%20fish&f=false.]
#'
#' Schnute, J. 1981. A versatile growth model with statistically stable parameters. Canadian Journal of Fisheries and Aquatic Sciences, 38:1128-1140.
#'
#' Somers, I. F. 1988. On a seasonally oscillating growth function. Fishbyte 6(1):8-11. [Was (is?) from http://www.worldfishcenter.org/Naga/na_2914.pdf.]
#'
#' Tjorve, E. and K. M. C. Tjorve. 2010. A unified approach to the Richards-model family for use in growth analyses: Why we need only two model forms. Journal of Theoretical Biology 267:417-425. [Was (is?) from https://www.researchgate.net/profile/Even_Tjorve/publication/46218377_A_unified_approach_to_the_Richards-model_family_for_use_in_growth_analyses_why_we_need_only_two_model_forms/links/54ba83b80cf29e0cb04bd24e.pdf.]
#'
#' Troynikov, V. S., R. W. Day, and A. M. Leorke. Estimation of seasonal growth parameters using a stochastic Gompertz model for tagging data. Journal of Shellfish Research 17:833-838. [Was (is?) from https://www.researchgate.net/profile/Robert_Day2/publication/249340562_Estimation_of_seasonal_growth_parameters_using_a_stochastic_gompertz_model_for_tagging_data/links/54200fa30cf203f155c2a08a.pdf.]
#'
#' Vaughan, D. S. and T. E. Helser. 1990. Status of the red drum stock of the Atlantic coast: Stock assessment report for 1989. NOAA Technical Memorandum NMFS-SEFC-263, 117 p. [Was (is?) from http://docs.lib.noaa.gov/noaa_documents/NMFS/SEFSC/TM_NMFS_SEFSC/NMFS_SEFSC_TM_263.pdf.]
#'
#' Wang, Y.-G. 1998. An improved Fabens method for estimation of growth parameters in the von Bertalanffy model with individual asymptotes. Canadian Journal of Fisheries and Aquatic Sciences 55:397-400.
#'
#' Weisberg, S., G.R. Spangler, and L. S. Richmond. 2010. Mixed effects models for fish growth. Canadian Journal of Fisheries And Aquatic Sciences 67:269-277.
#'
#' Winsor, C.P. 1932. The Gompertz curve as a growth curve. Proceedings of the National Academy of Sciences. 18:1-8. [Was (is?) from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1076153/pdf/pnas01729-0009.pdf.]
#'
#' @keywords manip hplot
#'
#' @examples
#' ###########################################################
#' ## Simple Examples -- Von B
#' ( vb1 <- vbFuns() )
#' ages <- 0:20
#' plot(vb1(ages,Linf=20,K=0.3,t0=-0.2)~ages,type="b",pch=19)
#' ( vb2 <- vbFuns("Francis") )
#' plot(vb2(ages,L1=10,L2=19,L3=20,t1=2,t3=18)~ages,type="b",pch=19)
#' ( vb2c <- vbFuns("Francis",simple=TRUE) ) # compare to vb2
#'
#' ## Simple Examples -- Gompertz
#' ( gomp1 <- GompertzFuns() )
#' plot(gomp1(ages,Linf=800,gi=0.5,ti=5)~ages,type="b",pch=19)
#' ( gomp2 <- GompertzFuns("Ricker2") )
#' plot(gomp2(ages,L0=2,a=6,gi=0.5)~ages,type="b",pch=19)
#' ( gomp2c <- GompertzFuns("Ricker2",simple=TRUE) ) # compare to gomp2
#' ( gompT <- GompertzFuns("Troynikov1"))
#'
#' ## Simple Examples -- Richards
#' ( rich1 <- RichardsFuns() )
#' plot(rich1(ages,Linf=800,k=0.5,a=1,b=6)~ages,type="b",pch=19)
#' ( rich2 <- RichardsFuns(2) )
#' plot(rich2(ages,Linf=800,k=0.5,ti=3,b=6)~ages,type="b",pch=19)
#' ( rich3 <- RichardsFuns(3) )
#' plot(rich3(ages,Linf=800,k=0.5,ti=3,b=0.15)~ages,type="b",pch=19)
#' ( rich4 <- RichardsFuns(4) )
#' plot(rich4(ages,Linf=800,k=0.5,ti=3,b=0.95)~ages,type="b",pch=19)
#' lines(rich4(ages,Linf=800,k=0.5,ti=3,b=1.5)~ages,type="b",pch=19,col="blue")
#' ( rich5 <- RichardsFuns(5) )
#' plot(rich5(ages,Linf=800,k=0.5,L0=50,b=1.5)~ages,type="b",pch=19)
#' ( rich6 <- RichardsFuns(6) )
#' plot(rich6(ages,Linf=800,k=0.5,ti=3,Lninf=50,b=1.5)~ages,type="b",pch=19)
#' ( rich2c <- RichardsFuns(2,simple=TRUE) ) # compare to rich2
#'
#' ## Simple Examples -- Logistic
#' ( log1 <- logisticFuns() )
#' plot(log1(ages,Linf=800,gninf=0.5,ti=5)~ages,type="b",pch=19)
#' ( log2 <- logisticFuns("CJ2") )
#' plot(log2(ages,Linf=800,gninf=0.5,a=10)~ages,type="b",pch=19)
#' ( log2c <- logisticFuns("CJ2",simple=TRUE) ) # compare to log2
#' ( log3 <- logisticFuns("Karkach") )
#' plot(log3(ages,L0=10,Linf=800,gninf=0.5)~ages,type="b",pch=19)
#' ( log4 <- logisticFuns("Haddon") )
#'
#'
#' ###########################################################
#' ## Examples of fitting
#' ## After the last example a plot is constructed with three
#' ## or four lines on top of each other illustrating that the
#' ## parameterizations all produce the same fitted values.
#' ## However, observe the correlations in the summary() results.
#'
#' ## Von B
#' data(SpotVA1)
#' # Fitting the typical parameterization of the von B function
#' fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1,start=vbStarts(tl~age,data=SpotVA1))
#' summary(fit1,correlation=TRUE)
#' plot(tl~age,data=SpotVA1,pch=19)
#' curve(vb1(x,Linf=coef(fit1)),from=0,to=5,col="red",lwd=10,add=TRUE)
#'
#' # Fitting the Francis parameterization of the von B function
#' fit2 <- nls(tl~vb2c(age,L1,L2,L3,t1=0,t3=5),data=SpotVA1,
#' start=vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5)))
#' summary(fit2,correlation=TRUE)
#' curve(vb2c(x,L1=coef(fit2)[1],L2=coef(fit2)[2],L3=coef(fit2)[3],t1=0,t3=5),
#' from=0,to=5,col="blue",lwd=5,add=TRUE)
#'
#' # Fitting the Schnute parameterization of the von B function
#' vb3 <- vbFuns("Schnute")
#' fit3 <- nls(tl~vb3(age,L1,L3,K,t1=0,t3=4),data=SpotVA1,
#' start=vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,4)))
#' summary(fit3,correlation=TRUE)
#' curve(vb3(x,L1=coef(fit3),t1=c(0,4)),from=0,to=5,col="green",lwd=2,add=TRUE)
#'
#' ## Gompertz
#' # Make some fake data using the original parameterization
#' gompO <- GompertzFuns("original")
#' # setup ages, sample sizes (general reduction in numbers with
#' # increasing age), and additive SD to model
#' t <- 1:15
#' n <- c(10,40,35,25,12,10,10,8,6,5,3,3,3,2,2)
#' sd <- 15
#' # expand ages
#' ages <- rep(t,n)
#' # get lengths from gompertz and a random error for individuals
#' lens <- gompO(ages,Linf=450,a=1,gi=0.3)+rnorm(length(ages),0,sd)
#' # put together as a data.frame
#' df <- data.frame(age=ages,len=round(lens,0))
#'
#' # Fit first Ricker parameterization
#' fit1 <- nls(len~gomp1(age,Linf,gi,ti),data=df,start=list(Linf=500,gi=0.3,ti=3))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(gomp1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit third Ricker parameterization
#' fit2 <- nls(len~gomp2(age,L0,a,gi),data=df,start=list(L0=30,a=3,gi=0.3))
#' summary(fit2,correlation=TRUE)
#' curve(gomp2(x,L0=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE)
#'
#' # Fit third Quinn and Deriso parameterization (using simple=TRUE model)
#' gomp3 <- GompertzFuns("QD3",simple=TRUE)
#' fit3 <- nls(len~gomp3(age,Linf,gi,t0),data=df,start=list(Linf=500,gi=0.3,t0=0))
#' summary(fit3,correlation=TRUE)
#' curve(gomp3(x,Linf=coef(fit3)[1],gi=coef(fit3)[2],t0=coef(fit3)[3]),
#' from=0,to=15,col="green",lwd=2,add=TRUE)
#'
#' ## Richards
#' # Fit first Richards parameterization
#' fit1 <- nls(len~rich1(age,Linf,k,a,b),data=df,start=list(Linf=450,k=0.25,a=0.65,b=3))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(rich1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit second Richards parameterization
#' fit2 <- nls(len~rich2(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=3))
#' summary(fit2,correlation=TRUE)
#' curve(rich2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=7,add=TRUE)
#'
#' # Fit third Richards parameterization
#' fit3 <- nls(len~rich3(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=-0.3))
#' summary(fit3,correlation=TRUE)
#' curve(rich3(x,Linf=coef(fit3)),from=0,to=15,col="green",lwd=4,add=TRUE)
#'
#' # Fit fourth Richards parameterization
#' fit4 <- nls(len~rich4(age,Linf,k,ti,b),data=df,start=list(Linf=450,k=0.25,ti=3,b=0.7))
#' summary(fit4,correlation=TRUE)
#' curve(rich4(x,Linf=coef(fit4)),from=0,to=15,col="black",lwd=1,add=TRUE)
#'
#' ## Logistic
#' # Fit first Campana-Jones parameterization
#' fit1 <- nls(len~log1(age,Linf,gninf,ti),data=df,start=list(Linf=450,gninf=0.45,ti=4))
#' summary(fit1,correlation=TRUE)
#' plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5))
#' curve(log1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE)
#'
#' # Fit second Campana-Jones parameterization
#' fit2 <- nls(len~log2(age,Linf,gninf,a),data=df,start=list(Linf=450,gninf=0.45,a=7))
#' summary(fit2,correlation=TRUE)
#' curve(log2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE)
#'
#' # Fit Karkach parameterization (using simple=TRUE model)
#' log3 <- logisticFuns("Karkach",simple=TRUE)
#' fit3 <- nls(len~log3(age,Linf,L0,gninf),data=df,start=list(Linf=450,L0=30,gninf=0.45))
#' summary(fit3,correlation=TRUE)
#' curve(log3(x,Linf=coef(fit3)[1],L0=coef(fit3)[2],gninf=coef(fit3)[3]),
#' from=0,to=15,col="green",lwd=2,add=TRUE)
#'
#'
#' #############################################################################
#' ## Create expressions of the models
#' #############################################################################
#' # Typical von Bertalanffy ... Show as a stand-alone plot
#' growthFunShow("vonBertalanffy","Typical",plot=TRUE)
#' # Get and save the expression
#' ( tmp <- growthFunShow("vonBertalanffy","Typical") )
#' # Use expression as title on a plot
#' lens <- vb1(ages,Linf=20,K=0.3,t0=-0.2)
#' plot(lens~ages,type="b",pch=19,main=tmp)
#' # Put expression in the main plot
#' text(10,5,tmp)
#' # Put multiple expressions on a plot
#' op <- par(mar=c(0.1,0.1,0.1,0.1))
#' plot(0,type="n",xlab="",ylab="",xlim=c(0,1),ylim=c(0,3),xaxt="n",yaxt="n")
#' text(0,2.5,"Original:",pos=4)
#' text(0.5,2.5,growthFunShow("vonBertalanffy","Original"))
#' text(0,1.5,"Typical:",pos=4)
#' text(0.5,1.5,growthFunShow("vonBertalanffy","Typical"))
#' text(0,0.5,"Francis:",pos=4)
#' text(0.5,0.5,growthFunShow("vonBertalanffy","Francis"))
#' par(op)
NULL
#' @rdname growthModels
#' @export
vbFuns <- function(param=c("Typical","typical","Traditional","traditional","BevertonHolt",
"Original","original","vonBertalanffy",
"GQ","GallucciQuinn","Mooij","Weisberg",
"Schnute","Francis","Laslett","Polacheck",
"Somers","Somers2",
"Fabens","Fabens2","Wang","Wang2","Wang3"),
simple=FALSE,msg=FALSE) {
Typical <- typical <- Traditional <- traditional <- BevertonHolt <- function(t,Linf,K=NULL,t0=NULL) {
if (length(Linf)==3) { K <- Linf[[2]]
t0 <- Linf[[3]]
Linf <- Linf[[1]] }
Linf*(1-exp(-K*(t-t0)))
}
STypical <- Stypical <- STraditional <- Straditional <- SBevertonHolt <- function(t,Linf,K,t0) {
Linf*(1-exp(-K*(t-t0)))
}
Original <- original <- vonBertalanffy <- function(t,Linf,L0=NULL,K=NULL) {
if (length(Linf)==3) { L0 <- Linf[[2]]
K <- Linf[[3]]
Linf <- Linf[[1]] }
Linf-(Linf-L0)*exp(-K*t)
}
SOriginal <- Soriginal <- SvonBertalanffy <- function(t,Linf,L0,K) {
Linf-(Linf-L0)*exp(-K*t)
}
GQ <- GallucciQuinn <- function(t,omega,K=NULL,t0=NULL) {
if (length(omega)==3) { K <- omega[[2]]
t0 <- omega[[3]]
omega <- omega[[1]] }
(omega/K)*(1-exp(-K*(t-t0)))
}
SGQ <- SGallucciQuinn <- function(t,omega,K,t0) {
(omega/K)*(1-exp(-K*(t-t0)))
}
Mooij <- function(t,Linf,L0=NULL,omega=NULL) {
if (length(Linf)==3) { L0 <- Linf[[2]]
omega <- Linf[[3]]
Linf <- Linf[[1]] }
Linf-(Linf-L0)*exp(-(omega/Linf)*t)
}
SMooij <- function(t,Linf,L0,omega) {
Linf-(Linf-L0)*exp(-(omega/Linf)*t)
}
Weisberg <- function(t,Linf,t50=NULL,t0=NULL) {
if (length(Linf)==3) { t50 <- Linf[[2]]
t0 <- Linf[[3]]
Linf <- Linf[[1]] }
Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))
}
SWeisberg <- function(t,Linf,t50,t0) {
Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))
}
Schnute <- function(t,L1,L3=NULL,K=NULL,t1,t3=NULL) {
if (length(L1)==3) { L3 <- L1[[2]]; K <- L1[[3]]; L1 <- L1[[1]] }
if (length(t1)==2) { t3 <- t1[[2]]; t1 <- t1[[1]] }
L1+(L3-L1)*((1-exp(-K*(t-t1)))/(1-exp(-K*(t3-t1))))
}
SSchnute <- function(t,L1,L3,K,t1,t3) {
L1+(L3-L1)*((1-exp(-K*(t-t1)))/(1-exp(-K*(t3-t1))))
}
Francis <- function(t,L1,L2=NULL,L3=NULL,t1,t3=NULL) {
if (length(L1)==3) { L2 <- L1[[2]]; L3 <- L1[[3]]; L1 <- L1[[1]] }
if (length(t1)==2) { t3 <- t1[[2]]; t1 <- t1[[1]] }
r <- (L3-L2)/(L2-L1)
L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
}
SFrancis <- function(t,L1,L2,L3,t1,t3) {
r <- (L3-L2)/(L2-L1)
L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
}
Laslett <- Polacheck <- function(t,Linf,K1,K2,t0,a,b) {
if (length(Linf)==6) { K1 <- Linf[[2]]; K2 <- Linf[[3]]
t0 <- Linf[[4]]; a <- Linf[[5]]
b <- Linf[[6]]; Linf <- Linf[[1]] }
Linf*(1-exp(-K2*(t-t0))*((1+exp(-b*(t-t0-a)))/(1+exp(a*b)))^(-(K2-K1)/b))
}
SLaslett <- SPolacheck <- function(t,Linf,K1,K2,t0,a,b) {
Linf*(1-exp(-K2*(t-t0))*((1+exp(-b*(t-t0-a)))/(1+exp(a*b)))^(-(K2-K1)/b))
}
Somers <- function(t,Linf,K,t0,C,ts) {
if (length(Linf)==5) { K <- Linf[[2]]; t0 <- Linf[[3]]
C <- Linf[[4]]; ts <- Linf[[5]]
Linf <- Linf[[1]] }
St <- (C*K)/(2*pi)*sin(2*pi*(t-ts))
Sto <- (C*K)/(2*pi)*sin(2*pi*(t0-ts))
Linf*(1-exp(-K*(t-t0)-St+Sto))
}
SSomers <- function(t,Linf,K,t0,C,ts) {
Linf*(1-exp(-K*(t-t0)-(C*K)/(2*pi)*sin(2*pi*(t-ts))+(C*K)/(2*pi)*sin(2*pi*(t0-ts))))
}
Somers2 <- function(t,Linf,K,t0,C,WP) {
if (length(Linf)==5) { K <- Linf[[2]]; t0 <- Linf[[3]]
C <- Linf[[4]]; WP <- Linf[[5]]
Linf <- Linf[[1]] }
Rt <- (C*K)/(2*pi)*sin(2*pi*(t-WP+0.5))
Rto <- (C*K)/(2*pi)*sin(2*pi*(t0-WP+0.5))
Linf*(1-exp(-K*(t-t0)-Rt+Rto))
}
SSomers2 <- function(t,Linf,K,t0,C,WP) {
Linf*(1-exp(-K*(t-t0)-(C*K)/(2*pi)*sin(2*pi*(t-WP+0.5))+(C*K)/(2*pi)*sin(2*pi*(t0-WP+0.5))))
}
Fabens <- function(Lm,dt,Linf,K) {
if (length(Linf)==2) { K <- Linf[[2]]; Linf <- Linf[[1]] }
Lm+(Linf-Lm)*(1-exp(-K*dt))
}
SFabens <- function(Lm,dt,Linf,K) {
Lm+(Linf-Lm)*(1-exp(-K*dt))
}
Fabens2 <- function(Lm,dt,Linf,K) {
if (length(Linf)==2) { K <- Linf[[2]]; Linf <- Linf[[1]] }
(Linf-Lm)*(1-exp(-K*dt))
}
SFabens2 <- function(Lm,dt,Linf,K) {
(Linf-Lm)*(1-exp(-K*dt))
}
Wang <- function(Lm,dt,Linf,K,b) {
if (length(Linf)==3) { b <- Linf[[3]]; K <- Linf[[2]]
Linf <- Linf[[1]] }
(Linf+b*(Lm-mean(Lm))-Lm)*(1-exp(-K*dt))
}
SWang <- function(Lm,dt,Linf,K,b) {
(Linf+b*(Lm-mean(Lm))-Lm)*(1-exp(-K*dt))
}
Wang2 <- function(Lm,dt,K,a,b) {
if (length(K)==3) { b <- K[[3]]; a <- K[[2]]; K <- K[[1]] }
(a+b*Lm)*(1-exp(-K*dt))
}
SWang2 <- function(Lm,dt,K,a,b) {
(a+b*Lm)*(1-exp(-K*dt))
}
Wang3 <- function(Lm,dt,K,a,b) {
if (length(K)==3) { b <- K[[3]]; a <- K[[2]]; K <- K[[1]] }
Lm+(a+b*Lm)*(1-exp(-K*dt))
}
SWang3 <- function(Lm,dt,K,a,b) {
Lm+(a+b*Lm)*(1-exp(-K*dt))
}
param <- match.arg(param)
if (msg) {
switch(param,
Typical=,typical=,Traditional=,traditional=,BevertonHolt= {
message("You have chosen the 'Typical'/'typical', 'Traditional'/'traditional', or 'BevertonHolt' parameterization.\n\n",
" E[L|t] = Linf*(1-exp(-K*(t-t0)))\n\n",
" where Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
},
Original=,original=,vonBertalanffy={
message("You have chosen the 'Original'/'original' or 'vonBertalanffy` parameterization.\n\n",
" E[L|t] = Linf-(Linf-L0)*exp(-K*t)\n\n",
" where Linf = asymptotic mean length\n",
" L0 = the mean length at age-0 (i.e., hatching or birth)\n",
" K = exponential rate of approach to Linf\n\n")
},
Francis={
message("You have chosen the 'Francis' parameterization.\n\n",
" E[L|t] = L1+(L3-L1)*[(1-r^(2*[(t-t1)/(t3-t1)]))/(1-r^2)]\n\n",
" where r = [(L3-L2)/(L2-L1)] and\n\n",
" L1 = the mean length at the first (small) reference age\n",
" L2 = the mean length at the intermediate reference age\n",
" L3 = the mean length at the third (large) reference age\n\n",
"You must also supply the constant values (i.e., they are NOT model parameters) for\n",
" t1 = the first (usually a younger) reference age\n",
" t3 = the third (usually an older) reference age\n\n")
},
GQ=,GallucciQuinn={
message("You have chosen the 'GQ' or 'GallucciQuinn' parameterization.\n\n",
" E[L|t] = [omega/K]*(1-exp(-K*(t-t0)))\n\n",
" where omega = growth rate near t0\n",
" K = exponential rate of approach to Linf\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
},
Mooij={
message("You have chosen the 'Mooij' parameterization.\n\n",
" E[L|t] = Linf-(Linf-L0)*exp(-(omega/Linf)*t)\n\n",
" where Linf = asymptotic mean length\n",
" L0 = the mean length at age-0 (i.e., hatching or birth)\n",
" omega = growth rate near L0\n\n")
},
Weisberg= {
message("You have chosen the 'Weisberg' parameterization.\n\n",
" E[L|t] = Linf*(1-exp(-(log(2)/(t50-t0))*(t-t0)))\n\n",
" where Linf = asymptotic mean length\n",
" t50 = age when half of Linf is reached\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n\n")
},
Schnute={
message("You have chosen the 'Schnute' parameterization.\n\n",
" E[L|t] = L1+(L2-L1)*[(1-exp(-K*(t-t1)))/(1-exp(-K*(t2-t1)))]\n\n",
" where L1 = the mean length at the youngest age in the sample\n",
" L2 = the mean length at the oldest age in the sample\n",
" K = exponential rate of approach to Linf\n\n",
" You must also supply the constant values (i.e., they are NOT model parameters) for\n",
" t1 = the youngest age in the sample\n",
" t2 = the oldest age in the sample\n\n")
},
Laslett=,Polacheck={
message("You have chosen the 'Laslett/Polacheck' 'double' parameterization.\n\n",
" E[L|t] = Linf*[1-exp(-K2*(t-to))((1+exp(-b(t-t0-a)))/(1+exp(ab)))^(-(K2-K1)/b)]\n\n",
" where Linf = asymptotic mean length\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n",
" K1 = the first (younger ages) exponential rate of approach to Linf\n",
" K2 = the second (older ages) exponential rate of approach to Linf\n",
" b = governs the rate of transition from K1 to K2\n",
" a = the central age of the transition from K1 to K2\n\n")
},
Somers={
message("You have chosen the 'Somers Seasonal' parameterization.\n\n",
" E[L|t] = Linf*(1-exp(-K*(t-to)-St+St0))\n\n",
" where St = (CK/2*pi)*sin(2*pi*(t-ts)) and\n",
" St0 = (CK/2*pi)*sin(2*pi*(t0-ts)) and\n\n",
" and Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n",
" C = proportional growth depression at 'winter peak'\n",
" ts = time from t=0 until the first growth oscillation begins.\n\n")
},
Somers2={
message("You have chosen the modified 'Somers2 Seasonal' parameterization.\n\n",
" E[L|t] = Linf*(1-exp(-K*(t-to)-Rt+Rt0))\n\n",
" where Rt = (CK/2*pi)*sin(2*pi*(t-WP+0.5)) and\n",
" Rt0 = (CK/2*pi)*sin(2*pi*(t0-WP+0.5)) and\n\n",
" and Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n",
" t0 = the theoretical age when length = 0 (a modeling artifact)\n",
" C = proportional growth depression at 'winter peak'\n",
" WP = the 'winter peak' (point of slowest growth).\n\n")
},
Fabens={
message("You have chosen the 'Fabens' parameterization for tag-return data.\n\n",
" E[Lr|Lm,dt] = Lm + (Linf-Lm)*(1-exp(-K*dt))\n\n",
" where Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n\n",
" and the data are Lr = length at time of recapture\n",
" Lm = length at time of marking\n",
" dt = time between marking and recapture.\n\n")
},
Fabens2={
message("You have chosen the 'Fabens2' parameterization for tag-return data.\n\n",
" E[Lr|Lm,dt] = (Linf-Lm)*(1-exp(-K*dt))\n\n",
" where Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n\n",
" and the data are Lr = length at time of recapture\n",
" Lm = length at time of marking\n",
" dt = time between marking and recapture.\n\n")
},
Wang={
message("You have chosen the 'Wang' parameterization for tag-return data.\n\n",
" E[Lr-Lm|Lm,dt] = (Linf+b(Lm-E(Lm))-Lm)*(1-exp(-K*dt))\n\n",
" where Linf = asymptotic mean length\n",
" K = exponential rate of approach to Linf\n",
" b = parameter\n\n",
" and the data are Lr = length at time of recapture\n",
" Lm = length at time of marking\n",
" dt = time between marking and recapture.\n\n",
" and with E(Lm) = expectation (i.e., mean) of Lm.\n\n")
},
Wang2={
message("You have chosen the 'Wang2' parameterization for tag-return data.\n\n",
" E[Lr-Lm|Lm,dt] = (a+bLm)*(1-exp(-K*dt))\n\n",
" where K = exponential rate of approach to Linf\n",
" a, b = parameters\n\n",
" and the data are Lr = length at time of recapture\n",
" Lm = length at time of marking\n",
" dt = time between marking and recapture.\n\n")
},
Wang3={
message("You have chosen the 'Wang3' parameterization for tag-return data.\n\n",
" E[Lr|Lm,dt] = Lm+(a+bLm)*(1-exp(-K*dt))\n\n",
" where K = exponential rate of approach to Linf\n",
" a, b = parameters\n\n",
" and the data are Lr = length at time of recapture\n",
" Lm = length at time of marking\n",
" dt = time between marking and recapture.\n\n")
}
)
}
if (simple) param <- paste("S",param,sep="")
get(param)
}
#' @rdname growthModels
#' @export
GompertzFuns <- function(param=c("Ricker1","Ricker2","Ricker3",
                                 "QuinnDeriso1","QuinnDeriso2","QuinnDeriso3",
                                 "QD1","QD2","QD3",
                                 "Original","original",
                                 "Troynikov1","Troynikov2"),
                         simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen parameterization of the Gompertz growth
  ## model.  The default versions accept either individual parameters or all
  ## parameters bundled in the first parameter argument (convenient with
  ## coef() results); the S* ('simple') versions require individual parameters.
  ## If msg=TRUE, a description of the chosen parameterization is printed.
  Original <- original <- function(t,Linf,a=NULL,gi=NULL) {
    if (length(Linf)==3) { a <- Linf[[2]]
                           gi <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-exp(a-gi*t))
  }
  SOriginal <- Soriginal <- function(t,Linf,a,gi) {
    Linf*exp(-exp(a-gi*t))
  }
  Ricker1 <- function(t,Linf,gi=NULL,ti=NULL) {
    if (length(Linf)==3) { gi <- Linf[[2]]
                           ti <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-exp(-gi*(t-ti)))
  }
  SRicker1 <- function(t,Linf,gi,ti) {
    Linf*exp(-exp(-gi*(t-ti)))
  }
  QD1 <- QuinnDeriso1 <- Ricker2 <- function(t,L0,a=NULL,gi=NULL) {
    if (length(L0)==3) { a <- L0[[2]]
                         gi <- L0[[3]]
                         L0 <- L0[[1]] }
    L0*exp(a*(1-exp(-gi*t)))
  }
  SQD1 <- SQuinnDeriso1 <- SRicker2 <- function(t,L0,a,gi) {
    L0*exp(a*(1-exp(-gi*t)))
  }
  QD2 <- QuinnDeriso2 <- Ricker3 <- function(t,Linf,a=NULL,gi=NULL) {
    if (length(Linf)==3) { a <- Linf[[2]]
                           gi <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-a*exp(-gi*t))
  }
  SQD2 <- SQuinnDeriso2 <- SRicker3 <- function(t,Linf,a,gi) {
    Linf*exp(-a*exp(-gi*t))
  }
  QD3 <- QuinnDeriso3 <- function(t,Linf,gi=NULL,t0=NULL) {
    if (length(Linf)==3) { gi <- Linf[[2]]
                           t0 <- Linf[[3]]
                           Linf <- Linf[[1]] }
    Linf*exp(-(1/gi)*exp(-gi*(t-t0)))
  }
  SQD3 <- SQuinnDeriso3 <- function(t,Linf,gi,t0) {
    Linf*exp(-(1/gi)*exp(-gi*(t-t0)))
  }
  ## Tag-return versions (Troynikov): model the growth increment (1) or the
  ## length at recapture (2) from length at marking and time at liberty.
  Troynikov1 <- function(Lm,dt,Linf,gi=NULL) {
    if (length(Linf)==2) { gi <- Linf[2]
                           Linf <- Linf[1] }
    Linf*((Lm/Linf)^exp(-gi*dt))-Lm
  }
  STroynikov1 <- function(Lm,dt,Linf,gi) {
    Linf*((Lm/Linf)^exp(-gi*dt))-Lm
  }
  Troynikov2 <- function(Lm,dt,Linf,gi=NULL) {
    if (length(Linf)==2) { gi <- Linf[2]
                           Linf <- Linf[1] }
    Linf*((Lm/Linf)^exp(-gi*dt))
  }
  STroynikov2 <- function(Lm,dt,Linf,gi) {
    Linf*((Lm/Linf)^exp(-gi*dt))
  }
  ## Main function
  param <- match.arg(param)
  comcat <- "parameterization of the Gompertz function.\n\n"
  if (msg) {
    switch(param,
      Original=,original= {
        message("You have chosen the 'Original'/'original'",comcat,
                "  E[L|t] = Linf*exp(-exp(a-gi*t))\n\n",
                "where Linf = asymptotic mean length\n",
                "        gi = decrease in growth rate at the inflection point\n",
                "         a = an undefined parameter\n\n")
      },
      Ricker1= {
        message("You have chosen the 'Ricker1'",comcat,
                "  E[L|t] = Linf*exp(-exp(-gi*(t-ti)))\n\n",
                "  where Linf = asymptotic mean length\n",
                "          gi = instantaneous growth rate at the inflection point\n",
                "          ti = time at the inflection point\n\n")
      },
      Ricker2=,QD1=,QuinnDeriso1= {
        message("You have chosen the 'Ricker2'/'QuinnDeriso1'/'QD1'",comcat,
                "  E[L|t] = L0*exp(a*(1-exp(-gi*t)))\n\n",
                ## BUG FIX: this parameterization uses the mean length at age
                ## 0 (L0), not Linf; the label below previously said 'Linf'.
                "  where   L0 = the mean length at age 0\n",
                "          gi = instantaneous growth rate at the inflection point\n",
                "           a = dimensionless parameter related to growth\n\n")
      },
      Ricker3=,QD2=,QuinnDeriso2= {
        message("You have chosen the 'Ricker3'/'QuinnDeriso2'/'QD2'",comcat,
                ## BUG FIX: the displayed formula now matches the returned
                ## function (it previously showed 'exp(-(a/gi)*exp(-gi*t))').
                "  E[L|t] = Linf*exp(-a*exp(-gi*t))\n\n",
                "  where Linf = asymptotic mean length\n",
                "          gi = instantaneous growth rate at the inflection point\n",
                "           a = dimensionless parameter related to growth\n\n")
      },
      QD3=,QuinnDeriso3= {
        ## BUG FIX: this alternative was written 'QuinnDeriso3==' (a
        ## comparison, not a named switch alternative), which made
        ## msg=TRUE error for 'QD3'/'QuinnDeriso3'.
        message("You have chosen the 'QuinnDeriso3'/'QD3'",comcat,
                "  E[L|t] = Linf*exp(-(1/gi)*exp(-gi*(t-t0)))\n\n",
                "  where Linf = asymptotic mean length\n",
                "          gi = instantaneous growth rate at the inflection point\n",
                "          t0 = a dimensionless parameter related to time/age\n\n")
      },
      Troynikov1= {
        message("You have chosen the 'Troynikov1'",comcat,
                "  E[Lr-Lm|dt] = Linf*((Lm/Linf)^exp(-gi*dt))-Lm\n\n",
                "  where Linf = asymptotic mean length\n",
                "          gi = instantaneous growth rate at the inflection point\n\n",
                "  and the data are Lr = length at time of recapture\n",
                "                   Lm = length at time of marking\n",
                "                   dt = time between marking and recapture.\n")
      },
      Troynikov2= {
        message("You have chosen the 'Troynikov2'",comcat,
                "  E[Lr|dt] = Linf*((Lm/Linf)^exp(-gi*dt))\n\n",
                "  where Linf = asymptotic mean length\n",
                "          gi = instantaneous growth rate at the inflection point\n\n",
                "  and the data are Lr = length at time of recapture\n",
                "                   Lm = length at time of marking\n",
                "                   dt = time between marking and recapture.\n")
      }
    )
  }
  ## Prepend "S" to select the simple version of the chosen parameterization.
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @rdname growthModels
#' @export
RichardsFuns <- function(param=1,simple=FALSE,msg=FALSE) {
  ## Returns a function for one of six parameterizations (1-6) of the Richards
  ## growth model.  The default versions accept either individual parameters
  ## or all parameters bundled in the first parameter argument (convenient
  ## with coef() results); the S* ('simple') versions require individual
  ## parameters.  If msg=TRUE, a description of the chosen parameterization is
  ## printed.
  Richards1 <- function(t,Linf,k=NULL,a=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           a <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1-a*exp(-k*t))^b
  }
  SRichards1 <- function(t,Linf,k,a,b) {
    Linf*(1-a*exp(-k*t))^b
  }
  Richards2 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1-(1/b)*exp(-k*(t-ti)))^b
  }
  SRichards2 <- function(t,Linf,k,ti,b) {
    Linf*(1-(1/b)*exp(-k*(t-ti)))^b
  }
  Richards3 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf/((1+b*exp(-k*(t-ti)))^(1/b))
  }
  SRichards3 <- function(t,Linf,k,ti,b) {
    Linf/((1+b*exp(-k*(t-ti)))^(1/b))
  }
  Richards4 <- function(t,Linf,k=NULL,ti=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  SRichards4 <- function(t,Linf,k,ti,b) {
    Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  Richards5 <- function(t,Linf,k=NULL,L0=NULL,b=NULL) {
    if (length(Linf)==4) { k <- Linf[[2]]
                           L0 <- Linf[[3]]
                           b <- Linf[[4]]
                           Linf <- Linf[[1]] }
    Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))
  }
  SRichards5 <- function(t,Linf,k,L0,b) {
    Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))
  }
  Richards6 <- function(t,Linf,k=NULL,ti=NULL,Lninf=NULL,b=NULL) {
    if (length(Linf)==5) { k <- Linf[[2]]
                           ti <- Linf[[3]]
                           ## BUG FIX: 'Lninf' and 'b' previously unpacked
                           ## positions 3 and 4 of the length-5 parameter
                           ## vector (duplicating 'ti' and dropping 'b').
                           Lninf <- Linf[[4]]
                           b <- Linf[[5]]
                           Linf <- Linf[[1]] }
    Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  SRichards6 <- function(t,Linf,k,ti,Lninf,b) {
    Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
  }
  ## Main function
  if (!param %in% 1:6) stop("'param' must be in 1:6.")
  param <- paste0("Richards",param)
  if (msg) {
    ## BUG FIX (formatting): each header previously lacked the trailing
    ## '\n\n', running the formula onto the first line of the message.
    switch(param,
      Richards1= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  E[L|t] = Linf*(1-a*exp(-k*t))^b\n\n",
                "  where Linf = asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "           a = a dimensionless shape parameter\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      },
      Richards2= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  Linf*(1-(1/b)*exp(-k*(t-ti)))^b\n\n",
                "  where Linf = asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "          ti = time/age at the inflection point\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      },
      Richards3= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  Linf/((1+b*exp(-k*(t-ti)))^(1/b))\n\n",
                "  where Linf = asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "          ti = time/age at the inflection point\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      },
      Richards4= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))\n\n",
                "  where Linf = asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "          ti = time/age at the inflection point\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      },
      Richards5= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))\n\n",
                "  where Linf = asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "          L0 = mean length at t=0\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      },
      Richards6= {
        message("You have chosen the '",param,"' parameterization.\n\n",
                "  Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))\n\n",
                "  where Linf = upper asymptotic mean length\n",
                "           k = a constant that controls the slope at the inflection point\n",
                "       Lninf = lower asymptotic mean length\n",
                "          ti = time/age at the inflection point\n",
                "           b = a constant that controls the y- value of the inflection point\n\n")
      }
    )
  }
  ## Prepend "S" to select the simple version of the chosen parameterization.
  if (simple) param <- paste("S",param,sep="")
  get(param)
}
#' @rdname growthModels
#' @export
logisticFuns <- function(param=c("CJ1","CJ2","Karkach","Haddon","CampanaJones1","CampanaJones2"),
                         simple=FALSE,msg=FALSE) {
  ## Returns a function for the chosen parameterization of the logistic growth
  ## model.  The default versions accept either individual parameters or all
  ## parameters bundled in the first parameter argument (convenient with
  ## coef() results); the S* ('simple') versions require individual
  ## parameters.  If msg=TRUE, a description of the chosen parameterization is
  ## printed.
  CampanaJones1 <- function(t,Linf,gninf=NULL,ti=NULL) {
    if (length(Linf)==3) {
      gninf <- Linf[[2]]
      ti <- Linf[[3]]
      Linf <- Linf[[1]]
    }
    Linf/(1+exp(-gninf*(t-ti)))
  }
  CJ1 <- CampanaJones1
  SCampanaJones1 <- function(t,Linf,gninf,ti) Linf/(1+exp(-gninf*(t-ti)))
  SCJ1 <- SCampanaJones1
  CampanaJones2 <- function(t,Linf,gninf=NULL,a=NULL) {
    if (length(Linf)==3) {
      gninf <- Linf[[2]]
      a <- Linf[[3]]
      Linf <- Linf[[1]]
    }
    Linf/(1+a*exp(-gninf*t))
  }
  CJ2 <- CampanaJones2
  SCampanaJones2 <- function(t,Linf,gninf,a) Linf/(1+a*exp(-gninf*t))
  SCJ2 <- SCampanaJones2
  Karkach <- function(t,Linf,L0=NULL,gninf=NULL) {
    if (length(Linf)==3) {
      L0 <- Linf[[2]]
      gninf <- Linf[[3]]
      Linf <- Linf[[1]]
    }
    L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))
  }
  SKarkach <- function(t,Linf,L0,gninf) L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))
  ## Inverse-logistic (Haddon) for tag-return data: the growth increment is a
  ## decreasing logistic function of length at marking.
  Haddon <- function(Lm,dLmax,L50=NULL,L95=NULL) {
    if (length(dLmax)==3) {
      L50 <- dLmax[2]
      L95 <- dLmax[3]
      dLmax <- dLmax[1]
    }
    dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))
  }
  SHaddon <- function(Lm,dLmax,L50,L95) dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))
  ## Main function
  param <- match.arg(param)
  comcat <- "parameterization of the logistic growth function.\n\n"
  if (msg) {
    switch(param,
           CJ1=,CampanaJones1= {
             message("You have chosen the 'CampanaJones1'/'CJ1'",comcat,
                     "  E[L|t] = Linf/(1+exp(-gninf*(t-ti)))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "      gninif = instantaneous growth rate at t=-infinity\n",
                     "          ti = time at the inflection point\n\n")
           },
           CJ2=,CampanaJones2= {
             message("You have chosen the 'CampanaJones2'/'CJ2'",comcat,
                     "  E[L|t] = Linf/(1+a*exp(-gninf*t))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          gi = instantaneous growth rate at the inflection point\n",
                     "           a = a dimensionless parameter related to growth\n\n")
           },
           Karkach= {
             message("You have chosen the 'Karkach'",comcat,
                     "  E[L|t] = L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))\n\n",
                     "  where Linf = asymptotic mean length\n",
                     "          L0 = mean length at time/age 0\n",
                     "          gi = instantaneous growth rate at the inflection point\n\n")
           },
           Haddon= {
             message("You have chosen the 'Haddon Inverse'",comcat,
                     "  E[Lr-Lm|dt] = dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))\n\n",
                     "  where dLmax = maximum growth increment during the study\n",
                     "          L50 = length at marking to produces a growth increment of 0.5*dLmax",
                     "          L95 = length at marking to produces a growth increment of 0.95*dLmax\n\n",
                     "  and the data are Lr = length at time of recapture\n",
                     "                   Lm = length at time of marking\n")
           })
  }
  ## Prepend "S" to select the simple version of the chosen parameterization.
  if (simple) param <- paste0("S",param)
  get(param)
}
#' @title The four-parameter growth function from Schnute (1981).
#'
#' @description The four-parameter growth function from Schnute (1981). Use \code{SchnuteModels()} to see the equations for each growth function.
#'
#' @param t A numeric vector of ages over which to model growth.
#' @param case A string that indicates the case of the Schnute growth function to use.
#' @param t1 The (young) age that corresponds to \code{L1}. Set to minimum value in \code{t} by default.
#' @param t3 The (old) age that corresponds to \code{L3}. Set to maximum value in \code{t} by default.
#' @param L1 The mean size/length at \code{t1}.
#' @param L3 The mean size/length at \code{t3}.
#' @param a A dimensionless parameter that is related to the time/age at the inflection point.
#' @param b A dimensionless parameter that is related to size/length at the inflection point.
#'
#' @return \code{Schnute} returns a predicted size given the case of the function and the provided parameter values.
#'
#' \code{SchnuteModels} returns a graphic that uses \code{\link{plotmath}} to show the growth function equation in a pretty format.
#'
#' @author Derek H. Ogle, \email{derek@@derekogle.com}
#'
#' @section IFAR Chapter: None specifically, but 12-Individual Growth is related.
#'
#' @seealso See \code{\link{vbFuns}}, \code{\link{GompertzFuns}}, \code{\link{RichardsFuns}}, and \code{\link{logisticFuns}} for similar functionality for other models.
#'
#' @references Schnute, J. 1981. A versatile growth model with statistically stable parameters. Canadian Journal of Fisheries and Aquatic Sciences 38:1128-1140.
#'
#' @keywords manip
#'
#' @examples
#' ## See the formulae
#' growthFunShow("Schnute",1,plot=TRUE)
#' growthFunShow("Schnute",2,plot=TRUE)
#' growthFunShow("Schnute",3,plot=TRUE)
#' growthFunShow("Schnute",4,plot=TRUE)
#'
#' ## Simple examples
#' ages <- 1:15
#' s1 <- Schnute(ages,case=1,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s2 <- Schnute(ages,case=2,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s3 <- Schnute(ages,case=3,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#' s4 <- Schnute(ages,case=4,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1)
#'
#' plot(s1~ages,type="l",lwd=2)
#' lines(s2~ages,lwd=2,col="red")
#' lines(s3~ages,lwd=2,col="blue")
#' lines(s4~ages,lwd=2,col="green")
#'
#' @rdname Schnute
#' @export
Schnute <- function(t,case=1,t1=NULL,t3=NULL,L1=NULL,L3=NULL,a=NULL,b=NULL) {
  ## Predicted size at age(s) 't' from the requested case (1-4) of Schnute's
  ## (1981) four-parameter growth function.
  ## Validate the case identifier (may be given as numeric or character).
  case <- as.character(case)
  if (!case %in% c("1","2","3","4")) stop("'case' must be 1, 2, 3, or 4.",call.=FALSE)
  ## Force evaluation of 'b' (works around a global-binding issue).
  force(b)
  ## Resolve the reference ages; default to the range of 't' when possible.
  if (length(t)==1) {
    if (is.null(t1)) stop("Must provide a 't1' if 't' is only one value.",call.=FALSE)
    if (is.null(t3)) stop("Must provide a 't3' if 't' is only one value.",call.=FALSE)
  } else {
    if (is.null(t1)) t1 <- min(t,na.rm=TRUE)
    if (is.null(t3)) t3 <- max(t,na.rm=TRUE)
  }
  if (t1==t3) stop("'t1' cannot equal 't3'.",call.=FALSE)
  if (t1>t3) {
    ## Swap so that t1 is always the younger reference age.
    warning("'t1' was greater than 't3'; values reversed.",call.=FALSE)
    tmp <- t1
    t1 <- t3
    t3 <- tmp
  }
  ## Validate the reference lengths.
  if (L1>L3) stop("'L1' cannot be greater than 'L3'",call.=FALSE)
  ## Evaluate the chosen case (cases 2 and 4 do not use 'b').
  switch(case,
         "1"=((L1^b)+((L3^b)-(L1^b))*((1-exp(-a*(t-t1)))/(1-exp(-a*(t3-t1)))))^(1/b),
         "2"=L1*exp(log(L3/L1)*((1-exp(-a*(t-t1)))/(1-exp(-a*(t3-t1))))),
         "3"=((L1^b)+((L3^b)-(L1^b))*((t-t1)/(t3-t1)))^(1/b),
         "4"=L1*exp(log(L3/L1)*((t-t1)/(t3-t1))))
}
#' @rdname growthModels
#' @export
growthFunShow <- function(type=c("vonBertalanffy","Gompertz","Richards","Logistic","Schnute"),
                          param=NULL,plot=FALSE,...) {
  ## Returns (and optionally plots) a plotmath expression for the requested
  ## growth-function family and parameterization.
  type <- match.arg(type)
  ## Delegate to the internal expression builder for the chosen family.
  expr <- switch(type,
                 vonBertalanffy=iSGF_VB(param),
                 Gompertz=iSGF_GOMP(param),
                 Richards=iSGF_RICHARDS(param),
                 Logistic=iSGF_LOGISTIC(param),
                 Schnute=iSGF_SCHNUTE(param))
  if (plot) {
    ## Draw the equation centered on an empty canvas, restoring the previous
    ## graphics state afterwards.
    op <- graphics::par(mar=c(0.1,0.1,0.1,0.1))
    graphics::plot(0,type="n",ylim=c(0,1),xlim=c(0,1),xaxt="n",yaxt="n",
                   xlab="",ylab="",bty="n",...)
    graphics::text(0.5,0.5,expr,...)
    graphics::par(op)
  }
  expr
}
################################################################################
## Internal functions for growth model expressions
################################################################################
iSGF_VB <- function(param=c("Original","original","vonBertalanffy",
                            "Typical","typical","Traditional","traditional","BevertonHolt",
                            "GallucciQuinn","GQ","Mooij","Weisberg",
                            "Schnute","Francis","Laslett","Polacheck",
                            "Somers","Somers2",
                            "Fabens","Fabens2","Wang","Wang2","Wang3")) {
  ## Returns a plotmath expression for the chosen parameterization of the von
  ## Bertalanffy growth function (used by growthFunShow()).
  if(!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  switch(param,
         Typical=,typical=,Traditional=,traditional=,BevertonHolt= {
           expr <- expression(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])},")"))
         },
         Original=,original=,vonBertalanffy= {
           expr <- expression(E(L[t])==L[infinity]~-~(L[infinity]-L[0])*~e^{-Kt})
         },
         GallucciQuinn=,GQ= {
           expr <- expression(E(L[t])==frac(omega,K)*~bgroup("(",1-e^{-K*(t~-~t[0])},")"))
         },
         Mooij= {
           expr <- expression(E(L[t])==L[infinity]~-~(L[infinity]-L[0])*~e^{-frac(omega,L[infinity])*~t})
         },
         Weisberg= {
           expr <- expression(E(L[t])==L[infinity]*bgroup("(",1-e^{-frac(log(2),(t[50]~-~t[0]))*(t~-~t[0])},")"))
         },
         Schnute= {
           expr <- expression(E(L[t])==L[1]+(L[3]-L[1])*~frac(1-e^{-K*(~t~-~t[1])},1-e^{-K*(~t[3]~-~t[1])}))
         },
         Francis= {
           expr <- expression(atop(E(L[t])==L[1]+(L[3]-L[1])*~frac(1-r^{2*frac(t-t[1],t[3]-t[1])},1-r^{2}),
                                   plain("where" )~r==frac(L[3]-L[2],L[2]-L[1])))
         },
         Laslett= {
           expr <- expression(plain("Not Yet Implemented"))
         },
         Polacheck= {
           expr <- expression(plain("Not Yet Implemented"))
         },
         Somers= {
           expr <- expression(atop(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])-S(t)+S(t[0])},")"),
                                   plain("where" )~S(t)==bgroup("(",frac(C*K,2*~pi),")")*~sin(2*pi*(t-t[s]))))
         },
         Somers2= {
           expr <- expression(atop(E(L[t])==L[infinity]*bgroup("(",1-e^{-K*(t~-~t[0])-R(t)+R(t[0])},")"),
                                   plain("where" )~R(t)==bgroup("(",frac(C*K,2*~pi),")")*~sin(2*pi*(t-WP+0.5))))
         },
         Fabens= {
           ## BUG FIX: the Fabens and Fabens2 expressions were swapped; the
           ## Fabens model predicts length at recapture, E[Lr] = Lm + ...
           expr <- expression(E(L[r])==L[m]+(L[infinity]-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Fabens2= {
           ## BUG FIX: Fabens2 models the growth increment, E[Lr-Lm].
           expr <- expression(E(L[r]-L[m])==(L[infinity]-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang= {
           ## BUG FIX: was 'L[t]-L[t]' (identically zero); the Wang model uses
           ## the deviation of length at marking from its mean, Lm - E(Lm).
           expr <- expression(E(L[r]-L[m])==(L[infinity]+beta*(L[m]-E(L[m]))-L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang2= {
           ## BUG FIX: the data variable is length at marking L[m], not L[t].
           expr <- expression(E(L[r]-L[m])==(alpha+beta*L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         },
         Wang3= {
           expr <- expression(E(L[r])==L[m]+(alpha+beta*L[m])*bgroup("(",1-e^{-K*Delta*t},")"))
         })
  expr
}
iSGF_GOMP <- function(param=c("Original","original","Ricker1","Ricker2","Ricker3",
                              "QuinnDeriso1","QuinnDeriso2","QuinnDeriso3","QD1","QD2","QD3",
                              "Troynikov1","Troynikov2")) {
  ## Returns a plotmath expression for the chosen parameterization of the
  ## Gompertz growth function (used by growthFunShow()).
  if (!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  ## Equivalent parameterization names fall through to a shared expression.
  switch(param,
         Original=,original=
           expression(E(L[t])==L[infinity]*~e^{-e^{a-g[i]*t}}),
         Ricker1=
           expression(E(L[t])==L[infinity]*~e^{-e^{-g[i]*(t-t[i])}}),
         Ricker2=,QuinnDeriso1=,QD1=
           expression(E(L[t])==L[0]*~e^{a*bgroup("(",1-e^{-g[i]*t},")")}),
         Ricker3=,QuinnDeriso2=,QD2=
           expression(E(L[t])==L[infinity]*~e^{-a*~e^{-g[i]*t}}),
         QuinnDeriso3=,QD3=
           expression(E(L[t])==L[infinity]*~e^{-~frac(1,g[i])*~e^{-g[i]*~(~t~-~t^{plain("*")})}}),
         Troynikov1=
           expression(E(L[r]-L[m])==L[infinity]*~bgroup("(",frac(L[m],L[infinity]),")")^{e^{-g[i]*Delta*t}}-L[m]),
         Troynikov2=
           expression(E(L[r])==L[infinity]*~bgroup("(",frac(L[m],L[infinity]),")")^{e^{-g[i]*Delta*t}}))
}
iSGF_RICHARDS <- function(param=1:6) {
  ## Returns a plotmath expression for the requested parameterization (1-6) of
  ## the Richards growth function (used by growthFunShow()).
  if (!is.numeric(param)) stop("'param' must be numeric when type='Richards'.",call.=FALSE)
  if (!param %in% 1:6) stop("'param' must be from 1-6 when type='Richards'.",call.=FALSE)
  switch(as.character(param),
         "1"=expression(E(L[t])==L[infinity]*~bgroup("(",1-a*e^{-kt},")")^{b}),
         "2"=expression(E(L[t])==L[infinity]*~bgroup("(",1-frac(1,b)*~e^{-k*(t-t[i])},")")^{~b}),
         "3"=expression(E(L[t])==frac(L[infinity],bgroup("(",1+b*e^{-k*(t-t[i])},")")^{~frac(1,b)})),
         "4"=expression(E(L[t])==L[infinity]*~bgroup("(",1+(b-1)*~e^{-k*(t-t[i])},")")^{~frac(1,1-b)}),
         "5"=expression(E(L[t])==L[infinity]*~bgroup("[",bgroup("(",1+bgroup("(",frac(L[0],L[infinity]),")")^{1-b}-1,")")*~e^{-k*t},"]")^{~frac(1,1-b)}),
         "6"=expression(E(L[t])==L[-infinity]+(L[infinity]-L[-infinity])*~bgroup("(",1+(b-1)*~e^{-k*(t-t[i])},")")^{~frac(1,1-b)}))
}
iSGF_LOGISTIC <- function(param=c("CJ1","CJ2","Karkach","Haddon","CampanaJones1","CampanaJones2")) {
  ## Returns a plotmath expression for the chosen parameterization of the
  ## logistic growth function (used by growthFunShow()).
  if(!is.character(param)) stop("'param' must be a character string.",call.=FALSE)
  param <- match.arg(param)
  switch(param,
         CJ1=,CampanaJones1= {
           ## BUG FIX: the exponential was missing from the denominator; the
           ## CJ1 model is Linf/(1+exp(-gninf*(t-ti))).
           expr <- expression(E(L[t])==frac(L[infinity],1+e^{-g[-infinity]*(t-t[i])}))
         },
         CJ2=,CampanaJones2= {
           expr <- expression(E(L[t])==frac(L[infinity],1+~ae^{-g[-infinity]*t}))
         },
         Karkach= {
           expr <- expression(E(L[t])==frac(L[0]*L[infinity],L[0]+(L[infinity]-L[0])*e^{-g[-infinity]*t}))
         },
         Haddon= {
           expr <- expression(E(L[r]-L[m])==frac(Delta*L[max],1+e^{log(19)*frac(L[m]~-~L[50],L[95]~-~L[50])}))
         })
  expr
}
iSGF_SCHNUTE <- function(case=1:4) {
  ## Returns a plotmath expression for the requested case (1-4) of Schnute's
  ## four-parameter growth function (used by growthFunShow()).
  if (!is.numeric(case)) stop("'case' must be numeric when type='Schnute'.",call.=FALSE)
  if (!case %in% 1:4) stop("'case' must be from 1-4 when type='Schnute'.",call.=FALSE)
  switch(as.character(case),
         "1"=expression(E(L[t])==bgroup("[",L[1]^{b}+(L[3]^{b}-L[1]^{b})*~frac(1-e^{-a*(~t~-~t[1])},1-e^{-a*(~t[3]~-~t[1])}),"]")^{~frac(1,b)}),
         "2"=expression(E(L[t])==L[1]*e^{log~bgroup("(",frac(L[3],L[1]),")")*~frac(1-e^{-a*(~t~-~t[1])},1-e^{-a*(~t[3]~-~t[1])})}),
         "3"=expression(E(L[t])==bgroup("[",L[1]^{b}+(L[3]^{b}-L[1]^{b})*~frac(~t~-~t[1],~t[3]~-~t[1]),"]")^{~frac(1,b)}),
         "4"=expression(E(L[t])==L[1]*e^{log~bgroup("(",frac(L[3],L[1]),")")*~frac(~t~-~t[1],~t[3]~-~t[1])}))
}
# Hack to get around Exec.json always dumping to same Result.hex key
# TODO: Need better way to manage temporary/intermediate values in calculations! Right now, overwriting occurs silently
# Package-private mutable state lives in this environment.
.pkg.env <- new.env()
.pkg.env$result_count <- 0
.pkg.env$temp_count <- 0
.pkg.env$IS_LOGGING <- FALSE

# Constants for temporary keys, result limits, and inspection view sizes.
.TEMP_KEY <- "Last.value"
.RESULT_MAX <- 1000
.MAX_INSPECT_ROW_VIEW <- 10000
.MAX_INSPECT_COL_VIEW <- 10000
.LOGICAL_OPERATORS <- c("==", ">", "<", "!=", ">=", "<=", "&", "|", "&&", "||", "!", "is.na")

# Initialize functions for R logging
# Default log directory: platform-appropriate application-support location.
if (.Platform$OS.type == "windows") {
  .myPath <- paste(Sys.getenv("APPDATA"), "h2o", sep = .Platform$file.sep)
} else {
  .myPath <- paste(Sys.getenv("HOME"), "Library", "Application Support", "h2o",
                   sep = .Platform$file.sep)
}
.pkg.env$h2o.__LOG_COMMAND <- paste(.myPath, "commands.log", sep = .Platform$file.sep)
.pkg.env$h2o.__LOG_ERROR <- paste(.myPath, "errors.log", sep = .Platform$file.sep)
h2o.startLogging <- function() {
  ## Enable logging of commands and errors, creating the log directories if
  ## they do not already exist.
  ensureDir <- function(dirPath) {
    # Create the directory (with parents) on demand, warning when we do.
    if (!file.exists(dirPath)) {
      warning(dirPath, " directory does not exist. Creating it now...")
      dir.create(dirPath, recursive = TRUE)
    }
  }
  ensureDir(normalizePath(dirname(.pkg.env$h2o.__LOG_COMMAND)))
  ensureDir(normalizePath(dirname(.pkg.env$h2o.__LOG_ERROR)))
  cat("Appending to log file", .pkg.env$h2o.__LOG_COMMAND, "\n")
  cat("Appending to log file", .pkg.env$h2o.__LOG_ERROR, "\n")
  assign("IS_LOGGING", TRUE, envir = .pkg.env)
}
h2o.stopLogging <- function() {
  ## Disable logging of commands and errors.
  cat("Logging stopped")
  assign("IS_LOGGING", FALSE, envir = .pkg.env)
}
h2o.clearLogs <- function() {
  ## Delete both the command and the error log files.
  file.remove(.pkg.env$h2o.__LOG_COMMAND)
  file.remove(.pkg.env$h2o.__LOG_ERROR)
}
h2o.getLogPath <- function(type) {
  ## Return the path of the requested log file ("Command" or "Error").
  if (missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  if (type == "Command") .pkg.env$h2o.__LOG_COMMAND else .pkg.env$h2o.__LOG_ERROR
}
h2o.openLog <- function(type) {
  ## Open the requested log file ("Command" or "Error") with the system's
  ## default application.
  if(missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  myFile = switch(type, Command = .pkg.env$h2o.__LOG_COMMAND, Error = .pkg.env$h2o.__LOG_ERROR)
  if(!file.exists(myFile)) stop(myFile, " does not exist")
  myOS = Sys.info()["sysname"]
  if(myOS == "Windows")
    # BUG FIX: shell.exec() takes a bare file path on Windows; the previous
    # code passed the string "open '<file>'", which is not a valid path.
    shell.exec(myFile)
  else
    # NOTE(review): 'open' is the macOS opener; on Linux this would need
    # xdg-open -- confirm which non-Windows platforms are intended.
    system(paste("open '", myFile, "'", sep=""))
}
h2o.setLogPath <- function(path, type) {
  ## Point the requested log ("Command" or "Error") at a new directory; the
  ## standard file name (commands.log / errors.log) is appended to 'path'.
  if (missing(path) || !is.character(path)) stop("path must be a character string")
  if (!file.exists(path)) stop(path, " directory does not exist")
  if (missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  if (type == "Command") {
    varName <- "h2o.__LOG_COMMAND"
    fileName <- "commands.log"
  } else {
    varName <- "h2o.__LOG_ERROR"
    fileName <- "errors.log"
  }
  assign(varName, paste(path, fileName, sep = .Platform$file.sep), envir = .pkg.env)
}
.h2o.__logIt <- function(m, tmp, commandOrErr, isPost = TRUE) {
  # Append a timestamped entry to the command or error log file.
  #   m            -- message text (a URL when commandOrErr == "Command")
  #   tmp          -- optional (named) list of values to log alongside 'm'
  #   commandOrErr -- "Command" selects the command log; anything else selects
  #                   the error log
  #   isPost       -- for command entries, whether the HTTP verb was POST
  # m is a url if commandOrErr == "Command"
  if(is.null(tmp) || is.null(get("tmp"))) s <- m
  else {
    tmp <- get("tmp"); nams = names(tmp)
    # When some elements are unnamed, tag error entries generically.
    if(length(nams) != length(tmp)) {
      if (is.null(nams) && commandOrErr != "Command") nams = "[WARN/ERROR]"
    }
    # Render each element as "name: value".
    s <- rep(" ", max(length(tmp), length(nams)))
    for(i in seq_along(tmp)){
      s[i] <- paste(nams[i], ": ", tmp[[i]], sep="", collapse = " ")
    }
    # NOTE(review): ifelse(nchar(s) > 0, ...) is vectorized over 's', so when
    # 'tmp' has more than one element this paste() produces a character
    # vector (near-duplicate entries) rather than one string -- looks
    # unintended; confirm before relying on single-entry log records.
    s <- paste(m, "\n", paste(s, collapse = ", "), ifelse(nchar(s) > 0, "\n", ""))
  }
  # if(commandOrErr != "Command") s <- paste(s, '\n')
  # Timestamp header (GMT); command entries also record the HTTP verb.
  h <- format(Sys.time(), format = "%a %b %d %X %Y %Z", tz = "GMT")
  if(commandOrErr == "Command")
    h <- paste(h, ifelse(isPost, "POST", "GET"), sep = "\n")
  s <- paste(h, "\n", s)
  # Choose the target log file; its directory must already exist (see
  # h2o.startLogging, which creates it).
  myFile <- ifelse(commandOrErr == "Command", .pkg.env$h2o.__LOG_COMMAND, .pkg.env$h2o.__LOG_ERROR)
  myDir <- normalizePath(dirname(myFile))
  if(!file.exists(myDir)) stop(myDir, " directory does not exist")
  write(s, file = myFile, append = TRUE)
}
# Internal functions & declarations
# REST endpoint paths used by the R client, grouped by API area.

## Generic cluster / key-value store pages (v1 API).
.h2o.__PAGE_CANCEL <- "Cancel.json"
.h2o.__PAGE_CLOUD <- "Cloud.json"
.h2o.__PAGE_GET <- "GetVector.json"
.h2o.__PAGE_EXPORTHDFS <- "ExportHdfs.json"
.h2o.__PAGE_INSPECT <- "Inspect.json"
.h2o.__PAGE_JOBS <- "Jobs.json"
.h2o.__PAGE_PARSE <- "Parse.json"
.h2o.__PAGE_PUT <- "PutVector.json"
.h2o.__PAGE_REMOVE <- "Remove.json"
.h2o.__PAGE_REMOVEALL <- "2/RemoveAll.json"
.h2o.__PAGE_SUMMARY <- "SummaryPage.json"
.h2o.__PAGE_SHUTDOWN <- "Shutdown.json"
.h2o.__PAGE_VIEWALL <- "StoreView.json"
.h2o.__DOWNLOAD_LOGS <- "LogDownload.json"

## Data import/parse/inspect/export pages (v2 API).
.h2o.__PAGE_EXEC2 <- "2/Exec2.json"
.h2o.__PAGE_IMPORTFILES2 <- "2/ImportFiles2.json"
.h2o.__PAGE_EXPORTFILES <- "2/ExportFiles.json"
.h2o.__PAGE_INSPECT2 <- "2/Inspect2.json"
.h2o.__PAGE_PARSE2 <- "2/Parse2.json"
.h2o.__PAGE_PREDICT2 <- "2/Predict.json"
.h2o.__PAGE_SUMMARY2 <- "2/SummaryPage2.json"
.h2o.__PAGE_LOG_AND_ECHO <- "2/LogAndEcho.json"
.h2o.__HACK_LEVELS2 <- "2/Levels2.json"
.h2o.__HACK_SETCOLNAMES2 <- "2/SetColumnNames2.json"

## Model-metric pages.
.h2o.__PAGE_CONFUSION <- "2/ConfusionMatrix.json"
.h2o.__PAGE_AUC <- "2/AUC.json"
.h2o.__PAGE_HITRATIO <- "2/HitRatio.json"
.h2o.__PAGE_GAPSTAT <- "2/GapStatistic.json"
.h2o.__PAGE_GAPSTATVIEW <- "2/GapStatisticModelView.json"
.h2o.__PAGE_QUANTILES <- "2/QuantilesPage.json"

## Algorithm pages: build / progress / model-view per algorithm.
.h2o.__PAGE_DRF <- "2/DRF.json"
.h2o.__PAGE_DRFProgress <- "2/DRFProgressPage.json"
.h2o.__PAGE_DRFModelView <- "2/DRFModelView.json"
.h2o.__PAGE_GBM <- "2/GBM.json"
.h2o.__PAGE_GBMProgress <- "2/GBMProgressPage.json"
.h2o.__PAGE_GRIDSEARCH <- "2/GridSearchProgress.json"
.h2o.__PAGE_GBMModelView <- "2/GBMModelView.json"
.h2o.__PAGE_GLM2 <- "2/GLM2.json"
.h2o.__PAGE_GLM2Progress <- "2/GLMProgress.json"
.h2o.__PAGE_GLMModelView <- "2/GLMModelView.json"
.h2o.__PAGE_GLMValidView <- "2/GLMValidationView.json"
.h2o.__PAGE_GLM2GridView <- "2/GLMGridView.json"
.h2o.__PAGE_KMEANS2 <- "2/KMeans2.json"
.h2o.__PAGE_KM2Progress <- "2/KMeans2Progress.json"
.h2o.__PAGE_KM2ModelView <- "2/KMeans2ModelView.json"
.h2o.__PAGE_DeepLearning <- "2/DeepLearning.json"
.h2o.__PAGE_DeepLearningProgress <- "2/DeepLearningProgressPage.json"
.h2o.__PAGE_DeepLearningModelView <- "2/DeepLearningModelView.json"
.h2o.__PAGE_PCA <- "2/PCA.json"
.h2o.__PAGE_PCASCORE <- "2/PCAScore.json"
.h2o.__PAGE_PCAProgress <- "2/PCAProgressPage.json"
.h2o.__PAGE_PCAModelView <- "2/PCAModelView.json"
.h2o.__PAGE_SpeeDRF <- "2/SpeeDRF.json"
.h2o.__PAGE_SpeeDRFProgress <- "2/SpeeDRFProgressPage.json"
.h2o.__PAGE_SpeeDRFModelView <- "2/SpeeDRFModelView.json"
.h2o.__PAGE_BAYES <- "2/NaiveBayes.json"
.h2o.__PAGE_NBProgress <- "2/NBProgressPage.json"
.h2o.__PAGE_NBModelView <- "2/NBModelView.json"
.h2o.__PAGE_CreateFrame <- "2/CreateFrame.json"
# client -- Connection object returned from h2o.init().
# page -- URL to access within the H2O server.
# parms -- List of parameters to send to the server.
.h2o.__remoteSendWithParms <- function(client, page, parms) {
  # Forward each element of the named list 'parms' as a named argument to
  # .h2o.__remoteSend() and return its parsed response.
  #
  # IMPROVEMENT: the previous implementation built a call string and ran
  # eval(parse(text=...)), which failed for parameter names that are not
  # syntactic R names, relied on partial matching via parms$<name>, and
  # errored on an empty 'parms' (1:length(parms) when length is 0).
  # do.call() passes the parameters through verbatim.
  do.call(.h2o.__remoteSend, c(list(client, page), parms))
}
# Send a POST request to `page` on the H2O server identified by `client`,
# passing `...` as form parameters, and return the parsed JSON response.
# Stops with the server-reported error when the response contains one.
.h2o.__remoteSend <- function(client, page, ...) {
  # Refuse to talk to a cluster that reports itself unhealthy.
  .h2o.__checkClientHealth(client)
  ip = client@ip
  port = client@port
  myURL = paste("http://", ip, ":", port, "/", page, sep="")
  # Sends the given arguments as URL arguments to the given page on the specified server
  #
  # Re-enable POST since we found the bug in NanoHTTPD which was causing POST
  # payloads to be dropped.
  #
  if(.pkg.env$IS_LOGGING) {
    # Log list of parameters sent to H2O
    .h2o.__logIt(myURL, list(...), "Command")
    # Capture headers and body separately so the HTTP status can be logged.
    hg = basicHeaderGatherer()
    tg = basicTextGatherer()
    postForm(myURL, style = "POST", .opts = curlOptions(headerfunction = hg$update, writefunc = tg[[1]]), ...)
    temp = tg$value()
    # Log HTTP response from H2O
    hh <- hg$value()
    s <- paste(hh["Date"], "\nHTTP status code: ", hh["status"], "\n ", temp, sep = "")
    s <- paste(s, "\n\n------------------------------------------------------------------\n")
    cmdDir <- normalizePath(dirname(.pkg.env$h2o.__LOG_COMMAND))
    if(!file.exists(cmdDir)) stop(cmdDir, " directory does not exist")
    write(s, file = .pkg.env$h2o.__LOG_COMMAND, append = TRUE)
  } else
    temp = postForm(myURL, style = "POST", ...)
  # The GET code that we used temporarily while NanoHTTPD POST was known to be busted.
  #
  #if(length(list(...)) == 0)
  #  temp = getURLContent(myURL)
  #else
  #  temp = getForm(myURL, ..., .checkParams = FALSE) # Some H2O params overlap with Curl params
  # after = gsub("\\\\\\\"NaN\\\\\\\"", "NaN", temp[1])
  # after = gsub("NaN", '"NaN"', after)
  # H2O emits quoted "Infinity" tokens; rewrite them to "Inf"/"-Inf" so the
  # JSON parser yields values R understands.
  after = gsub('"Infinity"', '"Inf"', temp[1])
  after = gsub('"-Infinity"', '"-Inf"', after)
  res = fromJSON(after)
  if(!is.null(res$error)) {
    # Surface a server-side error as an R error (and log it when enabled).
    if(.pkg.env$IS_LOGGING) .h2o.__writeToFile(res, .pkg.env$h2o.__LOG_ERROR)
    stop(paste(myURL," returned the following error:\n", .h2o.__formatError(res$error)))
  }
  res
}
# Raise an error describing an unhealthy H2O cluster, optionally naming the
# sick node, and pointing the user at the cluster's Cloud status page.
.h2o.__cloudSick <- function(node_name = NULL, client) {
  url <- paste("http://", client@ip, ":", client@port, "/Cloud.html", sep = "")
  m1 <- "Attempting to execute action on an unhealthy cluster!\n"
  # BUG FIX: the original tested `node_name != NULL`, which yields logical(0)
  # for ANY input (comparison against NULL has length zero), so ifelse()
  # never produced the node-name message. Use is.null() to test for absence.
  m2 <- if (!is.null(node_name))
    paste("The sick node is identified to be: ", node_name, "\n", sep = "")
  else
    ""
  m3 <- paste("Check cloud status here: ", url, sep = "")
  stop(paste(m1, m2, "\n", m3, sep = ""))
}
# Verify that every node in the H2O cloud is healthy and responsive.
# Stops (via .h2o.__cloudSick) when the cloud or any node reports unhealthy;
# nodes whose last heartbeat is stale get one re-poll after a short sleep.
.h2o.__checkClientHealth <- function(client) {
  # Fetch and parse the Cloud status JSON; stop if the instance is gone.
  grabCloudStatus <- function(client) {
    ip <- client@ip
    port <- client@port
    url <- paste("http://", ip, ":", port, "/", .h2o.__PAGE_CLOUD, sep = "")
    if(!url.exists(url)) stop(paste("H2O connection has been severed. Instance no longer up at address ", ip, ":", port, "/", sep = "", collapse = ""))
    fromJSON(getURLContent(url))
  }
  # Inspect one node's health record; elapsed_time appears to be in
  # milliseconds given the 45000/10000 thresholds below.
  checker <- function(node, client) {
    status <- node$node_healthy
    elapsed <- node$elapsed_time
    # NOTE(review): nport is computed but never used.
    nport <- unlist(strsplit(node$name, ":"))[2]
    if(!status) .h2o.__cloudSick(node_name = node$name, client = client)
    # No heartbeat for >45s: report the cloud as sick.
    if(elapsed > 45000) .h2o.__cloudSick(node_name = NULL, client = client)
    # Stale (>10s) but not dead: wait, then re-check all nodes once more.
    if(elapsed > 10000) {
      Sys.sleep(5)
      lapply(grabCloudStatus(client)$nodes, checker, client)
    }
    return(0)
  }
  cloudStatus <- grabCloudStatus(client)
  if(!cloudStatus$cloud_healthy) .h2o.__cloudSick(node_name = NULL, client = client)
  lapply(cloudStatus$nodes, checker, client)
  return(0)
}
#------------------------------------ Job Polling ------------------------------------#
# Look up job `keyName` in the H2O job queue and return its progress value.
# Stops when the job was cancelled by the user or raised a server-side
# exception. Validates both arguments before touching the network.
.h2o.__poll <- function(client, keyName) {
  if(missing(client)) stop("client is missing!")
  if(class(client) != "H2OClient") stop("client must be a H2OClient object")
  if(missing(keyName)) stop("keyName is missing!")
  if(!is.character(keyName) || nchar(keyName) == 0) stop("keyName must be a non-empty string")
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  if(length(jobs) == 0) stop("No jobs found in queue")
  # Scan the whole queue; as in the original, the last matching entry wins.
  job <- NULL
  for(j in jobs) {
    if(j$key == keyName)
      job <- j
  }
  if(is.null(job)) stop("Job key ", keyName, " not found in job queue")
  if(!is.null(job$result$val) && job$result$val == "CANCELLED")
    stop("Job key ", keyName, " was cancelled by user")
  else if(!is.null(job$result$exception) && job$result$exception == 1)
    stop(job$result$val)
  job$progress
}
# TRUE when every job in the H2O queue has either finished (progress == -1)
# or been cancelled.
.h2o.__allDone <- function(client) {
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  running <- vapply(jobs, function(j) !(j$progress == -1.0 || j$cancelled), logical(1))
  !any(running)
}
# Block until every job in the queue is done, checking once per second.
# Raises an error once `timeout` seconds have elapsed without completion.
.h2o.__pollAll <- function(client, timeout) {
  started <- Sys.time()
  repeat {
    if(.h2o.__allDone(client)) break
    Sys.sleep(1)
    if(as.numeric(difftime(Sys.time(), started)) > timeout)
      stop("Timeout reached! Check if any jobs have frozen in H2O.")
  }
}
# Block until job `job_key` completes (poll value -1), sleeping `pollInterval`
# seconds between polls, optionally rendering a console progress bar.
# The finally clause cancels the job if polling exits early (error/interrupt).
.h2o.__waitOnJob <- function(client, job_key, pollInterval = 1, progressBar = TRUE) {
  if(!is.character(job_key) || nchar(job_key) == 0) stop("job_key must be a non-empty string")
  if(progressBar) {
    pb = txtProgressBar(style = 3)
    # NOTE: the while() condition assigns `prog` on every poll so the
    # progress bar can display the latest value.
    tryCatch(while((prog = .h2o.__poll(client, job_key)) != -1) { Sys.sleep(pollInterval); setTxtProgressBar(pb, prog) },
             error = function(e) { cat("\nPolling fails:\n"); print(e) },
             finally = .h2o.__cancelJob(client, job_key))
    setTxtProgressBar(pb, 1.0); close(pb)
  } else
    tryCatch(while(.h2o.__poll(client, job_key) != -1) { Sys.sleep(pollInterval) },
             finally = .h2o.__cancelJob(client, job_key))
}
# For checking progress from each algorithm's progress page (no longer used)
# .h2o.__isDone <- function(client, algo, resH) {
# if(!algo %in% c("GBM", "KM", "RF1", "RF2", "DeepLearning", "GLM1", "GLM2", "GLM1Grid", "PCA")) stop(algo, " is not a supported algorithm")
# version = ifelse(algo %in% c("RF1", "GLM1", "GLM1Grid"), 1, 2)
# page = switch(algo, GBM = .h2o.__PAGE_GBMProgress, KM = .h2o.__PAGE_KM2Progress, RF1 = .h2o.__PAGE_RFVIEW,
# RF2 = .h2o.__PAGE_DRFProgress, DeepLearning = .h2o.__PAGE_DeepLearningProgress, GLM1 = .h2o.__PAGE_GLMProgress,
# GLM1Grid = .h2o.__PAGE_GLMGridProgress, GLM2 = .h2o.__PAGE_GLM2Progress, PCA = .h2o.__PAGE_PCAProgress)
#
# if(version == 1) {
# job_key = resH$response$redirect_request_args$job
# dest_key = resH$destination_key
# if(algo == "RF1")
# res = .h2o.__remoteSend(client, page, model_key = dest_key, data_key = resH$data_key, response_variable = resH$response$redirect_request_args$response_variable)
# else
# res = .h2o.__remoteSend(client, page, job = job_key, destination_key = dest_key)
# if(res$response$status == "error") stop(res$error)
# res$response$status != "poll"
# } else {
# job_key = resH$job_key; dest_key = resH$destination_key
# res = .h2o.__remoteSend(client, page, job_key = job_key, destination_key = dest_key)
# if(res$response_info$status == "error") stop(res$error)
#
# if(!is.null(res$response_info$redirect_url)) {
# ind = regexpr("\\?", res$response_info$redirect_url)[1]
# url = ifelse(ind > 1, substr(res$response_info$redirect_url, 1, ind-1), res$response_info$redirect_url)
# !(res$response_info$status == "poll" || (res$response_info$status == "redirect" && url == page))
# } else
# res$response_info$status == "done"
# }
# }
# Send a cancel request for job `keyName` if it is still running
# (i.e. not already finished, cancelled, or marked complete).
.h2o.__cancelJob <- function(client, keyName) {
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  if(length(jobs) == 0) stop("No jobs found in queue")
  job <- NULL
  for(j in jobs) {
    if(j$key == keyName) {
      job <- j
      break
    }
  }
  if(is.null(job)) stop("Job key ", keyName, " not found in job queue")
  # Only cancel jobs that are still in flight.
  finished <- job$cancelled || job$progress == -1.0 || job$progress == -2.0 || job$end_time == -1
  if(!finished) {
    .h2o.__remoteSend(client, .h2o.__PAGE_CANCEL, key=keyName)
    cat("Job key", keyName, "was cancelled by user\n")
  }
}
#------------------------------------ Exec2 ------------------------------------#
# Execute an Exec2 expression, writing the result into a rotating temporary
# key (Last.value.0 .. Last.value.(.RESULT_MAX - 1)).
.h2o.__exec2 <- function(client, expr) {
  tempKey <- paste(.TEMP_KEY, .pkg.env$temp_count, sep=".")
  .pkg.env$temp_count <- (.pkg.env$temp_count + 1) %% .RESULT_MAX
  .h2o.__exec2_dest_key(client, expr, tempKey)
}
# Run an Exec2 expression on the server, assigning the result to `destKey`.
# `expr` may be a character string or an unevaluated R expression captured
# via substitute() — this NSE contract makes the statement order load-bearing.
.h2o.__exec2_dest_key <- function(client, expr, destKey) {
  # If evaluating the promise errors, treat `expr` as a raw expression.
  type = tryCatch({ typeof(expr) }, error = function(e) { "expr" })
  if (type != "character")
    expr = deparse(substitute(expr))
  # Server-side assignment: "destKey = <expr>".
  expr = paste(destKey, "=", expr)
  res = .h2o.__remoteSend(client, .h2o.__PAGE_EXEC2, str=expr)
  if(!is.null(res$response$status) && res$response$status == "error") stop("H2O returned an error!")
  # Record the key the result was written to so callers can wrap it.
  res$dest_key = destKey
  return(res)
}
#'
#' Is `expr` the assignment operator `<-` or `=`?
#'
.isAssignment <- function(expr) {
  identical(expr, quote(`<-`)) || identical(expr, quote(`=`))
}
#'
#' Get the class of the object named `i` in `envir`.
#'
#' Falls back to the class of `i` itself when no such object exists in the
#' environment; returns NA if even that fails.
.eval_class <- function(i, envir) {
  tryCatch(
    class(get(as.character(i), envir)),
    error = function(e) tryCatch(class(i), error = function(e) NA)
  )
}
#'
#' Recursively unfurl an expression into nested lists of names/atoms.
#'
#' Calls become lists of their components (recursively); leaves (symbols,
#' constants) are returned unchanged.
.as_list <- function(expr) {
  if (!is.call(expr)) return(expr)
  lapply(as.list(expr), .as_list)
}
#'
#' Cast the expression list back to a call.
#'
#' Walks the listified expression from tail to head: nested sublists are
#' recursively rebuilt into calls, leaves (atoms/names) are kept, anything
#' else is coerced with as.call(). Element 1 (the called function) is left
#' untouched; the whole list is finally wrapped by as.call().
.back_to_expr<-
function(some_expr_list) {
  len <- length(some_expr_list)
  while(len > 1) {
    num_sub_lists <- 0
    # Heuristic nesting test: a flat element unlists to the same length,
    # a nested one unlists to more elements per slot.
    if (length(some_expr_list[[len]]) == 1) {
      num_sub_lists <- 1
    } else {
      num_sub_lists <- length(unlist(some_expr_list[[len]])) / length(some_expr_list[[len]])
    }
    if (num_sub_lists > 1) {
      # Nested sub-expression: rebuild it into a call first.
      some_expr_list[[len]] <- .back_to_expr(some_expr_list[[len]])
    } else if (is.atomic(some_expr_list[[len]]) || is.name(some_expr_list[[len]])) {
      # Leaves are already valid call components; keep as-is.
      some_expr_list[[len]] <- some_expr_list[[len]]
    } else {
      some_expr_list[[len]] <- as.call(some_expr_list[[len]])
    }
    len <- len - 1
  }
  return(as.call(some_expr_list))
}
#'
#' Swap the variable with the key.
#'
#' Once there's a key, set its columns to the COLNAMES variable in the .pkg.env (used by .get_col_id)
#' `object` is a symbol naming an H2OParsedData variable in `envir`; returns
#' the backing H2O key as a name. Side effects: caches the client connection
#' in .pkg.env$SERVER, and the column names in .pkg.env$COLNAMES — but only
#' the first time, so the first frame encountered wins.
.swap_with_key<-
function(object, envir) {
  assign("SERVER", get(as.character(object), envir = envir)@h2o, envir = .pkg.env)
  if ( !exists("COLNAMES", .pkg.env)) {
    assign("COLNAMES", colnames(get(as.character(object), envir = envir)), .pkg.env)
  }
  object <- as.name(get(as.character(object), envir = envir)@key)
  return(object)
}
#'
#' Return the index of column name `ch` within the cached COLNAMES
#' (populated by .swap_with_key).
.get_col_id <- function(ch, envir) {
  which(.pkg.env$COLNAMES == ch)
}
#'
#' Swap a column-name symbol for its index in the (H2O) data frame.
#'
#' Delegates to .get_col_id, which consults the COLNAMES cache in .pkg.env.
.swap_with_colid <- function(object, envir) {
  .get_col_id(as.character(object), envir)
}
#'
#' Actually do the replacing of variable/column name with the h2o key names / indices
#'
#' H2OParsedData elements are swapped for their H2O key; character elements
#' for their column index. When the list has exactly one element the whole
#' list is replaced (not indexed) to avoid [[ ]] on a scalar.
.replace_all<-
function(a_list, envir) {
  # Check if there is H2OParsedData object to sub out, grab their indices.
  idxs <- which( "H2OParsedData" == unlist(lapply(a_list, .eval_class, envir)))
  # Check if there are column names to sub out, grab their indices.
  idx2 <- which( "character" == unlist(lapply(a_list, .eval_class, envir)))
  # If nothing to sub, return
  if (length(idxs) == 0 && length(idx2) == 0) return(a_list)
  # Swap out keys
  if (length(idxs) != 0) {
    for (i in idxs) {
      if(length(a_list) == 1) {
        a_list <- .swap_with_key(a_list, envir)
      } else {
        a_list[[i]] <- .swap_with_key(a_list[[i]], envir)
      }
    }
  }
  # Swap out column names with indices
  if (length(idx2) != 0) {
    for (i in idx2) {
      if (length(a_list) == 1) {
        a_list <- .swap_with_colid(a_list, envir)
      } else {
        a_list[[i]] <- .swap_with_colid(a_list[[i]], envir)
      }
    }
  }
  return(a_list)
}
#'
#' Replace the R variable with a H2O key name.
#' Replace column names with their indices.
#'
#' Recursively walks the listified expression: nested sub-expressions are
#' descended into, leaf elements are handed to .replace_all for substitution.
.replace_with_keys_helper<-
function(some_expr_list, envir) {
  #Loop over the length of the list
  len <- length(some_expr_list)
  i <- 1
  while(i <= len) {
    # Check if there are sub lists and recurse them
    # (same flat-vs-nested heuristic as .back_to_expr)
    num_sub_lists <- 0
    if (length(some_expr_list[[i]]) == 1) {
      num_sub_lists <- 1
    } else {
      num_sub_lists <- length(unlist(some_expr_list[[i]])) / length(some_expr_list[[i]])
    }
    if (num_sub_lists > 1) {
      # recurse on the sublist
      some_expr_list[[i]] <- .replace_with_keys_helper(some_expr_list[[i]], envir)
    } else {
      # replace the item in the list with the key name (or column index)
      some_expr_list[[i]] <- .replace_all(some_expr_list[[i]], envir)
    }
    i <- i + 1
  }
  return(some_expr_list)
}
#'
#' Front-end work for h2o.exec
#'
#' Discover the destination key (if there is one), the client, and sub in the actual key name for the R variable
#' that contains the pointer to the key in H2O. Side effect: stores the
#' destination key ("" when the expression is not an assignment) in
#' .pkg.env$DESTKEY for later use.
.replace_with_keys<-
function(expr, envir = globalenv()) {
  dest_key <- ""
  # Is this an assignment?
  if ( .isAssignment(as.list(expr)[[1]])) {
    # The destination key is the name that's being assigned to (covers both `<-` and `=`)
    dest_key <- as.character(as.list(expr)[[2]])
    # Don't bother with the assignment anymore, discard it and iterate down the RHS.
    expr <- as.list(expr)[[3]]
  }
  # Assign the dest_key if one was found to the .pkg.env for later use.
  assign("DESTKEY", dest_key, envir = .pkg.env)
  # list-ify the expression
  l <- lapply(as.list(expr), .as_list)
  # replace any R variable names with the key name in the cloud, also handles column names passed as strings
  l <- .replace_with_keys_helper(l, envir)
  # return the modified expression (as a name holding its deparsed text)
  as.name(as.character(as.expression(.back_to_expr(l))))
}
# Apply a unary operator/function `op` to an H2O data set `x` via Exec2.
# Returns a scalar (logical for logical ops) when H2O reports a 0x0 result,
# otherwise a new H2OParsedData handle pointing at the result key.
.h2o.__unop2 <- function(op, x) {
  if(missing(x)) stop("Must specify data set")
  # BUG FIX: the original used stop(cat(...)); cat() prints to stdout and
  # returns NULL, so the raised error had an empty message. Also use
  # inherits() rather than class() == so subclasses are accepted.
  if(!inherits(x, "H2OParsedData"))
    stop("\nData must be an H2O data set. Got ", class(x), "\n")
  expr = paste(op, "(", x@key, ")", sep = "")
  res = .h2o.__exec2(x@h2o, expr)
  # A 0x0 result means H2O computed a scalar.
  if(res$num_rows == 0 && res$num_cols == 0)
    return(ifelse(op %in% .LOGICAL_OPERATORS, as.logical(res$scalar), res$scalar))
  # Tag the result frame as logical when the operator is boolean-valued.
  new("H2OParsedData", h2o=x@h2o, key=res$dest_key, logic=op %in% .LOGICAL_OPERATORS)
}
# Apply a binary operator `op` between `x` and `y` — each either an
# H2OParsedData frame or a scalar — via an Exec2 expression.
# Returns a scalar for 0x0 results, otherwise a new H2OParsedData handle
# (flagged logical for comparison/logical operators).
.h2o.__binop2 <- function(op, x, y) {
  if(class(x) != "H2OParsedData" && length(x) != 1) stop("Unimplemented: x must be a scalar value")
  if(class(y) != "H2OParsedData" && length(y) != 1) stop("Unimplemented: y must be a scalar value")
  # if(!((ncol(x) == 1 || class(x) == "numeric") && (ncol(y) == 1 || class(y) == "numeric")))
  #   stop("Can only operate on single column vectors")
  # Operand becomes its H2O key (frame) or its literal value (scalar).
  LHS = ifelse(class(x) == "H2OParsedData", x@key, x)
  # Warn on arithmetic over factor columns; equality tests are allowed.
  if((class(x) == "H2OParsedData" || class(y) == "H2OParsedData") && !( op %in% c('==', '!='))) {
    anyFactorsX <- .h2o.__checkForFactors(x)
    anyFactorsY <- .h2o.__checkForFactors(y)
    anyFactors <- any(c(anyFactorsX, anyFactorsY))
    if(anyFactors) warning("Operation not meaningful for factors.")
  }
  RHS = ifelse(class(y) == "H2OParsedData", y@key, y)
  expr = paste(LHS, op, RHS)
  # Either operand may carry the client connection.
  if(class(x) == "H2OParsedData") myClient = x@h2o
  else myClient = y@h2o
  res = .h2o.__exec2(myClient, expr)
  # A 0x0 result means H2O computed a scalar.
  if(res$num_rows == 0 && res$num_cols == 0)
    return(ifelse(op %in% .LOGICAL_OPERATORS, as.logical(res$scalar), res$scalar))
  if(op %in% .LOGICAL_OPERATORS)
    new("H2OParsedData", h2o=myClient, key=res$dest_key, logic=TRUE)
  else
    new("H2OParsedData", h2o=myClient, key=res$dest_key, logic=FALSE)
}
#------------------------------------ Utilities ------------------------------------#
# Append a timestamped "name: value" dump of the (possibly nested) list
# `res` to `fileName`, one name/value pair per line.
.h2o.__writeToFile <- function(res, fileName) {
  cat("Writing JSON response to", fileName, "\n")
  vec <- unlist(res)
  pairs <- paste(names(vec), ": ", vec, sep="")
  body <- paste(pairs, collapse="\n")
  stamp <- strsplit(as.character(Sys.time()), " ")[[1]]
  # stamp[1] is the date, stamp[2] the time-of-day.
  write(paste(stamp[1], stamp[2], '\t', body), file = fileName, append = TRUE)
}
# Indent every line of an error message with `prefix` and terminate each
# with a newline; returns a single concatenated string.
.h2o.__formatError <- function(error, prefix="  ") {
  pieces <- strsplit(as.character(error), "\n")[[1]]
  paste(paste(prefix, pieces, "\n", sep=""), collapse="")
}
# Generate a UUID4-shaped unique identifier (32 hex chars, version nibble
# '4', variant nibble in 8-b; dashes stripped), prefixed with `prefix` and
# an underscore.
.h2o.__uniqID <- function(prefix = "") {
  hex <- c(as.character(0:9), letters[1:6])
  draw <- function(n) paste(sample(hex, n, replace=TRUE), collapse='', sep='')
  groups <- c(draw(8),
              draw(4),
              paste('4', draw(3), collapse='', sep=''),
              paste(sample(hex[9:12], 1), draw(3), collapse='', sep=''),
              draw(12))
  id <- gsub("-", "", paste(groups, collapse='-'))
  paste(prefix, id, sep="_")
}
# Check if key_env$key exists in H2O and remove if it does
# .h2o.__finalizer <- function(key_env) {
# if("h2o" %in% ls(key_env) && "key" %in% ls(key_env) && class(key_env$h2o) == "H2OClient" && class(key_env$key) == "character" && key_env$key != "") {
# res = .h2o.__remoteSend(key_env$h2o, .h2o.__PAGE_VIEWALL, filter=key_env$key)
# if(length(res$keys) != 0)
# .h2o.__remoteSend(key_env$h2o, .h2o.__PAGE_REMOVE, key=key_env$key)
# }
# }
# TRUE if any column of `object` is a factor; FALSE for anything that is not
# an H2O frame.
.h2o.__checkForFactors <- function(object) {
  # inherits() is the robust class test (accepts subclasses, unlike class() ==).
  if(!inherits(object, "H2OParsedData")) return(FALSE)
  h2o.anyFactor(object)
}
# Query the H2O cloud status page and return the server's version string.
.h2o.__version <- function(client) {
  .h2o.__remoteSend(client, .h2o.__PAGE_CLOUD)$version
}
# Map a family name (and optional link) to an R family object for GLM.
# tweedie.var.p / tweedie.link.p parameterize the tweedie family (statmod).
.h2o.__getFamily <- function(family, link, tweedie.var.p = 0, tweedie.link.p = 1-tweedie.var.p) {
  if(family == "tweedie")
    return(tweedie(var.power = tweedie.var.p, link.power = tweedie.link.p))
  # BUG FIX: the original mapped "gamma" to gamma(), which is base R's gamma
  # *function* and errors when called with no arguments; the GLM family
  # constructor is stats::Gamma().
  if(missing(link)) {
    switch(family,
           gaussian = gaussian(),
           binomial = binomial(),
           poisson = poisson(),
           gamma = Gamma())
  } else {
    switch(family,
           gaussian = gaussian(link),
           binomial = binomial(link),
           poisson = poisson(link),
           gamma = Gamma(link))
  }
}
|
/R/h2o-package/R/Internal.R
|
permissive
|
SantiagoPOPO/h2o
|
R
| false
| false
| 25,887
|
r
|
# Hack to get around Exec.json always dumping to same Result.hex key
# TODO: Need better way to manage temporary/intermediate values in calculations! Right now, overwriting occurs silently
# Package-private mutable state: logging flag, temp-key counter, caches.
.pkg.env = new.env()
.pkg.env$result_count = 0
.pkg.env$temp_count = 0
.pkg.env$IS_LOGGING = FALSE
# Temporary result keys cycle through Last.value.0 .. Last.value.999.
.TEMP_KEY = "Last.value"
.RESULT_MAX = 1000
# Upper bounds on how much of a frame Inspect views fetch.
.MAX_INSPECT_ROW_VIEW = 10000
.MAX_INSPECT_COL_VIEW = 10000
# Operators whose scalar Exec2 results should be coerced to logical.
.LOGICAL_OPERATORS = c("==", ">", "<", "!=", ">=", "<=", "&", "|", "&&", "||", "!", "is.na")
# Initialize functions for R logging
# Default log directory: macOS-style app-support path; Windows uses APPDATA.
.myPath = paste(Sys.getenv("HOME"), "Library", "Application Support", "h2o", sep=.Platform$file.sep)
if(.Platform$OS.type == "windows")
  .myPath = paste(Sys.getenv("APPDATA"), "h2o", sep=.Platform$file.sep)
.pkg.env$h2o.__LOG_COMMAND = paste(.myPath, "commands.log", sep=.Platform$file.sep)
.pkg.env$h2o.__LOG_ERROR = paste(.myPath, "errors.log", sep=.Platform$file.sep)
# Enable logging of H2O REST commands and errors to the package log files,
# creating the log directories (with a warning) when they do not exist.
h2o.startLogging <- function() {
  # Create a log file's parent directory if it is missing.
  ensureLogDir <- function(logFile) {
    d <- normalizePath(dirname(logFile))
    if(!file.exists(d)) {
      warning(d, " directory does not exist. Creating it now...")
      dir.create(d, recursive = TRUE)
    }
  }
  # Refactor: the original duplicated this check/create logic for both files.
  ensureLogDir(.pkg.env$h2o.__LOG_COMMAND)
  ensureLogDir(.pkg.env$h2o.__LOG_ERROR)
  cat("Appending to log file", .pkg.env$h2o.__LOG_COMMAND, "\n")
  cat("Appending to log file", .pkg.env$h2o.__LOG_ERROR, "\n")
  assign("IS_LOGGING", TRUE, envir = .pkg.env)
}
# Disable logging of H2O REST commands and errors.
h2o.stopLogging <- function() {
  cat("Logging stopped")
  assign("IS_LOGGING", FALSE, envir = .pkg.env)
}
# Delete both the command and error log files from disk.
h2o.clearLogs <- function() {
  file.remove(.pkg.env$h2o.__LOG_COMMAND)
  file.remove(.pkg.env$h2o.__LOG_ERROR)
}
# Return the file path of the requested log ("Command" or "Error").
h2o.getLogPath <- function(type) {
  if(missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  if(type == "Command") .pkg.env$h2o.__LOG_COMMAND else .pkg.env$h2o.__LOG_ERROR
}
# Open the requested log file ("Command" or "Error") in the OS default viewer.
h2o.openLog <- function(type) {
  if(missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  myFile = switch(type, Command = .pkg.env$h2o.__LOG_COMMAND, Error = .pkg.env$h2o.__LOG_ERROR)
  if(!file.exists(myFile)) stop(myFile, " does not exist")
  # BUG FIX: the original passed the Unix-style command string
  # "open '<file>'" to shell.exec(), but shell.exec() expects just a file
  # path/URL. Use .Platform$OS.type for a robust platform test.
  if(.Platform$OS.type == "windows")
    shell.exec(myFile)
  else
    system(paste("open '", myFile, "'", sep=""))
}
# Point the Command or Error log file at a new directory `path`.
h2o.setLogPath <- function(path, type) {
  if(missing(path) || !is.character(path)) stop("path must be a character string")
  if(!file.exists(path)) stop(path, " directory does not exist")
  if(missing(type) || !type %in% c("Command", "Error"))
    stop("type must be either 'Command' or 'Error'")
  varName <- if(type == "Command") "h2o.__LOG_COMMAND" else "h2o.__LOG_ERROR"
  fileName <- if(type == "Command") "commands.log" else "errors.log"
  assign(varName, paste(path, fileName, sep = .Platform$file.sep), envir = .pkg.env)
}
# Append a timestamped record of a command (URL + parameters) or an error to
# the appropriate log file.
#   m            -- the URL when commandOrErr == "Command", otherwise a message.
#   tmp          -- list of parameters to log (possibly named), or NULL.
#   commandOrErr -- "Command" selects the command log; anything else, errors.
#   isPost       -- whether the logged request used POST (vs GET).
.h2o.__logIt <- function(m, tmp, commandOrErr, isPost = TRUE) {
  # m is a url if commandOrErr == "Command"
  # NOTE(review): get("tmp") re-reads the local argument, so the second
  # is.null() test looks redundant; preserved as-is.
  if(is.null(tmp) || is.null(get("tmp"))) s <- m
  else {
    tmp <- get("tmp"); nams = names(tmp)
    # Unnamed error payloads get a generic tag.
    if(length(nams) != length(tmp)) {
      if (is.null(nams) && commandOrErr != "Command") nams = "[WARN/ERROR]"
    }
    # Render each parameter as "name: value", then join on one line.
    s <- rep(" ", max(length(tmp), length(nams)))
    for(i in seq_along(tmp)){
      s[i] <- paste(nams[i], ": ", tmp[[i]], sep="", collapse = " ")
    }
    s <- paste(m, "\n", paste(s, collapse = ", "), ifelse(nchar(s) > 0, "\n", ""))
  }
  # if(commandOrErr != "Command") s <- paste(s, '\n')
  # Timestamp header (GMT); command entries also record the HTTP verb.
  h <- format(Sys.time(), format = "%a %b %d %X %Y %Z", tz = "GMT")
  if(commandOrErr == "Command")
    h <- paste(h, ifelse(isPost, "POST", "GET"), sep = "\n")
  s <- paste(h, "\n", s)
  myFile <- ifelse(commandOrErr == "Command", .pkg.env$h2o.__LOG_COMMAND, .pkg.env$h2o.__LOG_ERROR)
  myDir <- normalizePath(dirname(myFile))
  if(!file.exists(myDir)) stop(myDir, " directory does not exist")
  write(s, file = myFile, append = TRUE)
}
# Internal functions & declarations
# --- REST endpoint paths: core cluster / key-value store (v1) ---
.h2o.__PAGE_CANCEL = "Cancel.json"
.h2o.__PAGE_CLOUD = "Cloud.json"
.h2o.__PAGE_GET = "GetVector.json"
.h2o.__PAGE_EXPORTHDFS = "ExportHdfs.json"
.h2o.__PAGE_INSPECT = "Inspect.json"
.h2o.__PAGE_JOBS = "Jobs.json"
.h2o.__PAGE_PARSE = "Parse.json"
.h2o.__PAGE_PUT = "PutVector.json"
.h2o.__PAGE_REMOVE = "Remove.json"
.h2o.__PAGE_REMOVEALL = "2/RemoveAll.json"
.h2o.__PAGE_SUMMARY = "SummaryPage.json"
.h2o.__PAGE_SHUTDOWN = "Shutdown.json"
.h2o.__PAGE_VIEWALL = "StoreView.json"
.h2o.__DOWNLOAD_LOGS = "LogDownload.json"
# --- v2 data import / inspection / scoring ---
.h2o.__PAGE_EXEC2 = "2/Exec2.json"
.h2o.__PAGE_IMPORTFILES2 = "2/ImportFiles2.json"
.h2o.__PAGE_EXPORTFILES = "2/ExportFiles.json"
.h2o.__PAGE_INSPECT2 = "2/Inspect2.json"
.h2o.__PAGE_PARSE2 = "2/Parse2.json"
.h2o.__PAGE_PREDICT2 = "2/Predict.json"
.h2o.__PAGE_SUMMARY2 = "2/SummaryPage2.json"
.h2o.__PAGE_LOG_AND_ECHO = "2/LogAndEcho.json"
.h2o.__HACK_LEVELS2 = "2/Levels2.json"
.h2o.__HACK_SETCOLNAMES2 = "2/SetColumnNames2.json"
# --- model metrics ---
.h2o.__PAGE_CONFUSION = "2/ConfusionMatrix.json"
.h2o.__PAGE_AUC = "2/AUC.json"
.h2o.__PAGE_HITRATIO = "2/HitRatio.json"
.h2o.__PAGE_GAPSTAT = "2/GapStatistic.json"
.h2o.__PAGE_GAPSTATVIEW = "2/GapStatisticModelView.json"
.h2o.__PAGE_QUANTILES = "2/QuantilesPage.json"
# --- Distributed Random Forest ---
.h2o.__PAGE_DRF = "2/DRF.json"
.h2o.__PAGE_DRFProgress = "2/DRFProgressPage.json"
.h2o.__PAGE_DRFModelView = "2/DRFModelView.json"
# --- Gradient Boosting Machine ---
.h2o.__PAGE_GBM = "2/GBM.json"
.h2o.__PAGE_GBMProgress = "2/GBMProgressPage.json"
.h2o.__PAGE_GRIDSEARCH = "2/GridSearchProgress.json"
.h2o.__PAGE_GBMModelView = "2/GBMModelView.json"
# --- Generalized Linear Models (v2) ---
.h2o.__PAGE_GLM2 = "2/GLM2.json"
.h2o.__PAGE_GLM2Progress = "2/GLMProgress.json"
.h2o.__PAGE_GLMModelView = "2/GLMModelView.json"
.h2o.__PAGE_GLMValidView = "2/GLMValidationView.json"
.h2o.__PAGE_GLM2GridView = "2/GLMGridView.json"
# --- K-Means (v2) ---
.h2o.__PAGE_KMEANS2 = "2/KMeans2.json"
.h2o.__PAGE_KM2Progress = "2/KMeans2Progress.json"
.h2o.__PAGE_KM2ModelView = "2/KMeans2ModelView.json"
# --- Deep Learning ---
.h2o.__PAGE_DeepLearning = "2/DeepLearning.json"
.h2o.__PAGE_DeepLearningProgress = "2/DeepLearningProgressPage.json"
.h2o.__PAGE_DeepLearningModelView = "2/DeepLearningModelView.json"
# --- Principal Component Analysis ---
.h2o.__PAGE_PCA = "2/PCA.json"
.h2o.__PAGE_PCASCORE = "2/PCAScore.json"
.h2o.__PAGE_PCAProgress = "2/PCAProgressPage.json"
.h2o.__PAGE_PCAModelView = "2/PCAModelView.json"
# --- Speedy Random Forest ---
.h2o.__PAGE_SpeeDRF = "2/SpeeDRF.json"
.h2o.__PAGE_SpeeDRFProgress = "2/SpeeDRFProgressPage.json"
.h2o.__PAGE_SpeeDRFModelView = "2/SpeeDRFModelView.json"
# --- Naive Bayes ---
.h2o.__PAGE_BAYES = "2/NaiveBayes.json"
.h2o.__PAGE_NBProgress = "2/NBProgressPage.json"
.h2o.__PAGE_NBModelView = "2/NBModelView.json"
# --- Synthetic frame creation ---
.h2o.__PAGE_CreateFrame = "2/CreateFrame.json"
# client -- Connection object returned from h2o.init().
# page -- URL to access within the H2O server.
# parms -- Named list of parameters to send to the server.
#
# Forwards the parameters to .h2o.__remoteSend as named arguments.
.h2o.__remoteSendWithParms <- function(client, page, parms) {
  # BUG/IDIOM FIX: the original built an R call as a string and ran
  # eval(parse(text=...)). do.call() passes the named list directly, which
  # is safer (no code injection through parameter names) and also works for
  # parameter names that are not syntactic R symbols.
  do.call(.h2o.__remoteSend, c(list(client, page), parms))
}
# Send a POST request to `page` on the H2O server identified by `client`,
# passing `...` as form parameters, and return the parsed JSON response.
# Stops with the server-reported error when the response contains one.
.h2o.__remoteSend <- function(client, page, ...) {
  # Refuse to talk to a cluster that reports itself unhealthy.
  .h2o.__checkClientHealth(client)
  ip = client@ip
  port = client@port
  myURL = paste("http://", ip, ":", port, "/", page, sep="")
  # Sends the given arguments as URL arguments to the given page on the specified server
  #
  # Re-enable POST since we found the bug in NanoHTTPD which was causing POST
  # payloads to be dropped.
  #
  if(.pkg.env$IS_LOGGING) {
    # Log list of parameters sent to H2O
    .h2o.__logIt(myURL, list(...), "Command")
    # Capture headers and body separately so the HTTP status can be logged.
    hg = basicHeaderGatherer()
    tg = basicTextGatherer()
    postForm(myURL, style = "POST", .opts = curlOptions(headerfunction = hg$update, writefunc = tg[[1]]), ...)
    temp = tg$value()
    # Log HTTP response from H2O
    hh <- hg$value()
    s <- paste(hh["Date"], "\nHTTP status code: ", hh["status"], "\n ", temp, sep = "")
    s <- paste(s, "\n\n------------------------------------------------------------------\n")
    cmdDir <- normalizePath(dirname(.pkg.env$h2o.__LOG_COMMAND))
    if(!file.exists(cmdDir)) stop(cmdDir, " directory does not exist")
    write(s, file = .pkg.env$h2o.__LOG_COMMAND, append = TRUE)
  } else
    temp = postForm(myURL, style = "POST", ...)
  # The GET code that we used temporarily while NanoHTTPD POST was known to be busted.
  #
  #if(length(list(...)) == 0)
  #  temp = getURLContent(myURL)
  #else
  #  temp = getForm(myURL, ..., .checkParams = FALSE) # Some H2O params overlap with Curl params
  # after = gsub("\\\\\\\"NaN\\\\\\\"", "NaN", temp[1])
  # after = gsub("NaN", '"NaN"', after)
  # H2O emits quoted "Infinity" tokens; rewrite them to "Inf"/"-Inf" so the
  # JSON parser yields values R understands.
  after = gsub('"Infinity"', '"Inf"', temp[1])
  after = gsub('"-Infinity"', '"-Inf"', after)
  res = fromJSON(after)
  if(!is.null(res$error)) {
    # Surface a server-side error as an R error (and log it when enabled).
    if(.pkg.env$IS_LOGGING) .h2o.__writeToFile(res, .pkg.env$h2o.__LOG_ERROR)
    stop(paste(myURL," returned the following error:\n", .h2o.__formatError(res$error)))
  }
  res
}
# Raise an error describing an unhealthy H2O cluster, optionally naming the
# sick node, and pointing the user at the cluster's Cloud status page.
.h2o.__cloudSick <- function(node_name = NULL, client) {
  url <- paste("http://", client@ip, ":", client@port, "/Cloud.html", sep = "")
  m1 <- "Attempting to execute action on an unhealthy cluster!\n"
  # BUG FIX: the original tested `node_name != NULL`, which yields logical(0)
  # for ANY input (comparison against NULL has length zero), so ifelse()
  # never produced the node-name message. Use is.null() to test for absence.
  m2 <- if (!is.null(node_name))
    paste("The sick node is identified to be: ", node_name, "\n", sep = "")
  else
    ""
  m3 <- paste("Check cloud status here: ", url, sep = "")
  stop(paste(m1, m2, "\n", m3, sep = ""))
}
# Verify that every node in the H2O cloud is healthy and responsive.
# Stops (via .h2o.__cloudSick) when the cloud or any node reports unhealthy;
# nodes whose last heartbeat is stale get one re-poll after a short sleep.
.h2o.__checkClientHealth <- function(client) {
  # Fetch and parse the Cloud status JSON; stop if the instance is gone.
  grabCloudStatus <- function(client) {
    ip <- client@ip
    port <- client@port
    url <- paste("http://", ip, ":", port, "/", .h2o.__PAGE_CLOUD, sep = "")
    if(!url.exists(url)) stop(paste("H2O connection has been severed. Instance no longer up at address ", ip, ":", port, "/", sep = "", collapse = ""))
    fromJSON(getURLContent(url))
  }
  # Inspect one node's health record; elapsed_time appears to be in
  # milliseconds given the 45000/10000 thresholds below.
  checker <- function(node, client) {
    status <- node$node_healthy
    elapsed <- node$elapsed_time
    # NOTE(review): nport is computed but never used.
    nport <- unlist(strsplit(node$name, ":"))[2]
    if(!status) .h2o.__cloudSick(node_name = node$name, client = client)
    # No heartbeat for >45s: report the cloud as sick.
    if(elapsed > 45000) .h2o.__cloudSick(node_name = NULL, client = client)
    # Stale (>10s) but not dead: wait, then re-check all nodes once more.
    if(elapsed > 10000) {
      Sys.sleep(5)
      lapply(grabCloudStatus(client)$nodes, checker, client)
    }
    return(0)
  }
  cloudStatus <- grabCloudStatus(client)
  if(!cloudStatus$cloud_healthy) .h2o.__cloudSick(node_name = NULL, client = client)
  lapply(cloudStatus$nodes, checker, client)
  return(0)
}
#------------------------------------ Job Polling ------------------------------------#
# Look up job `keyName` in the H2O job queue and return its progress value.
# Stops when the job was cancelled by the user or raised a server-side
# exception. Validates both arguments before touching the network.
.h2o.__poll <- function(client, keyName) {
  if(missing(client)) stop("client is missing!")
  if(class(client) != "H2OClient") stop("client must be a H2OClient object")
  if(missing(keyName)) stop("keyName is missing!")
  if(!is.character(keyName) || nchar(keyName) == 0) stop("keyName must be a non-empty string")
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  if(length(jobs) == 0) stop("No jobs found in queue")
  # Scan the whole queue; as in the original, the last matching entry wins.
  job <- NULL
  for(j in jobs) {
    if(j$key == keyName)
      job <- j
  }
  if(is.null(job)) stop("Job key ", keyName, " not found in job queue")
  if(!is.null(job$result$val) && job$result$val == "CANCELLED")
    stop("Job key ", keyName, " was cancelled by user")
  else if(!is.null(job$result$exception) && job$result$exception == 1)
    stop(job$result$val)
  job$progress
}
# TRUE when every job in the H2O queue has either finished (progress == -1)
# or been cancelled.
.h2o.__allDone <- function(client) {
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  running <- vapply(jobs, function(j) !(j$progress == -1.0 || j$cancelled), logical(1))
  !any(running)
}
# Block until every job in the queue is done, checking once per second.
# Raises an error once `timeout` seconds have elapsed without completion.
.h2o.__pollAll <- function(client, timeout) {
  started <- Sys.time()
  repeat {
    if(.h2o.__allDone(client)) break
    Sys.sleep(1)
    if(as.numeric(difftime(Sys.time(), started)) > timeout)
      stop("Timeout reached! Check if any jobs have frozen in H2O.")
  }
}
# Block until job `job_key` completes (poll value -1), sleeping `pollInterval`
# seconds between polls, optionally rendering a console progress bar.
# The finally clause cancels the job if polling exits early (error/interrupt).
.h2o.__waitOnJob <- function(client, job_key, pollInterval = 1, progressBar = TRUE) {
  if(!is.character(job_key) || nchar(job_key) == 0) stop("job_key must be a non-empty string")
  if(progressBar) {
    pb = txtProgressBar(style = 3)
    # NOTE: the while() condition assigns `prog` on every poll so the
    # progress bar can display the latest value.
    tryCatch(while((prog = .h2o.__poll(client, job_key)) != -1) { Sys.sleep(pollInterval); setTxtProgressBar(pb, prog) },
             error = function(e) { cat("\nPolling fails:\n"); print(e) },
             finally = .h2o.__cancelJob(client, job_key))
    setTxtProgressBar(pb, 1.0); close(pb)
  } else
    tryCatch(while(.h2o.__poll(client, job_key) != -1) { Sys.sleep(pollInterval) },
             finally = .h2o.__cancelJob(client, job_key))
}
# For checking progress from each algorithm's progress page (no longer used)
# .h2o.__isDone <- function(client, algo, resH) {
# if(!algo %in% c("GBM", "KM", "RF1", "RF2", "DeepLearning", "GLM1", "GLM2", "GLM1Grid", "PCA")) stop(algo, " is not a supported algorithm")
# version = ifelse(algo %in% c("RF1", "GLM1", "GLM1Grid"), 1, 2)
# page = switch(algo, GBM = .h2o.__PAGE_GBMProgress, KM = .h2o.__PAGE_KM2Progress, RF1 = .h2o.__PAGE_RFVIEW,
# RF2 = .h2o.__PAGE_DRFProgress, DeepLearning = .h2o.__PAGE_DeepLearningProgress, GLM1 = .h2o.__PAGE_GLMProgress,
# GLM1Grid = .h2o.__PAGE_GLMGridProgress, GLM2 = .h2o.__PAGE_GLM2Progress, PCA = .h2o.__PAGE_PCAProgress)
#
# if(version == 1) {
# job_key = resH$response$redirect_request_args$job
# dest_key = resH$destination_key
# if(algo == "RF1")
# res = .h2o.__remoteSend(client, page, model_key = dest_key, data_key = resH$data_key, response_variable = resH$response$redirect_request_args$response_variable)
# else
# res = .h2o.__remoteSend(client, page, job = job_key, destination_key = dest_key)
# if(res$response$status == "error") stop(res$error)
# res$response$status != "poll"
# } else {
# job_key = resH$job_key; dest_key = resH$destination_key
# res = .h2o.__remoteSend(client, page, job_key = job_key, destination_key = dest_key)
# if(res$response_info$status == "error") stop(res$error)
#
# if(!is.null(res$response_info$redirect_url)) {
# ind = regexpr("\\?", res$response_info$redirect_url)[1]
# url = ifelse(ind > 1, substr(res$response_info$redirect_url, 1, ind-1), res$response_info$redirect_url)
# !(res$response_info$status == "poll" || (res$response_info$status == "redirect" && url == page))
# } else
# res$response_info$status == "done"
# }
# }
# Send a cancel request for job `keyName` if it is still running
# (i.e. not already finished, cancelled, or marked complete).
.h2o.__cancelJob <- function(client, keyName) {
  jobs <- .h2o.__remoteSend(client, .h2o.__PAGE_JOBS)$jobs
  if(length(jobs) == 0) stop("No jobs found in queue")
  job <- NULL
  for(j in jobs) {
    if(j$key == keyName) {
      job <- j
      break
    }
  }
  if(is.null(job)) stop("Job key ", keyName, " not found in job queue")
  # Only cancel jobs that are still in flight.
  finished <- job$cancelled || job$progress == -1.0 || job$progress == -2.0 || job$end_time == -1
  if(!finished) {
    .h2o.__remoteSend(client, .h2o.__PAGE_CANCEL, key=keyName)
    cat("Job key", keyName, "was cancelled by user\n")
  }
}
#------------------------------------ Exec2 ------------------------------------#
# Execute an Exec2 expression, writing the result into a rotating temporary
# key (Last.value.0 .. Last.value.(.RESULT_MAX - 1)).
.h2o.__exec2 <- function(client, expr) {
  tempKey <- paste(.TEMP_KEY, .pkg.env$temp_count, sep=".")
  .pkg.env$temp_count <- (.pkg.env$temp_count + 1) %% .RESULT_MAX
  .h2o.__exec2_dest_key(client, expr, tempKey)
}
# Run an Exec2 expression on the server, assigning the result to `destKey`.
# `expr` may be a character string or an unevaluated R expression captured
# via substitute() — this NSE contract makes the statement order load-bearing.
.h2o.__exec2_dest_key <- function(client, expr, destKey) {
  # If evaluating the promise errors, treat `expr` as a raw expression.
  type = tryCatch({ typeof(expr) }, error = function(e) { "expr" })
  if (type != "character")
    expr = deparse(substitute(expr))
  # Server-side assignment: "destKey = <expr>".
  expr = paste(destKey, "=", expr)
  res = .h2o.__remoteSend(client, .h2o.__PAGE_EXEC2, str=expr)
  if(!is.null(res$response$status) && res$response$status == "error") stop("H2O returned an error!")
  # Record the key the result was written to so callers can wrap it.
  res$dest_key = destKey
  return(res)
}
#'
#' Check for assignment with `<-` or `=`
#'
#' Returns TRUE when `expr` is the assignment operator symbol itself
#' (as produced by as.list() on a call), FALSE otherwise.
.isAssignment <- function(expr) {
  identical(expr, quote(`<-`)) || identical(expr, quote(`=`))
}
#'
#' Get the class of the object from the envir.
#'
#' The environment is the parent frame (i.e. wherever h2o.exec is called from)
#'
#' `i` is typically a symbol or literal pulled out of a parsed expression:
#' first try to resolve it as a variable in `envir` and return that object's
#' class; if lookup fails (e.g. `i` is a literal), fall back to class(i);
#' if even that errors, return NA.
#'
#' FIX: the value is now the tryCatch result itself, returned visibly,
#' instead of the invisible value of an assignment to a dead local.
.eval_class <- function(i, envir) {
  tryCatch(class(get(as.character(i), envir)), error = function(e) {
    tryCatch(class(i), error = function(e) {
      NA
    })
  })
}
#'
#' Helper function to recursively unfurl an expression into a list of statements/exprs/calls/names.
#'
#' Calls become (nested) lists of their components; names and atomic
#' constants are returned unchanged.
.as_list <- function(expr) {
  if (!is.call(expr)) {
    return(expr)
  }
  lapply(as.list(expr), .as_list)
}
#'
#' Cast the expression list back to a call.
#'
#' Inverse of .as_list: walks the (possibly nested) list from the tail down
#' to index 2, re-assembling each slot, and finally turns the whole list
#' into a call. Index 1 (the function/operator name) is deliberately left
#' untouched -- as.call() on the full list restores it as the call head.
.back_to_expr<-
function(some_expr_list) {
  len <- length(some_expr_list)
  while(len > 1) {
    # Heuristic: average flattened length per element > 1 means this slot is
    # itself an unfurled call (a nested list) that must be rebuilt first.
    num_sub_lists <- 0
    if (length(some_expr_list[[len]]) == 1) {
      num_sub_lists <- 1
    } else {
      num_sub_lists <- length(unlist(some_expr_list[[len]])) / length(some_expr_list[[len]])
    }
    if (num_sub_lists > 1) {
      # Nested call: rebuild it recursively.
      some_expr_list[[len]] <- .back_to_expr(some_expr_list[[len]])
    } else if (is.atomic(some_expr_list[[len]]) || is.name(some_expr_list[[len]])) {
      # Leaf (constant or symbol): keep as-is.
      some_expr_list[[len]] <- some_expr_list[[len]]
    } else {
      # Flat list of leaves: convert directly into a call.
      some_expr_list[[len]] <- as.call(some_expr_list[[len]])
    }
    len <- len - 1
  }
  return(as.call(some_expr_list))
}
#'
#' Swap the variable with the key.
#'
#' Once there's a key, set its columns to the COLNAMES variable in the .pkg.env (used by .get_col_id)
#'
#' Side effects on .pkg.env: caches the object's H2OClient as SERVER, and
#' (only the first time) caches its column names as COLNAMES so later
#' column-name arguments can be turned into column indices.
.swap_with_key<-
function(object, envir) {
  # Remember which H2O cloud the data lives on
  assign("SERVER", get(as.character(object), envir = envir)@h2o, envir = .pkg.env)
  if ( !exists("COLNAMES", .pkg.env)) {
    assign("COLNAMES", colnames(get(as.character(object), envir = envir)), .pkg.env)
  }
  # Replace the R variable symbol with the H2O key it points to
  object <- as.name(get(as.character(object), envir = envir)@key)
  return(object)
}
#'
#' Does the column id getting
#'
#' Returns the index (indices) of column name `ch` within the COLNAMES
#' vector cached in .pkg.env by .swap_with_key. `envir` is accepted for
#' interface symmetry with the other swap helpers but is not consulted.
.get_col_id <- function(ch, envir) {
  which(.pkg.env$COLNAMES == ch)
}
#'
#' Swap the column name with its index in the (H2O) data frame
#'
#' Calls .get_col_id to probe a variable in the .pkg.env environment.
#' As in the original (whose last statement was an assignment), the index
#' is returned invisibly.
.swap_with_colid <- function(object, envir) {
  invisible(.get_col_id(as.character(object), envir))
}
#'
#' Actually do the replacing of variable/column name with the h2o key names / indices
#'
#' Works on one flat argument list: every slot resolving to an H2OParsedData
#' variable is replaced by its H2O key, then every character slot (assumed
#' to be a column name) is replaced by its column index.
#' `envir` is the caller's environment used to resolve the variables.
.replace_all<-
function(a_list, envir) {
  # Check if there is H2OParsedData object to sub out, grab their indices.
  idxs <- which( "H2OParsedData" == unlist(lapply(a_list, .eval_class, envir)))
  # Check if there are column names to sub out, grab their indices.
  idx2 <- which( "character" == unlist(lapply(a_list, .eval_class, envir)))
  # If nothing to sub, return
  if (length(idxs) == 0 && length(idx2) == 0) return(a_list)
  # Swap out keys
  if (length(idxs) != 0) {
    for (i in idxs) {
      # A length-1 "list" here is a bare symbol, not a container:
      # replace it wholesale instead of indexing into it.
      if(length(a_list) == 1) {
        a_list <- .swap_with_key(a_list, envir)
      } else {
        a_list[[i]] <- .swap_with_key(a_list[[i]], envir)
      }
    }
  }
  # Swap out column names with indices
  if (length(idx2) != 0) {
    for (i in idx2) {
      if (length(a_list) == 1) {
        a_list <- .swap_with_colid(a_list, envir)
      } else {
        a_list[[i]] <- .swap_with_colid(a_list[[i]], envir)
      }
    }
  }
  return(a_list)
}
#'
#' Replace the R variable with a H2O key name.
#' Replace column names with their indices.
#'
#' Recursive walker over the nested list produced by .as_list: slots that
#' are themselves unfurled calls are recursed into, leaves are handed to
#' .replace_all for the actual substitution.
.replace_with_keys_helper<-
function(some_expr_list, envir) {
  #Loop over the length of the list
  len <- length(some_expr_list)
  i <- 1
  while(i <= len) {
    # Check if there are sub lists and recurse them
    # Heuristic: average flattened length per element > 1 means this slot
    # is a nested (unfurled) call rather than a flat leaf.
    num_sub_lists <- 0
    if (length(some_expr_list[[i]]) == 1) {
      num_sub_lists <- 1
    } else {
      num_sub_lists <- length(unlist(some_expr_list[[i]])) / length(some_expr_list[[i]])
    }
    if (num_sub_lists > 1) {
      # recurse on the sublist
      some_expr_list[[i]] <- .replace_with_keys_helper(some_expr_list[[i]], envir)
    } else {
      # replace the item in the list with the key name (or column index)
      some_expr_list[[i]] <- .replace_all(some_expr_list[[i]], envir)
    }
    i <- i + 1
  }
  return(some_expr_list)
}
#'
#' Front-end work for h2o.exec
#'
#' Discover the destination key (if there is one), the client, and sub in the actual key name for the R variable
#' that contains the pointer to the key in H2O.
.replace_with_keys<-
function(expr, envir = globalenv()) {
  dest_key <- ""
  # Is this an assignment?
  if ( .isAssignment(as.list(expr)[[1]])) {
    # The destination key is the name that's being assigned to (covers both `<-` and `=`)
    dest_key <- as.character(as.list(expr)[[2]])
    # Don't bother with the assignment anymore, discard it and iterate down the RHS.
    expr <- as.list(expr)[[3]]
  }
  # Assign the dest_key if one was found to the .pkg.env for later use.
  assign("DESTKEY", dest_key, envir = .pkg.env)
  # list-ify the expression
  l <- lapply(as.list(expr), .as_list)
  # replace any R variable names with the key name in the cloud, also handles column names passed as strings
  l <- .replace_with_keys_helper(l, envir)
  # return the modified expression
  # (the rebuilt call is deparsed to text and wrapped in a symbol so the
  # downstream Exec2 request can send it verbatim)
  as.name(as.character(as.expression(.back_to_expr(l))))
}
# Apply a unary operator/function `op` to an H2O data set via Exec2.
#
# Returns a scalar (coerced to logical for logical ops) when H2O reports a
# 0x0 result, otherwise a new H2OParsedData handle for the result key.
#
# FIXES: the bad-input branch used stop(cat(...)) -- cat() prints and
# returns NULL, so stop() raised an empty-message error; the message is now
# passed to stop() directly. Class test uses inherits() so objects with a
# multi-element class vector are handled correctly.
.h2o.__unop2 <- function(op, x) {
  if (missing(x)) stop("Must specify data set")
  if (!inherits(x, "H2OParsedData"))
    stop("Data must be an H2O data set. Got ", paste(class(x), collapse = "/"))
  expr <- paste(op, "(", x@key, ")", sep = "")
  res <- .h2o.__exec2(x@h2o, expr)
  # An empty (0x0) frame means H2O returned a scalar value
  if (res$num_rows == 0 && res$num_cols == 0) {
    if (op %in% .LOGICAL_OPERATORS) return(as.logical(res$scalar))
    return(res$scalar)
  }
  new("H2OParsedData", h2o = x@h2o, key = res$dest_key,
      logic = op %in% .LOGICAL_OPERATORS)
}
# Apply a binary operator `op` between x and y, where each side is either an
# H2OParsedData handle or a length-1 scalar. Returns a scalar for 0x0
# results, otherwise a new H2OParsedData handle (logic = TRUE for logical
# operators).
#
# FIX: class tests now use inherits() plus plain if/else instead of
# ifelse(class(x) == ...), which silently produced a vector result whenever
# class() returned more than one element.
.h2o.__binop2 <- function(op, x, y) {
  x.is.h2o <- inherits(x, "H2OParsedData")
  y.is.h2o <- inherits(y, "H2OParsedData")
  if (!x.is.h2o && length(x) != 1) stop("Unimplemented: x must be a scalar value")
  if (!y.is.h2o && length(y) != 1) stop("Unimplemented: y must be a scalar value")
  # if(!((ncol(x) == 1 || class(x) == "numeric") && (ncol(y) == 1 || class(y) == "numeric")))
  #   stop("Can only operate on single column vectors")
  LHS <- if (x.is.h2o) x@key else x
  # Warn when arithmetic (anything but ==/!=) is attempted on factor columns
  if ((x.is.h2o || y.is.h2o) && !(op %in% c('==', '!='))) {
    anyFactors <- any(c(.h2o.__checkForFactors(x), .h2o.__checkForFactors(y)))
    if (anyFactors) warning("Operation not meaningful for factors.")
  }
  RHS <- if (y.is.h2o) y@key else y
  expr <- paste(LHS, op, RHS)
  myClient <- if (x.is.h2o) x@h2o else y@h2o
  res <- .h2o.__exec2(myClient, expr)
  # An empty (0x0) frame means H2O returned a scalar value
  if (res$num_rows == 0 && res$num_cols == 0) {
    if (op %in% .LOGICAL_OPERATORS) return(as.logical(res$scalar))
    return(res$scalar)
  }
  new("H2OParsedData", h2o = myClient, key = res$dest_key,
      logic = op %in% .LOGICAL_OPERATORS)
}
#------------------------------------ Utilities ------------------------------------#
# Append a timestamped, human-readable dump of a (possibly nested) JSON
# response `res` to `fileName`. Each leaf of the flattened response becomes
# a "name: value" line.
#
# Improvement: formatVector is vectorized (paste0 over the whole vector)
# instead of filling a preallocated buffer in a 1:length() loop.
.h2o.__writeToFile <- function(res, fileName) {
  # Render a named vector as "name: value" lines joined by newlines
  formatVector <- function(vec) {
    paste(paste0(names(vec), ": ", vec), collapse = "\n")
  }
  cat("Writing JSON response to", fileName, "\n")
  # temp[1] = date, temp[2] = time-of-day
  temp <- strsplit(as.character(Sys.time()), " ")[[1]]
  # myDate = gsub("-", "", temp[1]); myTime = gsub(":", "", temp[2])
  write(paste(temp[1], temp[2], '\t', formatVector(unlist(res))), file = fileName, append = TRUE)
  # writeLines(unlist(lapply(res$response, paste, collapse=" ")), fileConn)
}
# Indent every line of an error message with `prefix`, terminating each
# line with "\n", and return the whole thing as one string.
.h2o.__formatError <- function(error,prefix=" ") {
  lines <- strsplit(as.character(error), "\n")[[1]]
  paste0(paste0(prefix, lines, "\n"), collapse = "")
}
# Generate a pseudo-random unique identifier: `prefix` + "_" + 32 hex chars,
# shaped like a v4 UUID (version nibble '4', variant nibble in 8..b) with
# the dashes stripped.
.h2o.__uniqID <- function(prefix = "") {
  hex <- c(as.character(0:9), letters[1:6])
  rand_hex <- function(k) paste(sample(hex, k, replace = TRUE), collapse = "")
  # UUIDv4 layout 8-4-4-4-12, concatenated without separators
  body <- paste0(
    rand_hex(8),
    rand_hex(4),
    "4", rand_hex(3),
    sample(hex[9:12], 1), rand_hex(3),
    rand_hex(12)
  )
  paste(prefix, body, sep = "_")
}
# Check if key_env$key exists in H2O and remove if it does
# .h2o.__finalizer <- function(key_env) {
# if("h2o" %in% ls(key_env) && "key" %in% ls(key_env) && class(key_env$h2o) == "H2OClient" && class(key_env$key) == "character" && key_env$key != "") {
# res = .h2o.__remoteSend(key_env$h2o, .h2o.__PAGE_VIEWALL, filter=key_env$key)
# if(length(res$keys) != 0)
# .h2o.__remoteSend(key_env$h2o, .h2o.__PAGE_REMOVE, key=key_env$key)
# }
# }
# TRUE if `object` is an H2OParsedData frame containing any factor column,
# FALSE for anything that is not an H2O frame.
#
# FIX: uses inherits() instead of class(object) != "...", which misbehaves
# when class() returns a vector of length > 1.
.h2o.__checkForFactors <- function(object) {
  if (!inherits(object, "H2OParsedData")) return(FALSE)
  h2o.anyFactor(object)
}
# Query the H2O cloud status endpoint and return its version string.
.h2o.__version <- function(client) {
  res = .h2o.__remoteSend(client, .h2o.__PAGE_CLOUD)
  res$version
}
# Map a family name (and optional link) to a stats::family object.
#
# Args:
#   family: one of "gaussian", "binomial", "poisson", "gamma", "tweedie".
#   link:   optional link name; when missing, each family's default is used.
#   tweedie.var.p / tweedie.link.p: variance/link powers for the tweedie
#     family (tweedie() must be provided by an attached package, e.g. statmod).
#
# BUG FIX: the gamma branch called gamma(), which is base R's gamma
# *function* (it errors without an argument); the GLM family constructor
# is Gamma() / Gamma(link).
.h2o.__getFamily <- function(family, link, tweedie.var.p = 0, tweedie.link.p = 1-tweedie.var.p) {
  if(family == "tweedie")
    return(tweedie(var.power = tweedie.var.p, link.power = tweedie.link.p))
  if(missing(link)) {
    switch(family,
           gaussian = gaussian(),
           binomial = binomial(),
           poisson = poisson(),
           gamma = Gamma())
  } else {
    switch(family,
           gaussian = gaussian(link),
           binomial = binomial(link),
           poisson = poisson(link),
           gamma = Gamma(link))
  }
}
|
##########################Project 2-ML Code###################################
## Setting WD ####
setwd("~/Desktop/Intoduction to ML and DM (02450)/Project 2/Project 2-ML")
## Loading packages ####
library(keras)
library(formattable)
library(markdown)
library(tidyverse)
library(caret)
library(dplyr)
library(doFuture)
library(doParallel)
library(earth)
library(gbm)
library(gam)
library(ggplot2)
library(glmnet)
library(grid)
library(gridExtra)
library(hexbin)
library(ipred)
library(labeling)
library(MASS)
library(neuralnet)
library(NeuralNetTools)
library(NeuralNetworkVisualization)
library(nnet)
library(pdp)
library(plotmo)
library(randomForest)
library(ranger)
library(reshape2)
library(rlang)
library(rpart.plot)
library(rsample)
library(shape)
library(splines)
library(xgboost)
library(pROC)
library(caTools)
library(adabag)
library(reshape2)
library(caret)
library(recipes)
library(tidymodels)
library(parsnip)
## Creating data sets and Merging the two datasets ####
math=read.table("Data/math.csv",sep=";",header=TRUE)
port=read.table("Data/port.csv",sep=";",header=TRUE)
port$id<-seq(1:nrow(port))
merged=merge(port,math,by=c("school","sex","age","address","famsize","Pstatus","Medu",
"Fedu","Mjob","Fjob","reason","nursery","internet","guardian"
,"traveltime","studytime","activities","higher","romantic"
,"goout","Dalc","Walc","famrel","freetime","famsup","schoolsup","health"),all.x = TRUE)
#merge by the personal attributes (i.e NOT the course specific attributes)
sum(is.na(merged$G3.y))
##adding indicator variable to portugese data set (1 if enrolled in math else 0)
math_ind<-rep(0,nrow(merged))
for(i in 1:nrow(merged)){
ifelse(is.na(merged$failures.y[i]),math_ind[i]<-0,math_ind[i]<-1)
}
merged$math_ind=math_ind
merged<-merged[order(merged$id),]
port<-merged[,-c(34:40)]
names(port)[28:33]<-c("failures","paid","absences","G1","G2","G3")
sum(port$math_ind) #370
port$math_ind<-as.factor(port$math_ind)
## Structure of the variables####
str(port)
################ Variable transformation ###############################
## Changing integers to factors and ordering factors####
port<-transform(port,
studytime=factor(studytime,labels=c('<2 h','2-5 h','5-10 h','>10 h'),ordered=F),
traveltime=factor(traveltime,labels=c('<15 min','15-30 min','30-60 min','>60 min'),ordered=F),
Fedu=factor(Fedu,labels=c('none','1st-4th grade','5th-9th grade','10th-12th grade','higher'),ordered=F),
Medu=factor(Medu,labels=c('none','1st-4th grade','5th-9th grade','10th-12th grade','higher'),ordered=F),
freetime=factor(freetime,labels=c('very low','low','medium','high','very high'),ordered=F),
goout=factor(goout,labels=c('very low','low','medium','high','very high'),ordered=F),
Dalc=factor(Dalc,labels=c('very low','low','medium','high','very high'),ordered=F),
Walc=factor(Walc,labels=c('very low','low','medium','high','very high'),ordered=F),
health=factor(health,labels=c('very bad','bad','medium','good','very good'),ordered=F),
famrel=factor(famrel,labels=c('very bad','bad','medium','good','very good'),ordered=F)
)
levels(port$famsize)<-c("LE3","GT3")
port$famsize<-factor(port$famsize,ordered=F)
str(port)
## Combining levels for sparse levels##
t1<-data.frame(table(port$Medu))
names(t1)[1]<-"Medu"
t2<-data.frame(table(port$Fedu))
names(t2)[1]<-"Fedu"
grid.arrange(formattable(t1), formattable(t2))
table(port$Dalc)
table(port$Walc)
levels(port$Medu)<-c("None/1st-4th","None/1st-4th","5th-9th","10th-12th","higher")
levels(port$Fedu)<-c("None/1st-4th","None/1st-4th","5th-9th","10th-12th","higher")
levels(port$Dalc)<-c('very low','low','medium','high/very high','high/very high')
levels(port$Walc)<-c('very low','low','medium','high/very high','high/very high')
str(port)
###########Regression part a ##############
options(dplyr.print_max = 1e9)
port$intercept<-rep(1,nrow(port)) #including intercept term
### Cross-Validation
train<-port
cv_folds <- rsample::vfold_cv(train, v = 5)
## Recipe
rec_1<-recipe(G3 ~ ., data = train) %>%
step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>%
step_dummy(names(Filter(is.factor, port)))
baked_data_1<-bake(prep(rec_1),new_data = train)
## Fitting a reg.regression model tuning lambda##
# Tune an elastic-net (mixture = 0.5) linear regression over a log-spaced
# penalty grid with cross-validation and return the best CV RMSE together
# with the finalized workflow.
#
# Args:
#   rec:      a recipes::recipe for preprocessing.
#   cv_folds: an rsample::vfold_cv resampling object.
#   x:        data used for the final coefficient fit/printout.
#
# Returns list(RMSE_best, final_model): best tuning result and the
# finalized (tuned) workflow.
RegReg <- function(rec,cv_folds,x){
  # Elastic-net spec: penalty (lambda) tuned, mixture fixed at 0.5
  glmnet_spec <- parsnip::linear_reg(
    penalty = tune(),
    mixture=0.5
  ) %>%
    set_engine("glmnet")
  # 50 penalty values on a log10 grid from 1e-3 to 1
  lmbda_mixtr_grid <- grid_regular(
    penalty(c(-3,0)),
    levels = 50
  )
  wf <- workflow() %>%
    add_recipe(rec) %>%
    add_model(glmnet_spec)
  doParallel::registerDoParallel()
  model_tuned <- tune::tune_grid(
    wf %>% update_model(glmnet_spec),
    resamples = cv_folds,
    grid = lmbda_mixtr_grid,
    metrics = yardstick::metric_set(rmse)
  )
  # Diagnostic RMSE-vs-penalty plot.
  # NOTE(review): `plot` (and `coef` below) are computed but never returned
  # or used -- confirm whether they are still wanted.
  plot<-model_tuned %>%
    collect_metrics() %>%
    ggplot(aes(penalty, mean, color = .metric)) +
    geom_point()+
    facet_wrap(~.metric, scales = "free", nrow = 2) +
    scale_x_log10() +
    theme(legend.position = "none")
  lowest_rmse <- model_tuned %>%
    select_best("rmse", maximize = FALSE) #select the model with the lowest RMSE
  final_model <- finalize_workflow( #Define the best fit model
    wf %>% update_model(glmnet_spec),
    lowest_rmse
  )
  RMSE_best<-show_best(model_tuned, "rmse", n = 1) #Cross-Validation RMSE for best tuned model
  # Fit on x and print the non-zero coefficients of the selected model
  coef<-final_model %>%
    fit(x) %>%
    pull_workflow_fit() %>%
    tidy()%>%
    print(n = Inf)%>%filter(estimate!=0)
  return(list(RMSE_best,final_model))
}
RegReg(rec=rec_2,cv_folds=cv_folds,x=train)
#### Regression part b ####
##CV algorithm for Neural network algorithm ####
# K-fold cross-validation of `model` on `data`, measuring mean squared
# error against the `y_train` column.
#
# Args:
#   model: function(training_data, size) returning an object usable with
#          predict().
#   data:  data.frame containing the response column `y_train`.
#   size:  tuning parameter forwarded to `model` (e.g. hidden units).
#   k:     number of folds (default 5, matching the original loop).
#
# Returns the CV estimate of the MSE.
#
# BUG FIX: the original sampled fold labels from 1:2 but iterated folds
# 1..5, so folds 3-5 always had empty test sets; labels now cover all k
# folds.
crossvalidation <- function(model, data, size, k = 5) {
  n <- nrow(data)
  fold <- sample(rep(seq_len(k), length.out = n))
  sq_err <- vector("list", k)
  for (i in seq_len(k)) {
    train_set <- data[fold != i, ]
    test_set <- data[fold == i, ]
    fit <- model(train_set, size)
    pred <- predict(fit, test_set)
    sq_err[[i]] <- (pred - test_set$y_train)^2
  }
  mean(unlist(sq_err))
}
##Two-layer Cross-validation####
# Outer K-fold CV comparing three regressors per fold:
#   A = elastic-net regression (lambda tuned by inner CV via RegReg),
#   B = baseline (mean of the test responses),
#   C = single-hidden-layer neural net (hidden units tuned via crossvalidation()).
# NOTE(review): cvFolds() appears to come from the cvTools package, which is
# not among the library() calls above -- confirm it is attached before running.
X<-port[-33]
y<-port$G3
N = nrow(X)
M = ncol(X)
K=5
# Accumulators: per-fold predictions, squared errors, and tuned parameters
y_true = matrix(, 0,1)
yhat = matrix(, 0,3)
r = matrix(, 0,3)
lambda=matrix(, 0,1)
h_unit=matrix(, 0,1)
## set the seed to make your partition reproducible
set.seed(123)
CV <- cvFolds(N, K=K)
for(k in 1:K){ # For each outer fold
  print(paste('Crossvalidation fold ', k, '/', CV$NumTestSets, sep=''))
  # Extract training and test set
  X_train <- X[CV$subsets[CV$which!=k], ];
  y_train <- y[CV$subsets[CV$which!=k]];
  X_test <- X[CV$subsets[CV$which==k], ];
  y_test <- y[CV$subsets[CV$which==k]];
  Xdatframe_train <- X_train
  Xdatframe_test = X_test
  ##Reg.Regression and baseline##
  data_train<-data.frame(cbind(y_train,Xdatframe_train))
  rec<-recipe(y_train ~ ., data = data_train) %>%
    step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
    step_dummy(names(Filter(is.factor,data_train)))
  cv_folds_new <- rsample::vfold_cv(data_train, v = 5)
  best_fit_A<-RegReg(rec=rec,cv_folds=cv_folds_new,x=data_train) #lin.reg function from part a) using CV (inner fold)
  opt_A<-best_fit_A[[2]]$fit$actions$model$spec[[1]]$penalty[[2]] #Optimal lambda
  # glmnet fits the whole lambda path; the tuned lambda opt_A is applied
  # at prediction time via `s = opt_A` below.
  final_model_A<-glmnet(x=Xdatframe_train,y=y_train,alpha=0.5,standardize=FALSE)
  ##neural network ##
  rec_1<-recipe(y_train ~ ., data =data.frame(cbind(y_train,Xdatframe_train)) ) %>%
    step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
    step_dummy(names(Filter(is.factor,data.frame(cbind(y_train,Xdatframe_train)))))
  dummy_train<-bake(prep(rec_1),Xdatframe_train)
  colnames<-colnames(dummy_train)
  fmla <- as.formula(paste("y_train ~ ", paste(colnames, collapse= "+")))
  n1 <- function(d,sz){nnet(fmla,data=d,size=sz,linout=T)}
  n1error<-rep(0,10)
  for(i in 1:10){
    n1error[i]<-crossvalidation(model=n1, data=cbind(y_train,dummy_train),size=i) #10 different values of h (tuning grid)
  }
  error<-data.frame(n1error)
  opt_C<-which(error==min(error))
  final_model_C<-n1(d=dummy_train,sz=opt_C)
  ##Predictions
  yhat_A<-predict(final_model_A,newx=data.matrix(Xdatframe_test),s=opt_A)
  yhat_B<-data.frame(rep(mean(y_test),length(y_test)))
  rec_2<-recipe(y_test ~ ., data =data.frame(cbind(y_test,Xdatframe_test)) ) %>%
    step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
    step_dummy(names(Filter(is.factor,data.frame(cbind(y_test,Xdatframe_test)))))
  dummy_test<-bake(prep(rec_2),Xdatframe_test)
  yhat_C<-predict(final_model_C,data.matrix(dummy_test))
  # Stack this fold's predictions and per-model MSEs onto the accumulators
  dyhat = cbind(yhat_A,yhat_B,yhat_C)
  yhat <- rbind( yhat, dyhat)
  dr_A = colMeans( ( yhat_A-y_test )^2)
  dr_B=colMeans((yhat_B-y_test)^2)
  dr_C=colMeans((yhat_C-y_test)^2)
  dr = cbind(dr_A,dr_B,dr_C)
  r = rbind(r, dr)
  lambda<-rbind(lambda,opt_A)
  h_unit<-rbind(h_unit,opt_C)
  # NOTE(review): y_true is re-initialized from y_test on every iteration,
  # so it never accumulates across folds as the pre-loop allocation
  # suggests -- the first of these two lines likely belongs before the loop.
  y_true<-data.frame(y_test)
  y_true<- rbind( y_true, y_test)
}
print(list(r,lambda,h_unit))
|
/V2.R
|
no_license
|
AlexLarsen1/Project-2-ML
|
R
| false
| false
| 9,286
|
r
|
##########################Project 2-ML Code###################################
## Setting WD ####
setwd("~/Desktop/Intoduction to ML and DM (02450)/Project 2/Project 2-ML")
## Loading packages ####
library(keras)
library(formattable)
library(markdown)
library(tidyverse)
library(caret)
library(dplyr)
library(doFuture)
library(doParallel)
library(earth)
library(gbm)
library(gam)
library(ggplot2)
library(glmnet)
library(grid)
library(gridExtra)
library(hexbin)
library(ipred)
library(labeling)
library(MASS)
library(neuralnet)
library(NeuralNetTools)
library(NeuralNetworkVisualization)
library(nnet)
library(pdp)
library(plotmo)
library(randomForest)
library(ranger)
library(reshape2)
library(rlang)
library(rpart.plot)
library(rsample)
library(shape)
library(splines)
library(xgboost)
library(pROC)
library(caTools)
library(adabag)
library(reshape2)
library(caret)
library(recipes)
library(tidymodels)
library(parsnip)
## Creating data sets and Merging the two datasets ####
math=read.table("Data/math.csv",sep=";",header=TRUE)
port=read.table("Data/port.csv",sep=";",header=TRUE)
port$id<-seq(1:nrow(port))
merged=merge(port,math,by=c("school","sex","age","address","famsize","Pstatus","Medu",
"Fedu","Mjob","Fjob","reason","nursery","internet","guardian"
,"traveltime","studytime","activities","higher","romantic"
,"goout","Dalc","Walc","famrel","freetime","famsup","schoolsup","health"),all.x = TRUE)
#merge by the personal attributes (i.e NOT the course specific attributes)
sum(is.na(merged$G3.y))
##adding indicator variable to portugese data set (1 if enrolled in math else 0)
math_ind<-rep(0,nrow(merged))
for(i in 1:nrow(merged)){
ifelse(is.na(merged$failures.y[i]),math_ind[i]<-0,math_ind[i]<-1)
}
merged$math_ind=math_ind
merged<-merged[order(merged$id),]
port<-merged[,-c(34:40)]
names(port)[28:33]<-c("failures","paid","absences","G1","G2","G3")
sum(port$math_ind) #370
port$math_ind<-as.factor(port$math_ind)
## Structure of the variables####
str(port)
################ Variable transformation ###############################
## Changing integers to factors and ordering factors####
port<-transform(port,
studytime=factor(studytime,labels=c('<2 h','2-5 h','5-10 h','>10 h'),ordered=F),
traveltime=factor(traveltime,labels=c('<15 min','15-30 min','30-60 min','>60 min'),ordered=F),
Fedu=factor(Fedu,labels=c('none','1st-4th grade','5th-9th grade','10th-12th grade','higher'),ordered=F),
Medu=factor(Medu,labels=c('none','1st-4th grade','5th-9th grade','10th-12th grade','higher'),ordered=F),
freetime=factor(freetime,labels=c('very low','low','medium','high','very high'),ordered=F),
goout=factor(goout,labels=c('very low','low','medium','high','very high'),ordered=F),
Dalc=factor(Dalc,labels=c('very low','low','medium','high','very high'),ordered=F),
Walc=factor(Walc,labels=c('very low','low','medium','high','very high'),ordered=F),
health=factor(health,labels=c('very bad','bad','medium','good','very good'),ordered=F),
famrel=factor(famrel,labels=c('very bad','bad','medium','good','very good'),ordered=F)
)
levels(port$famsize)<-c("LE3","GT3")
port$famsize<-factor(port$famsize,ordered=F)
str(port)
## Combining levels for sparse levels##
t1<-data.frame(table(port$Medu))
names(t1)[1]<-"Medu"
t2<-data.frame(table(port$Fedu))
names(t2)[1]<-"Fedu"
grid.arrange(formattable(t1), formattable(t2))
table(port$Dalc)
table(port$Walc)
levels(port$Medu)<-c("None/1st-4th","None/1st-4th","5th-9th","10th-12th","higher")
levels(port$Fedu)<-c("None/1st-4th","None/1st-4th","5th-9th","10th-12th","higher")
levels(port$Dalc)<-c('very low','low','medium','high/very high','high/very high')
levels(port$Walc)<-c('very low','low','medium','high/very high','high/very high')
str(port)
###########Regression part a ##############
options(dplyr.print_max = 1e9)
port$intercept<-rep(1,nrow(port)) #including intercept term
### Cross-Validation
train<-port
cv_folds <- rsample::vfold_cv(train, v = 5)
## Recipe
rec_1<-recipe(G3 ~ ., data = train) %>%
step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>%
step_dummy(names(Filter(is.factor, port)))
baked_data_1<-bake(prep(rec_1),new_data = train)
## Fitting a reg.regression model tuning lambda##
# Tune an elastic-net (mixture = 0.5) linear regression over a log-spaced
# penalty grid with cross-validation and return the best CV RMSE together
# with the finalized workflow.
#
# Args:
#   rec:      a recipes::recipe for preprocessing.
#   cv_folds: an rsample::vfold_cv resampling object.
#   x:        data used for the final coefficient fit/printout.
#
# Returns list(RMSE_best, final_model): best tuning result and the
# finalized (tuned) workflow.
RegReg <- function(rec,cv_folds,x){
  # Elastic-net spec: penalty (lambda) tuned, mixture fixed at 0.5
  glmnet_spec <- parsnip::linear_reg(
    penalty = tune(),
    mixture=0.5
  ) %>%
    set_engine("glmnet")
  # 50 penalty values on a log10 grid from 1e-3 to 1
  lmbda_mixtr_grid <- grid_regular(
    penalty(c(-3,0)),
    levels = 50
  )
  wf <- workflow() %>%
    add_recipe(rec) %>%
    add_model(glmnet_spec)
  doParallel::registerDoParallel()
  model_tuned <- tune::tune_grid(
    wf %>% update_model(glmnet_spec),
    resamples = cv_folds,
    grid = lmbda_mixtr_grid,
    metrics = yardstick::metric_set(rmse)
  )
  # Diagnostic RMSE-vs-penalty plot.
  # NOTE(review): `plot` (and `coef` below) are computed but never returned
  # or used -- confirm whether they are still wanted.
  plot<-model_tuned %>%
    collect_metrics() %>%
    ggplot(aes(penalty, mean, color = .metric)) +
    geom_point()+
    facet_wrap(~.metric, scales = "free", nrow = 2) +
    scale_x_log10() +
    theme(legend.position = "none")
  lowest_rmse <- model_tuned %>%
    select_best("rmse", maximize = FALSE) #select the model with the lowest RMSE
  final_model <- finalize_workflow( #Define the best fit model
    wf %>% update_model(glmnet_spec),
    lowest_rmse
  )
  RMSE_best<-show_best(model_tuned, "rmse", n = 1) #Cross-Validation RMSE for best tuned model
  # Fit on x and print the non-zero coefficients of the selected model
  coef<-final_model %>%
    fit(x) %>%
    pull_workflow_fit() %>%
    tidy()%>%
    print(n = Inf)%>%filter(estimate!=0)
  return(list(RMSE_best,final_model))
}
RegReg(rec=rec_2,cv_folds=cv_folds,x=train)
#### Regression part b ####
##CV algorithm for Neural network algorithm ####
# K-fold cross-validation of `model` on `data`, measuring mean squared
# error against the `y_train` column.
#
# Args:
#   model: function(training_data, size) returning an object usable with
#          predict().
#   data:  data.frame containing the response column `y_train`.
#   size:  tuning parameter forwarded to `model` (e.g. hidden units).
#   k:     number of folds (default 5, matching the original loop).
#
# Returns the CV estimate of the MSE.
#
# BUG FIX: the original sampled fold labels from 1:2 but iterated folds
# 1..5, so folds 3-5 always had empty test sets; labels now cover all k
# folds.
crossvalidation <- function(model, data, size, k = 5) {
  n <- nrow(data)
  fold <- sample(rep(seq_len(k), length.out = n))
  sq_err <- vector("list", k)
  for (i in seq_len(k)) {
    train_set <- data[fold != i, ]
    test_set <- data[fold == i, ]
    fit <- model(train_set, size)
    pred <- predict(fit, test_set)
    sq_err[[i]] <- (pred - test_set$y_train)^2
  }
  mean(unlist(sq_err))
}
##Two-layer Cross-validation####
X<-port[-33]
y<-port$G3
N = nrow(X)
M = ncol(X)
K=5
y_true = matrix(, 0,1)
yhat = matrix(, 0,3)
r = matrix(, 0,3)
lambda=matrix(, 0,1)
h_unit=matrix(, 0,1)
## set the seed to make your partition reproducible
set.seed(123)
CV <- cvFolds(N, K=K)
for(k in 1:K){ # For each outer fold
print(paste('Crossvalidation fold ', k, '/', CV$NumTestSets, sep=''))
# Extract training and test set
X_train <- X[CV$subsets[CV$which!=k], ];
y_train <- y[CV$subsets[CV$which!=k]];
X_test <- X[CV$subsets[CV$which==k], ];
y_test <- y[CV$subsets[CV$which==k]];
Xdatframe_train <- X_train
Xdatframe_test = X_test
##Reg.Regression and baseline##
data_train<-data.frame(cbind(y_train,Xdatframe_train))
rec<-recipe(y_train ~ ., data = data_train) %>%
step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
step_dummy(names(Filter(is.factor,data_train)))
cv_folds_new <- rsample::vfold_cv(data_train, v = 5)
best_fit_A<-RegReg(rec=rec,cv_folds=cv_folds_new,x=data_train) #lin.reg function from part a) using CV (inner fold)
opt_A<-best_fit_A[[2]]$fit$actions$model$spec[[1]]$penalty[[2]] #Optimal lambda
final_model_A<-glmnet(x=Xdatframe_train,y=y_train,alpha=0.5,standardize=FALSE)
##neural network ##
rec_1<-recipe(y_train ~ ., data =data.frame(cbind(y_train,Xdatframe_train)) ) %>%
step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
step_dummy(names(Filter(is.factor,data.frame(cbind(y_train,Xdatframe_train)))))
dummy_train<-bake(prep(rec_1),Xdatframe_train)
colnames<-colnames(dummy_train)
fmla <- as.formula(paste("y_train ~ ", paste(colnames, collapse= "+")))
n1 <- function(d,sz){nnet(fmla,data=d,size=sz,linout=T)}
n1error<-rep(0,10)
for(i in 1:10){
n1error[i]<-crossvalidation(model=n1, data=cbind(y_train,dummy_train),size=i) #10 different values of h (tuning grid)
}
error<-data.frame(n1error)
opt_C<-which(error==min(error))
final_model_C<-n1(d=dummy_train,sz=opt_C)
##Predictions
yhat_A<-predict(final_model_A,newx=data.matrix(Xdatframe_test),s=opt_A)
yhat_B<-data.frame(rep(mean(y_test),length(y_test)))
rec_2<-recipe(y_test ~ ., data =data.frame(cbind(y_test,Xdatframe_test)) ) %>%
step_normalize(all_numeric(),- c(all_outcomes(),intercept))%>% #standardize and dummy encode recipe
step_dummy(names(Filter(is.factor,data.frame(cbind(y_test,Xdatframe_test)))))
dummy_test<-bake(prep(rec_2),Xdatframe_test)
yhat_C<-predict(final_model_C,data.matrix(dummy_test))
dyhat = cbind(yhat_A,yhat_B,yhat_C)
yhat <- rbind( yhat, dyhat)
dr_A = colMeans( ( yhat_A-y_test )^2)
dr_B=colMeans((yhat_B-y_test)^2)
dr_C=colMeans((yhat_C-y_test)^2)
dr = cbind(dr_A,dr_B,dr_C)
r = rbind(r, dr)
lambda<-rbind(lambda,opt_A)
h_unit<-rbind(h_unit,opt_C)
y_true<-data.frame(y_test)
y_true<- rbind( y_true, y_test)
}
print(list(r,lambda,h_unit))
|
#' @title Add polygon glyphs on scatter plot
#' @description Each point glyph can be a polygon object.
#' We provide some common polygon coords in \code{\link{polygon_glyph}}. Also, users can
#' customize their own polygons.
#' @inheritParams geom_serialaxes_glyph
#' @param polygon_x nested list of x-coordinates of polygons, one list element for each scatterplot point.
#' If not provided, a point visual (\code{geom_point()}) will be displayed.
#' @param polygon_y nested list of y-coordinates of polygons, one list element for each scatterplot point.
#' If not provided, a point visual (\code{geom_point()}) will be displayed.
#' @export
#'
#' @section Aesthetics:
#' geom_..._glyph() understands the following aesthetics (required aesthetics are in bold):
#' \itemize{
#' \item{\strong{x}}
#' \item{\strong{y}}
#' \item{alpha}
#' \item{colour}
#' \item{fill}
#' \item{group}
#' \item{size}
#' \item{linetype}
#' \item{shape}
#' \item{stroke}
#' }
#'
#' The size unit is \code{cm}
#'
#' Note that the shape and stroke do not have real meanings unless the essential
#' argument \code{polygon_x} or \code{polygon_y} is missing.
#' If so, a point visual will be displayed with corresponding shape and stroke.
#'
#'
#' @return a \code{geom} layer
#' @seealso \code{\link{geom_serialaxes_glyph}}, \code{\link{geom_image_glyph}}
#'
#' @examples
#' # polygon glyph
#' p <- ggplot(data = data.frame(x = 1:4, y = 1:4),
#' mapping = aes(x = x, y = y)) +
#' geom_polygon_glyph(polygon_x = list(x_star, x_cross, x_hexagon, x_airplane),
#' polygon_y = list(y_star, y_cross, y_hexagon, y_airplane),
#' colour = 'black', fill = 'red')
#' p
#'
#' # the coords of each polygons can be achieved by calling function `ggplot_build`
#' build <- ggplot2::ggplot_build(p)
#' polygon_x <- build$data[[1]]$polygon_x
#' polygon_y <- build$data[[1]]$polygon_y
#'
geom_polygon_glyph <- function(mapping = NULL, data = NULL, stat = 'identity',
                               position = 'identity', ...,
                               polygon_x, polygon_y, linewidth = 1,
                               na.rm = FALSE, show.legend = NA,
                               inherit.aes = TRUE) {
  # Without polygon coordinates there is nothing glyph-like to draw:
  # fall back to an ordinary point layer.
  no_polygon <- missing(polygon_x) || missing(polygon_y) ||
    is.null(polygon_x) || is.null(polygon_y)
  if (no_polygon) {
    ggplot2::layer(
      data = data,
      mapping = mapping,
      stat = stat,
      geom = ggplot2::GeomPoint,
      position = position,
      show.legend = show.legend,
      inherit.aes = inherit.aes,
      params = list(
        na.rm = na.rm,
        ...
      )
    )
  } else {
    # Full polygon-glyph layer; the coordinate lists travel via params.
    ggplot2::layer(
      data = data,
      mapping = mapping,
      stat = stat,
      geom = GeomPolygonGlyph,
      position = position,
      show.legend = show.legend,
      inherit.aes = inherit.aes,
      params = list(
        polygon_x = polygon_x,
        polygon_y = polygon_y,
        linewidth = linewidth,
        na.rm = na.rm,
        ...
      )
    )
  }
}
#' @rdname Geom-ggproto
#' @export
# ggproto Geom that renders one polygon glyph per data point.
# Rows whose `fill` is NA are drawn as open outlines (polylines); rows
# with a fill colour are drawn as closed, filled polygons.
GeomPolygonGlyph <- ggplot2::ggproto('GeomPolygonGlyph',
  ggplot2::Geom,
  required_aes = c('x', 'y'),
  default_aes = ggplot2::aes(colour = 'black',
                             fill = 'black', size = 0.5,
                             linetype = 1, alpha = 1,
                             shape = 21, stroke = 0.5),
  # Legend key: reuse the point key, rescaling size so the glyph's
  # cm-based size maps onto GeomPoint's size scale.
  draw_key = function (data, params, size) {
    data$size <- ggplot2::GeomPoint$default_aes$size/0.5 * data$size
    ggplot2::draw_key_point(data, params, size)
  },
  # Recycle polygon_x/polygon_y so there is exactly one coordinate
  # vector per data row.
  setup_params = function(data, params) {
    n <- dim(data)[1]
    polygon_x <- params$polygon_x
    polygon_y <- params$polygon_y
    params$polygon_x <- glyph_input_setup(polygon_x, n = n)
    params$polygon_y <- glyph_input_setup(polygon_y, n = n)
    params
  },
  setup_data = function(data, params) {
    # store list inside of data
    # mainly used for extraction
    data$polygon_x <- params$polygon_x
    data$polygon_y <- params$polygon_y
    data$linewidth <- rep_len(params$linewidth, dim(data)[1])
    data
  },
  draw_panel = function(data, panel_params, coord,
                        polygon_x, polygon_y, linewidth = 1, na.rm = FALSE) {
    data <- coord$transform(data, panel_params)
    n <- dim(data)[1]
    fill <- data$fill
    # NOTE(review): show.area is computed but not used below -- the
    # per-row polyline/polygon masks supersede it.
    show.area <- !any(is.na(fill))
    # fill is NA --> polyline
    # fill is not NA --> polygon
    polyline <- is.na(fill)
    polygon <- !is.na(fill)
    if(sum(polyline) > 0) {
      poly_x <- poly_coord(polygon_x[polyline], data[polyline, ],
                           orientation = "x",
                           show.area = FALSE)
      poly_y <- poly_coord(polygon_y[polyline], data[polyline, ],
                           orientation = "y",
                           show.area = FALSE)
      # One polyline per point: `id` groups the flattened coordinates.
      linegrob <- grid::polylineGrob(
        x = do.call(grid::unit.c, poly_x),
        y = do.call(grid::unit.c, poly_y),
        id = rep(seq(length(poly_x)), lengths(poly_x)),
        gp = grid::gpar(
          col = data$colour[polyline],
          lwd = data$linewidth[polyline],
          alpha = data$alpha[polyline]
        )
      )
    } else {linegrob <- grid::grob()}
    if(sum(polygon) > 0) {
      poly_x <- poly_coord(polygon_x[polygon], data[polygon, ],
                           orientation = "x",
                           show.area = TRUE)
      poly_y <- poly_coord(polygon_y[polygon], data[polygon, ],
                           orientation = "y",
                           show.area = TRUE)
      gongrob <- grid::polygonGrob(
        x = do.call(grid::unit.c, poly_x),
        y = do.call(grid::unit.c, poly_y),
        id = rep(seq(length(poly_x)), lengths(poly_x)),
        gp = grid::gpar(
          fill = fill[polygon],
          col = data$colour[polygon],
          lwd = data$linewidth[polygon],
          alpha = data$alpha[polygon]
        )
      )
    } else {gongrob <- grid::grob()}
    # Combine outline and filled grobs into one named grob tree.
    ggname("geom_polygon_glyph",
           grid::gTree(
             children = grid::gList(
               linegrob,
               gongrob
             )
           )
    )
  }
)
# For each scatterplot point, offset its polygon template (expressed in
# `unit`, scaled by the point's size) from the point position (in native
# units). Open outlines (show.area = FALSE) are closed by repeating the
# first vertex so a polyline visually closes the shape.
poly_coord <- function(poly, data, orientation = "x", show.area = FALSE, unit = "cm") {
  centres <- data[[orientation]]
  sizes <- data$size
  lapply(seq_len(nrow(data)), function(i) {
    vertices <- if (show.area) poly[[i]] else c(poly[[i]], poly[[i]][1])
    grid::unit(centres[i], 'native') + grid::unit(vertices * sizes[i], unit)
  })
}
# Normalize per-point glyph input `x` to a list with one element per data
# row: an atomic vector is shared by all n points; a length-1 list is
# recycled; a length-n list is used as-is; anything else errors.
#
# FIX: the atomic branch used lapply(1:n, ...), which yields a length-2
# list when n == 0 (1:0 is c(1, 0)); seq_len(n) handles the empty case.
glyph_input_setup <- function(x, n = integer(1L)) {
  if (is.atomic(x)) {
    return(lapply(seq_len(n), function(i) x))
  }
  len <- length(x)
  if (len == 1) {
    x <- rep(x, n)
  } else if (len != n) {
    stop("The length of ", deparse(substitute(x)),
         " must be either length 1 or the same as the data ", n)
  }
  x
}
|
/R/geom-polygon-glyph.R
|
no_license
|
LucyNjoki/ggmulti
|
R
| false
| false
| 9,787
|
r
|
#' @title Add polygon glyphs on scatter plot
#' @description Each point glyph can be a polygon object.
#' We provide some common polygon coords in \code{\link{polygon_glyph}}. Also, users can
#' customize their own polygons.
#' @inheritParams geom_serialaxes_glyph
#' @param polygon_x nested list of x-coordinates of polygons, one list element for each scatterplot point.
#' If not provided, a point visual (\code{geom_point()}) will be displayed.
#' @param polygon_y nested list of y-coordinates of polygons, one list element for each scatterplot point.
#' If not provided, a point visual (\code{geom_point()}) will be displayed.
#' @export
#'
#' @section Aesthetics:
#' geom_..._glyph() understands the following aesthetics (required aesthetics are in bold):
#' \itemize{
#' \item{\strong{x}}
#' \item{\strong{y}}
#' \item{alpha}
#' \item{colour}
#' \item{fill}
#' \item{group}
#' \item{size}
#' \item{linetype}
#' \item{shape}
#' \item{stroke}
#' }
#'
#' The size unit is \code{cm}
#'
#' Note that the shape and stroke do not have real meanings unless the essential
#' argument \code{polygon_x} or \code{polygon_y} is missing.
#' If so, a point visual will be displayed with corresponding shape and stroke.
#'
#'
#' @return a \code{geom} layer
#' @seealso \code{\link{geom_serialaxes_glyph}}, \code{\link{geom_image_glyph}}
#'
#' @examples
#' # polygon glyph
#' p <- ggplot(data = data.frame(x = 1:4, y = 1:4),
#' mapping = aes(x = x, y = y)) +
#' geom_polygon_glyph(polygon_x = list(x_star, x_cross, x_hexagon, x_airplane),
#' polygon_y = list(y_star, y_cross, y_hexagon, y_airplane),
#' colour = 'black', fill = 'red')
#' p
#'
#' # the coords of each polygon can be retrieved by calling the function `ggplot_build`
#' build <- ggplot2::ggplot_build(p)
#' polygon_x <- build$data[[1]]$polygon_x
#' polygon_y <- build$data[[1]]$polygon_y
#'
geom_polygon_glyph <- function(mapping = NULL, data = NULL, stat = 'identity',
                               position = 'identity', ...,
                               polygon_x, polygon_y, linewidth = 1,
                               na.rm = FALSE, show.legend = NA,
                               inherit.aes = TRUE) {
  # When either coordinate set is missing or NULL, fall back to an ordinary
  # point layer; otherwise build a polygon-glyph layer. (The !missing() checks
  # come first so a missing argument is never touched by is.null().)
  has_coords <- !missing(polygon_x) && !missing(polygon_y) &&
    !is.null(polygon_x) && !is.null(polygon_y)
  if (has_coords) {
    geom <- GeomPolygonGlyph
    params <- list(
      polygon_x = polygon_x,
      polygon_y = polygon_y,
      linewidth = linewidth,
      na.rm = na.rm,
      ...
    )
  } else {
    geom <- ggplot2::GeomPoint
    params <- list(
      na.rm = na.rm,
      ...
    )
  }
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = params
  )
}
#' @rdname Geom-ggproto
#' @export
GeomPolygonGlyph <- ggplot2::ggproto(
  'GeomPolygonGlyph',
  ggplot2::Geom,
  required_aes = c('x', 'y'),
  default_aes = ggplot2::aes(colour = 'black',
                             fill = 'black', size = 0.5,
                             linetype = 1, alpha = 1,
                             shape = 21, stroke = 0.5),
  # Legend key: reuse the point key, rescaling size so the key matches the
  # GeomPoint default-size convention.
  draw_key = function (data, params, size) {
    data$size <- ggplot2::GeomPoint$default_aes$size/0.5 * data$size
    ggplot2::draw_key_point(data, params, size)
  },
  # Recycle/validate polygon_x and polygon_y so there is exactly one
  # coordinate vector per row of the layer data.
  setup_params = function(data, params) {
    n <- dim(data)[1]
    params$polygon_x <- glyph_input_setup(params$polygon_x, n = n)
    params$polygon_y <- glyph_input_setup(params$polygon_y, n = n)
    params
  },
  # Store the coordinate lists inside the data; mainly used for extraction
  # via ggplot_build().
  setup_data = function(data, params) {
    data$polygon_x <- params$polygon_x
    data$polygon_y <- params$polygon_y
    data$linewidth <- rep_len(params$linewidth, dim(data)[1])
    data
  },
  draw_panel = function(data, panel_params, coord,
                        polygon_x, polygon_y, linewidth = 1, na.rm = FALSE) {
    data <- coord$transform(data, panel_params)
    fill <- data$fill
    # Rows with NA fill are drawn as open polylines; the rest as filled
    # polygons. (The previously computed `n` and `show.area` locals were
    # unused and have been removed.)
    polyline <- is.na(fill)
    polygon <- !polyline
    if (sum(polyline) > 0) {
      poly_x <- poly_coord(polygon_x[polyline], data[polyline, ],
                           orientation = "x",
                           show.area = FALSE)
      poly_y <- poly_coord(polygon_y[polyline], data[polyline, ],
                           orientation = "y",
                           show.area = FALSE)
      linegrob <- grid::polylineGrob(
        x = do.call(grid::unit.c, poly_x),
        y = do.call(grid::unit.c, poly_y),
        # one id per glyph so each outline is a separate line
        id = rep(seq(length(poly_x)), lengths(poly_x)),
        gp = grid::gpar(
          col = data$colour[polyline],
          lwd = data$linewidth[polyline],
          alpha = data$alpha[polyline]
        )
      )
    } else {linegrob <- grid::grob()}
    if (sum(polygon) > 0) {
      poly_x <- poly_coord(polygon_x[polygon], data[polygon, ],
                           orientation = "x",
                           show.area = TRUE)
      poly_y <- poly_coord(polygon_y[polygon], data[polygon, ],
                           orientation = "y",
                           show.area = TRUE)
      gongrob <- grid::polygonGrob(
        x = do.call(grid::unit.c, poly_x),
        y = do.call(grid::unit.c, poly_y),
        id = rep(seq(length(poly_x)), lengths(poly_x)),
        gp = grid::gpar(
          fill = fill[polygon],
          col = data$colour[polygon],
          lwd = data$linewidth[polygon],
          alpha = data$alpha[polygon]
        )
      )
    } else {gongrob <- grid::grob()}
    ggname("geom_polygon_glyph",
           grid::gTree(
             children = grid::gList(
               linegrob,
               gongrob
             )
           )
    )
  }
)
#' Compute grid coordinates for one glyph per data row.
#'
#' For every row of `data`, offsets the point position (in native units)
#' by the polygon outline scaled by the row's `size` aesthetic.
#'
#' @param poly list of numeric coordinate vectors, one per row of `data`.
#' @param data layer data containing the `orientation` column and `size`.
#' @param orientation "x" or "y": which positional column to offset.
#' @param show.area TRUE for filled polygons (outline used as-is, since
#'   polygonGrob closes automatically); FALSE for polylines (first vertex is
#'   appended so the outline visibly closes).
#' @param unit grid unit for the polygon offsets.
#' @return a list of grid unit objects, one per row of `data`.
poly_coord <- function(poly, data, orientation = "x", show.area = FALSE, unit = "cm") {
  lapply(seq_len(nrow(data)), function(i) {
    outline <- poly[[i]]
    if (!show.area) {
      # polylineGrob does not auto-close: repeat the first vertex at the end
      outline <- c(outline, outline[1])
    }
    grid::unit(data[[orientation]][i], "native") +
      grid::unit(outline * data$size[i], unit)
  })
}
#' Normalize a glyph coordinate spec to one list element per data row.
#'
#' @param x either an atomic vector (shared by every row), a list of length 1
#'   (recycled), or a list already of length `n` (returned unchanged).
#' @param n number of rows in the layer data.
#' @return a list of length `n`; errors when `length(x)` is neither 1 nor `n`.
glyph_input_setup <- function(x, n = integer(1L)) {
  if (is.atomic(x)) {
    # one shared coordinate vector: replicate it for every observation.
    # seq_len() (not 1:n) is safe for n == 0.
    x <- lapply(seq_len(n), function(i) x)
  } else if (length(x) == 1) {
    x <- rep(x, n)
  } else if (length(x) != n) {
    stop("The length of ", deparse(substitute(x)),
         " must be either length 1 or the same as the data ", n)
  }
  x
}
|
#Function is_integer()
#' @title Is Integer
#' @description Determines whether a number is a whole number (no fractional part)
#' @param x number
#' @return TRUE if x is an integer-valued number
#' @return FALSE otherwise
is_integer <- function(x) {
  # the comparison itself is the logical result; no if/else needed
  x %% 1 == 0
}
is_integer(-1)
is_integer(0)
is_integer(2L)
is_integer(2)
is_integer(2.1)
is_integer(pi)
is_integer(0.01)
#Function is_positive()
#' @title Is Positive
#' @description Determines whether the indicated value is strictly positive
#' @param x number
#' @return TRUE if number is positive
#' @return FALSE if number is zero or negative
is_positive <- function(x) {
  x > 0
}
is_positive(0.01)
is_positive(2)
is_positive(-2)
is_positive(0)
#Function is_nonnegative()
#' @title Is Nonnegative
#' @description Determines whether a number is non-negative (zero or positive)
#' @param x number
#' @return TRUE if number is not negative
#' @return FALSE if number is negative
is_nonnegative <- function(x) {
  x >= 0
}
is_nonnegative(0)
is_nonnegative(2)
is_nonnegative(-0.00001)
is_nonnegative(-2)
#Function is_positive_integer()
#' @title Is Positive Integer
#' @description Determines whether a number is both positive and an integer
#' @param x number
#' @return TRUE if number is both positive and an integer
#' @return FALSE otherwise
is_positive_integer <- function(x) {
  # scalar condition: use short-circuiting && rather than elementwise &
  is_integer(x) && is_positive(x)
}
is_positive_integer(2)
is_positive_integer(2L)
is_positive_integer(0)
is_positive_integer(-2)
#Function is_nonneg_integer()
#' @title Is Nonnegative Integer
#' @description Determines whether a number is both non-negative and an integer
#' @param x number
#' @return TRUE if number is both non-negative and an integer
#' @return FALSE otherwise
is_nonneg_integer <- function(x) {
  is_nonnegative(x) && is_integer(x)
}
is_nonneg_integer(0)
is_nonneg_integer(1)
is_nonneg_integer(-1)
is_nonneg_integer(-2.5)
#Function is_probability()
#' @title Is Probability
#' @description Determines whether x is a valid probability value: 0 <= x <= 1
#' @param x number
#' @return TRUE if 0 <= x <= 1
#' @return FALSE if x > 1 or x < 0
is_probability <- function(x) {
  x >= 0 && x <= 1
}
is_probability(0)
is_probability(0.5)
is_probability(1)
is_probability(-1)
is_probability(1.0000001)
#Function bin_factorial()
#' @title Binomial Factorial
#' @description Takes a non-negative number x and returns its factorial
#' @param x number
#' @return Factorial of x (1 when x == 0)
bin_factorial <- function(x) {
  # prod(integer(0)) == 1, so x == 0 needs no special case;
  # seq_len() replaces the unsafe 1:x loop
  prod(seq_len(x))
}
bin_factorial(5)
bin_factorial(0)
#Function bin_combinations()
#' @title Binomial Combination
#' @description Number of ways k successes can occur in n trials
#' @param n trials
#' @param k successes
#' @return Number of combinations of k successes in n trials
bin_combinations <- function(n, k) {
  # base R's choose() is numerically stable; equivalent to
  # bin_factorial(n) / (bin_factorial(k) * bin_factorial(n - k))
  choose(n, k)
}
bin_combinations (n = 5, k = 2)
bin_combinations (10, 3)
bin_combinations (4, 4)
#Function bin_probability()
#' @title Binomial Probability
#' @description Probability of exactly `success` successes in `trials` trials
#' @param trials number of trials
#' @param success number of desired successes
#' @param prob probability of a success in one trial
#' @return The probability of the given number of successes
bin_probability <- function(trials, success, prob) {
  # validate inputs up front; messages now match the checks performed
  if (!is_nonneg_integer(trials)) {
    stop("number of trials must be a non-negative integer")
  }
  if (!is_nonneg_integer(success)) {
    stop("number of successes must be a non-negative integer")
  }
  if (!is_probability(prob)) {
    stop("prob must be a valid probability number between 0 and 1")
  }
  bin_combinations(trials, success) * (prob ^ success) *
    ((1 - prob) ^ (trials - success))
}
bin_probability(trials = 5, success = 2, prob = 0.5)
#Function bin_distribution()
#' @title Binomial Distribution
#' @description Probability distribution over 0..trials successes
#' @param trials number of trials
#' @param prob probability of a success for each trial
#' @return A data frame with columns `success` and `probability`
bin_distribution <- function(trials, prob) {
  success <- 0:trials
  # vapply replaces the while loop that grew vectors past their
  # preallocated length
  probability <- vapply(success,
                        function(k) bin_probability(trials, k, prob),
                        numeric(1))
  data.frame(success, probability)
}
# full argument name: the original `p = 0.5` relied on partial matching
bin_distribution(trials = 5, prob = 0.5)
|
/hw03/code/binomial-functions.R
|
no_license
|
JustinRiverNg/Computing-Data
|
R
| false
| false
| 5,225
|
r
|
#Function is_integer()
#' @title Is Integer
#' @description Determines whether a number is a whole number (no fractional part)
#' @param x number
#' @return TRUE if x is an integer-valued number
#' @return FALSE otherwise
is_integer <- function(x) {
  # the comparison itself is the logical result; no if/else needed
  x %% 1 == 0
}
is_integer(-1)
is_integer(0)
is_integer(2L)
is_integer(2)
is_integer(2.1)
is_integer(pi)
is_integer(0.01)
#Function is_positive()
#' @title Is Positive
#' @description Determines whether the indicated value is strictly positive
#' @param x number
#' @return TRUE if number is positive
#' @return FALSE if number is zero or negative
is_positive <- function(x) {
  x > 0
}
is_positive(0.01)
is_positive(2)
is_positive(-2)
is_positive(0)
#Function is_nonnegative()
#' @title Is Nonnegative
#' @description Determines whether a number is non-negative (zero or positive)
#' @param x number
#' @return TRUE if number is not negative
#' @return FALSE if number is negative
is_nonnegative <- function(x) {
  x >= 0
}
is_nonnegative(0)
is_nonnegative(2)
is_nonnegative(-0.00001)
is_nonnegative(-2)
#Function is_positive_integer()
#' @title Is Positive Integer
#' @description Determines whether a number is both positive and an integer
#' @param x number
#' @return TRUE if number is both positive and an integer
#' @return FALSE otherwise
is_positive_integer <- function(x) {
  # scalar condition: use short-circuiting && rather than elementwise &
  is_integer(x) && is_positive(x)
}
is_positive_integer(2)
is_positive_integer(2L)
is_positive_integer(0)
is_positive_integer(-2)
#Function is_nonneg_integer()
#' @title Is Nonnegative Integer
#' @description Determines whether a number is both non-negative and an integer
#' @param x number
#' @return TRUE if number is both non-negative and an integer
#' @return FALSE otherwise
is_nonneg_integer <- function(x) {
  is_nonnegative(x) && is_integer(x)
}
is_nonneg_integer(0)
is_nonneg_integer(1)
is_nonneg_integer(-1)
is_nonneg_integer(-2.5)
#Function is_probability()
#' @title Is Probability
#' @description Determines whether x is a valid probability value: 0 <= x <= 1
#' @param x number
#' @return TRUE if 0 <= x <= 1
#' @return FALSE if x > 1 or x < 0
is_probability <- function(x) {
  x >= 0 && x <= 1
}
is_probability(0)
is_probability(0.5)
is_probability(1)
is_probability(-1)
is_probability(1.0000001)
#Function bin_factorial()
#' @title Binomial Factorial
#' @description Takes a non-negative number x and returns its factorial
#' @param x number
#' @return Factorial of x (1 when x == 0)
bin_factorial <- function(x) {
  # prod(integer(0)) == 1, so x == 0 needs no special case;
  # seq_len() replaces the unsafe 1:x loop
  prod(seq_len(x))
}
bin_factorial(5)
bin_factorial(0)
#Function bin_combinations()
#' @title Binomial Combination
#' @description Number of ways k successes can occur in n trials
#' @param n trials
#' @param k successes
#' @return Number of combinations of k successes in n trials
bin_combinations <- function(n, k) {
  # base R's choose() is numerically stable; equivalent to
  # bin_factorial(n) / (bin_factorial(k) * bin_factorial(n - k))
  choose(n, k)
}
bin_combinations (n = 5, k = 2)
bin_combinations (10, 3)
bin_combinations (4, 4)
#Function bin_probability()
#' @title Binomial Probability
#' @description Probability of exactly `success` successes in `trials` trials
#' @param trials number of trials
#' @param success number of desired successes
#' @param prob probability of a success in one trial
#' @return The probability of the given number of successes
bin_probability <- function(trials, success, prob) {
  # validate inputs up front; messages now match the checks performed
  if (!is_nonneg_integer(trials)) {
    stop("number of trials must be a non-negative integer")
  }
  if (!is_nonneg_integer(success)) {
    stop("number of successes must be a non-negative integer")
  }
  if (!is_probability(prob)) {
    stop("prob must be a valid probability number between 0 and 1")
  }
  bin_combinations(trials, success) * (prob ^ success) *
    ((1 - prob) ^ (trials - success))
}
bin_probability(trials = 5, success = 2, prob = 0.5)
#Function bin_distribution()
#' @title Binomial Distribution
#' @description Probability distribution over 0..trials successes
#' @param trials number of trials
#' @param prob probability of a success for each trial
#' @return A data frame with columns `success` and `probability`
bin_distribution <- function(trials, prob) {
  success <- 0:trials
  # vapply replaces the while loop that grew vectors past their
  # preallocated length
  probability <- vapply(success,
                        function(k) bin_probability(trials, k, prob),
                        numeric(1))
  data.frame(success, probability)
}
# full argument name: the original `p = 0.5` relied on partial matching
bin_distribution(trials = 5, prob = 0.5)
|
# Single-species occupancy analysis of eBird data (unmarked + MuMIn).
# Workflow: (1) AICc-based stepwise selection of detection (p) covariates,
# (2) selection of site (occupancy) covariates, (3) model averaging of the
# retained candidate set, (4) export of coefficient estimates.
library(unmarked)
library(MuMIn)
library(rgdal)
library(rgeos)
library(foreign)
library(foreach)
source("f.AIC_cut.occu.sig.used.15.10.23.R")
source("getDesign.R")
# observation-level covariates considered for the detection submodel
ObsCovarSet <- c("DAY", "TIME", "COUNT_TYPE", "EFFORT_HRS", "EFFORT_DISTANCE_KM", "NUMBER_OBSERVERS")
covar <- read.csv("./input/eBird_occu_covar_site_obs.csv")
covar <- covar[,-c(3:22)]
scov <- read.csv("./input/eBird_landcape_covariates_scaled.csv")
#correlation matrix suggests removing Urban
scov <- scov[,-which(names(scov) %in% c("URB_1km","URB_100m"))]
SiteCovarSet <- names(scov)[-1]
covar <- cbind(covar,scov[,-1])
########################################################
##### To go in loop (currently run for one species chosen via `counter`)
########################################################
files <- list.files("./input",pattern="_eBird")
counter <- 7
bird <- read.csv(sprintf("./input/%s",files[counter]))
tt <- strsplit(files[counter],"[.]")
bird_name <- unlist(tt[[1]])[1]
NewTab <- cbind(bird,covar[,-1])
MyOccuData <- formatWide(dfin=NewTab, type="unmarkedFrameOccu")
# null model: constant detection and constant occupancy
um.frame <- MyOccuData
fm0 <- occu(~1 ~1, data=um.frame)
tt <- paste(SiteCovarSet,collapse="+")
form <- as.formula(paste("~. ~. + ", tt))
# stepwise AICc selection on the detection side of the model
fm.d0 <- f.AICc.occu.sig(fm0, ObsCovarSet, max.iter=30, detocc=2, AICcut=2, um.frame=MyOccuData)
dd <- occu.subset(fm.d0$modlst,cutoff=7)
if (length(dd)>1){
  aa <- model.avg(dd)
  # detection terms look like "p(VAR)": strip the "p(" prefix and ")" suffix
  tt <- attributes(aa$importance)$names[grep("p\\(", attributes(aa$importance)$names)]
  tt2 <- substring(tt, 3)
  #finally, the names of the variables to include for modelling the occu side
  tt2 <- substr(tt2, 1, nchar(tt2)-1)
} else {
  # a single supported model: read the detection variables off its formula
  tt2 <- strsplit(sub("~","",as.character(dd[[1]]@formula[2])),"\\s\\+\\s")[[1]]
}
# collapse the variable names into a "+"-separated formula fragment
tt2 <- paste0(tt2,sep="+",collapse="")
tt2 <- substr(tt2, 1, nchar(tt2)-1)
##################################################
form.p <- as.formula(paste("~. + ", tt2, "~. "))
#update null model with p variables from above; equals occu null model
fm.p <- update(fm0, form.p)
flush.console()
##################################################
#start occu part of the models
# (a stray empty argument ", ," was removed from this call)
f.occ <- f.AICc.occu.sig(fm.p, SiteCovarSet, max.iter=20, AICcut=2, um.frame=MyOccuData)
save.image(sprintf("%s_01.RData",bird_name))
f.occ.sig <- occu.rem.non.sig(f.occ,p.crit=0.15)
f.occ.sig.sub <- occu.subset(f.occ.sig,cutoff=7)
if (length(f.occ.sig.sub)>1){
  # several supported models: model-average the coefficients
  f.occ.avg <- model.avg(f.occ.sig.sub)
  f.occ.avg <- add.coefmat(f.occ.avg)
  designMats <- getDesign(MyOccuData, formula(f.occ.avg))
  fixed <- f.occ.avg$coefmat[,1]
  se <- f.occ.avg$coefmat[,2]
  est <- cbind(fixed,se)
  psi <- as.data.frame(est[grep("psi\\(",row.names(est)),])
  p <- as.data.frame(est[grep("p\\(",row.names(est)),])
} else {
  # a single supported model: take its coefficients directly
  f.occ.avg <- f.occ.sig.sub
  designMats <- getDesign(MyOccuData, formula(f.occ.avg[[1]]))
  fixed <- coef(f.occ.avg[[1]])
  se <- sqrt(diag(vcov(f.occ.avg[[1]])))
  est <- cbind(fixed,se)
  psi <- as.data.frame(est[grep("psi\\(",row.names(est)),])
  p <- as.data.frame(est[grep("p\\(",row.names(est)),])
}
# unpack the design matrices for the selected model structure
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
  X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
  V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)  # number of survey occasions
M <- nrow(y)  # number of sites
# assemble the full coefficient table (estimate, SE, z, p)
if (length(f.occ.sig.sub)>1){
  coefmat.compl <- f.occ.avg$coefmat
  coefmat <- coefmat.compl[,1]
} else {
  coefmat <- coef(f.occ.avg[[1]])
  # collapsed the previous three-step computation into one expression
  ff.se <- sqrt(diag(vcov(f.occ.avg[[1]])))
  ff.z <- coefmat/ff.se
  ff.p <- 2*pnorm(-abs(ff.z))
  coefmat.compl <- data.frame(Estimate = coefmat,
                              SE = ff.se,
                              z = ff.z,
                              p = ff.p)
  row.names(coefmat.compl) <- names(coefmat)
}
write.csv(coefmat.compl,sprintf("./estimates/%s.csv",bird_name))
rm(f.occ,f.occ.sig,fm.d0)
gc()
save.image(sprintf("%s_02.RData",bird_name))
file.remove(sprintf("%s_01.RData",bird_name))
|
/occu_models/BARS_eBird.R
|
no_license
|
ricschuster/Tradeoffs-biodiversity-cost
|
R
| false
| false
| 4,337
|
r
|
# Single-species occupancy analysis of eBird data (unmarked + MuMIn).
# Workflow: (1) AICc-based stepwise selection of detection (p) covariates,
# (2) selection of site (occupancy) covariates, (3) model averaging of the
# retained candidate set, (4) export of coefficient estimates.
library(unmarked)
library(MuMIn)
library(rgdal)
library(rgeos)
library(foreign)
library(foreach)
source("f.AIC_cut.occu.sig.used.15.10.23.R")
source("getDesign.R")
# observation-level covariates considered for the detection submodel
ObsCovarSet <- c("DAY", "TIME", "COUNT_TYPE", "EFFORT_HRS", "EFFORT_DISTANCE_KM", "NUMBER_OBSERVERS")
covar <- read.csv("./input/eBird_occu_covar_site_obs.csv")
covar <- covar[,-c(3:22)]
scov <- read.csv("./input/eBird_landcape_covariates_scaled.csv")
#correlation matrix suggests removing Urban
scov <- scov[,-which(names(scov) %in% c("URB_1km","URB_100m"))]
SiteCovarSet <- names(scov)[-1]
covar <- cbind(covar,scov[,-1])
########################################################
##### To go in loop (currently run for one species chosen via `counter`)
########################################################
files <- list.files("./input",pattern="_eBird")
counter <- 7
bird <- read.csv(sprintf("./input/%s",files[counter]))
tt <- strsplit(files[counter],"[.]")
bird_name <- unlist(tt[[1]])[1]
NewTab <- cbind(bird,covar[,-1])
MyOccuData <- formatWide(dfin=NewTab, type="unmarkedFrameOccu")
# null model: constant detection and constant occupancy
um.frame <- MyOccuData
fm0 <- occu(~1 ~1, data=um.frame)
tt <- paste(SiteCovarSet,collapse="+")
form <- as.formula(paste("~. ~. + ", tt))
# stepwise AICc selection on the detection side of the model
fm.d0 <- f.AICc.occu.sig(fm0, ObsCovarSet, max.iter=30, detocc=2, AICcut=2, um.frame=MyOccuData)
dd <- occu.subset(fm.d0$modlst,cutoff=7)
if (length(dd)>1){
  aa <- model.avg(dd)
  # detection terms look like "p(VAR)": strip the "p(" prefix and ")" suffix
  tt <- attributes(aa$importance)$names[grep("p\\(", attributes(aa$importance)$names)]
  tt2 <- substring(tt, 3)
  #finally, the names of the variables to include for modelling the occu side
  tt2 <- substr(tt2, 1, nchar(tt2)-1)
} else {
  # a single supported model: read the detection variables off its formula
  tt2 <- strsplit(sub("~","",as.character(dd[[1]]@formula[2])),"\\s\\+\\s")[[1]]
}
# collapse the variable names into a "+"-separated formula fragment
tt2 <- paste0(tt2,sep="+",collapse="")
tt2 <- substr(tt2, 1, nchar(tt2)-1)
##################################################
form.p <- as.formula(paste("~. + ", tt2, "~. "))
#update null model with p variables from above; equals occu null model
fm.p <- update(fm0, form.p)
flush.console()
##################################################
#start occu part of the models
# (a stray empty argument ", ," was removed from this call)
f.occ <- f.AICc.occu.sig(fm.p, SiteCovarSet, max.iter=20, AICcut=2, um.frame=MyOccuData)
save.image(sprintf("%s_01.RData",bird_name))
f.occ.sig <- occu.rem.non.sig(f.occ,p.crit=0.15)
f.occ.sig.sub <- occu.subset(f.occ.sig,cutoff=7)
if (length(f.occ.sig.sub)>1){
  # several supported models: model-average the coefficients
  f.occ.avg <- model.avg(f.occ.sig.sub)
  f.occ.avg <- add.coefmat(f.occ.avg)
  designMats <- getDesign(MyOccuData, formula(f.occ.avg))
  fixed <- f.occ.avg$coefmat[,1]
  se <- f.occ.avg$coefmat[,2]
  est <- cbind(fixed,se)
  psi <- as.data.frame(est[grep("psi\\(",row.names(est)),])
  p <- as.data.frame(est[grep("p\\(",row.names(est)),])
} else {
  # a single supported model: take its coefficients directly
  f.occ.avg <- f.occ.sig.sub
  designMats <- getDesign(MyOccuData, formula(f.occ.avg[[1]]))
  fixed <- coef(f.occ.avg[[1]])
  se <- sqrt(diag(vcov(f.occ.avg[[1]])))
  est <- cbind(fixed,se)
  psi <- as.data.frame(est[grep("psi\\(",row.names(est)),])
  p <- as.data.frame(est[grep("p\\(",row.names(est)),])
}
# unpack the design matrices for the selected model structure
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
  X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
  V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)  # number of survey occasions
M <- nrow(y)  # number of sites
# assemble the full coefficient table (estimate, SE, z, p)
if (length(f.occ.sig.sub)>1){
  coefmat.compl <- f.occ.avg$coefmat
  coefmat <- coefmat.compl[,1]
} else {
  coefmat <- coef(f.occ.avg[[1]])
  # collapsed the previous three-step computation into one expression
  ff.se <- sqrt(diag(vcov(f.occ.avg[[1]])))
  ff.z <- coefmat/ff.se
  ff.p <- 2*pnorm(-abs(ff.z))
  coefmat.compl <- data.frame(Estimate = coefmat,
                              SE = ff.se,
                              z = ff.z,
                              p = ff.p)
  row.names(coefmat.compl) <- names(coefmat)
}
write.csv(coefmat.compl,sprintf("./estimates/%s.csv",bird_name))
rm(f.occ,f.occ.sig,fm.d0)
gc()
save.image(sprintf("%s_02.RData",bird_name))
file.remove(sprintf("%s_01.RData",bird_name))
|
# Replays a stored argument list against MGDrivE::calcCos. Appears to be a
# fuzzer-generated regression input (1x5 matrix with one extremely small
# double, r = 0) -- presumably reproduces a past crash; confirm against the
# fuzzing harness that produced it.
testlist <- list(latLongs = structure(c(2.28704985607018e-269, 0, 0, 0, 0 ), .Dim = c(1L, 5L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
# print the structure of the result so misbehavior is visible in the log
str(result)
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727581-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 159
|
r
|
# Replays a stored argument list against MGDrivE::calcCos. Appears to be a
# fuzzer-generated regression input (1x5 matrix with one extremely small
# double, r = 0) -- presumably reproduces a past crash; confirm against the
# fuzzing harness that produced it.
testlist <- list(latLongs = structure(c(2.28704985607018e-269, 0, 0, 0, 0 ), .Dim = c(1L, 5L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
# print the structure of the result so misbehavior is visible in the log
str(result)
|
# Post-process truck assignment flows exported from the freightFlows model.
pacman::p_load(data.table, dplyr)
path = "c:/models/freightFlows/output/assignmentFull/truckFlows.csv"
aux = fread(path)
# keep only links that actually carry trucks, write the filtered table
aux = aux %>% filter(trucks >0)
fwrite(aux, "c:/models/freightFlows/output/assignmentFull/truckFlows_v2.csv", row.names= F )
# travel time in whole hours (minimum 1 h) per row, then trucks per hour bin
aux = aux %>% rowwise() %>%
  mutate(tt_h = max(round(tt/3600, digits = 0),1))
aux = aux %>% group_by(tt_h) %>% summarize(trucks = sum(trucks))
fwrite(aux, "c:/models/freightFlows/output/assignmentFull/truckFlows_v3.csv", row.names= F )
#aggregate_by_distance
# NOTE(review): `aux` was overwritten by the summarize above and now only has
# columns tt_h/trucks, so `distanceBin`, `volume_tn` and `commodity` below are
# presumably columns of the original table -- confirm whether these sections
# are meant to re-read the raw data before aggregating.
distance_bins = c(0,50,100,200,500,10000)
aux = aux %>% mutate(bin = cut(distanceBin, distance_bins))
aux = aux %>% group_by(bin) %>% summarize(sum(volume_tn))
#aggregate+by_commodity
aux = aux %>% group_by(commodity) %>% summarize(sum(volume_tn))
|
/data_process/removeZerosInFlowListFromJava.R
|
no_license
|
cllorca1/freightFlowsAnalyses
|
R
| false
| false
| 769
|
r
|
# Post-process truck assignment flows exported from the freightFlows model.
pacman::p_load(data.table, dplyr)
path = "c:/models/freightFlows/output/assignmentFull/truckFlows.csv"
aux = fread(path)
# keep only links that actually carry trucks, write the filtered table
aux = aux %>% filter(trucks >0)
fwrite(aux, "c:/models/freightFlows/output/assignmentFull/truckFlows_v2.csv", row.names= F )
# travel time in whole hours (minimum 1 h) per row, then trucks per hour bin
aux = aux %>% rowwise() %>%
  mutate(tt_h = max(round(tt/3600, digits = 0),1))
aux = aux %>% group_by(tt_h) %>% summarize(trucks = sum(trucks))
fwrite(aux, "c:/models/freightFlows/output/assignmentFull/truckFlows_v3.csv", row.names= F )
#aggregate_by_distance
# NOTE(review): `aux` was overwritten by the summarize above and now only has
# columns tt_h/trucks, so `distanceBin`, `volume_tn` and `commodity` below are
# presumably columns of the original table -- confirm whether these sections
# are meant to re-read the raw data before aggregating.
distance_bins = c(0,50,100,200,500,10000)
aux = aux %>% mutate(bin = cut(distanceBin, distance_bins))
aux = aux %>% group_by(bin) %>% summarize(sum(volume_tn))
#aggregate+by_commodity
aux = aux %>% group_by(commodity) %>% summarize(sum(volume_tn))
|
#' set_TextType
#'
#' For any EML element of class TextType, this function can be used to generate the appropriate EML from a markdown-formatted file.
#' @param text a plain text character string which will be used directly as the content of the node if no file is given
#' @param file path to a file providing formatted input text, see details.
#' @import XML
#' @return a TextType object that can be coerced into any element inheriting from TextType, see examples
#' @importFrom tools file_ext
#' @details If the `rmarkdown` package is installed, then the input file can
#' be a Microsoft Word (.docx) file, a markdown file, or other file
#' recognized by Pandoc (see http://pandoc.org), which will automate the conversion
#' to a docbook. Otherwise, the input file should already be in docbook format (with
#' .xml or .dbk extension). Note that pandoc comes pre-installed in RStudio and is
#' required for the rmarkdown package.
#' @export
#' @examples
#' \donttest{
#' ## using a simple character string
#' a <- set_TextType(text = "This is the abstract")
#' as(a, "abstract")
#'
#' ## Using an external markdown file
#' f <- system.file("examples/hf205-abstract.md", package = "EML")
#' a <- set_TextType(f)
#' as(a, "abstract")
#'
#' ## Can also import from methods written in a .docx MS Word file.
#' f <- system.file("examples/hf205-abstract.docx", package = "EML")
#' a <- set_TextType(f)
#' as(a, "abstract")
#'
#' ## Documents with title headings use `section` instead of `para` notation
#' f <- system.file("examples/hf205-methods.docx", package = "EML")
#' d <- set_TextType(f)
#' as(d, "description")
#'
#' }
#'
#'
set_TextType <- function(file = NULL, text = NULL) {
  # `text` takes precedence: wrap the plain string directly as node content.
  if (!is.null(text)) {
    TextType <- new("TextType", .Data = text)
  } else if (!is.null(file)) {
    # Convert the file to docbook (via pandoc when needed), then split the
    # article into its section and para sub-trees.
    docbook <- to_docbook(file)
    TextType <-
      new("TextType",
          section = set_section(docbook),
          para = set_para(docbook))
  }
  # NOTE(review): when both `file` and `text` are NULL, `TextType` is never
  # assigned and this line errors with "object 'TextType' not found" -
  # confirm whether an explicit argument check was intended.
  TextType
}
set_section <- function(docbook) {
  # Collect the children of every top-level <sect1> node of the docbook
  # article and wrap each one as an EML "section" object.
  sect_nodes <- XML::xpathApply(docbook, "/article/sect1", XML::xmlChildren)
  wrapped <- lapply(sect_nodes, function(node) {
    new("section", as(node, "list"))
  })
  as(wrapped, "ListOfsection")
}
set_para <- function(docbook) {
  # Collect the children of every top-level <para> node of the docbook
  # article and wrap each one as an EML "para" object.
  para_nodes <- XML::xpathApply(docbook, "/article/para", XML::xmlChildren)
  wrapped <- lapply(para_nodes, function(node) {
    new("para", as(node, "list"))
  })
  as(wrapped, "ListOfpara")
}
to_docbook <- function(file = NULL) {
  # Parse `file` into a docbook XML tree. Files that are not already
  # docbook (.xml / .dbk extension) are converted with pandoc through the
  # rmarkdown package.
  #
  # @param file path to the input document.
  # @return a parsed XML document (as returned by XML::xmlParse).
  if (!tools::file_ext(file) %in% c("xml", "dbk")) {
    ## Not xml yet, so use pandoc to generate docbook
    if (!requireNamespace("rmarkdown", quietly = TRUE)) {
      stop("rmarkdown package required to convert to Docbook format",
        call. = FALSE)
    }
    pandoc_convert <-
      getExportedValue("rmarkdown", "pandoc_convert")
    wd <- getwd()
    # BUG FIX: restore the working directory even if pandoc or the XML
    # parse fails; the original only restored it on the success path.
    on.exit(setwd(wd), add = TRUE)
    dir <- tempdir()
    file.copy(file, file.path(dir, basename(file)), overwrite = TRUE)
    setwd(dir)
    docbook_file <- tempfile(tmpdir = ".", fileext = ".xml")
    pandoc_convert(
      basename(file),
      to = "docbook",
      output = normalizePath(docbook_file, winslash = "/", mustWork = FALSE),
      options = "-s"
    )
    docbook <- XML::xmlParse(docbook_file)
  } else {
    ## File is already xml/docbook, so no need for pandoc
    docbook <- XML::xmlParse(file)
  }
  docbook
}
|
/R/set_TextType.R
|
no_license
|
nicolasfstgelais/EML
|
R
| false
| false
| 3,277
|
r
|
#' set_TextType
#'
#' For any EML element of class TextType, this function can be used to generate the appropriate EML from a markdown-formatted file.
#' @param text a plain text character string which will be used directly as the content of the node if no file is given
#' @param file path to a file providing formatted input text, see details.
#' @import XML
#' @return a TextType object that can be coerced into any element inheriting from TextType, see examples
#' @importFrom tools file_ext
#' @details If the `rmarkdown` package is installed, then the input file can
#' be a Microsoft Word (.docx) file, a markdown file, or other file
#' recognized by Pandoc (see http://pandoc.org), which will automate the conversion
#' to a docbook. Otherwise, the input file should already be in docbook format (with
#' .xml or .dbk extension). Note that pandoc comes pre-installed in RStudio and is
#' required for the rmarkdown package.
#' @export
#' @examples
#' \donttest{
#' ## using a simple character string
#' a <- set_TextType(text = "This is the abstract")
#' as(a, "abstract")
#'
#' ## Using an external markdown file
#' f <- system.file("examples/hf205-abstract.md", package = "EML")
#' a <- set_TextType(f)
#' as(a, "abstract")
#'
#' ## Can also import from methods written in a .docx MS Word file.
#' f <- system.file("examples/hf205-abstract.docx", package = "EML")
#' a <- set_TextType(f)
#' as(a, "abstract")
#'
#' ## Documents with title headings use `section` instead of `para` notation
#' f <- system.file("examples/hf205-methods.docx", package = "EML")
#' d <- set_TextType(f)
#' as(d, "description")
#'
#' }
#'
#'
set_TextType <- function(file = NULL, text = NULL) {
  # `text` takes precedence: wrap the plain string directly as node content.
  if (!is.null(text)) {
    TextType <- new("TextType", .Data = text)
  } else if (!is.null(file)) {
    # Convert the file to docbook (via pandoc when needed), then split the
    # article into its section and para sub-trees.
    docbook <- to_docbook(file)
    TextType <-
      new("TextType",
          section = set_section(docbook),
          para = set_para(docbook))
  }
  # NOTE(review): when both `file` and `text` are NULL, `TextType` is never
  # assigned and this line errors with "object 'TextType' not found" -
  # confirm whether an explicit argument check was intended.
  TextType
}
set_section <- function(docbook) {
  # Collect the children of every top-level <sect1> node of the docbook
  # article and wrap each one as an EML "section" object.
  sect_nodes <- XML::xpathApply(docbook, "/article/sect1", XML::xmlChildren)
  wrapped <- lapply(sect_nodes, function(node) {
    new("section", as(node, "list"))
  })
  as(wrapped, "ListOfsection")
}
set_para <- function(docbook) {
  # Collect the children of every top-level <para> node of the docbook
  # article and wrap each one as an EML "para" object.
  para_nodes <- XML::xpathApply(docbook, "/article/para", XML::xmlChildren)
  wrapped <- lapply(para_nodes, function(node) {
    new("para", as(node, "list"))
  })
  as(wrapped, "ListOfpara")
}
to_docbook <- function(file = NULL) {
  # Parse `file` into a docbook XML tree. Files that are not already
  # docbook (.xml / .dbk extension) are converted with pandoc through the
  # rmarkdown package.
  #
  # @param file path to the input document.
  # @return a parsed XML document (as returned by XML::xmlParse).
  if (!tools::file_ext(file) %in% c("xml", "dbk")) {
    ## Not xml yet, so use pandoc to generate docbook
    if (!requireNamespace("rmarkdown", quietly = TRUE)) {
      stop("rmarkdown package required to convert to Docbook format",
        call. = FALSE)
    }
    pandoc_convert <-
      getExportedValue("rmarkdown", "pandoc_convert")
    wd <- getwd()
    # BUG FIX: restore the working directory even if pandoc or the XML
    # parse fails; the original only restored it on the success path.
    on.exit(setwd(wd), add = TRUE)
    dir <- tempdir()
    file.copy(file, file.path(dir, basename(file)), overwrite = TRUE)
    setwd(dir)
    docbook_file <- tempfile(tmpdir = ".", fileext = ".xml")
    pandoc_convert(
      basename(file),
      to = "docbook",
      output = normalizePath(docbook_file, winslash = "/", mustWork = FALSE),
      options = "-s"
    )
    docbook <- XML::xmlParse(docbook_file)
  } else {
    ## File is already xml/docbook, so no need for pandoc
    docbook <- XML::xmlParse(file)
  }
  docbook
}
|
# Connect to the mongo view and pull the fields we need.
# db.createView("mas_escuchados_2020", "charts", [
#   {$match: {week_start: {$regex: "^2020.*"}}},
#   {$sort: { Streams: -1 }}
# ])
library(mongolite)
library(stringi)
conx_escuchados = mongo(collection = "mas_escuchados_2020", db = "DMUBA_SPOTIFY")
df_escuchados <- conx_escuchados$find()[, c(2:4)]
# Open a connection to a new collection named "lyrics" (for the song lyrics).
conx_lyrics = mongo(collection = "lyrics", db = "DMUBA_SPOTIFY")
# Keep, for each song, the row with the most Streams, sorted decreasingly.
df_escuchados = aggregate(Streams ~ ., data=df_escuchados, FUN=max)
df_escuchados <- df_escuchados[order(df_escuchados$Streams, decreasing = TRUE),]
# Limit to the K most-played songs of 2020.
K = 20
df_top_K <- head(df_escuchados, K)
# Normalize to lowercase ASCII so the search tokens match azlyrics URLs.
df_top_K$Track_Name = tolower(stri_trans_general(df_top_K$Track_Name, 'latin-ascii'))
df_top_K$Artist = tolower(stri_trans_general(df_top_K$Artist, 'latin-ascii'))
# Load the scraping package and set the azlyrics.com search base URL.
library(rvest)
base_search_url = "https://search.azlyrics.com/search.php?q="
# Loop over the K songs to retrieve each lyric page.
K = 1 # Temporary override while the script is being tuned
for (i in 1:K) {
  # Build the search token from track + artist, '+'-separated.
  tokens_search = paste(df_top_K$Track_Name[i], df_top_K$Artist[i])
  tokens_search = gsub(" ", "+", tokens_search)
  # BUG FIX: the original appended the tokens onto `search_url` itself, so
  # from the second iteration onward the URL accumulated every previous
  # query. Build a fresh URL from the constant base each time.
  query_url = paste0(base_search_url, tokens_search)
  # Fetch the search page and parse the results table.
  html_data <- read_html(query_url)
  results_search = html_data %>% html_element("table") %>% html_table()
} # End of loop
|
/2021/laboratorios/LAB08/scripts/adicionales/script-topK-AZlyrics.R
|
no_license
|
dm-uba/dm-uba.github.io
|
R
| false
| false
| 1,899
|
r
|
# Connect to the mongo view and pull the fields we need.
# db.createView("mas_escuchados_2020", "charts", [
#   {$match: {week_start: {$regex: "^2020.*"}}},
#   {$sort: { Streams: -1 }}
# ])
library(mongolite)
library(stringi)
conx_escuchados = mongo(collection = "mas_escuchados_2020", db = "DMUBA_SPOTIFY")
df_escuchados <- conx_escuchados$find()[, c(2:4)]
# Open a connection to a new collection named "lyrics" (for the song lyrics).
conx_lyrics = mongo(collection = "lyrics", db = "DMUBA_SPOTIFY")
# Keep, for each song, the row with the most Streams, sorted decreasingly.
df_escuchados = aggregate(Streams ~ ., data=df_escuchados, FUN=max)
df_escuchados <- df_escuchados[order(df_escuchados$Streams, decreasing = TRUE),]
# Limit to the K most-played songs of 2020.
K = 20
df_top_K <- head(df_escuchados, K)
# Normalize to lowercase ASCII so the search tokens match azlyrics URLs.
df_top_K$Track_Name = tolower(stri_trans_general(df_top_K$Track_Name, 'latin-ascii'))
df_top_K$Artist = tolower(stri_trans_general(df_top_K$Artist, 'latin-ascii'))
# Load the scraping package and set the azlyrics.com search base URL.
library(rvest)
base_search_url = "https://search.azlyrics.com/search.php?q="
# Loop over the K songs to retrieve each lyric page.
K = 1 # Temporary override while the script is being tuned
for (i in 1:K) {
  # Build the search token from track + artist, '+'-separated.
  tokens_search = paste(df_top_K$Track_Name[i], df_top_K$Artist[i])
  tokens_search = gsub(" ", "+", tokens_search)
  # BUG FIX: the original appended the tokens onto `search_url` itself, so
  # from the second iteration onward the URL accumulated every previous
  # query. Build a fresh URL from the constant base each time.
  query_url = paste0(base_search_url, tokens_search)
  # Fetch the search page and parse the results table.
  html_data <- read_html(query_url)
  results_search = html_data %>% html_element("table") %>% html_table()
} # End of loop
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fishSim_dev.R
\name{archive_dead}
\alias{archive_dead}
\title{take dead individuals and copy them to an archive}
\usage{
archive_dead(indiv = mort(), archive = make_archive())
}
\arguments{
\item{indiv}{A matrix of individuals, as from makeFounders(), move(), mate(), or mort().}
\item{archive}{A matrix of individuals, probably from make_archive() or a previous call of
archive_dead().}
}
\description{
For larger simulations, the matrix 'indiv' may grow very large and slow down the simulation.
In these cases, run-times may be improved by periodically moving dead individuals into an
archive that is read and written less frequently than 'indiv'.
}
\examples{
archive <- make_archive()
indiv <- makeFounders()
ages <- min(indiv[,8]):200
ageMort <- 0.1 + (0.2*1/(ages+1)) ## placeholder ageMort with (extreme) negative senescence
stocks <- c(0.3,0.3,0.4) ## matches defaults in makeFounders
admix.m <- matrix(NA, nrow = length(stocks), ncol = length(stocks))
for (i in 1:nrow(admix.m)) {
admix.m[i,] <- stocks*stocks[i]
}
indiv <- makeFounders()
for(k in 1:30) {
indiv <- move(indiv = indiv, moveMat = admix.m)
indiv <- mate(indiv = indiv, osr = c(0.55,0.45), year = k)
indiv <- mort(indiv = indiv, type = "age", ageMort = ageMort, year = k)
indiv <- birthdays(indiv = indiv)
archive <- archive_dead(indiv = indiv, archive = archive) # archives a copy of dead animals'
# data
indiv <- remove_dead(indiv = indiv) # actually removes the dead from 'indiv'.
}
}
\seealso{
\code{\link{remove_dead}}, \code{\link{make_archive}}
}
|
/man/archive_dead.Rd
|
permissive
|
SMBaylis/fishSim
|
R
| false
| true
| 1,687
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fishSim_dev.R
\name{archive_dead}
\alias{archive_dead}
\title{take dead individuals and copy them to an archive}
\usage{
archive_dead(indiv = mort(), archive = make_archive())
}
\arguments{
\item{indiv}{A matrix of individuals, as from makeFounders(), move(), mate(), or mort().}
\item{archive}{A matrix of individuals, probably from make_archive() or a previous call of
archive_dead().}
}
\description{
For larger simulations, the matrix 'indiv' may grow very large and slow down the simulation.
In these cases, run-times may be improved by periodically moving dead individuals into an
archive that is read and written less frequently than 'indiv'.
}
\examples{
archive <- make_archive()
indiv <- makeFounders()
ages <- min(indiv[,8]):200
ageMort <- 0.1 + (0.2*1/(ages+1)) ## placeholder ageMort with (extreme) negative senescence
stocks <- c(0.3,0.3,0.4) ## matches defaults in makeFounders
admix.m <- matrix(NA, nrow = length(stocks), ncol = length(stocks))
for (i in 1:nrow(admix.m)) {
admix.m[i,] <- stocks*stocks[i]
}
indiv <- makeFounders()
for(k in 1:30) {
indiv <- move(indiv = indiv, moveMat = admix.m)
indiv <- mate(indiv = indiv, osr = c(0.55,0.45), year = k)
indiv <- mort(indiv = indiv, type = "age", ageMort = ageMort, year = k)
indiv <- birthdays(indiv = indiv)
archive <- archive_dead(indiv = indiv, archive = archive) # archives a copy of dead animals'
# data
indiv <- remove_dead(indiv = indiv) # actually removes the dead from 'indiv'.
}
}
\seealso{
\code{\link{remove_dead}}, \code{\link{make_archive}}
}
|
# Fuzzer-generated regression case: replay a fixed argument list against
# grattan:::anyOutside (note the empty integer vector x, probing the
# zero-length input path) and print the structure of the result.
testlist <- list(a = 33816326L, b = -134217984L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131637-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 125
|
r
|
# Fuzzer-generated regression case: replay a fixed argument list against
# grattan:::anyOutside (note the empty integer vector x, probing the
# zero-length input path) and print the structure of the result.
testlist <- list(a = 33816326L, b = -134217984L, x = integer(0))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
# install.packages('formatR') library(formatR)
# install.packages('gWidgetsRGtk2') tidy_app()
# formatR::tidy_app()
#' Title get_xy_from_DATA_C2
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Need to have at least 2 columns, one with all variables name, another one which indicate
#' the type of each variable (CM, DX, PB)
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # x <- get_xy_from_DATA_C2(DATA, META_DATA)[[1]]
#' # y <- get_xy_from_DATA_C2(DATA, META_DATA)[[2]]
get_xy_from_DATA_C2 <- function(DATA, META_DATA) {
    # Split the full data matrix into predictors (variables tagged "CM" in
    # the metadata) and the outcome (variables tagged "DX").
    #
    # DATA: observations for all variables.
    # META_DATA: must provide varName and varCategory columns.
    # Returns list(x = predictor columns, y = outcome column(s)).
    cm_vars <- META_DATA$varName[META_DATA$varCategory == "CM"]
    dx_vars <- META_DATA$varName[META_DATA$varCategory == "DX"]
    list(x = DATA[, cm_vars], y = DATA[, dx_vars])
}
####################
# Feature Selection #
#' Title Features Selection
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param method The method to be used for the feature selection: Random forest, AIC, AIC with MSFDR or BIC
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # feature_selection(x, y, method='RF')
#' # feature_selection(x[, 1:30], y, method='BIC')
#' # feature_selection(x, y, method='FDR_screening')
feature_selection <- function(x, y, method = "RF", ...) {
    # Dispatch to the requested feature-selection back-end. Each method is
    # run once per level of y (dummy regressions) and the union of the
    # selected variables is returned.
    #
    # x: data matrix; y: dependent variable; method: one of "RF",
    # "AIC_MSFDR", "BIC", "AIC", "FDR_screening", "LASSO"; ...: forwarded
    # to the chosen back-end.
    if (method == "RF") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF,
            ...)  # ('...' : p)
    } else if (method == "AIC_MSFDR") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC_MSFDR,
            ...)  # ('...' : q, print.the.steps)
    } else if (method == "BIC") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_BIC,
            ...)  # ('...' : nbest, nvmax, nmin, plot)
    } else if (method == "AIC") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC)
    } else if (method == "FDR_screening") {
        output <- Feature_Selection_dummy_regressions(x, y, FDR_selection,
            ...)  # ('...' : q, eta)
    } else if (method == "LASSO") {
        output <- Feature_Selection_dummy_regressions(x, y, LASSO_selection)
    } else {
        # BUG FIX: an unknown method previously fell through and failed with
        # the obscure "object 'output' not found"; fail fast and clearly.
        stop("unknown feature-selection method: ", method, call. = FALSE)
    }
    return(output)
}
#' Finds a subset of variables based on all dummy regressions
#' Title Feature Selection Dummy Regression
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param FUN Indicating which method to use for feature selection
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a vector with the names of the important variables
#' @export
#'
#' @examples
#' Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF)
#'
Feature_Selection_dummy_regressions <- function(x, y, FUN, ...) {
    # Run the supplied selector once per level of y, each time against a
    # 0/1 indicator of that level, and return the union of everything that
    # was selected across levels.
    levels_y <- unique(y)
    picked <- lapply(levels_y, function(lev) {
        FUN(x, as.numeric(y == lev), ...)
    })
    unique(unlist(picked))
}
###################################
# Feature Selection - sub-functions #
### Random Forests ###
#' Title Feature Selection Using Random Forest
#'
#' @param x Data matrix
#' @param y Categorial dependent variable (factor)
#' @param p Precentage of the number of variables to be chosen from x. Default value is 0.1.
#' @return list of p precentage of the variables chosen by their Gini importance index.
#'
#' @export
#'
#' @examples
#' # Feature_Selection_RF(x, y, p = 0.1)
#'
Feature_Selection_RF <- function(x, y, p = 0.1) {
    # Rank the columns of x with a random forest and keep the top fraction
    # p, ordered by the MeanDecreaseAccuracy importance column.
    # NOTE(review): the original comments called this the "Gini importance
    # index", but the code reads MeanDecreaseAccuracy (Gini would be
    # MeanDecreaseGini) - confirm which was intended.
    library(randomForest)
    if (!is.factor(y)) {
        warning("y is not a factor - but was coerced into one.")
        y <- as.factor(y)
    }
    forest_fit <- randomForest(y ~ ., data = x, importance = TRUE, proximity = TRUE)
    scores <- importance(forest_fit)[, "MeanDecreaseAccuracy"]
    n_keep <- round(ncol(x) * p)  # keep a fraction p of the variables
    top_scores <- sort(scores, decreasing = TRUE)[1:n_keep]
    unlist(names(top_scores))
}
### BIC ###
#' Title Feature Selection Using BIC
#'
#' @param x Data matrix
#' @param y response vector (must be numeric?)
#' @param nbest number of subsets of each size to record
#' @param nvmax maximum size of subsets to examine
#' @param nmin number of minimum varibles to be included in the suggested final model
#' @param plot.BIC if TRUE (default) the function plots a table of models showing which variables are in each model.
#' The models are ordered by the specified model selection statistic.
#' @return
#' vector with the names of variables of the model with minimum BIC between the models including more then 'nmin' variables' of regsubsets object
#' @export
#'
#' @examples
#' # Feature_Selection_BIC(x[, 1:30], y, nbest=1, nvmax=5, plot.BIC=TRUE, nmin=4)
Feature_Selection_BIC <- function(x, y, nbest = 1, nvmax = 12, nmin = 4,
    plot.BIC = FALSE) {
    # Exhaustive best-subset search (leaps::regsubsets), then pick the model
    # with the smallest BIC among those containing at least `nmin` variables
    # and return the names of its variables.
    library(leaps)
    library(car)
    fulldata <- data.frame(x, y) # Creating one joint data.frame of the data
    RET <- regsubsets(y ~ ., data = fulldata, nbest = nbest, nvmax = nvmax,
        really.big = TRUE)
    # if (plot.BIC) { plot(RET, scale = 'bic') }
    summary_RET <- summary(RET) # Saving the summary of the regsubsets output
    # 0/1 membership matrix: one row per candidate model, one column per
    # term (first column is the intercept).
    # NOTE(review): nrow is assumed to be exactly nvmax * nbest; if
    # regsubsets returns fewer models, matrix() silently recycles values -
    # confirm against the actual summary dimensions.
    help_mat <- matrix(as.numeric(summary_RET$which), nrow = (nvmax * nbest),
        ncol = (dim(x)[2] + 1)) # Which variables were chosen for each model
    num_var_each_model <- apply(help_mat, 1, sum) # Counting the number of variables chosen for each model
    chosen_models <- summary_RET$bic[which(num_var_each_model >= nmin)] # BIC values of models with at least 'nmin' variables
    # NOTE(review): this index refers to the *filtered* chosen_models vector
    # but is used below as a row index into the *unfiltered* help_mat; the
    # two only line up when no model was filtered out - verify.
    ind_model_min_BIC <- which(chosen_models == min(chosen_models)) # Model with minimum BIC among those kept
    # "- 1" shifts past the intercept column so indices map onto colnames(x).
    return(unlist(colnames(x)[which(help_mat[ind_model_min_BIC, ] == 1) -
        1]))
}
### AIC with FDR ###
#' Title Forward Selection Using AIC Criteria and MSFDR Procedure
#'
#' @param minimal.lm lm function output of model which includes an intercept
#' @param maximal.lm lm function output of model which not includes an intercept
#' @param q Significant level. default as 0.05
#' @param print.the.steps if TRUE the Lambda, model size, and final model at each iteration will be printed;
#' Default as FALSE
#' @param print.running.time If TRUE the running time will be printed, it is equal to the value of print.the.steps
#' Default as False.
#' @return
#' Final model, running time, summary of AIC_MSFDR object
#' @export
#'
#' @examples
#' # Feature_Selection_AIC_MSFDR(x, y, q = 0.5, print.the.steps = FALSE)
#'
MSFDR <- function(minimal.lm, maximal.lm, q, print.the.steps, print.running.time = print.the.steps) {
    # Forward model selection using the multiple-stage FDR controlling
    # procedure (MSFDR): repeatedly runs stepAIC forward with a penalty
    # Lambda derived from the current model size, until the size stops
    # growing.
    #
    # minimal.lm / maximal.lm: lm fits bounding the search scope.
    # q: FDR level used to derive the penalty.
    # print.the.steps / print.running.time: verbosity switches.
    # Returns the final lm object.
    if (!(inherits(minimal.lm, "lm") && inherits(maximal.lm, "lm"))) {
        # BUG FIX: the original printed a message and then used `break`
        # outside of any loop, which itself raises an error at runtime;
        # raise a proper, descriptive error instead. Also use inherits()
        # rather than class() == "lm".
        stop("both 'minimal.lm' and 'maximal.lm' must be linear models fitted with lm()",
            call. = FALSE)
    }
    if (print.running.time)
        time <- proc.time()
    library(MASS)
    algorithm.direction <- "forward"  # always forward
    the.scope <- list(lower = minimal.lm, upper = maximal.lm)
    trace.stepAIC <- ifelse(print.the.steps, 1, 0)
    iteration.number <- 1
    # Model sizes excluding the intercept; i is clamped to at least 1 so an
    # intercept-only minimal model does not give size 0.
    m <- extractAIC(maximal.lm)[1] - 1
    i <- max(extractAIC(minimal.lm)[1] - 1, 1)
    Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
    if (print.the.steps) {
        print(paste("Starting Lambda is: ", Lambda))
    }
    # First forward pass from the minimal model.
    new.lm <- stepAIC(minimal.lm, direction = algorithm.direction, scope = the.scope,
        k = Lambda, trace = trace.stepAIC)
    new.lm.model.size <- extractAIC(new.lm)[1] - 1
    # While the model keeps growing, recompute Lambda for the new size and
    # run another forward pass.
    while (new.lm.model.size > i) {
        iteration.number <- iteration.number + 1
        if (print.the.steps) {
            print("=========================================")
            print("=========================================")
            print(paste("iteration number: ", iteration.number))
            print(paste("current model size is:", new.lm.model.size, ">",
                i, " (which is bigger then the old model size)"))
        }
        i <- new.lm.model.size
        Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
        if (print.the.steps) {
            print(paste("new Lambda is: ", Lambda))
        }
        new.lm <- stepAIC(new.lm, direction = algorithm.direction, scope = the.scope,
            k = Lambda, trace = trace.stepAIC)
        new.lm.model.size <- extractAIC(new.lm)[1] - 1
    }
    if (print.the.steps) {
        print("=========================================")
        print("=========================================")
        print("=========================================")
        print("The final model is: ")
        print(new.lm$call)
    }
    if (print.running.time) {
        print("")
        print("Algorithm running time was:")
        print(proc.time() - time)
    }
    return(new.lm)
}
# TODO: MSFDR does NOT (!!!) work with non-numeric values. Using it for
# factors, will produce very wrong results It should be considered if
# to extend it to also work with factors (e.g.: through multinomial
# regression)
Feature_Selection_AIC_MSFDR <- function(x, y, q = 0.05, print.the.steps = FALSE) {
    # Run MSFDR forward selection on a numeric recoding of y and return the
    # names of the variables kept in the final model (intercept dropped).
    fulldata <- data.frame(x, y = as.numeric(y))
    # Bounding models for the forward search: intercept-only up to all terms.
    intercept_only <- lm(y ~ +1, data = fulldata)
    all_terms <- lm(y ~ ., data = fulldata)
    # MSFDR with the requested FDR level q.
    fitted_model <- MSFDR(minimal.lm = intercept_only, maximal.lm = all_terms,
        q, print.the.steps)
    selected <- names(which(!is.na(fitted_model$coeff)))
    # Drop the leading "(Intercept)" entry.
    unlist(selected[2:length(selected)])
}
### AIC without FDR ###
#' Title Feature Selection Using AIC
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#'
#' @return
#' Returns a list with two items. The first is a list of important variables. The second
#' is NA if print.summary.AIC==FALSE or the summary of AIC if TRUE.
#' @export
#'
#' @examples
#' # Feature_Selection_AIC(x, y)
Feature_Selection_AIC <- function(x, y) {
    # Forward selection by AIC: stepAIC from an intercept-only model up to
    # the full model; returns the selected variable names, intercept dropped.
    library(MASS)
    fulldata <- data.frame(x, y = as.numeric(y))
    intercept_only <- lm(y ~ +1, data = fulldata)
    all_terms <- lm(y ~ . + 1, data = fulldata)
    stepped <- stepAIC(object = intercept_only, scope = list(lower = intercept_only,
        upper = all_terms), direction = "forward", trace = FALSE)
    kept <- names(stepped$coeff)
    # Drop the leading "(Intercept)" entry.
    unlist(kept[2:length(kept)])
}
### FDR Selection (F and Chi-sq tests)
#' Title Feature Selection Using FDR selection
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#' @param q adjusted p value threshold level. The chosen variables will have adjusted p value smaller than q
#' @param eta eta squared threshold, the chosen variables will have eta value greater then eta.
#'
#' @return
#' Returns a list of the selected variables
#' @export
#'
#' @examples
#' # FDR_selection(x, y, q = 0.001, eta = 0.1)
FDR_selection <- function(x, y, q = 0.05, eta = 0.1) {
    # Univariate screening: keep the columns of x whose BH-adjusted
    # association p-value with y is below q AND whose eta-squared (the
    # r-squared of a one-way fit on y) exceeds eta.
    #
    # x: data.frame/matrix of candidate predictors; y: class labels
    # (coerced to factor); q: adjusted p-value threshold; eta: eta-squared
    # threshold. Returns a character vector of selected column names.
    if (!is.factor(y)) {
        warning("y is not a factor - but was coerced into one.")
        y <- as.factor(y)
    }
    n_vars <- ncol(x)
    eta_squared <- rep(NA, n_vars)
    original_p_val <- rep(NA, n_vars)
    for (i in seq_len(n_vars)) {
        # BUG FIX: the discrete-variable test used to compare the count of
        # integer-valued entries against dim(x)[2] (the number of COLUMNS)
        # instead of the number of rows, so it misclassified columns
        # whenever nrow(x) != ncol(x).
        if (all(floor(x[, i]) == x[, i])) {
            # Discrete variable: chi-squared test of independence.
            original_p_val[i] <- chisq.test(x = x[, i], y)$p.value
            eta_squared[i] <- summary.lm(lm(as.factor(x[, i]) ~ as.factor(y)))$r.squared
        } else {
            # Continuous variable: one-way ANOVA (no intercept, kept as in
            # the original implementation).
            anova_model <- anova(lm(x[, i] ~ y + 0))
            original_p_val[i] <- anova_model[[5]][1]
            eta_squared[i] <- summary.lm(lm(x[, i] ~ as.factor(y)))$r.squared
        }
    }
    names(original_p_val) <- colnames(x)
    adjust_p_val <- p.adjust(original_p_val, method = "BH")
    # A column is kept only if it passes both thresholds.
    is_smaller <- ifelse(adjust_p_val < q & eta_squared > eta, 1, 0)
    keep_vars <- names(original_p_val)[which(is_smaller == 1)]
    as.character(keep_vars)
}
#' Title LASSO
#'
#' @param x Data matrix
#' @param y Dependent variable
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # LASSO_selection(x, y)
# LASSO_selection<-function(x, y) { cvfit <- cv.glmnet(as.matrix(x), y)
# important_var_LASSO <- as.matrix(coef(cvfit, s = 'lambda.1se'))
# important_var_LASSO <- important_var_LASSO[important_var_LASSO[, 1]
# != 0, ] important_var_LASSO <-
# important_var_LASSO[names(important_var_LASSO) != '(Intercept)']
# reduced_x <- x[, names(important_var_LASSO)] return(reduced_x) }
#########################################################
# Deciding on number of clusters and clustering the data #
#' Title Deciding on Number of Clusters
#'
#' @param x Data matrix
#' @param method character string indicating how the “optimal” number of clusters: Euclidean (default), Manhattan,
#' heirarchical euclidean or heirarchcal manhattan
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param scale if TRUE (default) the data matrix will be scaled.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix.
#' @param cluster.only if true (default as FALSE) only the clustering will be computed and returned, see details.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # number_of_clusters(subx, B=50, method='Euclidean')
#'
number_of_clusters <- function(x, method = "Euclidean", K.max = 10, B = 100,
    verbose = FALSE, plot.num.clus = TRUE, scale = TRUE, diss = FALSE,
    cluster.only = TRUE) {
    # Suggest a number of clusters via the gap statistic, delegating to the
    # back-end that matches the requested distance/algorithm combination.
    # Returns a one-element list holding the back-end result.
    if (scale) {
        x <- scale(x)  # standardize columns before clustering
    }
    # TODO (kept from original): pass the metric/algorithm down to shared
    # helpers instead of branching on method strings.
    if (method == "Euclidean") {
        k_clusters <- k_euclidean(x, K.max, B, verbose, plot.num.clus)
    } else if (method == "Manhattan") {
        k_clusters <- k_manhattan(x, K.max, diss, B, cluster.only, verbose,
            plot.num.clus)
    } else if (method == "hclust_Euclidean") {
        k_clusters <- khclust_euc(x, K.max, B, verbose, plot.num.clus)
    } else if (method == "hclust_Manhattan") {
        k_clusters <- khclust_man(x, K.max, B, verbose, plot.num.clus)
    }
    list(k_clusters)
}
#' Title Gap statisic with k-medoids euclidean
#'
#' @param x Data matrix
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function' values
#' @export
#'
#' @examples
#' # k_euclidean(subx, K.max=8, B=50, verbose=FALSE, plot.num.clus=TRUE)
#'
k_euclidean <- function(x, K.max, B, verbose, plot.num.clus) {
    # Gap statistic for k-medoids (pam) clustering with Euclidean distance.
    library(cluster)
    library(clusterCrit)
    # NOTE(review): B and verbose are passed positionally; in
    # cluster::clusGap the argument after B is d.power, so `verbose` may not
    # land on the verbose parameter - confirm against the installed version.
    clusGap_best <- cluster::clusGap(x, FUN = pam, K.max = K.max, B, verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap Statistic for k-medoids Euclidean")
    }
    list(clusGap_best)
}
#' Title Gap statisic with k-medoids manhattan
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param cluster.only if true (default) only the clustering will be computed and returned, see details.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default as FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#' @param ... another objects of pam function
#'
#' @return clusGap function' output
#' @export
#'
#' @examples
#' # k_manhattan (subx, K.max = 8, diss=FALSE, B = 50, cluster.only = TRUE, verbose = FALSE)
#'
k_manhattan <- function(x, K.max, diss, B, cluster.only, verbose, plot.num.clus) {
    # Gap statistic for k-medoids (pam) clustering with Manhattan distance.
    library(cluster)
    library(clusterCrit)
    library(magrittr)
    library(fpc)
    # clusGap expects FUN to return a list with a `clusters` component.
    run_pam <- function(x, k, ...) {
        list(clusters = pam(x, k = k, diss = diss, metric = "manhattan",
            cluster.only = cluster.only))
    }
    set.seed(40)  # fixed seed so the bootstrap samples are reproducible
    clusGap_best <- clusGap(x, FUN = run_pam, K.max = K.max, B = B, verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap Statistic for k-medoids Manhattan")
    }
    list(clusGap_best)
}
#' Title Gap statistics for hclust Euclidean
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function output
#' @export
#'
#' @examples
#' # khclust_euc(subx,K.max=10, B=60, verbose = FALSE, plot.num.clus=TRUE )
#'
khclust_euc <- function(x, K.max, B, verbose, plot.num.clus) {
    # Gap statistic for hierarchical clustering (hclust, default complete
    # linkage) on Euclidean distances. Returns the clusGap object.
    # clusGap lives in 'cluster'; attach it up front - the original only
    # loaded it inside the inner function, i.e. only after clusGap had
    # already been called, so the call failed unless a caller had
    # attached the package earlier.
    library(cluster)
    library(magrittr)
    hclust_k_euc <- function(x, k, ...) {
        clusters <- x %>% dist %>% hclust %>% cutree(k = k)
        list(clusters = clusters)
    }
    clusGap_best <- clusGap(x, FUN = hclust_k_euc, K.max = K.max, B = B,
        verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap statistic, hclust Euclidean")
    }
    return(clusGap_best)
}
#' Title Gap statistics for hclust Manhattan
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function output
#' @export
#'
#' @examples
#' # khclust_man(subx, K.max=8, B=60, verbose=FALSE, plot.num.clus=TRUE)
#'
khclust_man <- function(x, K.max, B, verbose, plot.num.clus) {
    # Gap statistic for hierarchical clustering (hclust, default complete
    # linkage) on Manhattan distances. Returns the clusGap object in a list.
    # clusGap lives in 'cluster'; attach it up front - the original never
    # loaded it, relying on some earlier caller having done so.
    library(cluster)
    library(magrittr)
    hclust_k_man <- function(x, k, ...) {
        clusters <- x %>% dist(method = "manhattan") %>% hclust %>% cutree(k = k)
        list(clusters = clusters)
    }
    clusGap_best <- clusGap(x, FUN = hclust_k_man, K.max = K.max, B = B,
        verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap statistic, hclust Manhattan")
    }
    return(list(clusGap_best))
}
#####################
# Clustering the data #
#' Title Clustering
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param method Indicating which method to use for clustering. Default is 'Euclidean'.
#' @param plot.clustering if TRUE (default) a 2-dimensional “clusplot” plot will be printed
#'
#' @return vector with the new assigned clusters
#' @export
#'
#' @examples
#' clustering(subx, k.gap = 5, method='Euclidean', plot.clustering=TRUE)
#'
clustering <- function(x, k.gap = 2, method = "Euclidean", plot.clustering = FALSE) {
    # Dispatch to the requested clustering backend and return its
    # vector of cluster assignments.
    # method: "Euclidean", "Manhattan", "Heuclidean" or "Hmanhattan".
    if (method == "Euclidean") {
        clusters <- cluster_euclidean(x, k.gap, plot.clustering)
    } else if (method == "Manhattan") {
        clusters <- cluster_manhattan(x, k.gap, plot.clustering)
    } else if (method == "Heuclidean") {
        # NOTE(review): both "H" (hierarchical?) methods fall through to
        # the PAM-based routines, not hclust_euc/hclust_man - confirm
        # whether this aliasing is intended.
        clusters <- cluster_euclidean(x, k.gap, plot.clustering)
    } else if (method == "Hmanhattan") {
        clusters <- cluster_manhattan(x, k.gap, plot.clustering)
    } else {
        # previously an unknown method died with "object 'clusters' not found"
        stop("unknown clustering method: ", method, call. = FALSE)
    }
    return(clusters)
}
### Euclidean ###
#' Title Clustering Using Euclidean distances
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional “clusplot” plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#'
#' @export
#'
#' @examples
#' # cluster_euclidean(subx, k.gap = 5, plot.clustering = TRUE)
#'
cluster_euclidean <- function(x, k.gap, plot.clustering) {
    # k-medoids (PAM) clustering with Euclidean distances.
    # Returns the vector of cluster assignments.
    library(cluster)
    pam_fit <- pam(x, k.gap, diss = FALSE)
    if (plot.clustering) {
        # the original wrote `paste = k.gap`, which added a bogus *named*
        # element to the title vector instead of the cluster count
        clusplot(x, pam_fit$cluster, color = TRUE, main = c("k-medoids,",
            paste(k.gap), "clusters"))
    }
    clusters <- pam_fit$cluster
    return(unlist(clusters))
}
### Manhattan ###
#' Title Clustering Using Manhattan Distances
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional “clusplot” plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#' @export
#'
#' @examples
#' # cluster_manhattan(subx, k.gap=4, plot.clustering=TRUE)
#'
cluster_manhattan <- function(x, k.gap, plot.clustering) {
    # k-medoids (PAM) clustering with Manhattan distances.
    # Returns the vector of cluster assignments.
    # pam/clusplot live in 'cluster'; the original relied on a previous
    # caller having attached it.
    library(cluster)
    pam_3_man <- pam(x, k.gap, diss = FALSE, metric = "manhattan")
    if (plot.clustering) {
        clusplot(x, pam_3_man$cluster, color = TRUE, main = c("k-medoids,manhattan",
            paste(k.gap), "clusters"))
    }
    clusters <- pam_3_man$cluster
    return(unlist(clusters))
}
### Hierarchical clustering euclidean ###
#' Title Deciding on number of clusters by using Hierarchical clustering euclidean
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional “clusplot” plot will be printed
#'
#'
#' @return
#' summary table of the distribution to clusters
#' @export
#'
#' @examples
#' hclust_euc(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_euc <- function(x, k.gap, plot.clustering) {
    # Ward hierarchical clustering on Euclidean distances, cut into
    # `k.gap` groups. Returns the vector of cluster assignments.
    d <- dist(x, method = "euclidean")
    fit_best <- hclust(d, method = "ward.D")
    groups_best <- cutree(fit_best, k = k.gap)
    if (plot.clustering) {
        plot(fit_best, main = c("hclust , euclidean,", paste(k.gap), " clusters"))
        # rect.hclust draws on the dendrogram just plotted, so it must
        # stay inside this branch; the original called it unconditionally
        # and errored whenever plot.clustering was FALSE (no active plot).
        rect.hclust(fit_best, k = k.gap, border = "blue")
    }
    return(unlist(groups_best))
}
### Hierarchical clustering manhattan ###
#' Title Deciding on number of clusters by Hierarchical clustering manhattan
#'
#' @param x data matrix
#' @param plot.clustering if TRUE (default) a 2-dimensional 'clusplot' plot will be printed
#'
#' @return
#' a list of two variables the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
#' hclust_man(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_man <- function(x, k.gap, plot.clustering) {
    # Ward hierarchical clustering on Manhattan distances, cut into
    # `k.gap` groups. Returns the vector of cluster assignments.
    d_man <- dist(x, method = "manhattan")
    fit_best_man <- hclust(d_man, method = "ward.D")
    groups_best_man <- cutree(fit_best_man, k = k.gap)
    if (plot.clustering) {
        # title previously said "7 clusters" regardless of k.gap
        plot(fit_best_man, main = c("hclust, manhattan,", paste(k.gap),
            "clusters"))
        # rect.hclust draws on the dendrogram just plotted, so it must
        # stay inside this branch; the original called it unconditionally
        # and errored whenever plot.clustering was FALSE (no active plot).
        rect.hclust(fit_best_man, k = k.gap, border = "red")
    }
    return(unlist(groups_best_man))
}
###############
# 3 C functions #
#' Title C2
#'
#' @param x data matrix
#' @param y Dependent variable
#' @param feature_selection_method method for the feature selection of the clinical measurements stage. Default RF.
#' @param num_clusters_method method for the choosing number of clusters by using the clinical measurements. Default Euclidean.
#' @param k number of clusters to use. If missing, we use a detection method. Default as NULL
#' @param clustering_method method for clustering using the reduced clinical measures. Default is Hmanhattan,
#'
#' @return a list of three variables:
#' 1) vector with the names of the important variables chosen.
#' 2) number of classes that will be used for clustering
#' 3) vector of the new assigned clusters
#'
#' @export
#'
#' @examples
#' resultC2 <- C2(x, y, feature_selection_method='RF', num_clusters_method='Manhattan', clustering_method='Manhattan', plot.num.clus=TRUE, plot.clustering=TRUE)
#' C2(x, y, feature_selection_method='BIC', num_clusters_method='Manhattan', clustering_method='Hmanhattan', plot.num.clus=TRUE, plot.clustering=FALSE, nbest=1, nvmax=8, B=50)
C2 <- function(x, y, feature_selection_method, num_clusters_method, k = NULL,
    clustering_method, ...) {
    # Stage C2: select clinical-measurement variables, choose (or ask the
    # user for) a number of clusters, then cluster the observations.
    #
    # k: number of clusters; when NULL, a detection method runs and the
    #    user is prompted for the final choice.
    # Returns list(selected variables, number of clusters, cluster vector).
    imp_var <- feature_selection(x, y, method = feature_selection_method)
    subx <- x[, unlist(imp_var)]
    # Deciding on number of clusters. The original tested missing(k),
    # which is FALSE when a caller passes k = NULL explicitly; is.null
    # covers both ways of "no k supplied" (the default is NULL).
    if (is.null(k)) {
        num_clust <- number_of_clusters(x = subx, method = num_clusters_method)
        print(num_clust)
        read_chosen_k <- function() {
            answer <- readline(prompt = paste("Enter the chosen number of clusters",
                ":\n"))
            as.numeric(answer)
        }
        num_clust <- read_chosen_k()
    } else {
        num_clust <- k
    }
    # Final clustering. The original ignored `clustering_method` and
    # always used clustering()'s default; forward it when supplied.
    if (missing(clustering_method)) {
        final_cluster <- clustering(subx, k.gap = num_clust)
    } else {
        final_cluster <- clustering(subx, k.gap = num_clust, method = clustering_method)
    }
    return(list(imp_var, num_clust, final_cluster))
}
#' Title get_PBx_from_DATA_C3
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Need to have at least 2 columns, one with all variables name, another one which indicate
#' the type of each variable (CM, DX, PB)
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # PBx <- get_PBx_from_DATA_C3(DATA, META_DATA)
#'
get_PBx_from_DATA_C3 <- function(DATA, META_DATA) {
    # Extract the potential-biomarker ("PB") columns of DATA, as listed
    # in META_DATA (columns varName and varCategory).
    x <- DATA[, META_DATA$varName[META_DATA$varCategory == "PB"]]
    # the original ended with `return(PBx = x)`; `return` takes a single
    # unnamed value
    return(x)
}
#' Title C3
#'
#' @param PBx data matrix
#' @param newy new assigned clusters, results from C2.
#' @param feature_selection_method method for the feature selection of the Potential Bio-Markers
#' @param classification_method method for classification using the potential bio-markers
#'
#' @return a list of two variables:
#' 1) vector with the names of important variables chosen
#' 2) classification result for each observation
#' @export
#'
#' @examples
#' C3(PBx, newy, feature_selection_method='RF', classification_method='RF')
#'
C3 <- function(PBx, newy, feature_selection_method, classification_method) {
    # Stage C3: select informative potential bio-markers, then classify
    # each observation into the clusters found in C2.
    # Returns list(selected variable names, predictions per observation).
    imp_var <- feature_selection(PBx, newy, method = feature_selection_method)
    sub_PBx <- PBx[, imp_var]
    # classify on the reduced matrix - the original computed sub_PBx and
    # then classified on the full PBx, leaving the selection unused
    classification <- classification_fun(sub_PBx, newy, method = classification_method)
    return(list(imp_var, unname(classification)))
}
####################################### Potential biomarkers classification #
#' Title Classification for the potential Biomarkers
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param method Classification method for the function to use
#'
#' @return Predicted values for each observation
#'
#' @export
#'
#' @examples
#' # classification_fun(PBx, newy, method='RF')
classification_fun <- function(PBx, newy, method = "RF") {
    # Dispatch to a potential-biomarker classifier.
    # method: "RF", "RF_downsampling", "CART_information" or "CART_gini".
    # Returns that classifier's predictions (for "RF_downsampling": a
    # list of fitted one-vs-rest forests - see RF_one_by_one).
    if (method == "RF") {
        output <- RF_classify(PBx, newy)
    } else if (method == "RF_downsampling") {
        output <- RF_one_by_one(PBx, newy)
    } else if (method == "CART_information") {
        output <- cart_function(PBx, newy, criteria = "information")
    } else if (method == "CART_gini") {
        output <- cart_function(PBx, newy, criteria = "gini")
    } else {
        # previously an unknown method died with "object 'output' not found"
        stop("unknown classification method: ", method, call. = FALSE)
    }
    return(output)
}
### Random Forest Without Down Sampling ###
#' Title Classification Using Random Forest Without Down Sampling
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return The predicted values for each observation
#'
#' @export
#'
#' @examples
#' # RF_classify(PBx, newy)
library(randomForest)
RF_classify <- function(PBx, newy) {
    # Classify observations into the C2 clusters with a random forest
    # (no down-sampling). Returns randomForest's out-of-bag predicted
    # class for each observation (`$predicted`).
    if (!is.factor(newy)) {
        warning("y is not a factor - but was coerced into one.")
        newy <- as.factor(newy)
    }
    fulldata <- data.frame(PBx, newy)
    # ntree = 50 keeps the fit cheap; presumably chosen for speed - confirm
    rf_clus_PB <- randomForest(newy ~ ., data = fulldata, ntree = 50)
    # NOTE(review): `<<-` publishes the fitted forest as a global object
    # named `model`; cart_function does the same, so other code appears
    # to rely on this side effect - confirm before removing.
    model <<- rf_clus_PB
    return(rf_clus_PB$predicted)
}
### Random forest with down sampling ###
#' Title Classification Using Random Forest With Down Sampling
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return a list of two variables: the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
#' # RF_one_by_one(PBx, newy)
RF_one_by_one <- function(PBx, newy) {
    # One-vs-rest random forests with down-sampling: for each cluster
    # label i, fit a binary forest (cluster i vs the rest) where both
    # classes are down-sampled to the size of the positive class via
    # `sampsize`. Returns a named list with one fitted randomForest per
    # cluster (NOT a prediction vector, unlike RF_classify).
    if (!is.factor(newy)) {
        warning("y is not a factor - but was coerced into one.")
        newy <- as.numeric(as.factor(newy))
    }
    # NOTE(review): when newy already IS a factor it is left unchanged,
    # and `newy == i` below then compares factor levels with integers -
    # confirm callers always pass numeric labels coding clusters 1..k.
    rflist_names <- paste("cluster", c(1:length(unique(newy))))
    rflist <- sapply(rflist_names, function(x) NULL)
    for (i in 1:length(unique(newy))) {
        # 0/1 indicator for "observation belongs to cluster i"
        class_2 <- ifelse(newy == i, 1, 0)
        nmin <- sum(class_2 == 1)  # size of the positive class
        # `class_2` is resolved from this function's environment by the
        # formula; sampsize = rep(nmin, 2) balances the two classes
        rflist[[i]] <- randomForest(factor(class_2) ~ ., data = PBx, ntree = 1000,
            importance = TRUE, proximity = TRUE, sampsize = rep(nmin, 2))
    }
    return(rflist)
}
### CART ###
#' Title Classification Using CART
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param criteria gini or information
#'
#' @return a list of two variables: the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
#' # cart_function(PBx, newy, 'information')
cart_function <- function(PBx, newy, criteria = "gini") {
    # CART classification of the cluster labels; `criteria` is the rpart
    # split criterion ("gini" or "information").
    # Returns the in-sample predicted class for each observation.
    # rpart was called but never attached anywhere in this file; the
    # function failed unless the caller had loaded it themselves.
    library(rpart)
    fulldata <- data.frame(PBx, newy)
    cart <- rpart(newy ~ ., data = fulldata, method = "class", parms = list(split = criteria))
    # NOTE(review): publishes the fitted tree as a global `model`,
    # mirroring RF_classify - confirm before removing.
    model <<- cart
    pred <- predict(cart, type = "class")
    return(pred)
}
|
/R/functions.R
|
no_license
|
HBPMedical/CCC
|
R
| false
| false
| 33,130
|
r
|
# install.packages('formatR') library(formatR)
# install.packages('gWidgetsRGtk2') tidy_app()
# formatR::tidy_app()
#' Title get_xy_from_DATA_C2
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Need to have at least 2 columns, one with all variables name, another one which indicate
#' the type of each variable (CM, DX, PB)
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # x <- get_xy_from_DATA_C2(DATA, META_DATA)[[1]]
#' # y <- get_xy_from_DATA_C2(DATA, META_DATA)[[2]]
get_xy_from_DATA_C2 <- function(DATA, META_DATA) {
    # Split DATA by META_DATA's variable categories: "CM" (clinical
    # measurement) columns become x, "DX" (diagnosis) columns become y.
    # Returns list(x = ..., y = ...).
    cm_names <- META_DATA$varName[META_DATA$varCategory == "CM"]
    dx_names <- META_DATA$varName[META_DATA$varCategory == "DX"]
    list(x = DATA[, cm_names], y = DATA[, dx_names])
}
####################
# Feature Selection #
#' Title Features Selection
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param method The method to be used for the feature selection: Random forest, AIC, AIC with MSFDR or BIC
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # feature_selection(x, y, method='RF')
#' # feature_selection(x[, 1:30], y, method='BIC')
#' # feature_selection(x, y, method='FDR_screening')
feature_selection <- function(x, y, method = "RF", ...) {
    # Select variables by running the chosen selector once per
    # dummy-coded level of y and pooling the results (see
    # Feature_Selection_dummy_regressions).
    # method: "RF", "AIC_MSFDR", "BIC", "AIC", "FDR_screening" or "LASSO".
    # ...: forwarded to the underlying selector.
    # Returns the union of variables selected across dummy regressions.
    if (method == "RF") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF,
            ...) # ('...' : p)
    } else if (method == "AIC_MSFDR") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC_MSFDR,
            ...) # ('...' : q, print.the.steps)
    } else if (method == "BIC") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_BIC,
            ...) # ('...' : nbest, nvmax, nmin, plot)
    } else if (method == "AIC") {
        output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC)
    } else if (method == "FDR_screening") {
        output <- Feature_Selection_dummy_regressions(x, y, FDR_selection,
            ...) # ('...' : q, eta)
    } else if (method == "LASSO") {
        # NOTE(review): LASSO_selection is commented out elsewhere in
        # this file, so this branch currently fails - confirm.
        output <- Feature_Selection_dummy_regressions(x, y, LASSO_selection)
    } else {
        # previously an unknown method died with "object 'output' not found"
        stop("unknown feature selection method: ", method, call. = FALSE)
    }
    return(output)
}
#' Finds a subset of variables based on all dummy regressions
#' Title Feature Selection Dummy Regression
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param FUN Indicating which method to use for feature selection
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a vector with the names of the important variables
#' @export
#'
#' @examples
#' Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF)
#'
Feature_Selection_dummy_regressions <- function(x, y, FUN, ...) {
    # Run the selector FUN once per unique level of y - each time against
    # a 0/1 dummy response for that level - and return the union of all
    # variables selected across the runs.
    per_level <- lapply(unique(y), function(level) {
        FUN(x, as.numeric(y == level), ...)
    })
    unique(unlist(per_level))
}
###################################
# Feature Selection - sub-functions #
### Random Forests ###
#' Title Feature Selection Using Random Forest
#'
#' @param x Data matrix
#' @param y Categorial dependent variable (factor)
#' @param p Precentage of the number of variables to be chosen from x. Default value is 0.1.
#' @return list of p precentage of the variables chosen by their Gini importance index.
#'
#' @export
#'
#' @examples
#' # Feature_Selection_RF(x, y, p = 0.1)
#'
Feature_Selection_RF <- function(x, y, p = 0.1) {
    # Keep the top fraction `p` of variables ranked by random-forest
    # permutation importance (MeanDecreaseAccuracy - the original
    # comments said "Gini", which is a different importance column).
    # Returns a character vector of the selected variable names.
    library(randomForest)
    if (!is.factor(y)) {
        warning("y is not a factor - but was coerced into one.")
        y <- as.factor(y)
    }
    rf_DX_by_CM <- randomForest(y ~ ., data = x, importance = TRUE, proximity = TRUE)
    var_import <- importance(rf_DX_by_CM)[, "MeanDecreaseAccuracy"]
    # keep at least one variable: round(ncol(x) * p) is 0 for small x,
    # which previously produced an invalid 1:0 subscript
    m <- max(1L, round(dim(x)[2] * p))
    subset_vars <- sort(var_import, decreasing = TRUE)[1:m]
    important_var_RF <- names(subset_vars)
    return(unlist(important_var_RF))
}
### BIC ###
#' Title Feature Selection Using BIC
#'
#' @param x Data matrix
#' @param y response vector (must be numeric?)
#' @param nbest number of subsets of each size to record
#' @param nvmax maximum size of subsets to examine
#' @param nmin number of minimum variables to be included in the suggested final model
#' @param plot.BIC if TRUE (default) the function plots a table of models showing which variables are in each model.
#' The models are ordered by the specified model selection statistic.
#' @return
#' vector with the names of variables of the model with minimum BIC between the models including more then 'nmin' variables' of regsubsets object
#' @export
#'
#' @examples
#' # Feature_Selection_BIC(x[, 1:30], y, nbest=1, nvmax=5, plot.BIC=TRUE, nmin=4)
Feature_Selection_BIC <- function(x, y, nbest = 1, nvmax = 12, nmin = 4,
    plot.BIC = FALSE) {
    # Best-subset selection: among all regsubsets models with at least
    # `nmin` terms (counting the intercept, as the original did), return
    # the variable names of the model with minimum BIC.
    library(leaps)
    library(car)
    fulldata <- data.frame(x, y)  # one joint data.frame of the data
    RET <- regsubsets(y ~ ., data = fulldata, nbest = nbest, nvmax = nvmax,
        really.big = TRUE)
    if (plot.BIC) {
        # documented parameter was previously ignored (call commented out)
        plot(RET, scale = "bic")
    }
    summary_RET <- summary(RET)
    # Logical model-membership matrix; column 1 is the intercept. Using
    # it directly also drops the original's assumption that exactly
    # nvmax * nbest models were returned (regsubsets may return fewer).
    which_mat <- summary_RET$which
    num_var_each_model <- rowSums(which_mat)
    eligible <- which(num_var_each_model >= nmin)
    # The original looked up the minimum inside the *subset* of eligible
    # BIC values but then used that subset position to index the full
    # model matrix, selecting the wrong row whenever any model was
    # filtered out; map the subset index back to the full matrix here.
    best_row <- eligible[which.min(summary_RET$bic[eligible])]
    sel_idx <- which(which_mat[best_row, ]) - 1  # shift away the intercept column
    sel_idx <- sel_idx[sel_idx > 0]
    # NOTE(review): assumes regsubsets' columns align with colnames(x)
    # (true when all predictors are numeric) - confirm for factor inputs.
    return(unlist(colnames(x)[sel_idx]))
}
### AIC with FDR ###
#' Title Forward Selection Using AIC Criteria and MSFDR Procedure
#'
#' @param minimal.lm lm function output of model which includes an intercept
#' @param maximal.lm lm function output of model which not includes an intercept
#' @param q Significant level. default as 0.05
#' @param print.the.steps if TRUE the Lambda, model size, and final model at each iteration will be printed;
#' Default as FALSE
#' @param print.running.time If TRUE the running time will be printed, it is equal to the value of print.the.steps
#' Default as False.
#' @return
#' Final model, running time, summary of AIC_MSFDR object
#' @export
#'
#' @examples
#' # Feature_Selection_AIC_MSFDR(x, y, q = 0.5, print.the.steps = FALSE)
#'
MSFDR <- function(minimal.lm, maximal.lm, q, print.the.steps, print.running.time = print.the.steps) {
    # Forward model selection using the multiple-stage FDR controlling
    # procedure (MSFDR): run stepAIC forward with a penalty Lambda
    # derived from the current model size i, then repeat - recomputing
    # Lambda - until the selected model stops growing.
    #
    # minimal.lm / maximal.lm: lm objects bounding the search scope.
    # q: FDR level used in the Lambda formula.
    # print.the.steps: trace each iteration (also enables stepAIC trace).
    # print.running.time: report elapsed time (defaults to print.the.steps).
    # Returns the final lm object.
    if (!(inherits(minimal.lm, "lm") && inherits(maximal.lm, "lm"))) {
        # the original print()ed a message and then called `break`, which
        # is invalid outside a loop; signal a real error instead
        stop("one of the models you entered aren't linear models (lm), please try fitting lm only",
            call. = FALSE)
    }
    if (print.running.time)
        time <- proc.time()
    library(MASS)
    algorithm.direction <- "forward" # always forward
    the.scope <- list(lower = minimal.lm, upper = maximal.lm)
    trace.stepAIC <- ifelse(print.the.steps, 1, 0)
    iteration.number <- 1
    # model sizes exclude the intercept; i starts at 1 so an
    # intercept-only minimal model does not yield a size of 0
    m <- extractAIC(maximal.lm)[1] - 1 # check if the full model should include the intercept or not !!!!!!
    i <- max(extractAIC(minimal.lm)[1] - 1, 1)
    Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
    if (print.the.steps) {
        print(paste("Starting Lambda is: ", Lambda))
    }
    # first step of the algorithm
    new.lm <- stepAIC(minimal.lm, direction = algorithm.direction, scope = the.scope,
        k = Lambda, trace = trace.stepAIC)
    new.lm.model.size <- extractAIC(new.lm)[1] - 1
    # iterate while the model keeps growing
    while (new.lm.model.size > i) {
        iteration.number <- iteration.number + 1
        if (print.the.steps) {
            print("=========================================")
            print("=========================================")
            print(paste("iteration number: ", iteration.number))
            print(paste("current model size is:", new.lm.model.size, ">",
                i, " (which is bigger then the old model size)"))
        }
        # adopt the new size and recompute the penalty
        i <- new.lm.model.size
        Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
        if (print.the.steps) {
            print(paste("new Lambda is: ", Lambda))
        }
        new.lm <- stepAIC(new.lm, direction = algorithm.direction, scope = the.scope,
            k = Lambda, trace = trace.stepAIC)
        new.lm.model.size <- extractAIC(new.lm)[1] - 1
    }
    if (print.the.steps) {
        print("=========================================")
        print("=========================================")
        print("=========================================")
        print("The final model is: ")
        print(new.lm$call)
    }
    if (print.running.time) {
        print("")
        print("Algorithm running time was:")
        print(proc.time() - time)
    }
    return(new.lm)
}
# TODO: MSFDR does NOT (!!!) work with non-numeric values. Using it for
# factors, will produce very wrong results It should be considered if
# to extend it to also work with factors (e.g.: through multinomial
# regression)
Feature_Selection_AIC_MSFDR <- function(x, y, q = 0.05, print.the.steps = FALSE) {
    # Forward selection via the MSFDR procedure (see MSFDR above).
    # Returns the names of the selected variables, intercept dropped.
    y <- as.numeric(y)
    fulldata <- data.frame(x, y = y)
    # bounds of the search: intercept-only model up to all predictors
    smallest_linear_model <- lm(y ~ +1, data = fulldata)
    largest_linear_model <- lm(y ~ ., data = fulldata)
    # Implementing the MSFDR procedure (with q = 0.05 by default)
    AIC_MSDFR <- MSFDR(minimal.lm = smallest_linear_model, maximal.lm = largest_linear_model,
        q, print.the.steps)
    # (the original also stored summary(AIC_MSDFR) in a local named
    # `sum`, shadowing base::sum; it was never used and is dropped)
    important_var_FDR <- which(!is.na(AIC_MSDFR$coeff))
    important_var_FDR <- names(important_var_FDR)
    return(unlist(important_var_FDR[2:length(important_var_FDR)]))
}
### AIC without FDR ###
#' Title Feature Selection Using AIC
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#'
#' @return
#' Returns a list with two items. The first is a list of important variables. The second
#' is NA if print.summary.AIC==FALSE or the summary of AIC if TRUE.
#' @export
#'
#' @examples
#' # Feature_Selection_AIC(x, y)
Feature_Selection_AIC <- function(x, y) {
    # Plain forward selection by AIC, from the intercept-only model up to
    # the full model. Returns the selected variable names, with the
    # leading "(Intercept)" entry removed.
    library(MASS)
    y <- as.numeric(y)
    fulldata <- data.frame(x, y)
    null_model <- lm(y ~ +1, data = fulldata)
    full_model <- lm(y ~ . + 1, data = fulldata)
    stepped <- stepAIC(object = null_model, scope = list(lower = null_model,
        upper = full_model), direction = "forward", trace = FALSE)
    chosen <- names(stepped$coeff)
    # drop the intercept entry (position 1), as the original did
    return(unlist(chosen[2:length(chosen)]))
}
### FDR Selection (F and Chi-sq tests)
#' Title Feature Selection Using FDR selection
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#' @param q adjusted p value threshold level. The chosen variables will have adjusted p value smaller than q
#' @param eta eta squared threshold, the chosen variables will have eta value greater then eta.
#'
#' @return
#' Returns a list of the selected variables
#' @export
#'
#' @examples
#' # FDR_selection(x, y, q = 0.001, eta = 0.1)
FDR_selection <- function(x, y, q = 0.05, eta = 0.1) {
    # Screen variables one at a time against the grouping y: keep a
    # variable when its BH-adjusted p-value is below `q` AND its
    # eta-squared (R^2 of a one-way fit on the groups) exceeds `eta`.
    # Returns a character vector of the selected variable names.
    if (!is.factor(y)) {
        warning("y is not a factor - but was coerced into one.")
        y <- as.factor(y)
    }
    eta_squared <- rep(NA, dim(x)[2])
    original_p_val <- rep(NA, dim(x)[2])
    for (i in 1:dim(x)[2]) {
        if (all(floor(x[, i]) == x[, i])) {
            # Discrete variable: chi-squared test of association.
            # (The original compared the count of integer entries to the
            # number of COLUMNS instead of rows, so this branch almost
            # never ran correctly; it also fed a factor response to lm,
            # which fails - use the numeric level codes instead.)
            original_p_val[i] <- chisq.test(x = x[, i], y)$p.value
            eta_squared[i] <- summary.lm(lm(as.numeric(as.factor(x[, i])) ~ as.factor(y)))$r.squared
        } else {
            # Continuous variable: one-way ANOVA.
            # NOTE(review): the fit drops the intercept (~ y + 0), so the
            # F test checks "all group means are zero" rather than "group
            # means differ"; the eta filter below is what actually screens
            # for separation - confirm this is intended.
            anova_model <- anova(lm(x[, i] ~ y + 0))
            original_p_val[i] <- anova_model[[5]][1]
            eta_squared[i] <- summary.lm(lm(x[, i] ~ as.factor(y)))$r.squared
        }
    }
    names(original_p_val) <- colnames(x)
    adjust_p_val <- p.adjust(original_p_val, method = "BH")
    is_smaller <- ifelse(adjust_p_val < q & eta_squared > eta, 1, 0)
    screening <- data.frame("var" = names(original_p_val), original_p_val, adjust_p_val,
        eta_squared, is_smaller, row.names = c(1:length(original_p_val)))
    keep_vars <- screening$var[which(is_smaller == 1)]
    screening <- screening[order(original_p_val), ]
    return(as.character(keep_vars))
}
#' Title LASSO
#'
#' @param x Data matrix
#' @param y Dependent variable
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # LASSO_selection(x, y)
# LASSO_selection<-function(x, y) { cvfit <- cv.glmnet(as.matrix(x), y)
# important_var_LASSO <- as.matrix(coef(cvfit, s = 'lambda.1se'))
# important_var_LASSO <- important_var_LASSO[important_var_LASSO[, 1]
# != 0, ] important_var_LASSO <-
# important_var_LASSO[names(important_var_LASSO) != '(Intercept)']
# reduced_x <- x[, names(important_var_LASSO)] return(reduced_x) }
#########################################################
# Deciding on number of clusters and clustering the data #
#' Title Deciding on Number of Clusters
#'
#' @param x Data matrix
#' @param method character string indicating how the “optimal” number of clusters: Euclidean (default), Manhattan,
#' hierarchical Euclidean or hierarchical Manhattan
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param scale if TRUE (default) the data matrix will be scaled.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix.
#' @param cluster.only if true (default as FALSE) only the clustering will be computed and returned, see details.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # number_of_clusters(subx, B=50, method='Euclidean')
#'
number_of_clusters <- function(x, method = "Euclidean", K.max = 10, B = 100,
    verbose = FALSE, plot.num.clus = TRUE, scale = TRUE, diss = FALSE,
    cluster.only = TRUE) {
    # Suggest a number of clusters via the gap statistic, using the
    # requested distance / algorithm combination.
    # method: "Euclidean", "Manhattan", "hclust_Euclidean" or
    # "hclust_Manhattan". Returns the backend's result wrapped in a list.
    if (scale) {
        x <- scale(x)
    }
    # TODO: what we SHOULD do is pass Euclidean/Man to the functions, as
    # well as hclust vs pam...
    if (method == "Euclidean") {
        k_clusters <- k_euclidean(x, K.max, B, verbose, plot.num.clus)
    } else if (method == "Manhattan") {
        k_clusters <- k_manhattan(x, K.max, diss, B, cluster.only, verbose,
            plot.num.clus)
    } else if (method == "hclust_Euclidean") {
        k_clusters <- khclust_euc(x, K.max, B, verbose, plot.num.clus)
    } else if (method == "hclust_Manhattan") {
        k_clusters <- khclust_man(x, K.max, B, verbose, plot.num.clus)
    } else {
        # previously an unknown method died with "object 'k_clusters' not found"
        stop("unknown method for choosing the number of clusters: ", method,
            call. = FALSE)
    }
    return(list(k_clusters))
}
#' Title Gap statisic with k-medoids euclidean
#'
#' @param x Data matrix
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function' values
#' @export
#'
#' @examples
#' # k_euclidean(subx, K.max=8, B=50, verbose=FALSE, plot.num.clus=TRUE)
#'
k_euclidean <- function(x, K.max, B, verbose, plot.num.clus) {
    # Gap statistic for k-medoids (PAM) clustering with Euclidean
    # distances. Returns the clusGap result wrapped in a list.
    library(cluster)
    library(clusterCrit)
    # Name the B and verbose arguments explicitly: the original passed
    # them positionally, so `verbose` landed in clusGap's `d.power`
    # argument (and FALSE silently became d.power = 0).
    clusGap_best <- cluster::clusGap(x, FUN = pam, K.max = K.max, B = B,
        verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap Statistic for k-medoids Euclidean")
    }
    return(list(clusGap_best))
}
#' Title Gap statisic with k-medoids manhattan
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix
#' @param B integer, number of Monte Carlo (“bootstrap”) samples. Default value is 100.
#' @param cluster.only if true (default) only the clustering will be computed and returned, see details.
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample. Default as FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#' @param ... another objects of pam function
#'
#' @return clusGap function' output
#' @export
#'
#' @examples
#' # k_manhattan (subx, K.max = 8, diss=FALSE, B = 50, cluster.only = TRUE, verbose = FALSE)
#'
k_manhattan <- function(x, K.max, diss, B, cluster.only, verbose, plot.num.clus) {
    # Gap statistic for k-medoids (PAM) clustering with the Manhattan
    # metric; optionally plots the gap curve.
    # Returns the clusGap result wrapped in a list.
    library(cluster)
    library(clusterCrit)
    library(magrittr)
    library(fpc)
    # Adapter handed to clusGap: runs PAM with the Manhattan metric;
    # `diss` and `cluster.only` are captured from the enclosing call.
    pam_manhattan <- function(data, k, ...) {
        fit <- pam(data, k = k, diss = diss, metric = "manhattan",
            cluster.only = cluster.only)
        list(clusters = fit)
    }
    # fixed seed so the bootstrap reference data sets are reproducible
    set.seed(40)
    gap_result <- clusGap(x, FUN = pam_manhattan, K.max = K.max, B = B,
        verbose = verbose)
    if (plot.num.clus) {
        plot(gap_result, main = "Gap Statistic for k-medoids Manhattan")
    }
    list(gap_result)
}
#' Title Gap statistics for hclust Euclidean
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' @param B integer, number of Monte Carlo (“bootstrap”) samples
#' @param verbose integer or logical, determining if “progress” output should be printed. The default prints
#' one bit per bootstrap sample
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function output
#' @export
#'
#' @examples
#' # khclust_euc(subx,K.max=10, B=60, verbose = FALSE, plot.num.clus=TRUE )
#'
khclust_euc <- function(x, K.max, B, verbose, plot.num.clus) {
    # Gap statistic for hierarchical clustering (hclust, default complete
    # linkage) on Euclidean distances. Returns the clusGap object.
    # clusGap lives in 'cluster'; attach it up front - the original only
    # loaded it inside the inner function, i.e. only after clusGap had
    # already been called, so the call failed unless a caller had
    # attached the package earlier.
    library(cluster)
    library(magrittr)
    hclust_k_euc <- function(x, k, ...) {
        clusters <- x %>% dist %>% hclust %>% cutree(k = k)
        list(clusters = clusters)
    }
    clusGap_best <- clusGap(x, FUN = hclust_k_euc, K.max = K.max, B = B,
        verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap statistic, hclust Euclidean")
    }
    return(clusGap_best)
}
#' Title Gap statistics for hclust Manhattan
#'
#' Runs the gap statistic on hierarchical clustering (Manhattan distances,
#' default agglomeration) to help choose the number of clusters.
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10
#' @param B integer, number of Monte Carlo ("bootstrap") samples. Default value is 100.
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return a list holding the clusGap function output (kept as a list for
#' backward compatibility with existing callers)
#' @export
#'
#' @examples
#' # khclust_man(subx, K.max=8, B=60, verbose=FALSE, plot.num.clus=TRUE)
#'
khclust_man <- function(x, K.max, B, verbose, plot.num.clus) {
    # Clustering backend handed to clusGap: hierarchical clustering on
    # Manhattan distances, cut into k groups.  Component name "cluster"
    # is what clusGap expects (the old "clusters" relied on `$` partial
    # matching).
    hclust_k_man <- function(x, k, ...) {
        list(cluster = cutree(hclust(dist(x, method = "manhattan")), k = k))
    }
    clusGap_best <- cluster::clusGap(x, FUN = hclust_k_man, K.max = K.max,
        B = B, verbose = verbose)
    if (plot.num.clus) {
        plot(clusGap_best, main = "Gap statistic, hclust Manhattan")
    }
    # NOTE(review): unlike khclust_euc this returns list(clusGap_best);
    # preserved so callers indexing [[1]] keep working.
    return(list(clusGap_best))
}
#####################
# Clustering the data #
#' Title Clustering
#'
#' Dispatches to one of the clustering backends: k-medoids (Euclidean or
#' Manhattan) or hierarchical clustering (Heuclidean / Hmanhattan).
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param method Indicating which method to use for clustering. One of
#' 'Euclidean', 'Manhattan', 'Heuclidean', 'Hmanhattan'. Default is 'Euclidean'.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return vector with new assigned clusters
#' @export
#'
#' @examples
#' clustering(subx, k.gap = 5, method='Euclidean', plot.clustering=TRUE)
#'
clustering <- function(x, k.gap = 2, method = "Euclidean", plot.clustering = FALSE) {
    # switch() replaces the original if-chain; it also fixes a copy-paste
    # bug where 'Heuclidean'/'Hmanhattan' dispatched to the k-medoids
    # functions instead of the hierarchical ones.
    clusters <- switch(method,
        Euclidean = cluster_euclidean(x, k.gap, plot.clustering),
        Manhattan = cluster_manhattan(x, k.gap, plot.clustering),
        Heuclidean = hclust_euc(x, k.gap, plot.clustering),
        Hmanhattan = hclust_man(x, k.gap, plot.clustering),
        stop("unknown clustering method: ", method, call. = FALSE))
    return(clusters)
}
### Euclidean ###
#' Title Clustering Using Euclidean distances
#'
#' Partitions the data with k-medoids (PAM) on Euclidean distances.
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#'
#' @export
#'
#' @examples
#' # cluster_euclidean(subx, k.gap = 5, plot.clustering = TRUE)
#'
cluster_euclidean <- function(x, k.gap, plot.clustering) {
    # cluster:: is used explicitly instead of library(cluster) inside the
    # function body.
    pam_fit <- cluster::pam(x, k.gap, diss = FALSE)
    if (plot.clustering) {
        # The original built the title with c(..., paste = k.gap, ...),
        # which created a stray named element instead of showing k.gap.
        cluster::clusplot(x, pam_fit$cluster, color = TRUE,
            main = paste("k-medoids,", k.gap, "clusters"))
    }
    return(pam_fit$cluster)
}
### Manhattan ###
#' Title Clustering Using Manhattan Distances
#'
#' Partitions the data with k-medoids (PAM) on Manhattan distances.
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#' @export
#'
#' @examples
#' # cluster_manhattan(subx, k.gap=4, plot.clustering=TRUE)
#'
cluster_manhattan <- function(x, k.gap, plot.clustering) {
    # cluster:: is qualified explicitly; the original silently relied on
    # the cluster package having been attached elsewhere.
    pam_fit <- cluster::pam(x, k.gap, diss = FALSE, metric = "manhattan")
    if (plot.clustering) {
        cluster::clusplot(x, pam_fit$cluster, color = TRUE,
            main = paste("k-medoids, manhattan,", k.gap, "clusters"))
    }
    return(pam_fit$cluster)
}
### Hierarchical clustering euclidean ###
#' Title Hierarchical clustering, Euclidean distances
#'
#' Ward hierarchical clustering on Euclidean distances, cut into k.gap
#' groups.
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) the dendrogram with the cluster
#' rectangles will be printed
#'
#' @return
#' vector with the new assigned clusters
#' @export
#'
#' @examples
#' hclust_euc(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_euc <- function(x, k.gap, plot.clustering) {
    d <- dist(x, method = "euclidean")
    fit_best <- hclust(d, method = "ward.D")
    groups <- cutree(fit_best, k = k.gap)
    if (plot.clustering) {
        plot(fit_best, main = c("hclust , euclidean,", paste(k.gap), " clusters"))
        # rect.hclust draws on the open dendrogram plot; calling it
        # unconditionally (as the original did) errors when no plot was
        # produced.
        rect.hclust(fit_best, k = k.gap, border = "blue")
    }
    return(groups)
}
### Hierarchical clustering manhattan ###
#' Title Hierarchical clustering, Manhattan distances
#'
#' Ward hierarchical clustering on Manhattan distances, cut into k.gap
#' groups.
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters
#' @param plot.clustering if TRUE (default) the dendrogram with the cluster
#' rectangles will be printed
#'
#' @return
#' vector with the new assigned clusters
#' @export
#'
#' @examples
#' hclust_man(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_man <- function(x, k.gap, plot.clustering) {
    d_man <- dist(x, method = "manhattan")
    fit_best_man <- hclust(d_man, method = "ward.D")
    groups <- cutree(fit_best_man, k = k.gap)
    if (plot.clustering) {
        # Title now shows the requested k.gap (the original had a stale
        # hard-coded "7 clusters" fragment).
        plot(fit_best_man, main = paste("hclust, manhattan,", k.gap, "clusters"))
        # rect.hclust needs the dendrogram plot to exist, so it must stay
        # inside this branch.
        rect.hclust(fit_best_man, k = k.gap, border = "red")
    }
    return(groups)
}
###############
# 3 C functions #
#' Title C2
#'
#' Second stage of the pipeline: selects informative clinical measurements,
#' determines (or asks the user for) the number of clusters, then clusters
#' the observations on the selected measurements.
#'
#' @param x data matrix
#' @param y Dependent variable
#' @param feature_selection_method method for the feature selection of the clinical measurements stage. Default RF.
#' @param num_clusters_method method for the choosing number of clusters by using the clinical measurements. Default Euclidean.
#' @param k number of clusters to use. If NULL (default) a detection method is
#' run and the user is prompted interactively for the final choice.
#' @param clustering_method method for clustering using the reduced clinical
#' measures; forwarded to clustering(). If missing, clustering()'s own default
#' is used.
#'
#' @return a list of three variables:
#' 1) vector with the names of the important variables chosen.
#' 2) number of classes that will be used for clustering
#' 3) vector of the new assigned clusters
#'
#' @export
#'
#' @examples
#' resultC2 <- C2(x, y, feature_selection_method='RF', num_clusters_method='Manhattan', clustering_method='Manhattan', plot.num.clus=TRUE, plot.clustering=TRUE)
#' C2(x, y, feature_selection_method='BIC', num_clusters_method='Manhattan', clustering_method='Hmanhattan', plot.num.clus=TRUE, plot.clustering=FALSE, nbest=1, nvmax=8, B=50)
C2 <- function(x, y, feature_selection_method, num_clusters_method, k = NULL,
    clustering_method, ...) {
    # Stage 1: feature selection on the clinical measurements.
    imp_var <- feature_selection(x, y, method = feature_selection_method)
    subx <- x[, unlist(imp_var)]
    # Stage 2: number of clusters.  is.null(k) also covers an explicitly
    # passed k = NULL, which the original missing(k) test did not.
    if (is.null(k)) {
        num_clust <- number_of_clusters(x = subx, method = num_clusters_method)
        print(num_clust)
        # Interactive confirmation: the detection output is shown, then
        # the user types the number of clusters to use.
        user_choice <- function() {
            ans <- readline(prompt = paste("Enter the chosen number of clusters",
                ":\n"))
            as.numeric(ans)
        }
        num_clust <- user_choice()
    } else {
        num_clust <- k
    }
    # Stage 3: final clustering.  The original dropped clustering_method on
    # the floor; it is now forwarded (only when supplied, so callers that
    # omit it keep the old default behaviour).
    if (missing(clustering_method)) {
        final_cluster <- clustering(subx, k.gap = num_clust)
    } else {
        final_cluster <- clustering(subx, k.gap = num_clust,
            method = clustering_method)
    }
    return(list(imp_var, num_clust, final_cluster))
}
#' Title get_PBx_from_DATA_C3
#'
#' Extracts the potential-biomarker (PB) columns from the full data matrix,
#' using the metadata table to decide which variables are PB.
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Need to have at least 2 columns, one with all variables name, another one which indicate
#' the type of each variable (CM, DX, PB)
#'
#' @return a data.frame with the PB columns of DATA
#'
#' @export
#'
#' @examples
#' # PBx <- get_PBx_from_DATA_C3(DATA, META_DATA)
#'
get_PBx_from_DATA_C3 <- function(DATA, META_DATA) {
    # as.character guards against a factor varName column (factor subscripts
    # would index by integer level codes, not by name).
    pb_vars <- as.character(META_DATA$varName[META_DATA$varCategory == "PB"])
    # drop = FALSE keeps a data.frame even when only one PB variable exists.
    x <- DATA[, pb_vars, drop = FALSE]
    return(x)
}
#' Title C3
#'
#' Third stage of the pipeline: selects informative potential bio-markers
#' and classifies the observations into the clusters found by C2 using only
#' the selected bio-markers.
#'
#' @param PBx data matrix
#' @param newy new assigned clusters, results from C2.
#' @param feature_selection_method method for the feature selection of the Potential Bio-Markers
#' @param classification_method method for classification using the potential bio-markers
#'
#' @return a list of two variables:
#' 1) vector with the names of important variables chosen
#' 2) classification result for each observation
#' @export
#'
#' @examples
#' C3(PBx, newy, feature_selection_method='RF', classification_method='RF')
#'
C3 <- function(PBx, newy, feature_selection_method, classification_method) {
    # Feature selection on the potential bio-markers.
    imp_var <- feature_selection(PBx, newy, method = feature_selection_method)
    # drop = FALSE keeps a data.frame when a single variable is selected.
    sub_PBx <- PBx[, imp_var, drop = FALSE]
    # Classification on the SELECTED bio-markers.  The original passed the
    # full PBx here, leaving sub_PBx unused and defeating the selection step.
    classification <- classification_fun(sub_PBx, newy, method = classification_method)
    return(list(imp_var, unname(classification)))
}
####################################### Potential biomarkers classification #
#' Title Classification for the potential Biomarkers
#'
#' Dispatches to the requested classification backend.
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param method Classification method for the function to use: one of
#' 'RF', 'RF_downsampling', 'CART_information', 'CART_gini'
#'
#' @return Predicted values for each observation
#'
#' @export
#'
#' @examples
#' # classification_fun(PBx, newy, method='RF')
classification_fun <- function(PBx, newy, method = "RF") {
    # switch() replaces the original if-chain; an unknown method now raises
    # an informative error instead of "object 'output' not found".
    output <- switch(method,
        RF = RF_classify(PBx, newy),
        RF_downsampling = RF_one_by_one(PBx, newy),
        CART_information = cart_function(PBx, newy, criteria = "information"),
        CART_gini = cart_function(PBx, newy, criteria = "gini"),
        stop("unknown classification method: ", method, call. = FALSE))
    return(output)
}
### Random Forest Without Down Sampling ###
#' Title Classification Using Random Forest Without Down Sampling
#'
#' Fits a 50-tree random forest of newy on all columns of PBx and returns
#' the out-of-bag predictions.
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return The predicted values for each observation
#'
#' @export
#'
#' @examples
#' # RF_classify(PBx, newy)
library(randomForest)
RF_classify <- function(PBx, newy) {
    # Classification requires a factor response; coerce with a warning.
    if (!is.factor(newy)) {
        warning("y is not a factor - but was coerced into one.")
        newy <- as.factor(newy)
    }
    dat <- data.frame(PBx, newy)
    fit <- randomForest(newy ~ ., data = dat, ntree = 50)
    # Side effect kept from the original: the fitted forest is exposed in
    # the global environment as `model`.
    model <<- fit
    return(fit$predicted)
}
### Random forest with down sampling ###
#' Title Classification Using Random Forest With Down Sampling
#'
#' Fits one binary (one-vs-rest) random forest per cluster, down-sampling
#' both classes to the size of the target cluster via sampsize.
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return a named list with one fitted randomForest object per cluster
#' @export
#'
#' @examples
#' # RF_one_by_one(PBx, newy)
RF_one_by_one <- function(PBx, newy) {
    if (!is.factor(newy)) {
        warning("y is not a factor - but was coerced into one.")
    }
    # Normalise the response to integer codes 1..K so that `newy == i`
    # below is well defined.  The original converted only non-factor input,
    # so a factor response was compared level-labels-against-integers.
    newy <- as.numeric(as.factor(newy))
    n_clus <- length(unique(newy))
    rflist <- setNames(vector("list", n_clus), paste("cluster", seq_len(n_clus)))
    for (i in seq_len(n_clus)) {
        # One-vs-rest indicator for cluster i.
        class_2 <- ifelse(newy == i, 1, 0)
        nmin <- sum(class_2 == 1)
        # sampsize = rep(nmin, 2) balances both classes to the target size.
        rflist[[i]] <- randomForest(factor(class_2) ~ ., data = PBx, ntree = 1000,
            importance = TRUE, proximity = TRUE, sampsize = rep(nmin, 2))
    }
    return(rflist)
}
### CART ###
#' Title Classification Using CART
#'
#' Fits a classification tree of newy on all columns of PBx and returns the
#' in-sample class predictions.
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param criteria splitting criterion, 'gini' or 'information'
#'
#' @return factor of predicted classes, one per observation
#' @export
#'
#' @examples
#' # cart_function(PBx, newy, 'information')
cart_function <- function(PBx, newy, criteria = "gini") {
    fulldata <- data.frame(PBx, newy)
    # rpart:: is qualified explicitly; the original relied on rpart being
    # attached elsewhere.
    cart <- rpart::rpart(newy ~ ., data = fulldata, method = "class",
        parms = list(split = criteria))
    # Side effect kept from the original: the fitted tree is exposed in the
    # global environment as `model`.
    model <<- cart
    pred <- predict(cart, type = "class")
    return(pred)
}
|
\name{Qfn}
\alias{Qfn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to compute the criteria values Q.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Qfn(X, S, N)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
%% ~~Describe \code{X} here~~
}
\item{S}{
%% ~~Describe \code{S} here~~
}
\item{N}{
%% ~~Describe \code{N} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (X, S, N)
{
beta1 <- -1 * (1/(sigma^2))
dmat <- e2dist(X, S)^2
lammat <- exp(beta0 + beta1 * dmat)
lamvec <- exp(beta0 + beta1 * dmat[1:length(dmat)])
lamJ <- as.vector(t(lammat) \%*\% rep(1, nrow(X)))
pbar <- as.vector(1 - exp(-t(lammat) \%*\% rep(1, nrow(X))))
pbar <- mean(pbar)
M1 <- rep(1, ntraps * nrow(S))
M2 <- dmat[1:length(dmat)]
I11 <- (1/nrow(S)) * sum(lamvec)
I12 <- (1/nrow(S)) * sum(lamvec * M2)
I21 <- (1/nrow(S)) * sum(lamvec * M2)
I22 <- (1/nrow(S)) * sum(lamvec * M2 * M2)
I <- matrix(c(I11, I12, I21, I22), nrow = 2, byrow = TRUE)
I <- N * pbar * I
V <- solve(I)
Q1 <- sum(diag(V))
sumsJ <- as.vector(t(lammat * lammat * (diag(V)[1] + (dmat^2) *
diag(V)[2] - 2 * dmat * V[1, 2])) \%*\% rep(1, nrow(X)))
var.pbar <- ((1/nrow(S))^2) * sum(exp(-lamJ) * exp(-lamJ) *
sumsJ)
part1 <- (N * N * var.pbar)
part2 <- N * (1 - pbar)/pbar
total <- part1 + part2
newpart2 <- N * (1 - pbar) * (var.pbar + 1)/pbar
old <- N * N * var.pbar + newpart2
fixed <- N * pbar * ((1 - pbar) + N * pbar) * (var.pbar/(pbar^4))
Q1 <- part1
Q2 <- newpart2
Q3 <- total
Q4 <- Q1
Q5 <- fixed
Q6 <- 1 - pbar
Q7 <- var.pbar
c(Q1, Q2, Q3, Q4, Q5, Q6, Q7)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/Qfn.Rd
|
no_license
|
jaroyle/scrDesign
|
R
| false
| false
| 2,654
|
rd
|
\name{Qfn}
\alias{Qfn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to compute the criteria values Q.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Qfn(X, S, N)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
%% ~~Describe \code{X} here~~
}
\item{S}{
%% ~~Describe \code{S} here~~
}
\item{N}{
%% ~~Describe \code{N} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (X, S, N)
{
beta1 <- -1 * (1/(sigma^2))
dmat <- e2dist(X, S)^2
lammat <- exp(beta0 + beta1 * dmat)
lamvec <- exp(beta0 + beta1 * dmat[1:length(dmat)])
lamJ <- as.vector(t(lammat) \%*\% rep(1, nrow(X)))
pbar <- as.vector(1 - exp(-t(lammat) \%*\% rep(1, nrow(X))))
pbar <- mean(pbar)
M1 <- rep(1, ntraps * nrow(S))
M2 <- dmat[1:length(dmat)]
I11 <- (1/nrow(S)) * sum(lamvec)
I12 <- (1/nrow(S)) * sum(lamvec * M2)
I21 <- (1/nrow(S)) * sum(lamvec * M2)
I22 <- (1/nrow(S)) * sum(lamvec * M2 * M2)
I <- matrix(c(I11, I12, I21, I22), nrow = 2, byrow = TRUE)
I <- N * pbar * I
V <- solve(I)
Q1 <- sum(diag(V))
sumsJ <- as.vector(t(lammat * lammat * (diag(V)[1] + (dmat^2) *
diag(V)[2] - 2 * dmat * V[1, 2])) \%*\% rep(1, nrow(X)))
var.pbar <- ((1/nrow(S))^2) * sum(exp(-lamJ) * exp(-lamJ) *
sumsJ)
part1 <- (N * N * var.pbar)
part2 <- N * (1 - pbar)/pbar
total <- part1 + part2
newpart2 <- N * (1 - pbar) * (var.pbar + 1)/pbar
old <- N * N * var.pbar + newpart2
fixed <- N * pbar * ((1 - pbar) + N * pbar) * (var.pbar/(pbar^4))
Q1 <- part1
Q2 <- newpart2
Q3 <- total
Q4 <- Q1
Q5 <- fixed
Q6 <- 1 - pbar
Q7 <- var.pbar
c(Q1, Q2, Q3, Q4, Q5, Q6, Q7)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## --------------------------------------------------------------------
## fourthcorner: fourth-corner analysis linking a table of site
## attributes (tabR), a sites x species table (tabL) and a table of
## species traits (tabQ).  Significance is assessed by 'nrepet'
## permutations; 'modeltype' picks the permutation model (1-5), and
## modeltype = 6 runs models 2 and 4 and combines the two tests.
## NOTE(review): depends on ade4 internals (fac2disj,
## combine.4thcorner, as.krandtest) and the compiled C routine
## "quatriemecoin"; their behaviour is assumed here, not shown.
## --------------------------------------------------------------------
"fourthcorner" <- function(tabR, tabL, tabQ, modeltype = 6,nrepet = 999, tr01 = FALSE, p.adjust.method.G = p.adjust.methods, p.adjust.method.D = p.adjust.methods, p.adjust.D = c("global","levels"), ...) {
  ## tabR ,tabL, tabQ are 3 data frames containing the data
  ## permut.model is the permutational model and can take 6 values (1:6) 6 corresponds to the combination of 2 and 4
  ## -------------------------------
  ## Test of the different arguments
  ## -------------------------------
  if (!is.data.frame(tabR))
    stop("data.frame expected")
  if (!is.data.frame(tabL))
    stop("data.frame expected")
  if (!is.data.frame(tabQ))
    stop("data.frame expected")
  if (any(is.na(tabR)))
    stop("na entries in table")
  if (any(is.na(tabL)))
    stop("na entries in table")
  if (any(tabL<0))
    stop("negative values in table L")
  if (any(is.na(tabQ)))
    stop("na entries in table")
  p.adjust.D <- match.arg(p.adjust.D)
  p.adjust.method.D <- match.arg(p.adjust.method.D)
  p.adjust.method.G <- match.arg(p.adjust.method.G)
  if (sum(modeltype==(1:6))!=1)
    stop("modeltype should be 1, 2, 3, 4, 5 or 6")
  ## modeltype 6: run the analysis under permutation models 2 and 4 and
  ## merge the two tests (most conservative combination).
  if(modeltype == 6){
    test1 <- fourthcorner(tabR, tabL, tabQ, modeltype = 2,nrepet = nrepet, tr01 = tr01, p.adjust.method.G = p.adjust.method.G, p.adjust.method.D = p.adjust.method.D, p.adjust.D = p.adjust.D, ...)
    test2 <- fourthcorner(tabR, tabL, tabQ, modeltype = 4,nrepet = nrepet, tr01 = tr01, p.adjust.method.G = p.adjust.method.G, p.adjust.method.D = p.adjust.method.D, p.adjust.D = p.adjust.D, ...)
    res <- combine.4thcorner(test1,test2)
    res$call <- res$tabD2$call <- res$tabD$call <- res$tabG$call <- match.call()
    return(res)
  }
  nrowL <- nrow(tabL)
  ncolL <- ncol(tabL)
  nrowR <- nrow(tabR)
  nrowQ <- nrow(tabQ)
  nvarQ <- ncol(tabQ)
  nvarR <- ncol(tabR)
  ## tabR rows and tabQ rows must match tabL rows and columns respectively
  if (nrowR != nrowL)
    stop("Non equal row numbers")
  if (nrowQ != ncolL)
    stop("Non equal row numbers")
  ## transform the data into presence-absence if tr01 = TRUE
  if (tr01)
    {
      cat("Values in table L are 0-1 transformed\n")
      tabL <- ifelse(tabL==0,0,1)
    }
  ## ------------------------------------------
  ## Create the data matrices for R and Q
  ## Transform factors into disjunctive tables
  ## tabR becomes matR and tabQ becomes matQ
  ## ------------------------------------------
  ## For tabR
  ## matR starts with a dummy column (removed below); assignR maps each
  ## column of matR back to its original variable in tabR.
  matR <- matrix(0, nrowR, 1)
  provinames <- "tmp"
  assignR <- NULL
  k <- 0
  indexR <- rep(0, nvarR)
  for (j in 1:nvarR) {
    ## Get the type of data
    ## The type is store in the index vector (1 for numeric / 2 for factor)
    if (is.numeric(tabR[, j])) {
      indexR[j] <- 1
      matR <- cbind(matR, tabR[, j])
      provinames <- c(provinames, names(tabR)[j])
      k <- k + 1
      assignR <- c(assignR, k)
    }
    else if (is.factor(tabR[, j])) {
      indexR[j] <- 2
      if (is.ordered(tabR[, j]))
        warning("ordered variables will be considered as factor")
      ## fac2disj (ade4 internal): factor -> 0/1 disjunctive table
      w <- fac2disj(tabR[, j], drop = TRUE)
      cha <- paste(substr(names(tabR)[j], 1, 5), ".", names(w), sep = "")
      matR <- cbind(matR, w)
      provinames <- c(provinames, cha)
      k <- k + 1
      assignR <- c(assignR, rep(k, length(cha)))
    } else stop("'tabR' must contain only numeric values or factors (see the '", names(tabR)[j] ,"' variable in 'tabR').")
  }
  ## drop the dummy first column
  matR <- data.frame(matR[, -1])
  names(matR) <- provinames[-1]
  ncolR <- ncol(matR)
  ## ----------
  ## For tabQ (same expansion as for tabR above)
  matQ <- matrix(0, nrowQ, 1)
  provinames <- "tmp"
  assignQ <- NULL
  k <- 0
  indexQ <- rep(0, nvarQ)
  for (j in 1:nvarQ) {
    ## Get the type of data
    ## The type is stored in the index vector (1 for numeric / 2 for factor)
    if (is.numeric(tabQ[, j])) {
      indexQ[j] <- 1
      matQ <- cbind(matQ, tabQ[, j])
      provinames <- c(provinames, names(tabQ)[j])
      k <- k + 1
      assignQ <- c(assignQ, k)
    }
    else if (is.factor(tabQ[, j])) {
      indexQ[j] <- 2
      if (is.ordered(tabQ[, j]))
        warning("ordered variables will be considered as factor")
      w <- fac2disj(tabQ[, j], drop = TRUE)
      cha <- paste(substr(names(tabQ)[j], 1, 5), ".", names(w), sep = "")
      matQ <- cbind(matQ, w)
      provinames <- c(provinames, cha)
      k <- k + 1
      assignQ <- c(assignQ, rep(k, length(cha)))
    } else stop("'tabQ' must contain only numeric values or factors (see the '", names(tabQ)[j] ,"' variable in 'tabQ').")
  }
  matQ <- data.frame(matQ[, -1])
  names(matQ) <- provinames[-1]
  ncolQ <- ncol(matQ)
  ## ----------
  ##----- create objects to store results -------#
  ## One row per permutation plus the observed statistic (row 1).
  ## tabD/tabD2: cell-level (column x column) statistics;
  ## tabG: variable-level (global) statistics.
  tabD <- matrix(0,nrepet + 1, ncolR * ncolQ)
  tabD2 <- matrix(0,nrepet + 1, ncolR * ncolQ)
  tabG <- matrix(0,nrepet + 1, nvarR * nvarQ)
  res <- list()
  ##------------------
  ## Call the C code
  ##------------------
  res <- .C("quatriemecoin",
            as.double(t(matR)),
            as.double(t(tabL)),
            as.double(t(matQ)),
            as.integer(ncolR),
            as.integer(nvarR),
            as.integer(nrowL),
            as.integer(ncolL),
            as.integer(ncolQ),
            as.integer(nvarQ),
            as.integer(nrepet),
            modeltype = as.integer(modeltype),
            tabD = as.double(tabD),
            tabD2 = as.double(tabD2),
            tabG = as.double(tabG),
            as.integer(indexR),
            as.integer(indexQ),
            as.integer(assignR),
            as.integer(assignQ),
            PACKAGE="ade4")[c("tabD","tabD2","tabG")]
  ##-------------------------------------------------------------------#
  ## Outputs                                                           #
  ##-------------------------------------------------------------------#
  res$varnames.R <- names(tabR)
  res$colnames.R <- names(matR)
  res$varnames.Q <- names(tabQ)
  res$colnames.Q <- names(matQ)
  res$indexQ <- indexQ
  res$assignQ <- assignQ
  res$assignR <- assignR
  res$indexR <- indexR
  ## set invalid permutation to NA (in the case of levels of a factor with no observation)
  ## (the C code flags invalid values with -999)
  res$tabD <- ifelse(res$tabD < (-998), NA, res$tabD)
  res$tabG <- ifelse(res$tabG < (-998), NA, res$tabG)
  ## Reshape the tables (the C routine returns flat vectors, row-major)
  res$tabD <- matrix(res$tabD, nrepet + 1, ncolR * ncolQ, byrow=TRUE)
  res$tabD2 <- matrix(res$tabD2, nrepet + 1, ncolR * ncolQ, byrow=TRUE)
  res$tabG <- matrix(res$tabG, nrepet + 1, nvarR * nvarQ, byrow=TRUE)
  ## Create vectors to store type of statistics and alternative hypotheses
  names.stat.D <- vector(mode="character")
  names.stat.D2 <- vector(mode="character")
  names.stat.G <- vector(mode="character")
  alter.G <- vector(mode="character")
  alter.D <- vector(mode="character")
  alter.D2 <- vector(mode="character")
  ## Variable-level (G) statistics: r for numeric/numeric, F for
  ## numeric/factor, Chi2 for factor/factor.
  for (i in 1:nvarQ){
    for (j in 1:nvarR){
      ## Type of statistics for G and alternative hypotheses
      if ((res$indexR[j]==1)&(res$indexQ[i]==1)){
        names.stat.G <- c(names.stat.G, "r")
        alter.G <- c(alter.G, "two-sided")
      }
      if ((res$indexR[j]==1)&(res$indexQ[i]==2)){
        names.stat.G <- c(names.stat.G, "F")
        alter.G <- c(alter.G, "greater")
      }
      if ((res$indexR[j]==2)&(res$indexQ[i]==1)){
        names.stat.G <- c(names.stat.G, "F")
        alter.G <- c(alter.G, "greater")
      }
      if ((res$indexR[j]==2)&(res$indexQ[i]==2)){
        names.stat.G <- c(names.stat.G, "Chi2")
        alter.G <- c(alter.G, "greater")
      }
    }
  }
  ## Cell-level (D / D2) statistics, one per column pair of matR x matQ;
  ## assignR/assignQ recover the parent variable of each expanded column.
  for (i in 1:ncolQ){
    for (j in 1:ncolR){
      ## Type of statistics for D and alternative hypotheses
      idx.vars <- ncolR * (i-1) + j
      if ((res$indexR[res$assignR[j]]==1)&(res$indexQ[res$assignQ[i]]==1)){
        names.stat.D <- c(names.stat.D, "r")
        names.stat.D2 <- c(names.stat.D2, "r")
        alter.D <- c(alter.D, "two-sided")
        alter.D2 <- c(alter.D2, "two-sided")
      }
      if ((res$indexR[res$assignR[j]]==1)&(res$indexQ[res$assignQ[i]]==2)){
        names.stat.D <- c(names.stat.D, "Homog.")
        names.stat.D2 <- c(names.stat.D2, "r")
        alter.D <- c(alter.D, "less")
        alter.D2 <- c(alter.D2, "two-sided")
      }
      if ((res$indexR[res$assignR[j]]==2)&(res$indexQ[res$assignQ[i]]==1)){
        names.stat.D <- c(names.stat.D, "Homog.")
        names.stat.D2 <- c(names.stat.D2, "r")
        alter.D <- c(alter.D, "less")
        alter.D2 <- c(alter.D2, "two-sided")
      }
      if ((res$indexR[res$assignR[j]]==2)&(res$indexQ[res$assignQ[i]]==2)){
        names.stat.D <- c(names.stat.D, "N")
        names.stat.D2 <- c(names.stat.D2, "N")
        alter.D <- c(alter.D, "two-sided")
        alter.D2 <- c(alter.D2, "two-sided")
      }
    }
  }
  ## Wrap observed + simulated values into krandtest objects (ade4).
  provinames <- apply(expand.grid(res$colnames.R, res$colnames.Q), 1, paste, collapse=" / ")
  res$tabD <- as.krandtest(obs = res$tabD[1, ], sim = res$tabD[-1, , drop = FALSE], names = provinames, alter = alter.D, call = match.call(), p.adjust.method = p.adjust.method.D, ...)
  res$tabD2 <- as.krandtest(obs = res$tabD2[1, ], sim = res$tabD2[-1, , drop = FALSE], names = provinames, alter = alter.D2, call = match.call(), p.adjust.method = p.adjust.method.D, ...)
  if(p.adjust.D == "levels"){
    ## adjustment only between levels of a factor (corresponds to the original paper of Legendre et al. 1997)
    for (i in 1:nvarQ){
      for (j in 1:nvarR){
        idx.varR <- which(res$assignR == j)
        idx.varQ <- which(res$assignQ == i)
        idx.vars <- nvarR * (idx.varQ - 1) + idx.varR
        res$tabD$adj.pvalue[idx.vars] <- p.adjust(res$tabD$pvalue[idx.vars], method = p.adjust.method.D)
        res$tabD2$adj.pvalue[idx.vars] <- p.adjust(res$tabD2$pvalue[idx.vars], method = p.adjust.method.D)
      }
    }
    res$tabD$adj.method <- res$tabD2$adj.method <- paste(p.adjust.method.D, "by levels")
  }
  provinames <- apply(expand.grid(res$varnames.R, res$varnames.Q), 1, paste, collapse=" / ")
  res$tabG <- as.krandtest(obs = res$tabG[1, ], sim = res$tabG[-1, ,drop = FALSE], names = provinames, alter = alter.G, call = match.call(), p.adjust.method = p.adjust.method.G, ...)
  res$tabD$statnames <- names.stat.D
  res$tabD2$statnames <- names.stat.D2
  res$tabG$statnames <- names.stat.G
  res$call <- match.call()
  res$model <- modeltype
  res$npermut <- nrepet
  class(res) <- "4thcorner"
  return(res)
}
|
/R/fourthcorner.R
|
no_license
|
cran/ade4
|
R
| false
| false
| 10,483
|
r
|
"fourthcorner" <- function(tabR, tabL, tabQ, modeltype = 6,nrepet = 999, tr01 = FALSE, p.adjust.method.G = p.adjust.methods, p.adjust.method.D = p.adjust.methods, p.adjust.D = c("global","levels"), ...) {
## tabR ,tabL, tabQ are 3 data frames containing the data
## permut.model is the permutational model and can take 6 values (1:6) 6 corresponds to the combination of 2 and 4
## -------------------------------
## Test of the different arguments
## -------------------------------
if (!is.data.frame(tabR))
stop("data.frame expected")
if (!is.data.frame(tabL))
stop("data.frame expected")
if (!is.data.frame(tabQ))
stop("data.frame expected")
if (any(is.na(tabR)))
stop("na entries in table")
if (any(is.na(tabL)))
stop("na entries in table")
if (any(tabL<0))
stop("negative values in table L")
if (any(is.na(tabQ)))
stop("na entries in table")
p.adjust.D <- match.arg(p.adjust.D)
p.adjust.method.D <- match.arg(p.adjust.method.D)
p.adjust.method.G <- match.arg(p.adjust.method.G)
if (sum(modeltype==(1:6))!=1)
stop("modeltype should be 1, 2, 3, 4, 5 or 6")
if(modeltype == 6){
test1 <- fourthcorner(tabR, tabL, tabQ, modeltype = 2,nrepet = nrepet, tr01 = tr01, p.adjust.method.G = p.adjust.method.G, p.adjust.method.D = p.adjust.method.D, p.adjust.D = p.adjust.D, ...)
test2 <- fourthcorner(tabR, tabL, tabQ, modeltype = 4,nrepet = nrepet, tr01 = tr01, p.adjust.method.G = p.adjust.method.G, p.adjust.method.D = p.adjust.method.D, p.adjust.D = p.adjust.D, ...)
res <- combine.4thcorner(test1,test2)
res$call <- res$tabD2$call <- res$tabD$call <- res$tabG$call <- match.call()
return(res)
}
nrowL <- nrow(tabL)
ncolL <- ncol(tabL)
nrowR <- nrow(tabR)
nrowQ <- nrow(tabQ)
nvarQ <- ncol(tabQ)
nvarR <- ncol(tabR)
if (nrowR != nrowL)
stop("Non equal row numbers")
if (nrowQ != ncolL)
stop("Non equal row numbers")
## transform the data into presence-absence if trO1 = TRUE
if (tr01)
{
cat("Values in table L are 0-1 transformed\n")
tabL <- ifelse(tabL==0,0,1)
}
## ------------------------------------------
## Create the data matrices for R and Q
## Transform factors into disjunctive tables
## tabR becomes matR and tabQ becomes matQ
## ------------------------------------------
## For tabR
matR <- matrix(0, nrowR, 1)
provinames <- "tmp"
assignR <- NULL
k <- 0
indexR <- rep(0, nvarR)
for (j in 1:nvarR) {
## Get the type of data
## The type is store in the index vector (1 for numeric / 2 for factor)
if (is.numeric(tabR[, j])) {
indexR[j] <- 1
matR <- cbind(matR, tabR[, j])
provinames <- c(provinames, names(tabR)[j])
k <- k + 1
assignR <- c(assignR, k)
}
else if (is.factor(tabR[, j])) {
indexR[j] <- 2
if (is.ordered(tabR[, j]))
warning("ordered variables will be considered as factor")
w <- fac2disj(tabR[, j], drop = TRUE)
cha <- paste(substr(names(tabR)[j], 1, 5), ".", names(w), sep = "")
matR <- cbind(matR, w)
provinames <- c(provinames, cha)
k <- k + 1
assignR <- c(assignR, rep(k, length(cha)))
} else stop("'tabR' must contain only numeric values or factors (see the '", names(tabR)[j] ,"' variable in 'tabR').")
}
matR <- data.frame(matR[, -1])
names(matR) <- provinames[-1]
ncolR <- ncol(matR)
## ----------
## For tabQ
matQ <- matrix(0, nrowQ, 1)
provinames <- "tmp"
assignQ <- NULL
k <- 0
indexQ <- rep(0, nvarQ)
for (j in 1:nvarQ) {
## Get the type of data
## The type is stored in the index vector (1 for numeric / 2 for factor)
if (is.numeric(tabQ[, j])) {
indexQ[j] <- 1
matQ <- cbind(matQ, tabQ[, j])
provinames <- c(provinames, names(tabQ)[j])
k <- k + 1
assignQ <- c(assignQ, k)
}
else if (is.factor(tabQ[, j])) {
indexQ[j] <- 2
if (is.ordered(tabQ[, j]))
warning("ordered variables will be considered as factor")
w <- fac2disj(tabQ[, j], drop = TRUE)
cha <- paste(substr(names(tabQ)[j], 1, 5), ".", names(w), sep = "")
matQ <- cbind(matQ, w)
provinames <- c(provinames, cha)
k <- k + 1
assignQ <- c(assignQ, rep(k, length(cha)))
} else stop("'tabQ' must contain only numeric values or factors (see the '", names(tabQ)[j] ,"' variable in 'tabQ').")
}
matQ <- data.frame(matQ[, -1])
names(matQ) <- provinames[-1]
ncolQ <- ncol(matQ)
## ----------
##----- create objects to store results -------#
tabD <- matrix(0,nrepet + 1, ncolR * ncolQ)
tabD2 <- matrix(0,nrepet + 1, ncolR * ncolQ)
tabG <- matrix(0,nrepet + 1, nvarR * nvarQ)
res <- list()
##------------------
## Call the C code
##------------------
res <- .C("quatriemecoin",
as.double(t(matR)),
as.double(t(tabL)),
as.double(t(matQ)),
as.integer(ncolR),
as.integer(nvarR),
as.integer(nrowL),
as.integer(ncolL),
as.integer(ncolQ),
as.integer(nvarQ),
as.integer(nrepet),
modeltype = as.integer(modeltype),
tabD = as.double(tabD),
tabD2 = as.double(tabD2),
tabG = as.double(tabG),
as.integer(indexR),
as.integer(indexQ),
as.integer(assignR),
as.integer(assignQ),
PACKAGE="ade4")[c("tabD","tabD2","tabG")]
##-------------------------------------------------------------------#
## Outputs #
##-------------------------------------------------------------------#
res$varnames.R <- names(tabR)
res$colnames.R <- names(matR)
res$varnames.Q <- names(tabQ)
res$colnames.Q <- names(matQ)
res$indexQ <- indexQ
res$assignQ <- assignQ
res$assignR <- assignR
res$indexR <- indexR
## set invalid permutation to NA (in the case of levels of a factor with no observation)
res$tabD <- ifelse(res$tabD < (-998), NA, res$tabD)
res$tabG <- ifelse(res$tabG < (-998), NA, res$tabG)
## Reshape the tables
res$tabD <- matrix(res$tabD, nrepet + 1, ncolR * ncolQ, byrow=TRUE)
res$tabD2 <- matrix(res$tabD2, nrepet + 1, ncolR * ncolQ, byrow=TRUE)
res$tabG <- matrix(res$tabG, nrepet + 1, nvarR * nvarQ, byrow=TRUE)
## Create vectors to store type of statistics and alternative hypotheses
names.stat.D <- vector(mode="character")
names.stat.D2 <- vector(mode="character")
names.stat.G <- vector(mode="character")
alter.G <- vector(mode="character")
alter.D <- vector(mode="character")
alter.D2 <- vector(mode="character")
for (i in 1:nvarQ){
for (j in 1:nvarR){
## Type of statistics for G and alternative hypotheses
if ((res$indexR[j]==1)&(res$indexQ[i]==1)){
names.stat.G <- c(names.stat.G, "r")
alter.G <- c(alter.G, "two-sided")
}
if ((res$indexR[j]==1)&(res$indexQ[i]==2)){
names.stat.G <- c(names.stat.G, "F")
alter.G <- c(alter.G, "greater")
}
if ((res$indexR[j]==2)&(res$indexQ[i]==1)){
names.stat.G <- c(names.stat.G, "F")
alter.G <- c(alter.G, "greater")
}
if ((res$indexR[j]==2)&(res$indexQ[i]==2)){
names.stat.G <- c(names.stat.G, "Chi2")
alter.G <- c(alter.G, "greater")
}
}
}
for (i in 1:ncolQ){
for (j in 1:ncolR){
## Type of statistics for D and alternative hypotheses
idx.vars <- ncolR * (i-1) + j
if ((res$indexR[res$assignR[j]]==1)&(res$indexQ[res$assignQ[i]]==1)){
names.stat.D <- c(names.stat.D, "r")
names.stat.D2 <- c(names.stat.D2, "r")
alter.D <- c(alter.D, "two-sided")
alter.D2 <- c(alter.D2, "two-sided")
}
if ((res$indexR[res$assignR[j]]==1)&(res$indexQ[res$assignQ[i]]==2)){
names.stat.D <- c(names.stat.D, "Homog.")
names.stat.D2 <- c(names.stat.D2, "r")
alter.D <- c(alter.D, "less")
alter.D2 <- c(alter.D2, "two-sided")
}
if ((res$indexR[res$assignR[j]]==2)&(res$indexQ[res$assignQ[i]]==1)){
names.stat.D <- c(names.stat.D, "Homog.")
names.stat.D2 <- c(names.stat.D2, "r")
alter.D <- c(alter.D, "less")
alter.D2 <- c(alter.D2, "two-sided")
}
if ((res$indexR[res$assignR[j]]==2)&(res$indexQ[res$assignQ[i]]==2)){
names.stat.D <- c(names.stat.D, "N")
names.stat.D2 <- c(names.stat.D2, "N")
alter.D <- c(alter.D, "two-sided")
alter.D2 <- c(alter.D2, "two-sided")
}
}
}
provinames <- apply(expand.grid(res$colnames.R, res$colnames.Q), 1, paste, collapse=" / ")
res$tabD <- as.krandtest(obs = res$tabD[1, ], sim = res$tabD[-1, , drop = FALSE], names = provinames, alter = alter.D, call = match.call(), p.adjust.method = p.adjust.method.D, ...)
res$tabD2 <- as.krandtest(obs = res$tabD2[1, ], sim = res$tabD2[-1, , drop = FALSE], names = provinames, alter = alter.D2, call = match.call(), p.adjust.method = p.adjust.method.D, ...)
if(p.adjust.D == "levels"){
## adjustment only between levels of a factor (corresponds to the original paper of Legendre et al. 1997)
for (i in 1:nvarQ){
for (j in 1:nvarR){
idx.varR <- which(res$assignR == j)
idx.varQ <- which(res$assignQ == i)
idx.vars <- nvarR * (idx.varQ - 1) + idx.varR
res$tabD$adj.pvalue[idx.vars] <- p.adjust(res$tabD$pvalue[idx.vars], method = p.adjust.method.D)
res$tabD2$adj.pvalue[idx.vars] <- p.adjust(res$tabD2$pvalue[idx.vars], method = p.adjust.method.D)
}
}
res$tabD$adj.method <- res$tabD2$adj.method <- paste(p.adjust.method.D, "by levels")
}
provinames <- apply(expand.grid(res$varnames.R, res$varnames.Q), 1, paste, collapse=" / ")
res$tabG <- as.krandtest(obs = res$tabG[1, ], sim = res$tabG[-1, ,drop = FALSE], names = provinames, alter = alter.G, call = match.call(), p.adjust.method = p.adjust.method.G, ...)
res$tabD$statnames <- names.stat.D
res$tabD2$statnames <- names.stat.D2
res$tabG$statnames <- names.stat.G
res$call <- match.call()
res$model <- modeltype
res$npermut <- nrepet
class(res) <- "4thcorner"
return(res)
}
|
# Fit a ridge-penalized (alpha = 0) gaussian glmnet model to the central
# nervous system correlation training set, selecting lambda by 10-fold CV
# on mean absolute error, and append the fitted path to a log file.
library(glmnet)

# header = TRUE spelled out: the original `head=T` relied on partial argument
# matching and on the reassignable alias T instead of TRUE.
mydata <- read.table("./TrainingSet/Correlation/central_nervous_system.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
# Named `fit` rather than `glm`, which shadowed stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0,
                 family = "gaussian", standardize = TRUE)

# Append the fitted glmnet path summary to the model log.
sink('./Model/EN/Correlation/central_nervous_system/central_nervous_system_003.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
/Model/EN/Correlation/central_nervous_system/central_nervous_system_003.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 405
|
r
|
# Fit a ridge-penalized (alpha = 0) gaussian glmnet model to the central
# nervous system correlation training set, selecting lambda by 10-fold CV
# on mean absolute error, and append the fitted path to a log file.
library(glmnet)

# header = TRUE spelled out: the original `head=T` relied on partial argument
# matching and on the reassignable alias T instead of TRUE.
mydata <- read.table("./TrainingSet/Correlation/central_nervous_system.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
# Named `fit` rather than `glm`, which shadowed stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0,
                 family = "gaussian", standardize = TRUE)

# Append the fitted glmnet path summary to the model log.
sink('./Model/EN/Correlation/central_nervous_system/central_nervous_system_003.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
## Purled knitr code from the FateID package vignette: quantification of cell
## fate bias in single-cell transcriptome data of intestinal cells.
## The `## ----` separator lines are the original knitr chunk markers.
## ----echo=FALSE----------------------------------------------------------
knitr::opts_chunk$set(fig.width=8, fig.height=8)
## ----eval = FALSE--------------------------------------------------------
# install.packages("FateID")
## ------------------------------------------------------------------------
library(FateID)
## ------------------------------------------------------------------------
# Example dataset shipped with the package.
data(intestine)
## ------------------------------------------------------------------------
# x: expression table used for classification (genes x cells).
x <- intestine$x
head(x[,1:5])
## ------------------------------------------------------------------------
# y: cluster assignment for every cell.
y <- intestine$y
head(y)
## ------------------------------------------------------------------------
# Cluster numbers of the target (mature) populations, as used in the vignette.
tar <- c(6,9,13)
## ------------------------------------------------------------------------
# Alternatively, derive target clusters from marker-gene expression.
FMarker <- list(c("Defa20__chr8","Defa24__chr8"), "Clca3__chr3", "Alpi__chr1")
xf <- getPart(x,FMarker,fthr=NULL,n=5)
head(xf$part)
head(xf$tar)
tar <- xf$tar
y <- xf$part
## ------------------------------------------------------------------------
# Reclassify cells into the target clusters (classifier-based, per FateID docs).
rc <- reclassify(x, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## ------------------------------------------------------------------------
# Same reclassification on the full (unfiltered) expression table v.
v <- intestine$v
rc <- reclassify(v, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## ------------------------------------------------------------------------
x <- rc$xf
## ------------------------------------------------------------------------
# Or select informative features by p-value cutoff.
x <- getFeat(v,y,tar,fpv=0.01)
## ------------------------------------------------------------------------
# Reset to the original inputs and compute the fate bias for each cell.
tar <- c(6,9,13)
x <- intestine$x
y <- intestine$y
fb <- fateBias(x, y, tar, z=NULL, minnr=5, minnrh=10, adapt=TRUE, confidence=0.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL)
## ------------------------------------------------------------------------
# Dimensional reductions: t-SNE, classical MDS, diffusion map, LLE, UMAP.
dr <- compdr(x, z=NULL, m=c("tsne","cmd","dm","lle","umap"), k=c(2,3), lle.n=30, dm.sigma="local", dm.distance="euclidean", tsne.perplexity=30, seed=12345)
## ------------------------------------------------------------------------
plotFateMap(y,dr,k=2,m="tsne")
## ----eval=FALSE----------------------------------------------------------
# plotFateMap(y,dr,k=3,m="tsne")
## ------------------------------------------------------------------------
# Fate-bias probabilities towards target cluster 6 ("t6") on the t-SNE map.
plotFateMap(y,dr,k=2,m="tsne",fb=fb,g="t6")
## ------------------------------------------------------------------------
pr <- plotFateMap(y,dr,k=2,m="tsne",trthr=.33,fb=fb,prc=TRUE)
## ------------------------------------------------------------------------
v <- intestine$v
pr <-plotFateMap(y, dr, k=2, m="tsne", g=c("Defa20__chr8", "Defa24__chr8"), n="Defa", x=v)
## ------------------------------------------------------------------------
E <- plotFateMap(y,dr,k=2,m="tsne",g="E",fb=fb)
head(E)
## ------------------------------------------------------------------------
# Principal curve through cells biased (> trthr) towards each target cluster.
pr <- prcurve(y,fb,dr,k=2,m="tsne",trthr=0.4,start=3)
## ------------------------------------------------------------------------
# Cell ordering along the trajectory towards target cluster 6.
n <- pr$trc[["t6"]]
## ------------------------------------------------------------------------
# Alternative ordering based on diffusion pseudotime.
trc <- dptTraj(x,y,fb,trthr=.25,distance="euclidean",sigma=1000)
#n <- trc[["t6"]]
## ------------------------------------------------------------------------
v <- intestine$v
fs <- filterset(v,n=n,minexpr=2,minnumber=1)
## ------------------------------------------------------------------------
# Self-organizing map of the pseudo-temporal expression profiles.
s1d <- getsom(fs,nb=1000,alpha=.5)
## ------------------------------------------------------------------------
ps <- procsom(s1d,corthr=.85,minsom=3)
## ------------------------------------------------------------------------
set.seed(111111)   # fixed seed so the sampled cluster colours are reproducible
fcol <- sample(rainbow(max(y)))
## ------------------------------------------------------------------------
plotheatmap(ps$nodes.z, xpart=y[n], xcol=fcol, ypart=unique(ps$nodes), xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.z, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.e, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.b, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
# Genes assigned to SOM node 1.
g <- names(ps$nodes)[ps$nodes == 1]
## ------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, alpha=.5, types=NULL)
## ------------------------------------------------------------------------
plotexpression(fs, y, "Clca4__chr3", n, col=fcol, cluster=FALSE, alpha=.5, types=NULL)
## ------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, types=sub("\\_\\d+","",n))
## ------------------------------------------------------------------------
group <- head(g,6)
plotexpressionProfile(fs, y, g, n, name="Node 1", cluster=FALSE)
## ------------------------------------------------------------------------
# Differential expression between cells biased towards t13 vs t6,
# restricted to cells in clusters 3-5.
thr <- .5
a <- "t13"
b <- "t6"
cl <- c(3,4,5)
A <- rownames(fb$probs)[fb$probs[,a] > thr]
A <- A[y[A] %in% cl]
B <- rownames(fb$probs)[fb$probs[,b] > thr]
B <- B[y[B] %in% cl]
de <- diffexpnb(v,A=A,B=B,DESeq=FALSE,norm=FALSE,vfit=NULL,locreg=FALSE)
## ------------------------------------------------------------------------
plotdiffgenesnb(de,mthr=-4,lthr=0,Aname=a,Bname=b,padj=FALSE)
## ------------------------------------------------------------------------
gene2gene(intestine$v,intestine$y,"Muc2__chr7","Apoa1__chr9")
## ------------------------------------------------------------------------
gene2gene(intestine$v, intestine$y, "Muc2__chr7", "Apoa1__chr9", fb=fb, tn="t6", plotnum=FALSE)
## ------------------------------------------------------------------------
# Feature importance of genes for the fate bias towards t6.
k <- impGenes(fb,"t6")
|
/inst/doc/FateID.R
|
no_license
|
lyc-1995/FateID
|
R
| false
| false
| 6,118
|
r
|
## Purled knitr code from the FateID package vignette: quantification of cell
## fate bias in single-cell transcriptome data of intestinal cells.
## The `## ----` separator lines are the original knitr chunk markers.
## ----echo=FALSE----------------------------------------------------------
knitr::opts_chunk$set(fig.width=8, fig.height=8)
## ----eval = FALSE--------------------------------------------------------
# install.packages("FateID")
## ------------------------------------------------------------------------
library(FateID)
## ------------------------------------------------------------------------
# Example dataset shipped with the package.
data(intestine)
## ------------------------------------------------------------------------
# x: expression table used for classification (genes x cells).
x <- intestine$x
head(x[,1:5])
## ------------------------------------------------------------------------
# y: cluster assignment for every cell.
y <- intestine$y
head(y)
## ------------------------------------------------------------------------
# Cluster numbers of the target (mature) populations, as used in the vignette.
tar <- c(6,9,13)
## ------------------------------------------------------------------------
# Alternatively, derive target clusters from marker-gene expression.
FMarker <- list(c("Defa20__chr8","Defa24__chr8"), "Clca3__chr3", "Alpi__chr1")
xf <- getPart(x,FMarker,fthr=NULL,n=5)
head(xf$part)
head(xf$tar)
tar <- xf$tar
y <- xf$part
## ------------------------------------------------------------------------
# Reclassify cells into the target clusters (classifier-based, per FateID docs).
rc <- reclassify(x, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## ------------------------------------------------------------------------
# Same reclassification on the full (unfiltered) expression table v.
v <- intestine$v
rc <- reclassify(v, y, tar, clthr=.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL, q=0.9)
y <- rc$part
## ------------------------------------------------------------------------
x <- rc$xf
## ------------------------------------------------------------------------
# Or select informative features by p-value cutoff.
x <- getFeat(v,y,tar,fpv=0.01)
## ------------------------------------------------------------------------
# Reset to the original inputs and compute the fate bias for each cell.
tar <- c(6,9,13)
x <- intestine$x
y <- intestine$y
fb <- fateBias(x, y, tar, z=NULL, minnr=5, minnrh=10, adapt=TRUE, confidence=0.75, nbfactor=5, use.dist=FALSE, seed=12345, nbtree=NULL)
## ------------------------------------------------------------------------
# Dimensional reductions: t-SNE, classical MDS, diffusion map, LLE, UMAP.
dr <- compdr(x, z=NULL, m=c("tsne","cmd","dm","lle","umap"), k=c(2,3), lle.n=30, dm.sigma="local", dm.distance="euclidean", tsne.perplexity=30, seed=12345)
## ------------------------------------------------------------------------
plotFateMap(y,dr,k=2,m="tsne")
## ----eval=FALSE----------------------------------------------------------
# plotFateMap(y,dr,k=3,m="tsne")
## ------------------------------------------------------------------------
# Fate-bias probabilities towards target cluster 6 ("t6") on the t-SNE map.
plotFateMap(y,dr,k=2,m="tsne",fb=fb,g="t6")
## ------------------------------------------------------------------------
pr <- plotFateMap(y,dr,k=2,m="tsne",trthr=.33,fb=fb,prc=TRUE)
## ------------------------------------------------------------------------
v <- intestine$v
pr <-plotFateMap(y, dr, k=2, m="tsne", g=c("Defa20__chr8", "Defa24__chr8"), n="Defa", x=v)
## ------------------------------------------------------------------------
E <- plotFateMap(y,dr,k=2,m="tsne",g="E",fb=fb)
head(E)
## ------------------------------------------------------------------------
# Principal curve through cells biased (> trthr) towards each target cluster.
pr <- prcurve(y,fb,dr,k=2,m="tsne",trthr=0.4,start=3)
## ------------------------------------------------------------------------
# Cell ordering along the trajectory towards target cluster 6.
n <- pr$trc[["t6"]]
## ------------------------------------------------------------------------
# Alternative ordering based on diffusion pseudotime.
trc <- dptTraj(x,y,fb,trthr=.25,distance="euclidean",sigma=1000)
#n <- trc[["t6"]]
## ------------------------------------------------------------------------
v <- intestine$v
fs <- filterset(v,n=n,minexpr=2,minnumber=1)
## ------------------------------------------------------------------------
# Self-organizing map of the pseudo-temporal expression profiles.
s1d <- getsom(fs,nb=1000,alpha=.5)
## ------------------------------------------------------------------------
ps <- procsom(s1d,corthr=.85,minsom=3)
## ------------------------------------------------------------------------
set.seed(111111)   # fixed seed so the sampled cluster colours are reproducible
fcol <- sample(rainbow(max(y)))
## ------------------------------------------------------------------------
plotheatmap(ps$nodes.z, xpart=y[n], xcol=fcol, ypart=unique(ps$nodes), xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.z, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.e, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
plotheatmap(ps$all.b, xpart=y[n], xcol=fcol, ypart=ps$nodes, xgrid=FALSE, ygrid=TRUE, xlab=FALSE)
## ------------------------------------------------------------------------
# Genes assigned to SOM node 1.
g <- names(ps$nodes)[ps$nodes == 1]
## ------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, alpha=.5, types=NULL)
## ------------------------------------------------------------------------
plotexpression(fs, y, "Clca4__chr3", n, col=fcol, cluster=FALSE, alpha=.5, types=NULL)
## ------------------------------------------------------------------------
plotexpression(fs, y, g, n, col=fcol, name="Node 1", cluster=FALSE, types=sub("\\_\\d+","",n))
## ------------------------------------------------------------------------
group <- head(g,6)
plotexpressionProfile(fs, y, g, n, name="Node 1", cluster=FALSE)
## ------------------------------------------------------------------------
# Differential expression between cells biased towards t13 vs t6,
# restricted to cells in clusters 3-5.
thr <- .5
a <- "t13"
b <- "t6"
cl <- c(3,4,5)
A <- rownames(fb$probs)[fb$probs[,a] > thr]
A <- A[y[A] %in% cl]
B <- rownames(fb$probs)[fb$probs[,b] > thr]
B <- B[y[B] %in% cl]
de <- diffexpnb(v,A=A,B=B,DESeq=FALSE,norm=FALSE,vfit=NULL,locreg=FALSE)
## ------------------------------------------------------------------------
plotdiffgenesnb(de,mthr=-4,lthr=0,Aname=a,Bname=b,padj=FALSE)
## ------------------------------------------------------------------------
gene2gene(intestine$v,intestine$y,"Muc2__chr7","Apoa1__chr9")
## ------------------------------------------------------------------------
gene2gene(intestine$v, intestine$y, "Muc2__chr7", "Apoa1__chr9", fb=fb, tn="t6", plotnum=FALSE)
## ------------------------------------------------------------------------
# Feature importance of genes for the fate bias towards t6.
k <- impGenes(fb,"t6")
|
\name{showShinyApp}
\alias{showShinyApp}
\title{
Display a 'shiny' application
}
\description{
Displays one of the built-in interactive 'shiny' applications in the browser. See Details for the apps available.
}
\usage{
showShinyApp(topic)
}
\arguments{
\item{topic}{
The name of the shiny app to display.
}
}
\details{
Two apps are currently included in the \pkg{wiqid} package:
\emph{"Beta"} displays a beta distribution and sliders which allow you to change the parameters. You can also input binomial data and obtain the conjugate beta posterior distribution.
\emph{"Gamma"} displays a gamma distribution with variable parameters, and can produce the conjugate gamma posterior for Poisson-distributed count data.
}
\value{
Nothing useful. The function is run for its side effect.
}
\author{
A much simplified version of code by Jason Bryer on Github at https://github.com/jbryer/IS606, adapted by Mike Meredith.
}
\examples{
showShinyApp() # Shows a list of available apps
\donttest{
showShinyApp("Beta")
}
}
|
/man/showShinyApp.Rd
|
no_license
|
BlueMaple/wiqid
|
R
| false
| false
| 1,022
|
rd
|
\name{showShinyApp}
\alias{showShinyApp}
\title{
Display a 'shiny' application
}
\description{
Displays one of the built-in interactive 'shiny' applications in the browser. See Details for the apps available.
}
\usage{
showShinyApp(topic)
}
\arguments{
\item{topic}{
The name of the shiny app to display.
}
}
\details{
Two apps are currently included in the \pkg{wiqid} package:
\emph{"Beta"} displays a beta distribution and sliders which allow you to change the parameters. You can also input binomial data and obtain the conjugate beta posterior distribution.
\emph{"Gamma"} displays a gamma distribution with variable parameters, and can produce the conjugate gamma posterior for Poisson-distributed count data.
}
\value{
Nothing useful. The function is run for its side effect.
}
\author{
A much simplified version of code by Jason Bryer on Github at https://github.com/jbryer/IS606, adapted by Mike Meredith.
}
\examples{
showShinyApp() # Shows a list of available apps
\donttest{
showShinyApp("Beta")
}
}
|
# Assignment: ASSIGNMENT 4
# Name: Gunasekaran, Ragunath
# Date: 2020-09-21
# Purpose: exploratory ggplot2 plots of the r4ds heights data and of the
# NYT COVID-19 state-level case counts (boxplots, bar charts, line charts).
## Load the ggplot2 package
library(ggplot2)
theme_set(theme_minimal())
## Set the working directory to the root of your DSC 520 directory
# NOTE(review): hard-coded absolute path -- runs only on the author's machine.
setwd("C:/Users/ragun/Documents/GitHub/dsc520-master/DSC520-new")
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("data/r4ds/heights.csv")
# https://ggplot2.tidyverse.org/reference/geom_boxplot.html
## Create boxplots of sex vs. earn and race vs. earn using `geom_point()` and `geom_boxplot()`
## sex vs. earn
ggplot(heights_df, aes(x=sex, y=earn)) + geom_point()+ geom_boxplot()
## race vs. earn
ggplot(heights_df, aes(x=race, y=earn)) + geom_point()+ geom_boxplot()
# https://ggplot2.tidyverse.org/reference/geom_bar.html
## Using `geom_bar()` plot a bar chart of the number of records for each `sex`
ggplot(heights_df, aes(sex)) + geom_bar()
## Using `geom_bar()` plot a bar chart of the number of records for each race
ggplot(heights_df, aes(race)) + geom_bar()
## Create a horizontal bar chart by adding `coord_flip()` to the previous plot
ggplot(heights_df, aes(race)) + geom_bar()+ coord_flip()
# https://www.rdocumentation.org/packages/ggplot2/versions/3.3.0/topics/geom_path
## Load the file `"data/nytimes/covid-19-data/us-states.csv"` and
## assign it to the `covid_df` dataframe
covid_df <- read.csv("data/nytimes/covid-19-data/us-states.csv")
## Parse the date column using `as.Date()``
covid_df$date <- as.Date(covid_df$date)
## Create three dataframes named `california_df`, `ny_df`, and `florida_df`
## containing the data from California, New York, and Florida
# which() indexing also silently drops any rows whose state is NA.
california_df <- covid_df[ which( covid_df$state == "California"), ]
ny_df <- covid_df[ which( covid_df$state == "New York"), ]
florida_df <- covid_df[ which( covid_df$state == "Florida"), ]
## Plot the number of cases in Florida using `geom_line()`
ggplot(data=florida_df, aes(x=date, y=cases, group=1)) + geom_line()
## Add lines for New York and California to the plot
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases)) +
geom_line(data=ny_df, aes(y = cases)) +
geom_line(data=california_df, aes(y = cases))
## Use the colors "darkred", "darkgreen", and "steelblue" for Florida, New York, and California
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases), color = "darkred") +
geom_line(data=ny_df, aes(y = cases), color="darkgreen") +
geom_line(data=california_df, aes(y = cases), color="steelblue")
## Add a legend to the plot using `scale_colour_manual`
## Add a blank (" ") label to the x-axis and the label "Cases" to the y axis
# Mapping colour inside aes() (instead of setting it) is what produces the legend.
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases, colour = "Florida")) +
geom_line(data=ny_df, aes(y = cases,colour="New York")) +
geom_line(data=california_df, aes(y = cases, colour="California")) +
scale_colour_manual("",
breaks = c("Florida", "New York", "California"),
values = c("darkred","darkgreen", "steelblue" )) +
xlab(" ") + ylab("Cases")
## Scale the y axis using `scale_y_log10()`
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases, colour = "Florida")) +
geom_line(data=ny_df, aes(y = cases,colour="New York")) +
geom_line(data=california_df, aes(y = cases, colour="California")) +
scale_colour_manual("",
breaks = c("Florida", "New York", "California"),
values = c("darkred","darkgreen", "steelblue" )) +
xlab(" ") + ylab("Cases") + scale_y_log10()
|
/assignments/assignment04/assignment_04_RagunathGunasekaran.R
|
permissive
|
RGunasekaran21249030/DSC520-new
|
R
| false
| false
| 3,532
|
r
|
# Assignment: ASSIGNMENT 4
# Name: Gunasekaran, Ragunath
# Date: 2020-09-21
# Purpose: exploratory ggplot2 plots of the r4ds heights data and of the
# NYT COVID-19 state-level case counts (boxplots, bar charts, line charts).
## Load the ggplot2 package
library(ggplot2)
theme_set(theme_minimal())
## Set the working directory to the root of your DSC 520 directory
# NOTE(review): hard-coded absolute path -- runs only on the author's machine.
setwd("C:/Users/ragun/Documents/GitHub/dsc520-master/DSC520-new")
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("data/r4ds/heights.csv")
# https://ggplot2.tidyverse.org/reference/geom_boxplot.html
## Create boxplots of sex vs. earn and race vs. earn using `geom_point()` and `geom_boxplot()`
## sex vs. earn
ggplot(heights_df, aes(x=sex, y=earn)) + geom_point()+ geom_boxplot()
## race vs. earn
ggplot(heights_df, aes(x=race, y=earn)) + geom_point()+ geom_boxplot()
# https://ggplot2.tidyverse.org/reference/geom_bar.html
## Using `geom_bar()` plot a bar chart of the number of records for each `sex`
ggplot(heights_df, aes(sex)) + geom_bar()
## Using `geom_bar()` plot a bar chart of the number of records for each race
ggplot(heights_df, aes(race)) + geom_bar()
## Create a horizontal bar chart by adding `coord_flip()` to the previous plot
ggplot(heights_df, aes(race)) + geom_bar()+ coord_flip()
# https://www.rdocumentation.org/packages/ggplot2/versions/3.3.0/topics/geom_path
## Load the file `"data/nytimes/covid-19-data/us-states.csv"` and
## assign it to the `covid_df` dataframe
covid_df <- read.csv("data/nytimes/covid-19-data/us-states.csv")
## Parse the date column using `as.Date()``
covid_df$date <- as.Date(covid_df$date)
## Create three dataframes named `california_df`, `ny_df`, and `florida_df`
## containing the data from California, New York, and Florida
# which() indexing also silently drops any rows whose state is NA.
california_df <- covid_df[ which( covid_df$state == "California"), ]
ny_df <- covid_df[ which( covid_df$state == "New York"), ]
florida_df <- covid_df[ which( covid_df$state == "Florida"), ]
## Plot the number of cases in Florida using `geom_line()`
ggplot(data=florida_df, aes(x=date, y=cases, group=1)) + geom_line()
## Add lines for New York and California to the plot
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases)) +
geom_line(data=ny_df, aes(y = cases)) +
geom_line(data=california_df, aes(y = cases))
## Use the colors "darkred", "darkgreen", and "steelblue" for Florida, New York, and California
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases), color = "darkred") +
geom_line(data=ny_df, aes(y = cases), color="darkgreen") +
geom_line(data=california_df, aes(y = cases), color="steelblue")
## Add a legend to the plot using `scale_colour_manual`
## Add a blank (" ") label to the x-axis and the label "Cases" to the y axis
# Mapping colour inside aes() (instead of setting it) is what produces the legend.
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases, colour = "Florida")) +
geom_line(data=ny_df, aes(y = cases,colour="New York")) +
geom_line(data=california_df, aes(y = cases, colour="California")) +
scale_colour_manual("",
breaks = c("Florida", "New York", "California"),
values = c("darkred","darkgreen", "steelblue" )) +
xlab(" ") + ylab("Cases")
## Scale the y axis using `scale_y_log10()`
ggplot(data=florida_df, aes(x=date, group=1)) +
geom_line(aes(y = cases, colour = "Florida")) +
geom_line(data=ny_df, aes(y = cases,colour="New York")) +
geom_line(data=california_df, aes(y = cases, colour="California")) +
scale_colour_manual("",
breaks = c("Florida", "New York", "California"),
values = c("darkred","darkgreen", "steelblue" )) +
xlab(" ") + ylab("Cases") + scale_y_log10()
|
#
# Template for hold-out-subjects cross-validation. You need to change 6 things here.
#
# 1) SPECIFY PACKAGES TO USE DURING LEARNING HERE
# this is needed because we need to pass them to each parallel cluster separately
packages=c('pROC', 'caret')
library('foreach')
library('doParallel')
library('parallel')
source('../functions.r')
for (pkg in packages) {
library(pkg, character.only=T)
}
# On the server the global package directory is not writable
# you might want to specify your local one here
.libPaths('/home/kuzovkin/R/x86_64-unknown-linux-gnu-library/3.0')
# 2) SPECIFY THE DATA FOLDER (WITH THE dataset.rds FILE PRODUCED BY ONE OF Code/preprocessing/extract_*.r SCRIPTS)
datafolder <- 'eye8ch1300ms80pca'
dataset <- readRDS(paste('../../Data/', datafolder, '/dataset.rds', sep=''))
# 3) SPECIFY THE METHOD YOU USE (NEEDED JUST FOR RECORD)
mlmethod <- 'gbm'
# 4) ENLIST PARAMETERS HERE
# gbm hyperparameters; each entry may hold several candidate values.
parameters <- list()
parameters[['n.trees']] <- c(500)
parameters[['shrinkage']] <- c(0.05)
parameters[['interaction.depth']] <- c(1)
# 5) THIS FUNCTION SHOULD RETURN classifier OBJECT
# @param p: current set of parameters
# @param trainingset: set to train model on
buildmodel <- function(p, trainingset) {
gbmGrid <- expand.grid(interaction.depth=p$interaction.depth, n.trees=p$n.trees, shrinkage=p$shrinkage)
trcontrol <- trainControl(method='none', classProbs=T)
classifier <- train(class ~., data=trainingset, 'gbm', trControl=trcontrol, tuneGrid = gbmGrid)
return(classifier)
}
# 6) THIS FUNCTION SHOULD RETURN VECTOR OF PREDICTED PROBABILITIES
# @param classifier: classifier to use to predict
# @param validset: set to validate results on
makeprediction <- function(classifier, validset) {
predicted <- predict(classifier, newdata=validset, type='prob')$positive
return(predicted)
}
# ------- In happy circumstances you should not look below this line ------- #
# measure time
timestart <- Sys.time()
# configure parallel foreach execution
ncores <- floor(detectCores() * 0.5) # use half of the available cores
cl <- makeCluster(ncores)
registerDoParallel(cl)
# initialize parameter search grid
results <- buildgrid(parameters)
# read in current parameter set
p <- results[1, ]
# loop over cross-validation (training, validation) pairs
# NOTE(review): 1:length(...) misbehaves if cvpairs is empty; seq_along() would be safer.
scores <- foreach(cv = 1:length(dataset$cvpairs), .combine='rbind', .packages=packages) %dopar% {
# take cv pair
cvpair <- dataset$cvpairs[[cv]]
# train a model
classifier <- buildmodel(p, cvpair$train)
# make a prediction on a validation and training sets
predicted.prob.out <- makeprediction(classifier, cvpair$valid)
predicted.prob.in <- makeprediction(classifier, cvpair$train)
# identify current subject rows in the training set
subjectlist <- read.table('../../Data/train_subject_list.csv', sep=',', header=F)
subjects <- sort(as.numeric(unique(subjectlist)$V1))
cvsubject <- subjects[cv]
train.idx <- which(subjectlist != cvsubject)
valid.idx <- which(subjectlist == cvsubject)
# load meta predictions on the training set
predicted.meta <- read.table('../../Data/train_meta_predictions.csv', sep=',', header=T)
predicted.meta <- predicted.meta$Prediction
# combine brain and meta predictions (simple unweighted average)
predicted.meta.out <- predicted.meta[valid.idx]
predicted.meta.in <- predicted.meta[train.idx]
predicted.prob.out <- (predicted.prob.out + predicted.meta.out) / 2
predicted.prob.in <- (predicted.prob.in + predicted.meta.in) / 2
# add record to results table (-1 marks a failed fold)
if (is.na(predicted.prob.out[1])) {
cat('WARNING: Was not able to predict probabilities. Deal with it.')
score.out <- -1
score.in <- -1
} else {
score.out <- as.numeric(roc(cvpair$valid$class, predicted.prob.out)$auc)
score.in <- as.numeric(roc(cvpair$train$class, predicted.prob.in)$auc)
}
data.frame('in-score'=score.in, 'out-score'=score.out)
}
# stop parallel processing cluster
stopCluster(cl)
# Tell how long the whole process took
print(Sys.time() - timestart)
# show per-fold AUC scores and their means
print(scores)
print(colMeans(scores))
# ------------------------------------------------------------------
# Final-model section (disabled).
# FIX: the original wrapped these lines in Python-style """ ... """ blocks.
# In R those are NOT comments: each parses as top-level string literals that
# are evaluated (and auto-printed when run with Rscript), and the trick breaks
# as soon as the enclosed code contains a double quote. Proper # comments are
# used instead; uncomment to build and export the final classifier.
# ------------------------------------------------------------------
# build final classifier
# classifier <- buildmodel(p, dataset$train)

# predict on training dataset and store the file
# predicted <- makeprediction(classifier, dataset$train)
# result <- data.frame(read.table('../../Data/TrainLabels.csv', sep = ',', header = T))
# result$Prediction = predicted
# write.table(result, paste('../../Data/train_', datafolder, '_', mlmethod, '.csv', sep=''), sep = ',', quote = F, row.names = F, col.names = T)

# predict on test dataset and store the file
# predicted <- makeprediction(classifier, dataset$test)
# result <- data.frame(read.table('../../Results/SampleSubmission.csv', sep = ',', header = T))
# result$Prediction = predicted
# write.table(result, paste('../../Results/subX_', datafolder, '_', mlmethod, '.csv', sep=''), sep=',', quote=F, row.names=F, col.names=T)
|
/Code/sandbox/run_one_gbm_meta.r
|
no_license
|
KnightofDawn/Kaggle-BCI-Challenge
|
R
| false
| false
| 4,986
|
r
|
#
# Template for hold-out-subjects cross-validation. You need to change 6 things here.
#
# 1) SPECIFY PACKAGES TO USE DURING LEARNING HERE
# this is needed because we need to pass them to each parallel cluster separately
packages=c('pROC', 'caret')
library('foreach')
library('doParallel')
library('parallel')
source('../functions.r')
for (pkg in packages) {
library(pkg, character.only=T)
}
# On the server the global package directory is not writable
# you might want to specify your local one here
.libPaths('/home/kuzovkin/R/x86_64-unknown-linux-gnu-library/3.0')
# 2) SPECIFY THE DATA FOLDER (WITH THE dataset.rds FILE PRODUCED BY ONE OF Code/preprocessing/extract_*.r SCRIPTS)
datafolder <- 'eye8ch1300ms80pca'
dataset <- readRDS(paste('../../Data/', datafolder, '/dataset.rds', sep=''))
# 3) SPECIFY THE METHOD YOU USE (NEEDED JUST FOR RECORD)
mlmethod <- 'gbm'
# 4) ENLIST PARAMETERS HERE
# gbm hyperparameters; each entry may hold several candidate values.
parameters <- list()
parameters[['n.trees']] <- c(500)
parameters[['shrinkage']] <- c(0.05)
parameters[['interaction.depth']] <- c(1)
# 5) THIS FUNCTION SHOULD RETURN classifier OBJECT
# @param p: current set of parameters
# @param trainingset: set to train model on
buildmodel <- function(p, trainingset) {
gbmGrid <- expand.grid(interaction.depth=p$interaction.depth, n.trees=p$n.trees, shrinkage=p$shrinkage)
trcontrol <- trainControl(method='none', classProbs=T)
classifier <- train(class ~., data=trainingset, 'gbm', trControl=trcontrol, tuneGrid = gbmGrid)
return(classifier)
}
# 6) THIS FUNCTION SHOULD RETURN VECTOR OF PREDICTED PROBABILITIES
# @param classifier: classifier to use to predict
# @param validset: set to validate results on
makeprediction <- function(classifier, validset) {
predicted <- predict(classifier, newdata=validset, type='prob')$positive
return(predicted)
}
# ------- In happy circumstances you should not look below this line ------- #
# measure time
timestart <- Sys.time()
# configure parallel foreach execution
ncores <- floor(detectCores() * 0.5) # use half of the available cores
cl <- makeCluster(ncores)
registerDoParallel(cl)
# initialize parameter search grid
results <- buildgrid(parameters)
# read in current parameter set
p <- results[1, ]
# loop over cross-validation (training, validation) pairs
# NOTE(review): 1:length(...) misbehaves if cvpairs is empty; seq_along() would be safer.
scores <- foreach(cv = 1:length(dataset$cvpairs), .combine='rbind', .packages=packages) %dopar% {
# take cv pair
cvpair <- dataset$cvpairs[[cv]]
# train a model
classifier <- buildmodel(p, cvpair$train)
# make a prediction on a validation and training sets
predicted.prob.out <- makeprediction(classifier, cvpair$valid)
predicted.prob.in <- makeprediction(classifier, cvpair$train)
# identify current subject rows in the training set
subjectlist <- read.table('../../Data/train_subject_list.csv', sep=',', header=F)
subjects <- sort(as.numeric(unique(subjectlist)$V1))
cvsubject <- subjects[cv]
train.idx <- which(subjectlist != cvsubject)
valid.idx <- which(subjectlist == cvsubject)
# load meta predictions on the training set
predicted.meta <- read.table('../../Data/train_meta_predictions.csv', sep=',', header=T)
predicted.meta <- predicted.meta$Prediction
# combine brain and meta predictions (simple unweighted average)
predicted.meta.out <- predicted.meta[valid.idx]
predicted.meta.in <- predicted.meta[train.idx]
predicted.prob.out <- (predicted.prob.out + predicted.meta.out) / 2
predicted.prob.in <- (predicted.prob.in + predicted.meta.in) / 2
# add record to results table (-1 marks a failed fold)
if (is.na(predicted.prob.out[1])) {
cat('WARNING: Was not able to predict probabilities. Deal with it.')
score.out <- -1
score.in <- -1
} else {
score.out <- as.numeric(roc(cvpair$valid$class, predicted.prob.out)$auc)
score.in <- as.numeric(roc(cvpair$train$class, predicted.prob.in)$auc)
}
data.frame('in-score'=score.in, 'out-score'=score.out)
}
# stop parallel processing cluster
stopCluster(cl)
# Tell how long the whole process took
print(Sys.time() - timestart)
# show per-fold AUC scores and their means
print(scores)
print(colMeans(scores))
# ------------------------------------------------------------------
# Final-model section (disabled).
# FIX: the original wrapped these lines in Python-style """ ... """ blocks.
# In R those are NOT comments: each parses as top-level string literals that
# are evaluated (and auto-printed when run with Rscript), and the trick breaks
# as soon as the enclosed code contains a double quote. Proper # comments are
# used instead; uncomment to build and export the final classifier.
# ------------------------------------------------------------------
# build final classifier
# classifier <- buildmodel(p, dataset$train)

# predict on training dataset and store the file
# predicted <- makeprediction(classifier, dataset$train)
# result <- data.frame(read.table('../../Data/TrainLabels.csv', sep = ',', header = T))
# result$Prediction = predicted
# write.table(result, paste('../../Data/train_', datafolder, '_', mlmethod, '.csv', sep=''), sep = ',', quote = F, row.names = F, col.names = T)

# predict on test dataset and store the file
# predicted <- makeprediction(classifier, dataset$test)
# result <- data.frame(read.table('../../Results/SampleSubmission.csv', sep = ',', header = T))
# result$Prediction = predicted
# write.table(result, paste('../../Results/subX_', datafolder, '_', mlmethod, '.csv', sep=''), sep=',', quote=F, row.names=F, col.names=T)
|
#Example 18, section 3.1, page 191
#Verify that det(AB) = det(A)*det(B), if A and B are two matrices.
# Build the two 2 x 2 matrices (data is filled column-wise).
# The original passed c(2, 2) as the `nrow` argument, of which only the
# first element is used; pass scalar nrow/ncol explicitly instead.
A <- matrix(c(1, 3, 2, 4), nrow = 2, ncol = 2)
print(A)
B <- matrix(c(2, 1, -1, 2), nrow = 2, ncol = 2)
print(B)
# Determinant of the matrix product AB.
mulAB <- A %*% B
print(mulAB)
det_AB <- det(mulAB)
print(det_AB)
# Product of the individual determinants.
det_A <- det(A)
det_B <- det(B)
det_product <- det_A * det_B
print(det_product)
# all.equal() returns TRUE or a character description of the mismatch,
# so test the result with isTRUE() rather than `== TRUE`, and call the
# generic instead of the S3 method all.equal.numeric() directly.
if (isTRUE(all.equal(det_AB, det_product))) {
  print("Property is true")
} else {
  print("Property is false")
}
|
/ashiq/R codes/chapter 3/example18_sec3_1.R
|
no_license
|
sahridhaya/BitPlease
|
R
| false
| false
| 400
|
r
|
#Example 18, section 3.1, page 191
#Verify that det(AB) = det(A)*det(B) ,if A and B are two matrices.
# Two 2 x 2 matrices, filled column-wise.
A <- matrix(c(1, 3, 2, 4), nrow = 2, ncol = 2)
print(A)
B <- matrix(c(2, 1, -1, 2), nrow = 2, ncol = 2)
print(B)
# Determinant of the product AB.
mulAB <- A %*% B
print(mulAB)
det_AB <- det(mulAB)
print(det_AB)
# Product of the individual determinants.
det_A <- det(A)
det_B <- det(B)
det_product <- det_A * det_B
print(det_product)
# Compare the two values up to numeric tolerance.
x <- all.equal.numeric(det_AB, det_product)
if (x == TRUE)
  print("Property is true")
|
# Draw the hair-color by eye-color dot plot (Figure 8.11) and export it
# to EPS and PDF in the working directory.
# NOTE(review): uses windows(), so this runs only on MS Windows -- confirm
# before porting; on other platforms substitute x11()/quartz().
fig08x011<-function(){
# lattice supplies dotplot(); require() returns FALSE instead of erroring
# when the package is missing
require(lattice)
# 4 x 4 frequency table: hair color in rows, eye color in columns
haireye<-matrix(data=c(7,10,16,94,26,14,14,17,119,54,29,84,68,15,5,20),
nrow=4,ncol=4,byrow=TRUE,dimnames=list(c("Blond","Red","Brunette","Black"),
c("Brown","Hazel","Green","Blue")))
# open a black-and-white trellis device, close all devices, then open a
# fixed-size Windows graphics window with the desired margins
trellis.device(color=FALSE)
graphics.off()
windows(width=4.5,height=3.5,pointsize=12)
par(fin=c(4.45,3.45),pin=c(4.45,3.45),
mai=c(0.85,0.85,0.25,0.25))
# force black-and-white rendering for the lattice output
trellis.par.set("color",FALSE)
# build the dot plot; lattice objects must be print()ed explicitly
figure <- dotplot(haireye,xlab="Frequency",
ylab="Hair Color",col.line="gray",lty="dotted",
lwd=1,as.table=FALSE,auto.key=TRUE)
#
print(figure)
# copy the on-screen device to EPS and PDF files
dev.copy2eps(file="fig08x011.eps")
dev.copy2pdf(file="fig08x011.pdf")
}
|
/graphicsforstatistics_2e_figures_scripts_r/Chapter 8/fig08x011.R
|
no_license
|
saqibarfeen/coding_time
|
R
| false
| false
| 661
|
r
|
# Draw the hair-color by eye-color dot plot (Figure 8.11) and export it
# to EPS and PDF in the working directory.
# NOTE(review): uses windows(), so this runs only on MS Windows -- confirm
# before porting; on other platforms substitute x11()/quartz().
fig08x011<-function(){
# lattice supplies dotplot(); require() returns FALSE instead of erroring
# when the package is missing
require(lattice)
# 4 x 4 frequency table: hair color in rows, eye color in columns
haireye<-matrix(data=c(7,10,16,94,26,14,14,17,119,54,29,84,68,15,5,20),
nrow=4,ncol=4,byrow=TRUE,dimnames=list(c("Blond","Red","Brunette","Black"),
c("Brown","Hazel","Green","Blue")))
# open a black-and-white trellis device, close all devices, then open a
# fixed-size Windows graphics window with the desired margins
trellis.device(color=FALSE)
graphics.off()
windows(width=4.5,height=3.5,pointsize=12)
par(fin=c(4.45,3.45),pin=c(4.45,3.45),
mai=c(0.85,0.85,0.25,0.25))
# force black-and-white rendering for the lattice output
trellis.par.set("color",FALSE)
# build the dot plot; lattice objects must be print()ed explicitly
figure <- dotplot(haireye,xlab="Frequency",
ylab="Hair Color",col.line="gray",lty="dotted",
lwd=1,as.table=FALSE,auto.key=TRUE)
#
print(figure)
# copy the on-screen device to EPS and PDF files
dev.copy2eps(file="fig08x011.eps")
dev.copy2pdf(file="fig08x011.pdf")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGetters.R
\name{RSS,MCR-method}
\alias{RSS,MCR-method}
\title{RSS accessor for MCR objects}
\usage{
\S4method{RSS}{MCR}(object)
}
\arguments{
\item{object}{object of type MCR}
}
\value{
RSS from object
}
\description{
MCR accessor RSS,
}
|
/man/RSS-MCR-method.Rd
|
no_license
|
lorenzgerber/tofsims
|
R
| false
| true
| 308
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGetters.R
\name{RSS,MCR-method}
\alias{RSS,MCR-method}
\title{RSS accessor for MCR objects}
\usage{
\S4method{RSS}{MCR}(object)
}
\arguments{
\item{object}{object of type MCR}
}
\value{
RSS from object
}
\description{
MCR accessor RSS,
}
|
#' Calculate the fetch length around a point
#'
#' Given a point, a shoreline layer and a vector of wind directions (bearings),
#' \code{fetch_len} calculates the distance from point to shore for each bearing.
#'
#' The fetch length (or fetch) is the distance of open water over which the wind
#' can blow in a specific direction. Note that bearings represent the direction
#' from where the wind originates.
#'
#' The optional \code{spread} argument defines relative directions that are
#' added to each main bearing to produce a set of sub-bearings. The fetch lengths
#' calculated for each sub-bearing are averaged with weights proportional to
#' \code{cos(spread)}. By default, \code{spread = 0} and fetch length is
#' calculated for the main bearings only.
#'
#' If \code{projected} is FALSE (the default), the input data must be in WGS84
#' geographic (longitude, latitude) coordinates. Geodesic distances are calculated
#' using the \code{\link[geosphere]{distGeo}} function from the geosphere R
#' package. All distances are expressed in meters.
#'
#' If \code{projected} is TRUE, the input data (\code{p} and \code{shoreline})
#' must share the same projection. Projected distances are calculated with the
#' rgeos R package. All distances are expressed in the projection's coordinates.
#'
#' If the shoreline layer is given as SpatialPolygons*, the function verifies
#' that the input point is outside all polygons (i.e. in water). If this is
#' not the case, it issues a warning and returns a vector of \code{NA}.
#'
#' @param p SpatialPoints* object of length 1 (single point).
#' @param bearings Vector of bearings, in degrees.
#' @param shoreline SpatialLines* or SpatialPolygons* object representing the
#'  shoreline.
#' @param dmax Maximum value of fetch length, returned if there is no land
#'  within a distance of \code{dmax} from a given bearing.
#' @param spread Vector of relative bearings (in degrees) for which
#'  to calculate fetch around each main bearing (see details).
#' @param projected Should projected coordinates be used to calculate fetch?
#' @param check_inputs Should the validity of inputs be checked? It is
#'  recommended to keep this TRUE, unless this function is called repeatedly from
#'  another function that already checks inputs.
#' @return A named vector representing the fetch length for each direction
#'  given in \code{bearings}.
#' @examples
#'  pt <- SpatialPoints(matrix(c(0, 0), ncol = 2),
#'                      proj4string = CRS("+proj=longlat"))
#'  # Shoreline is a rectangle from (-0.2, 0.25) to (0.3, 0.5)
#'  rect <- Polygon(cbind(c(rep(-0.2, 2), rep(0.3, 2), -0.2),
#'                        c(0.25, rep(0.3, 2), rep(0.25, 2))))
#'  land <- SpatialPolygons(list(Polygons(list(rect), ID = 1)),
#'                         proj4string = CRS("+proj=longlat"))
#'  fetch_len(pt, bearings = c(0, 45, 225, 315), land,
#'            dmax = 50000, spread = c(-10, 0, 10))
#' @seealso \code{\link{fetch_len_multi}} for an efficient alternative when
#'  computing fetch length for multiple points.
#' @export
fetch_len <- function(p, bearings, shoreline, dmax,
                      spread = 0, projected = FALSE, check_inputs = TRUE) {
  if (check_inputs) {
    if (!is(p, "SpatialPoints")) stop("p must be a SpatialPoints* object.")
    p <- as(p, "SpatialPoints") # remove DataFrame part if there is one
    if (length(p) != 1) stop("p must be a single point.")
    if (!(is(shoreline, "SpatialLines") || is(shoreline, "SpatialPolygons"))) {
      stop("shoreline must be a SpatialLines* or SpatialPolygons* object.")
    }
    if (projected) {
      if (!is.projected(p) || !is.projected(shoreline)) {
        stop("cannot use long/lat coordinates if projected = TRUE.")
      }
      if (proj4string(p) != proj4string(shoreline)) {
        stop("projections of p and shoreline do not match.")
      }
    } else if (is.projected(p) || is.projected(shoreline)) {
      stop(paste("p and shoreline must have unprojected (long/lat)",
                 "coordinates if projected = FALSE."))
    }
    if (!is.vector(bearings, "numeric")) stop("bearings must be a numeric vector.")
    if (!is.vector(spread, "numeric")) stop("spread must be a numeric vector.")
    if (!is.vector(dmax, "numeric") || length(dmax) != 1 || dmax <= 0) {
      stop("dmax must be a single number greater than 0.")
    }
  }
  # If shoreline is a polygons (land) layer, check that point is not on land
  if (is(shoreline, "SpatialPolygons")) {
    in_water <- is.null(rgeos::gIntersects(p, shoreline, byid = TRUE,
                                           returnDense = FALSE)[[1]])
    if (!in_water) {
      warning("point on land, returning NA")
      return(setNames(rep(NA, length(bearings)), bearings))
    }
  }
  # Clip shoreline layer to a rectangle around point
  # to guarantee at least dmax on each side
  clip_rect <- get_clip_rect(p, dmax, projected)
  # BUG FIX: the original called return() inside the innermost error
  # handler, which only exits the handler function -- the NA vector was
  # then assigned to shore_clip and processing continued. Instead, return
  # the condition object as a sentinel and bail out of fetch_len below.
  shore_clip <- tryCatch(
    rgeos::gIntersection(shoreline, clip_rect, byid = TRUE),
    # If it fails, try byid = FALSE
    error = function(e) tryCatch(
      rgeos::gIntersection(shoreline, clip_rect, byid = FALSE),
      error = function(e) e
    )
  )
  if (inherits(shore_clip, "error")) {
    warning("Error clipping shoreline, returning NA")
    return(setNames(rep(NA, length(bearings)), bearings))
  }
  # Convert any polygons to lines to get line-line intersections later
  shore_clip <- convert_to_lines(shore_clip)
  # If no land within rectangle, return dmax for all bearings
  if (is.null(shore_clip)) {
    return(setNames(rep(dmax, length(bearings)), bearings))
  }
  # Calculate fetch
  if (all(spread == 0)) {
    # if no sub-bearings, just return distance to shore for each bearing
    fetch_res <- vapply(bearings,
      function(b) dist_shore(p, shore_clip, b, dmax, projected), 0)
  } else {
    # calculate the distance to shore for each sub-bearing
    bear_mat <- outer(bearings, spread, "+")
    dists <- vapply(bear_mat,
      function(b) dist_shore(p, shore_clip, b, dmax, projected), 0)
    dim(dists) <- dim(bear_mat)
    # return weighted means of the sub-bearing fetch values
    # with weights proportional to the cosine (relative to their main bearing)
    weights <- cospi(spread / 180)
    weights <- weights / sum(weights)
    fetch_res <- as.vector(dists %*% weights)
  }
  names(fetch_res) <- as.character(bearings)
  fetch_res
}
#' Calculate the fetch length for multiple points
#'
#' \code{fetch_len_multi} provides two methods to efficiently compute fetch length
#' for multiple points.
#'
#' With \code{method = "btree"}, the \code{\link[rgeos]{gBinarySTRtreeQuery}}
#' function from the rgeos package is called to determine which polygons in
#' \code{shoreline} could be within \code{dmax} of each point. This is a fast
#' calculation based on bounding box overlap.
#'
#' With \code{method = "clip"}, the \code{shoreline} layer is clipped to a polygon
#' formed by the union of rectangular buffers around each point.
#'
#' In both cases, \code{\link{fetch_len}} is then applied to each point,
#' using only the necessary portion of the shoreline.
#'
#' Generally, the "clip" method will produce the biggest time savings when
#' points are clustered within distances less than \code{dmax} (so their
#' clipping rectangles overlap), whereas the "btree" method will be more
#' efficient when the shoreline is composed of multiple polygons and points are
#' distant from each other.
#'
#' @param pts A SpatialPoints* object.
#' @param bearings Vector of bearings, in degrees.
#' @param shoreline SpatialLines* or SpatialPolygons* object representing the
#'  shoreline.
#' @param dmax Maximum value of fetch length, returned if there is no land
#'  within a distance of \code{dmax} from a given bearing.
#' @param spread Vector of relative bearings (in degrees) for which
#'  to calculate fetch around each main bearing.
#' @param method Whether to use the "btree" (default) or "clip" method.
#'  See below for more details.
#' @param projected Should projected coordinates be used to calculate fetch?
#' @return A matrix of fetch lengths, with one row by point in \code{pts} and
#'  one column by bearing in \code{bearings}.
#' @seealso \code{\link{fetch_len}} for details on the fetch length computation.
#' @export
fetch_len_multi <- function(pts, bearings, shoreline, dmax,
                            spread = 0, method = "btree", projected = FALSE) {
  # Check inputs
  # BUG FIX: match.arg() returns the (possibly partially) matched choice;
  # the original discarded that value, so e.g. method = "b" passed
  # validation but then failed the exact string comparisons below.
  method <- match.arg(method, choices = c("btree", "clip"))
  if (!is(pts, "SpatialPoints")) stop("pts must be a SpatialPoints* object.")
  pts <- as(pts, "SpatialPoints") # remove DataFrame part if there is one
  if (is(shoreline, "SpatialLines")) {
    shoreline <- as(shoreline, "SpatialLines")
  } else if (is(shoreline, "SpatialPolygons")) {
    shoreline <- as(shoreline, "SpatialPolygons")
  } else {
    stop("shoreline must be a SpatialLines* or SpatialPolygons* object.")
  }
  if (projected) {
    if (!is.projected(pts) || !is.projected(shoreline)) {
      stop("cannot use long/lat coordinates if projected = TRUE.")
    }
    if (proj4string(pts) != proj4string(shoreline)) {
      stop("projections of pts and shoreline do not match.")
    }
  } else if (is.projected(pts) || is.projected(shoreline)) {
    stop(paste("pts and shoreline must have unprojected (long/lat)",
               "coordinates if projected = FALSE."))
  }
  if (!is.vector(bearings, "numeric")) stop("bearings must be a numeric vector.")
  if (!is.vector(spread, "numeric")) stop("spread must be a numeric vector.")
  if (!is.vector(dmax, "numeric") || length(dmax) != 1 || dmax <= 0) {
    stop("dmax must be a single number greater than 0.")
  }
  # Create rectangular buffers around each point
  rect_list <- lapply(seq_along(pts),
                      function(i) get_clip_rect(pts[i], dmax, projected))
  rect_buf <- do.call(rbind, c(rect_list, makeUniqueIDs = TRUE))
  if (method == "btree") {
    # Generate list of shoreline polygon IDs with bounding box overlap for each rectangle
    btree <- rgeos::gBinarySTRtreeQuery(shoreline, rect_buf)
    # Calculate fetch for point at index i using btree
    fetch_i <- function(i) {
      if (is.null(btree[[i]])) {
        # no shoreline near this point: open water in every direction
        setNames(rep(dmax, length(bearings)), bearings)
      } else {
        fetch_len(pts[i], bearings, shoreline[btree[[i]]], dmax,
                  spread, projected, check_inputs = FALSE)
      }
    }
    # Calculate fetch for all points and return a (points x bearings) matrix
    fetch_res <- t(vapply(seq_along(pts), fetch_i, rep(0, length(bearings))))
  } else { # method == "clip"
    # Clip shoreline to a merged buffer around all points
    rect_buf <- rgeos::gUnaryUnion(rect_buf)
    sub_shore <- rgeos::gIntersection(shoreline, rect_buf, byid = TRUE)
    fetch_res <- t(
      vapply(seq_along(pts),
             function(i) fetch_len(pts[i], bearings, sub_shore, dmax,
                                   spread, projected, check_inputs = FALSE),
             rep(0, length(bearings)))
    )
  }
  fetch_res
}
#### Helper functions below are not exported by the package ####
# Distance from point p to the shoreline along bearing `bear`, capped at
# dmax. Distances are in the projection's units when projected = TRUE,
# in meters (WGS84 geodesic) otherwise.
dist_shore <- function(p, shoreline, bear, dmax, projected) {
  if (projected) {
    # Draw a straight line of length dmax from p along the given bearing
    bline <- bearing_line(p, bear, dmax)
    # Return (minimum) distance from p to intersection of bline and shoreline
    # If no intersection, fetch is dmax
    land_int <- rgeos::gIntersection(bline, shoreline)
    if (is.null(land_int)) {
      dmax
    } else {
      rgeos::gDistance(p, land_int)
    }
  } else {
    # Draw geodesic line of length dmax with given start point and bearing
    # Line drawn with a point every dmax/500
    geo_line <- geosphere::gcIntermediate(p, geosphere::destPoint(p, bear, dmax),
                    n = 500, sp = TRUE, breakAtDateLine = TRUE, addStartEnd = TRUE)
    # re-project the geodesic to the shoreline's CRS before intersecting
    geo_line <- spTransform(geo_line, CRS(proj4string(shoreline)))
    # Return (minimum) distance from p to intersection of geo_line and shoreline
    # If no intersection, fetch is dmax
    land_int <- rgeos::gIntersection(geo_line, shoreline)
    if (is.null(land_int)) {
      dmax
    } else {
      dist_min(p, land_int)
    }
  }
}
# Returns the minimum distance between the focal point p and inters, the result
# of an intersection between SpatialLines, which may contain points, lines,
# or a mixed collection of both. Falls back to NA (with a warning) for any
# other geometry type.
dist_min <- function(p, inters) {
  # inherits() replaces class(x) == "..." (class() can return a vector and
  # misses subclasses, which would silently hit the NA branch)
  if (inherits(inters, "SpatialPoints")) {
    # crossing points: direct geodesic distances
    min(geosphere::distGeo(p, inters))
  } else if (inherits(inters, "SpatialLines")) {
    # overlapping segments: measure to their endpoints
    min(geosphere::distGeo(p, lines_to_endpts(inters)))
  } else if (inherits(inters, "SpatialCollections")) {
    # mixed result: pool point coordinates and line endpoints
    coord_mat <- rbind(coordinates(inters@pointobj),
                       coordinates(lines_to_endpts(inters@lineobj)))
    min(geosphere::distGeo(p, coord_mat))
  } else {
    # BUG FIX: collapse the coordinates into one string; the original
    # paste("Point at", c(p[1], p[2]), ...) produced a length-2 character
    # vector that warning() concatenated into a garbled message.
    warning(paste("Point at", paste(coordinates(p), collapse = " "),
                  "cannot calculate distance to shore, returning NA."))
    NA
  }
}
|
/R/fetch_len.R
|
no_license
|
pmarchand1/waver
|
R
| false
| false
| 13,711
|
r
|
#' Calculate the fetch length around a point
#'
#' Given a point, a shoreline layer and a vector of wind directions (bearings),
#' \code{fetch_len} calculates the distance from point to shore for each bearing.
#'
#' The fetch length (or fetch) is the distance of open water over which the wind
#' can blow in a specific direction. Note that bearings represent the direction
#' from where the wind originates.
#'
#' The optional \code{spread} argument defines relative directions that are
#' added to each main bearing to produce a set of sub-bearings. The fetch lengths
#' calculated for each sub-bearing are averaged with weights proportional to
#' \code{cos(spread)}. By default, \code{spread = 0} and fetch length is
#' calculated for the main bearings only.
#'
#' If \code{projected} is FALSE (the default), the input data must be in WGS84
#' geographic (longitude, latitude) coordinates. Geodesic distances are calculated
#' using the \code{\link[geosphere]{distGeo}} function from the geosphere R
#' package. All distances are expressed in meters.
#'
#' If \code{projected} is TRUE, the input data (\code{p} and \code{shoreline})
#' must share the same projection. Projected distances are calculated with the
#' rgeos R package. All distances are expressed in the projection's coordinates.
#'
#' If the shoreline layer is given as SpatialPolygons*, the function verifies
#' that the input point is outside all polygons (i.e. in water). If this is
#' not the case, it issues a warning and returns a vector of \code{NA}.
#'
#' @param p SpatialPoints* object of length 1 (single point).
#' @param bearings Vector of bearings, in degrees.
#' @param shoreline SpatialLines* or SpatialPolygons* object representing the
#'  shoreline.
#' @param dmax Maximum value of fetch length, returned if there is no land
#'  within a distance of \code{dmax} from a given bearing.
#' @param spread Vector of relative bearings (in degrees) for which
#'  to calculate fetch around each main bearing (see details).
#' @param projected Should projected coordinates be used to calculate fetch?
#' @param check_inputs Should the validity of inputs be checked? It is
#'  recommended to keep this TRUE, unless this function is called repeatedly from
#'  another function that already checks inputs.
#' @return A named vector representing the fetch length for each direction
#'  given in \code{bearings}.
#' @examples
#'  pt <- SpatialPoints(matrix(c(0, 0), ncol = 2),
#'                      proj4string = CRS("+proj=longlat"))
#'  # Shoreline is a rectangle from (-0.2, 0.25) to (0.3, 0.5)
#'  rect <- Polygon(cbind(c(rep(-0.2, 2), rep(0.3, 2), -0.2),
#'                        c(0.25, rep(0.3, 2), rep(0.25, 2))))
#'  land <- SpatialPolygons(list(Polygons(list(rect), ID = 1)),
#'                         proj4string = CRS("+proj=longlat"))
#'  fetch_len(pt, bearings = c(0, 45, 225, 315), land,
#'            dmax = 50000, spread = c(-10, 0, 10))
#' @seealso \code{\link{fetch_len_multi}} for an efficient alternative when
#'  computing fetch length for multiple points.
#' @export
fetch_len <- function(p, bearings, shoreline, dmax,
                      spread = 0, projected = FALSE, check_inputs = TRUE) {
  if (check_inputs) {
    if (!is(p, "SpatialPoints")) stop("p must be a SpatialPoints* object.")
    p <- as(p, "SpatialPoints") # remove DataFrame part if there is one
    if (length(p) != 1) stop("p must be a single point.")
    if (!(is(shoreline, "SpatialLines") || is(shoreline, "SpatialPolygons"))) {
      stop("shoreline must be a SpatialLines* or SpatialPolygons* object.")
    }
    if (projected) {
      if (!is.projected(p) || !is.projected(shoreline)) {
        stop("cannot use long/lat coordinates if projected = TRUE.")
      }
      if (proj4string(p) != proj4string(shoreline)) {
        stop("projections of p and shoreline do not match.")
      }
    } else if (is.projected(p) || is.projected(shoreline)) {
      stop(paste("p and shoreline must have unprojected (long/lat)",
                 "coordinates if projected = FALSE."))
    }
    if (!is.vector(bearings, "numeric")) stop("bearings must be a numeric vector.")
    if (!is.vector(spread, "numeric")) stop("spread must be a numeric vector.")
    if (!is.vector(dmax, "numeric") || length(dmax) != 1 || dmax <= 0) {
      stop("dmax must be a single number greater than 0.")
    }
  }
  # If shoreline is a polygons (land) layer, check that point is not on land
  if (is(shoreline, "SpatialPolygons")) {
    in_water <- is.null(rgeos::gIntersects(p, shoreline, byid = TRUE,
                                           returnDense = FALSE)[[1]])
    if (!in_water) {
      warning("point on land, returning NA")
      return(setNames(rep(NA, length(bearings)), bearings))
    }
  }
  # Clip shoreline layer to a rectangle around point
  # to guarantee at least dmax on each side
  clip_rect <- get_clip_rect(p, dmax, projected)
  # BUG FIX: the original called return() inside the innermost error
  # handler, which only exits the handler function -- the NA vector was
  # then assigned to shore_clip and processing continued. Instead, return
  # the condition object as a sentinel and bail out of fetch_len below.
  shore_clip <- tryCatch(
    rgeos::gIntersection(shoreline, clip_rect, byid = TRUE),
    # If it fails, try byid = FALSE
    error = function(e) tryCatch(
      rgeos::gIntersection(shoreline, clip_rect, byid = FALSE),
      error = function(e) e
    )
  )
  if (inherits(shore_clip, "error")) {
    warning("Error clipping shoreline, returning NA")
    return(setNames(rep(NA, length(bearings)), bearings))
  }
  # Convert any polygons to lines to get line-line intersections later
  shore_clip <- convert_to_lines(shore_clip)
  # If no land within rectangle, return dmax for all bearings
  if (is.null(shore_clip)) {
    return(setNames(rep(dmax, length(bearings)), bearings))
  }
  # Calculate fetch
  if (all(spread == 0)) {
    # if no sub-bearings, just return distance to shore for each bearing
    fetch_res <- vapply(bearings,
      function(b) dist_shore(p, shore_clip, b, dmax, projected), 0)
  } else {
    # calculate the distance to shore for each sub-bearing
    bear_mat <- outer(bearings, spread, "+")
    dists <- vapply(bear_mat,
      function(b) dist_shore(p, shore_clip, b, dmax, projected), 0)
    dim(dists) <- dim(bear_mat)
    # return weighted means of the sub-bearing fetch values
    # with weights proportional to the cosine (relative to their main bearing)
    weights <- cospi(spread / 180)
    weights <- weights / sum(weights)
    fetch_res <- as.vector(dists %*% weights)
  }
  names(fetch_res) <- as.character(bearings)
  fetch_res
}
#' Calculate the fetch length for multiple points
#'
#' \code{fetch_len_multi} provides two methods to efficiently compute fetch length
#' for multiple points.
#'
#' With \code{method = "btree"}, the \code{\link[rgeos]{gBinarySTRtreeQuery}}
#' function from the rgeos package is called to determine which polygons in
#' \code{shoreline} could be within \code{dmax} of each point. This is a fast
#' calculation based on bounding box overlap.
#'
#' With \code{method = "clip"}, the \code{shoreline} layer is clipped to a polygon
#' formed by the union of rectangular buffers around each point.
#'
#' In both cases, \code{\link{fetch_len}} is then applied to each point,
#' using only the necessary portion of the shoreline.
#'
#' Generally, the "clip" method will produce the biggest time savings when
#' points are clustered within distances less than \code{dmax} (so their
#' clipping rectangles overlap), whereas the "btree" method will be more
#' efficient when the shoreline is composed of multiple polygons and points are
#' distant from each other.
#'
#' @param pts A SpatialPoints* object.
#' @param bearings Vector of bearings, in degrees.
#' @param shoreline SpatialLines* or SpatialPolygons* object representing the
#'  shoreline.
#' @param dmax Maximum value of fetch length, returned if there is no land
#'  within a distance of \code{dmax} from a given bearing.
#' @param spread Vector of relative bearings (in degrees) for which
#'  to calculate fetch around each main bearing.
#' @param method Whether to use the "btree" (default) or "clip" method.
#'  See below for more details.
#' @param projected Should projected coordinates be used to calculate fetch?
#' @return A matrix of fetch lengths, with one row by point in \code{pts} and
#'  one column by bearing in \code{bearings}.
#' @seealso \code{\link{fetch_len}} for details on the fetch length computation.
#' @export
fetch_len_multi <- function(pts, bearings, shoreline, dmax,
                            spread = 0, method = "btree", projected = FALSE) {
  # Check inputs
  # BUG FIX: match.arg() returns the (possibly partially) matched choice;
  # the original discarded that value, so e.g. method = "b" passed
  # validation but then failed the exact string comparisons below.
  method <- match.arg(method, choices = c("btree", "clip"))
  if (!is(pts, "SpatialPoints")) stop("pts must be a SpatialPoints* object.")
  pts <- as(pts, "SpatialPoints") # remove DataFrame part if there is one
  if (is(shoreline, "SpatialLines")) {
    shoreline <- as(shoreline, "SpatialLines")
  } else if (is(shoreline, "SpatialPolygons")) {
    shoreline <- as(shoreline, "SpatialPolygons")
  } else {
    stop("shoreline must be a SpatialLines* or SpatialPolygons* object.")
  }
  if (projected) {
    if (!is.projected(pts) || !is.projected(shoreline)) {
      stop("cannot use long/lat coordinates if projected = TRUE.")
    }
    if (proj4string(pts) != proj4string(shoreline)) {
      stop("projections of pts and shoreline do not match.")
    }
  } else if (is.projected(pts) || is.projected(shoreline)) {
    stop(paste("pts and shoreline must have unprojected (long/lat)",
               "coordinates if projected = FALSE."))
  }
  if (!is.vector(bearings, "numeric")) stop("bearings must be a numeric vector.")
  if (!is.vector(spread, "numeric")) stop("spread must be a numeric vector.")
  if (!is.vector(dmax, "numeric") || length(dmax) != 1 || dmax <= 0) {
    stop("dmax must be a single number greater than 0.")
  }
  # Create rectangular buffers around each point
  rect_list <- lapply(seq_along(pts),
                      function(i) get_clip_rect(pts[i], dmax, projected))
  rect_buf <- do.call(rbind, c(rect_list, makeUniqueIDs = TRUE))
  if (method == "btree") {
    # Generate list of shoreline polygon IDs with bounding box overlap for each rectangle
    btree <- rgeos::gBinarySTRtreeQuery(shoreline, rect_buf)
    # Calculate fetch for point at index i using btree
    fetch_i <- function(i) {
      if (is.null(btree[[i]])) {
        # no shoreline near this point: open water in every direction
        setNames(rep(dmax, length(bearings)), bearings)
      } else {
        fetch_len(pts[i], bearings, shoreline[btree[[i]]], dmax,
                  spread, projected, check_inputs = FALSE)
      }
    }
    # Calculate fetch for all points and return a (points x bearings) matrix
    fetch_res <- t(vapply(seq_along(pts), fetch_i, rep(0, length(bearings))))
  } else { # method == "clip"
    # Clip shoreline to a merged buffer around all points
    rect_buf <- rgeos::gUnaryUnion(rect_buf)
    sub_shore <- rgeos::gIntersection(shoreline, rect_buf, byid = TRUE)
    fetch_res <- t(
      vapply(seq_along(pts),
             function(i) fetch_len(pts[i], bearings, sub_shore, dmax,
                                   spread, projected, check_inputs = FALSE),
             rep(0, length(bearings)))
    )
  }
  fetch_res
}
#### Helper functions below are not exported by the package ####
# Distance from point p to the shoreline along bearing `bear`, capped at
# dmax. Distances are in the projection's units when projected = TRUE,
# in meters (WGS84 geodesic) otherwise.
dist_shore <- function(p, shoreline, bear, dmax, projected) {
  if (projected) {
    # Draw a straight line of length dmax from p along the given bearing
    bline <- bearing_line(p, bear, dmax)
    # Return (minimum) distance from p to intersection of bline and shoreline
    # If no intersection, fetch is dmax
    land_int <- rgeos::gIntersection(bline, shoreline)
    if (is.null(land_int)) {
      dmax
    } else {
      rgeos::gDistance(p, land_int)
    }
  } else {
    # Draw geodesic line of length dmax with given start point and bearing
    # Line drawn with a point every dmax/500
    geo_line <- geosphere::gcIntermediate(p, geosphere::destPoint(p, bear, dmax),
                    n = 500, sp = TRUE, breakAtDateLine = TRUE, addStartEnd = TRUE)
    # re-project the geodesic to the shoreline's CRS before intersecting
    geo_line <- spTransform(geo_line, CRS(proj4string(shoreline)))
    # Return (minimum) distance from p to intersection of geo_line and shoreline
    # If no intersection, fetch is dmax
    land_int <- rgeos::gIntersection(geo_line, shoreline)
    if (is.null(land_int)) {
      dmax
    } else {
      dist_min(p, land_int)
    }
  }
}
# Returns the minimum distance between the focal point p and inters, the result
# of an intersection between SpatialLines, which may contain points, lines,
# or a mixed collection of both. Falls back to NA (with a warning) for any
# other geometry type.
dist_min <- function(p, inters) {
  # inherits() replaces class(x) == "..." (class() can return a vector and
  # misses subclasses, which would silently hit the NA branch)
  if (inherits(inters, "SpatialPoints")) {
    # crossing points: direct geodesic distances
    min(geosphere::distGeo(p, inters))
  } else if (inherits(inters, "SpatialLines")) {
    # overlapping segments: measure to their endpoints
    min(geosphere::distGeo(p, lines_to_endpts(inters)))
  } else if (inherits(inters, "SpatialCollections")) {
    # mixed result: pool point coordinates and line endpoints
    coord_mat <- rbind(coordinates(inters@pointobj),
                       coordinates(lines_to_endpts(inters@lineobj)))
    min(geosphere::distGeo(p, coord_mat))
  } else {
    # BUG FIX: collapse the coordinates into one string; the original
    # paste("Point at", c(p[1], p[2]), ...) produced a length-2 character
    # vector that warning() concatenated into a garbled message.
    warning(paste("Point at", paste(coordinates(p), collapse = " "),
                  "cannot calculate distance to shore, returning NA."))
    NA
  }
}
|
# Forecast visualisation for the Portugal and Ghana markets.
# Reads observed order counts and regression predictions (semicolon-
# separated CSVs) from the working directory and overlays them.
# Assumed column layout: data files use col 2 = day, col 3 = orders;
# prediction files use col 1 = day, col 2 = predicted orders --
# TODO(review): confirm against the CSV headers.
Portugal <-
  read.csv("portugal_data.csv", header = TRUE, sep = ";")
regpred <- read.csv("Reg_p_whole.csv", header = TRUE, sep = ";")
# Observed series (blue) overlaid with the fitted curve for days 1-91 (red)
plot(Portugal[1:59,2], Portugal[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(800,2700),
     xlab = "Days", ylab = "Number of orders", main = "Forecasting by Regression Analysis")
points(regpred[c(1:91),1], regpred[c(1:91),2], type = "l", col = "red")
# Same observations with the forecast split: days 92-123 (green) drawn
# first so the days 59-91 segment (red) plots on top
plot(Portugal[1:59,2], Portugal[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(800,2700),
     xlab = "Days", ylab = "Number of orders")
points(regpred[c(92:123),1], regpred[c(92:123),2], type = "l", col = "green")
points(regpred[c(59:91),1], regpred[c(59:91),2], type = "l", col = "red")
# Repeat the same two plots for the Ghana market
Ghana <- read.csv("ghana_data.csv", header = TRUE, sep = ";")
regpred2 <- read.csv("Reg_g_whole.csv", header = TRUE, sep = ";")
plot(Ghana[1:59,2], Ghana[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(80,470),
     xlab = "Days", ylab = "Number of orders")
points(regpred2[c(1:91),1], regpred2[c(1:91),2], type = "l", col = "red")
plot(Ghana[1:59,2], Ghana[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(80,500),
     xlab = "Days", ylab = "Number of orders")
points(regpred2[c(92:123),1], regpred2[c(92:123),2], type = "l", col = "green")
points(regpred2[c(59:91),1], regpred2[c(59:91),2], type = "l", col = "red")
|
/Forecast_Graphs_Umut_Yilmaz.R
|
no_license
|
uyilmaz16/Time_Series_Forecast_Food_Delivery_App
|
R
| false
| false
| 1,392
|
r
|
# Forecast visualisation for the Portugal and Ghana markets.
# Reads observed order counts and regression predictions (semicolon-
# separated CSVs) from the working directory and overlays them.
# Assumed column layout: data files use col 2 = day, col 3 = orders;
# prediction files use col 1 = day, col 2 = predicted orders --
# TODO(review): confirm against the CSV headers.
Portugal <-
  read.csv("portugal_data.csv", header = TRUE, sep = ";")
regpred <- read.csv("Reg_p_whole.csv", header = TRUE, sep = ";")
# Observed series (blue) overlaid with the fitted curve for days 1-91 (red)
plot(Portugal[1:59,2], Portugal[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(800,2700),
     xlab = "Days", ylab = "Number of orders", main = "Forecasting by Regression Analysis")
points(regpred[c(1:91),1], regpred[c(1:91),2], type = "l", col = "red")
# Same observations with the forecast split: days 92-123 (green) drawn
# first so the days 59-91 segment (red) plots on top
plot(Portugal[1:59,2], Portugal[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(800,2700),
     xlab = "Days", ylab = "Number of orders")
points(regpred[c(92:123),1], regpred[c(92:123),2], type = "l", col = "green")
points(regpred[c(59:91),1], regpred[c(59:91),2], type = "l", col = "red")
# Repeat the same two plots for the Ghana market
Ghana <- read.csv("ghana_data.csv", header = TRUE, sep = ";")
regpred2 <- read.csv("Reg_g_whole.csv", header = TRUE, sep = ";")
plot(Ghana[1:59,2], Ghana[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(80,470),
     xlab = "Days", ylab = "Number of orders")
points(regpred2[c(1:91),1], regpred2[c(1:91),2], type = "l", col = "red")
plot(Ghana[1:59,2], Ghana[1:59,3], type = "l", col = "blue", xlim = c(0, 100), ylim = c(80,500),
     xlab = "Days", ylab = "Number of orders")
points(regpred2[c(92:123),1], regpred2[c(92:123),2], type = "l", col = "green")
points(regpred2[c(59:91),1], regpred2[c(59:91),2], type = "l", col = "red")
|
# Worked example for VLF::overall.matched(), extracted from the package
# help page. The ##D lines are the example body kept in "don't run" form;
# they require the VLF package's bundled bird data sets to execute.
library(VLF)
### Name: overall.matched
### Title: Final Matching
### Aliases: overall.matched
### ** Examples
## Not run:
##D #Nucleotide VLF analysis
##D data(birds)
##D species.names <- birds[,2]
##D specimen.Number <- nrow(birds)
##D rownames(birds) <- species.names
##D Nuc.count <- count.function(birds, specimen.Number, 648)
##D frequency.matrix <- ffrequency.matrix.function(Nuc.count, 648)
##D birdSpec.freq <- specimen.frequencies(frequency.matrix, birds, specimen.Number, species.names, 648)
##D Bird_specimen_VLFcount <- VLF.count.spec(birdSpec.freq, 0.001, 648)
##D bird_VLFconvert <- VLF.convert.matrix(birds, birdSpec.freq, 0.001, 648)
##D bird_VLFnuc <- VLF.nucleotides(bird_VLFconvert, birds, 648)
##D bird_VLFreduced <- VLF.reduced(bird_VLFnuc, Bird_specimen_VLFcount, 648)
##D bird_species <- separate(bird_VLFreduced)
##D birds_singleAndShared <- find.singles(bird_species, 648)
##D
##D #Amino Acid VLF Analysis
##D data(birds_aminoAcids)
##D birds_aa_speciesNames <- birds_aminoAcids[,2]
##D aminoAcids_specimenNumber <- nrow(birds_aminoAcids)
##D birds_aminoAcid_count <- aa.count.function(birds_aminoAcids, 216)
##D aa_freq.Mat <- aa.frequency.matrix.function(birds_aminoAcid_count, 216)
##D bird_aa_freq <- aa.specimen.frequencies(aa_freq.Mat, birds_aminoAcids, birds_aa_speciesNames, 216)
##D aminoAcid_Modal <- aa.MODE(aa_freq.Mat, 216)
##D birds_aminoAcid_specimenVLFcount <- aa.VLF.count.spec(bird_aa_freq, 0.001, 216)
##D birds_aaVLFconvert <- aa.VLF.convert.matrix(birds_aminoAcids, bird_aa_freq, 0.001, 216)
##D birds_aminoAcidVLFs <- VLF.aminoAcids(birds_aaVLFconvert, birds_aminoAcids, 216)
##D birds_aaVLFreduced <- aa.VLF.reduced(birds_aminoAcidVLFs, birds_aminoAcid_specimenVLFcount, 216)
##D birds_aaSpecies <- separate(birds_aaVLFreduced)
##D birds_aminoAcid_singleAndShared <- aa.find.singles(birds_aaSpecies, 216)
##D
##D #Concordance Analysis
##D VLF_match <- find.matching(bird_VLFreduced, birds_aaVLFreduced, 648, 216)
##D position_matchingNuc <- nucleotide.matching.positions(VLF_match[[2]], 648)
##D position_matchingAA <- aminoAcid.matching.positions(VLF_match[[1]], 216)
##D matching_comparison <- overall.matched(position_matchingNuc, position_matchingAA, 648, 216)
## End(Not run)
|
/data/genthat_extracted_code/VLF/examples/overall.matched.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,239
|
r
|
library(VLF)
### Name: overall.matched
### Title: Final Matching
### Aliases: overall.matched
### ** Examples
## Not run:
##D #Nucleotide VLF analysis
##D data(birds)
##D species.names <- birds[,2]
##D specimen.Number <- nrow(birds)
##D rownames(birds) <- species.names
##D Nuc.count <- count.function(birds, specimen.Number, 648)
##D frequency.matrix <- ffrequency.matrix.function(Nuc.count, 648)
##D birdSpec.freq <- specimen.frequencies(frequency.matrix, birds, specimen.Number, species.names, 648)
##D Bird_specimen_VLFcount <- VLF.count.spec(birdSpec.freq, 0.001, 648)
##D bird_VLFconvert <- VLF.convert.matrix(birds, birdSpec.freq, 0.001, 648)
##D bird_VLFnuc <- VLF.nucleotides(bird_VLFconvert, birds, 648)
##D bird_VLFreduced <- VLF.reduced(bird_VLFnuc, Bird_specimen_VLFcount, 648)
##D bird_species <- separate(bird_VLFreduced)
##D birds_singleAndShared <- find.singles(bird_species, 648)
##D
##D #Amino Acid VLF Analysis
##D data(birds_aminoAcids)
##D birds_aa_speciesNames <- birds_aminoAcids[,2]
##D aminoAcids_specimenNumber <- nrow(birds_aminoAcids)
##D birds_aminoAcid_count <- aa.count.function(birds_aminoAcids, 216)
##D aa_freq.Mat <- aa.frequency.matrix.function(birds_aminoAcid_count, 216)
##D bird_aa_freq <- aa.specimen.frequencies(aa_freq.Mat, birds_aminoAcids, birds_aa_speciesNames, 216)
##D aminoAcid_Modal <- aa.MODE(aa_freq.Mat, 216)
##D birds_aminoAcid_specimenVLFcount <- aa.VLF.count.spec(bird_aa_freq, 0.001, 216)
##D birds_aaVLFconvert <- aa.VLF.convert.matrix(birds_aminoAcids, bird_aa_freq, 0.001, 216)
##D birds_aminoAcidVLFs <- VLF.aminoAcids(birds_aaVLFconvert, birds_aminoAcids, 216)
##D birds_aaVLFreduced <- aa.VLF.reduced(birds_aminoAcidVLFs, birds_aminoAcid_specimenVLFcount, 216)
##D birds_aaSpecies <- separate(birds_aaVLFreduced)
##D birds_aminoAcid_singleAndShared <- aa.find.singles(birds_aaSpecies, 216)
##D
##D #Concordance Analysis
##D VLF_match <- find.matching(bird_VLFreduced, birds_aaVLFreduced, 648, 216)
##D position_matchingNuc <- nucleotide.matching.positions(VLF_match[[2]], 648)
##D position_matchingAA <- aminoAcid.matching.positions(VLF_match[[1]], 216)
##D matching_comparison <- overall.matched(position_matchingNuc, position_matchingAA, 648, 216)
## End(Not run)
|
################################################################################
context("test-nlines.R")
################################################################################
# nlines() should count physical lines in a file, independent of line content.
test_that("'nlines' works", {
  # Row indices of the built-in iris data set, used to resample rows below.
  SEQ <- seq_len(nrow(iris))
  # 24 test strings: two empty, one blank, and 21 single-line strings of
  # geometrically increasing length (iris rows pasted into one long line).
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(SEQ, i, TRUE), ], collapse = " ")
  }))
  # Ten rounds: write a resampling (with replacement, default size = 24) of
  # the strings to a temp file; nlines() must always report exactly 24 lines,
  # even for empty/blank lines and very long lines.
  replicate(10, {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    expect_identical(nlines(tmp), 24L)
  })
})
################################################################################
|
/tests/testthat/test-nlines.R
|
no_license
|
Kat-Jump/bigreadr
|
R
| false
| false
| 611
|
r
|
################################################################################
context("test-nlines.R")
################################################################################
# nlines() should count physical lines in a file, independent of line content.
test_that("'nlines' works", {
  # Row indices of the built-in iris data set, used to resample rows below.
  SEQ <- seq_len(nrow(iris))
  # 24 test strings: two empty, one blank, and 21 single-line strings of
  # geometrically increasing length (iris rows pasted into one long line).
  strings <- c("", "", " ", sapply(10^(seq(0, 4, by = 0.2)), function(i) {
    paste(as.matrix(iris)[sample(SEQ, i, TRUE), ], collapse = " ")
  }))
  # Ten rounds: write a resampling (with replacement, default size = 24) of
  # the strings to a temp file; nlines() must always report exactly 24 lines,
  # even for empty/blank lines and very long lines.
  replicate(10, {
    writeLines(sample(strings, replace = TRUE), tmp <- tempfile())
    expect_identical(nlines(tmp), 24L)
  })
})
################################################################################
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test splits,
# keep only the mean()/std() features, and write per-subject/activity means.
# Expects the UCI HAR files (train/, test/, activity_labels.txt, features.txt)
# under the current working directory.
path <- getwd()
# Number of rows to read from each file; -1 means "read all rows".
nrows <- -1
# Read one data split ("train" or "test") and return a data frame with the
# subject id, the activity as a labelled factor, and the mean()/std() features.
getdata <- function(set) {
  # subject ids, one per observation row
  subject <- read.table(file = file.path(path, set, paste0("subject_", set, ".txt")), nrows = nrows)
  # activity codes, mapped to descriptive labels from activity_labels.txt
  y <- read.table(file = file.path(path, set, paste0("y_",set,".txt")), nrows = nrows)
  act <- read.table(file = file.path(path, "activity_labels.txt"))
  y <- factor(y[, 1], levels = act[, 1], labels = act[, 2])
  # feature matrix, with column names taken from features.txt
  X <- read.table(file = file.path(path, set, paste0("X_", set, ".txt")), nrows = nrows)
  fea <- read.table(file = file.path(path, "features.txt"))[, 2]
  colnames(X) <- fea
  # keep only mean() and std() features; "[()]" matches the literal parentheses,
  # so e.g. meanFreq() columns are excluded
  ii <- c(grep("mean[()]", fea), grep("std[()]", fea))
  cbind(subject = subject[, 1], activity = y, X[ii])
}
# merge data sets
train <- getdata("train")
test <- getdata("test")
dataset <- rbind(train, test)
# calc means per subject and activity for each var, and format results
s <- split(dataset[, -(1:2)], list(dataset$subject, dataset$activity), drop = TRUE)
d <- sapply(s, colMeans)
# split() names groups as "<subject>.<activity>"; recover the two parts.
# (Assumes activity labels contain no "." — true for this data set.)
n <- unlist(strsplit(colnames(d), "[.]"))
subject <- n[seq(1, length(n), 2)]
activity <- n[seq(2, length(n), 2)]
dataset.means <- cbind(subject = subject, activity = activity, as.data.frame(t(d)))
write.table(dataset.means, "uci_har_dataset_means.txt", row.names = FALSE, quote = FALSE)
|
/run_analysis.R
|
no_license
|
vladi81/Data-Science-Specialization
|
R
| false
| false
| 1,268
|
r
|
# Getting & Cleaning Data course project: merge the UCI HAR train/test splits,
# keep only the mean()/std() features, and write per-subject/activity means.
# Expects the UCI HAR files (train/, test/, activity_labels.txt, features.txt)
# under the current working directory.
path <- getwd()
# Number of rows to read from each file; -1 means "read all rows".
nrows <- -1
# Read one data split ("train" or "test") and return a data frame with the
# subject id, the activity as a labelled factor, and the mean()/std() features.
getdata <- function(set) {
  # subject ids, one per observation row
  subject <- read.table(file = file.path(path, set, paste0("subject_", set, ".txt")), nrows = nrows)
  # activity codes, mapped to descriptive labels from activity_labels.txt
  y <- read.table(file = file.path(path, set, paste0("y_",set,".txt")), nrows = nrows)
  act <- read.table(file = file.path(path, "activity_labels.txt"))
  y <- factor(y[, 1], levels = act[, 1], labels = act[, 2])
  # feature matrix, with column names taken from features.txt
  X <- read.table(file = file.path(path, set, paste0("X_", set, ".txt")), nrows = nrows)
  fea <- read.table(file = file.path(path, "features.txt"))[, 2]
  colnames(X) <- fea
  # keep only mean() and std() features; "[()]" matches the literal parentheses,
  # so e.g. meanFreq() columns are excluded
  ii <- c(grep("mean[()]", fea), grep("std[()]", fea))
  cbind(subject = subject[, 1], activity = y, X[ii])
}
# merge data sets
train <- getdata("train")
test <- getdata("test")
dataset <- rbind(train, test)
# calc means per subject and activity for each var, and format results
s <- split(dataset[, -(1:2)], list(dataset$subject, dataset$activity), drop = TRUE)
d <- sapply(s, colMeans)
# split() names groups as "<subject>.<activity>"; recover the two parts.
# (Assumes activity labels contain no "." — true for this data set.)
n <- unlist(strsplit(colnames(d), "[.]"))
subject <- n[seq(1, length(n), 2)]
activity <- n[seq(2, length(n), 2)]
dataset.means <- cbind(subject = subject, activity = activity, as.data.frame(t(d)))
write.table(dataset.means, "uci_har_dataset_means.txt", row.names = FALSE, quote = FALSE)
|
## Tune number of particles to get Var(log likelihood) roughly equal to 1.5
library(pomp)

# Data (as in Lorenz_data.npy): five observations of the three Lorenz-63
# coordinates at times 0.4, 0.8, ..., 2.0.
yobs = c(-13.55, -16.19, 30.77,
         1.51, -3.54, 14.27,
         -18.93, -21.09, 32.1,
         10.09, 9.88, 31.29,
         5.31, 6.76, 19.93)
yobs = matrix(yobs, ncol=3, byrow=TRUE)
colnames(yobs) = c("y1","y2","y3")
yobs = data.frame(yobs)

# State-space model: Euler discretisation (delta.t = 0.02) of the Lorenz
# system with additive N(0, 10*dt) noise on each coordinate, and independent
# Gaussian measurement error with sd exp(sigma). Parameters th1..th3 and
# sigma are held on the log scale.
lorenz = pomp(data=yobs, times=0.4*1:5, t0=0,
              ## nb Large x1,x2,x3 values can become non-finite.
              ## Zero likelihood is appropriate for these.
              dmeasure=Csnippet("
                if (!R_FINITE(x1) || !R_FINITE(x2) || !R_FINITE(x3)) {
                  lik = R_NegInf;
                } else {
                  lik = dnorm(y1,x1,exp(sigma),1) +
                    dnorm(y2,x2,exp(sigma),1) +
                    dnorm(y3,x3,exp(sigma),1);
                }
                lik = (give_log) ? lik : exp(lik);
              "),
              rprocess=discrete_time(
                step.fun=Csnippet("
                  double tx1, tx2, tx3;
                  tx1 = x1 + dt*exp(th1)*(x2-x1) + rnorm(0,sqrt(10*dt));
                  tx2 = x2 + dt*(exp(th2)*x1 - x2 - x1*x3) +
                    rnorm(0, sqrt(10*dt));
                  tx3 = x3 + dt*(x1*x2 - exp(th3)*x3) +
                    rnorm(0, sqrt(10*dt));
                  x1 = tx1; x2 = tx2; x3 = tx3;
                "),
                delta.t=0.02),
              rinit=Csnippet("
                x1 = -30; x2 = 0; x3 = 30;
              "),
              obsnames=c("y1","y2","y3"),
              statenames=c("x1","x2","x3"),
              paramnames=c("th1","th2","th3","sigma"),
              params=c(th1=log(10), th2=log(28), th3=log(8/3), sigma=log(2))
)

# Grid of particle counts to try for the sigma = log(2) case.
nparticles_seq = seq(from=10, to=200, by=10)
sd_est = 0 * nparticles_seq
time_est = sd_est
cat("\n")
for (i in seq_along(nparticles_seq)) {
  Np = nparticles_seq[i]
  cat("Running", Np, "particles\n")
  start_time = Sys.time()
  # 100 independent particle-filter log-likelihood estimates at this Np.
  # (Bug fix: this replicate() used to be run a second time after the timing,
  # so sd_est came from runs that were never timed and all the particle-filter
  # work was done twice. The second loop below never had the duplicate.)
  temp = replicate(100, pfilter(lorenz, Np=Np, tol=0)@loglik)
  end_time = Sys.time()
  # NOTE(review): end_time - start_time is a difftime whose units are chosen
  # automatically (secs vs mins); use difftime(units = "secs") if timings must
  # be comparable across Np -- TODO confirm intent.
  time_est[i] = end_time - start_time
  # Filters that lost all particles report -Inf; drop them before taking sd.
  if (min(temp) == -Inf) {
    cat("Some particle filters failed\n")
    temp = temp[temp>-Inf]
  }
  sd_est[i] = sd(temp)
  cat("Standard deviation is", sd_est[i],"\n")
}
plot(nparticles_seq, sd_est, log="y")
abline(h=1.5)   # target: sd of the log-likelihood estimate roughly 1.5
plot(nparticles_seq, time_est)

## Now the case where sigma is fixed to 0.2
lorenz@params=c(th1=log(10), th2=log(28), th3=log(8/3), sigma=log(0.2))
nparticles_seq = c(1E3, 1E4, 1E5, 1E6)
sd_est = 0 * nparticles_seq
time_est = sd_est
cat("\n")
for (i in seq_along(nparticles_seq)) {
  Np = nparticles_seq[i]
  cat("Running", Np, "particles\n")
  start_time = Sys.time()
  temp = replicate(100, pfilter(lorenz, Np=Np, tol=0)@loglik)
  end_time = Sys.time()
  time_est[i] = end_time - start_time
  if (min(temp) == -Inf) {
    cat("Some particle filters failed\n")
    temp = temp[temp>-Inf]
  }
  sd_est[i] = sd(temp)
  cat("Standard deviation is", sd_est[i],"\n")
}
plot(nparticles_seq, sd_est, log="xy")
abline(h=1.5)
plot(nparticles_seq, time_est)
|
/pmcmc/loglike_tuning.R
|
permissive
|
WN1695173791/DistillingImportanceSampling
|
R
| false
| false
| 3,208
|
r
|
## Tune number of particles to get Var(log likelihood) roughly equal to 1.5
library(pomp)

# Data (as in Lorenz_data.npy): five observations of the three Lorenz-63
# coordinates at times 0.4, 0.8, ..., 2.0.
yobs = c(-13.55, -16.19, 30.77,
         1.51, -3.54, 14.27,
         -18.93, -21.09, 32.1,
         10.09, 9.88, 31.29,
         5.31, 6.76, 19.93)
yobs = matrix(yobs, ncol=3, byrow=TRUE)
colnames(yobs) = c("y1","y2","y3")
yobs = data.frame(yobs)

# State-space model: Euler discretisation (delta.t = 0.02) of the Lorenz
# system with additive N(0, 10*dt) noise on each coordinate, and independent
# Gaussian measurement error with sd exp(sigma). Parameters th1..th3 and
# sigma are held on the log scale.
lorenz = pomp(data=yobs, times=0.4*1:5, t0=0,
              ## nb Large x1,x2,x3 values can become non-finite.
              ## Zero likelihood is appropriate for these.
              dmeasure=Csnippet("
                if (!R_FINITE(x1) || !R_FINITE(x2) || !R_FINITE(x3)) {
                  lik = R_NegInf;
                } else {
                  lik = dnorm(y1,x1,exp(sigma),1) +
                    dnorm(y2,x2,exp(sigma),1) +
                    dnorm(y3,x3,exp(sigma),1);
                }
                lik = (give_log) ? lik : exp(lik);
              "),
              rprocess=discrete_time(
                step.fun=Csnippet("
                  double tx1, tx2, tx3;
                  tx1 = x1 + dt*exp(th1)*(x2-x1) + rnorm(0,sqrt(10*dt));
                  tx2 = x2 + dt*(exp(th2)*x1 - x2 - x1*x3) +
                    rnorm(0, sqrt(10*dt));
                  tx3 = x3 + dt*(x1*x2 - exp(th3)*x3) +
                    rnorm(0, sqrt(10*dt));
                  x1 = tx1; x2 = tx2; x3 = tx3;
                "),
                delta.t=0.02),
              rinit=Csnippet("
                x1 = -30; x2 = 0; x3 = 30;
              "),
              obsnames=c("y1","y2","y3"),
              statenames=c("x1","x2","x3"),
              paramnames=c("th1","th2","th3","sigma"),
              params=c(th1=log(10), th2=log(28), th3=log(8/3), sigma=log(2))
)

# Grid of particle counts to try for the sigma = log(2) case.
nparticles_seq = seq(from=10, to=200, by=10)
sd_est = 0 * nparticles_seq
time_est = sd_est
cat("\n")
for (i in seq_along(nparticles_seq)) {
  Np = nparticles_seq[i]
  cat("Running", Np, "particles\n")
  start_time = Sys.time()
  # 100 independent particle-filter log-likelihood estimates at this Np.
  # (Bug fix: this replicate() used to be run a second time after the timing,
  # so sd_est came from runs that were never timed and all the particle-filter
  # work was done twice. The second loop below never had the duplicate.)
  temp = replicate(100, pfilter(lorenz, Np=Np, tol=0)@loglik)
  end_time = Sys.time()
  # NOTE(review): end_time - start_time is a difftime whose units are chosen
  # automatically (secs vs mins); use difftime(units = "secs") if timings must
  # be comparable across Np -- TODO confirm intent.
  time_est[i] = end_time - start_time
  # Filters that lost all particles report -Inf; drop them before taking sd.
  if (min(temp) == -Inf) {
    cat("Some particle filters failed\n")
    temp = temp[temp>-Inf]
  }
  sd_est[i] = sd(temp)
  cat("Standard deviation is", sd_est[i],"\n")
}
plot(nparticles_seq, sd_est, log="y")
abline(h=1.5)   # target: sd of the log-likelihood estimate roughly 1.5
plot(nparticles_seq, time_est)

## Now the case where sigma is fixed to 0.2
lorenz@params=c(th1=log(10), th2=log(28), th3=log(8/3), sigma=log(0.2))
nparticles_seq = c(1E3, 1E4, 1E5, 1E6)
sd_est = 0 * nparticles_seq
time_est = sd_est
cat("\n")
for (i in seq_along(nparticles_seq)) {
  Np = nparticles_seq[i]
  cat("Running", Np, "particles\n")
  start_time = Sys.time()
  temp = replicate(100, pfilter(lorenz, Np=Np, tol=0)@loglik)
  end_time = Sys.time()
  time_est[i] = end_time - start_time
  if (min(temp) == -Inf) {
    cat("Some particle filters failed\n")
    temp = temp[temp>-Inf]
  }
  sd_est[i] = sd(temp)
  cat("Standard deviation is", sd_est[i],"\n")
}
plot(nparticles_seq, sd_est, log="xy")
abline(h=1.5)
plot(nparticles_seq, time_est)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArMag.R
\name{read.Pal.info}
\alias{read.Pal.info}
\title{Lecture des infos sur mesures d'un fichier AM}
\usage{
read.Pal.info(file.Pal, encoding = "macroman")
}
\value{
une data.frame avec les infos sur les spécimens
}
\description{
Lecture des infos sur mesures d'un fichier AM
}
|
/man/read.Pal.info.Rd
|
no_license
|
chrono35/ArMag
|
R
| false
| true
| 361
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArMag.R
\name{read.Pal.info}
\alias{read.Pal.info}
\title{Lecture des infos sur mesures d'un fichier AM}
\usage{
read.Pal.info(file.Pal, encoding = "macroman")
}
\value{
une data.frame avec les infos sur les spécimens
}
\description{
Lecture des infos sur mesures d'un fichier AM
}
|
library(shiny)
library(corrplot)
# Bug fix: brewer.pal() is used below but RColorBrewer was never attached,
# so plot1 failed unless the package happened to be loaded by something else.
library(RColorBrewer)

# Shiny server: scatterplot of two user-selected columns of the data frame
# `project` (assumed to exist in the global environment -- TODO confirm where
# it is loaded), their correlation, and a correlation heat map of a fixed set
# of project metrics.
shinyServer(function(input, output, session) {
  # Reactive two-column subset driven by the UI's x/y column selectors.
  selectedData <- reactive({
    project[, c(input$xcol, input$ycol)]
  })
  output$plot1 <- renderPlot({
    # NOTE(review): n = '8' passes a character where brewer.pal() expects a
    # number of colours; kept as-is to preserve behavior -- TODO confirm.
    plot(selectedData(), col = brewer.pal(n = '8', name = "Paired"))
  })
  # Pearson correlation between the two selected columns.
  output$correlation <- renderPrint({
    cor(project[input$xcol], project[input$ycol])
  })
  # Correlation heat map over a fixed set of numeric project metrics.
  output$corplot <- renderPlot({
    M <- data.frame(project$Project.ID, project$Est..GM., project$Rev..GM..,
                    project$PO.Value, project$Del.Hit.Ratio,
                    project$X..Client.Reported.Issues,
                    project$X..Pending.HP.Issues, project$EE, project$AE)
    corrplot(cor(M), method = "color")
  })
})
|
/code for server.R
|
no_license
|
pavanthota123/correlation-plot-
|
R
| false
| false
| 615
|
r
|
library(shiny)
library(corrplot)
# Bug fix: brewer.pal() is used below but RColorBrewer was never attached,
# so plot1 failed unless the package happened to be loaded by something else.
library(RColorBrewer)

# Shiny server: scatterplot of two user-selected columns of the data frame
# `project` (assumed to exist in the global environment -- TODO confirm where
# it is loaded), their correlation, and a correlation heat map of a fixed set
# of project metrics.
shinyServer(function(input, output, session) {
  # Reactive two-column subset driven by the UI's x/y column selectors.
  selectedData <- reactive({
    project[, c(input$xcol, input$ycol)]
  })
  output$plot1 <- renderPlot({
    # NOTE(review): n = '8' passes a character where brewer.pal() expects a
    # number of colours; kept as-is to preserve behavior -- TODO confirm.
    plot(selectedData(), col = brewer.pal(n = '8', name = "Paired"))
  })
  # Pearson correlation between the two selected columns.
  output$correlation <- renderPrint({
    cor(project[input$xcol], project[input$ycol])
  })
  # Correlation heat map over a fixed set of numeric project metrics.
  output$corplot <- renderPlot({
    M <- data.frame(project$Project.ID, project$Est..GM., project$Rev..GM..,
                    project$PO.Value, project$Del.Hit.Ratio,
                    project$X..Client.Reported.Issues,
                    project$X..Pending.HP.Issues, project$EE, project$AE)
    corrplot(cor(M), method = "color")
  })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/gmql_extend.R
\docType{methods}
\name{extend}
\alias{extend}
\alias{extend,GMQLDataset-method}
\alias{extend-method}
\title{Method extend}
\usage{
extend(.data, ...)
\S4method{extend}{GMQLDataset}(.data, ...)
}
\arguments{
\item{.data}{GMQLDataset class object}
\item{...}{a series of expressions separated by comma in the form
\emph{key} = \emph{aggregate}. The \emph{aggregate} is an object of
class AGGREGATES. The aggregate functions available are: \code{\link{SUM}},
\code{\link{COUNT}}, \code{\link{MIN}}, \code{\link{MAX}},
\code{\link{AVG}}, \code{\link{MEDIAN}}, \code{\link{STD}},
\code{\link{BAG}}, \code{\link{BAGD}}, \code{\link{Q1}},
\code{\link{Q2}}, \code{\link{Q3}}.
Every aggregate accepts a string value, except for COUNT, which does not
have any value.
Argument of 'aggregate function' must exist in schema, i.e. among region
attributes. Two styles are allowed:
\itemize{
\item list of key-value pairs: e.g. sum = SUM("pvalue")
\item list of values: e.g. SUM("pvalue")
}
"mixed style" is not allowed}
}
\value{
GMQLDataset object. It contains the value to use as input
for the subsequent GMQLDataset method
}
\description{
Wrapper to GMQL EXTEND operator
For each sample in an input dataset, it generates new metadata attributes
as result of aggregate functions applied to sample region attributes
and adds them to the existing metadata attributes of the sample.
Aggregate functions are applied sample by sample.
}
\examples{
## This statement initializes and runs the GMQL server for local execution
## and creation of results on disk. Then, with system.file() it defines
## the path to the folder "DATASET" in the subdirectory "example"
## of the package "RGMQL" and opens such folder as a GMQL dataset
## named "data"
init_gmql()
test_path <- system.file("example", "DATASET", package = "RGMQL")
data <- read_gmql(test_path)
## This statement counts the regions in each sample and stores their number
## as value of the new metadata attribute RegionCount of the sample.
e <- extend(data, RegionCount = COUNT())
## This statement copies all samples of data dataset into 'res' dataset,
## and then calculates for each of them two new metadata attributes:
## 1. RegionCount is the number of sample regions;
## 2. MinP is the minimum pvalue of the sample regions.
## res sample regions are the same as the ones in data.
res = extend(data, RegionCount = COUNT(), MinP = MIN("pvalue"))
}
|
/man/extend.Rd
|
no_license
|
Pall8aSim1/RGMQL
|
R
| false
| true
| 2,529
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/gmql_extend.R
\docType{methods}
\name{extend}
\alias{extend}
\alias{extend,GMQLDataset-method}
\alias{extend-method}
\title{Method extend}
\usage{
extend(.data, ...)
\S4method{extend}{GMQLDataset}(.data, ...)
}
\arguments{
\item{.data}{GMQLDataset class object}
\item{...}{a series of expressions separated by comma in the form
\emph{key} = \emph{aggregate}. The \emph{aggregate} is an object of
class AGGREGATES. The aggregate functions available are: \code{\link{SUM}},
\code{\link{COUNT}}, \code{\link{MIN}}, \code{\link{MAX}},
\code{\link{AVG}}, \code{\link{MEDIAN}}, \code{\link{STD}},
\code{\link{BAG}}, \code{\link{BAGD}}, \code{\link{Q1}},
\code{\link{Q2}}, \code{\link{Q3}}.
Every aggregate accepts a string value, except for COUNT, which does not
have any value.
Argument of 'aggregate function' must exist in schema, i.e. among region
attributes. Two styles are allowed:
\itemize{
\item list of key-value pairs: e.g. sum = SUM("pvalue")
\item list of values: e.g. SUM("pvalue")
}
"mixed style" is not allowed}
}
\value{
GMQLDataset object. It contains the value to use as input
for the subsequent GMQLDataset method
}
\description{
Wrapper to GMQL EXTEND operator
For each sample in an input dataset, it generates new metadata attributes
as result of aggregate functions applied to sample region attributes
and adds them to the existing metadata attributes of the sample.
Aggregate functions are applied sample by sample.
}
\examples{
## This statement initializes and runs the GMQL server for local execution
## and creation of results on disk. Then, with system.file() it defines
## the path to the folder "DATASET" in the subdirectory "example"
## of the package "RGMQL" and opens such folder as a GMQL dataset
## named "data"
init_gmql()
test_path <- system.file("example", "DATASET", package = "RGMQL")
data <- read_gmql(test_path)
## This statement counts the regions in each sample and stores their number
## as value of the new metadata attribute RegionCount of the sample.
e <- extend(data, RegionCount = COUNT())
## This statement copies all samples of data dataset into 'res' dataset,
## and then calculates for each of them two new metadata attributes:
## 1. RegionCount is the number of sample regions;
## 2. MinP is the minimum pvalue of the sample regions.
## res sample regions are the same as the ones in data.
res = extend(data, RegionCount = COUNT(), MinP = MIN("pvalue"))
}
|
context("callback")
# A callback referencing a property the target component does not declare
# should produce a warning naming both the bad property and the component id.
test_that("Callback inputs must be well-defined", {
  app <- Dash$new()
  app$layout_set(
    coreSlider(id = "x"),
    htmlDiv(id = "y")
  )
  # invalid *output* property on component "y"
  expect_warning(
    app$callback(
      function(x = input("x")) x,
      output("y", "nonsense")
    ),
    "'nonsense' is not a valid property for the component with id 'y'"
  )
  # invalid *input* property on component "x"
  expect_warning(
    app$callback(
      function(x = input("x", "gobble-gobble")) x,
      output("y")
    ),
    "'gobble-gobble' is not a valid property for the component with id 'x'"
  )
})
# With suppress_callback_exceptions = TRUE the same invalid definitions must
# be accepted silently (no warning, no message, no error).
test_that("Can suppress warnings", {
  app <- Dash$new(suppress_callback_exceptions = TRUE)
  app$layout_set(
    coreSlider(id = "x"),
    htmlDiv(id = "y")
  )
  expect_silent(
    app$callback(
      function(x = input("x")) x,
      output("y", "nonsense")
    )
  )
  expect_silent(
    app$callback(
      function(x = input("x", "gobble-gobble")) x,
      output("y")
    )
  )
})
|
/tests/testthat/test-callback.R
|
permissive
|
Akayeshmantha/dashR
|
R
| false
| false
| 941
|
r
|
context("callback")
# A callback referencing a property the target component does not declare
# should produce a warning naming both the bad property and the component id.
test_that("Callback inputs must be well-defined", {
  app <- Dash$new()
  app$layout_set(
    coreSlider(id = "x"),
    htmlDiv(id = "y")
  )
  # invalid *output* property on component "y"
  expect_warning(
    app$callback(
      function(x = input("x")) x,
      output("y", "nonsense")
    ),
    "'nonsense' is not a valid property for the component with id 'y'"
  )
  # invalid *input* property on component "x"
  expect_warning(
    app$callback(
      function(x = input("x", "gobble-gobble")) x,
      output("y")
    ),
    "'gobble-gobble' is not a valid property for the component with id 'x'"
  )
})
# With suppress_callback_exceptions = TRUE the same invalid definitions must
# be accepted silently (no warning, no message, no error).
test_that("Can suppress warnings", {
  app <- Dash$new(suppress_callback_exceptions = TRUE)
  app$layout_set(
    coreSlider(id = "x"),
    htmlDiv(id = "y")
  )
  expect_silent(
    app$callback(
      function(x = input("x")) x,
      output("y", "nonsense")
    )
  )
  expect_silent(
    app$callback(
      function(x = input("x", "gobble-gobble")) x,
      output("y")
    )
  )
})
|
library(RJSONIO)

# Build nodes/links JSON for a d3 force-directed graph from a student survey.
# Each node is a survey item (plus three GPA-band indicators); each link's
# weight is the rescaled correlation between two items.
data<-read.csv('student.csv')
## clean data: recode GPA (column 1) into three band indicators (2 = in band,
## 1 = not), then drop the raw GPA column.
data$low<-ifelse(data$GPA<2.5,2,1)
data$med<-ifelse(data$GPA<3.5&data[,1]>=2.5,2,1)
data$high<-ifelse(data$GPA>=3.5,2,1)
data<-data[,-1]
data$FakeID<-as.numeric(data$FakeID)
data$ChtdSO<-as.numeric(data$ChtdSO)
data$SmokeCig<-as.numeric(data$SmokeCig)
data$SmokedMJ<-as.numeric(data$SmokedMJ)
data$ChtdExam<-as.numeric(data$ChtdExam)
data$SkipClass<-ifelse(data$SkipClass<1,0,data$SkipClass)

## create links json out of correlations between groups
cor.data<-data.frame(cor(data))
temp<-cor.data[upper.tri(cor.data)]
# Drop the three pairs among the GPA bands themselves (low/med/high, columns
# 7-9, are mutually exclusive recodings of one variable). In column-major
# upper-triangle order those are entries 28, 35 and 36, i.e. the pairs
# (7,8), (7,9) and (8,9).
d<-data.frame(temp)[-c(35,36,28),]
# Rescale the remaining correlations to [0, 1] for use as link weights.
upper<-(d-min(d))/(max(d)-min(d))
# Enumerate column pairs in the same column-major order as upper.tri(), drop
# the three GPA-band pairs, and emit one link per remaining pair. This
# replaces the previous hand-written 33-element list (the original comment
# itself suggested automating it); sources/targets are 0-based for d3.
pairs <- which(upper.tri(cor.data), arr.ind = TRUE)
pairs <- pairs[!(pairs[, 1] >= 7 & pairs[, 2] >= 7), , drop = FALSE]
links <- lapply(seq_len(nrow(pairs)), function(k) {
  c(cor = upper[k], source = pairs[k, 1] - 1, target = pairs[k, 2] - 1)
})
json<-toJSON(links,.escapeEscapes=FALSE,pretty=TRUE)
write.table(json,file='links.txt')

## create nodes size: number of respondents answering > 1 on each item
count.data<-apply(data,2,function(x){ifelse(x>1,1,0)})
size<-apply(count.data,2,sum)
# Build the node list in one pass instead of growing it with append() in a
# loop (quadratic copying).
nodes <- lapply(seq_along(size), function(i) {
  c(group = names(size)[i], size = size[[i]])
})
json<-toJSON(nodes,.escapeEscapes=FALSE,pretty=TRUE)
write.table(json,file='nodes.txt')
|
/directed_graph/force.R
|
no_license
|
ilanman/d3_projects
|
R
| false
| false
| 2,721
|
r
|
library(RJSONIO)

# Build nodes/links JSON for a d3 force-directed graph from a student survey.
# Each node is a survey item (plus three GPA-band indicators); each link's
# weight is the rescaled correlation between two items.
data<-read.csv('student.csv')
## clean data: recode GPA (column 1) into three band indicators (2 = in band,
## 1 = not), then drop the raw GPA column.
data$low<-ifelse(data$GPA<2.5,2,1)
data$med<-ifelse(data$GPA<3.5&data[,1]>=2.5,2,1)
data$high<-ifelse(data$GPA>=3.5,2,1)
data<-data[,-1]
data$FakeID<-as.numeric(data$FakeID)
data$ChtdSO<-as.numeric(data$ChtdSO)
data$SmokeCig<-as.numeric(data$SmokeCig)
data$SmokedMJ<-as.numeric(data$SmokedMJ)
data$ChtdExam<-as.numeric(data$ChtdExam)
data$SkipClass<-ifelse(data$SkipClass<1,0,data$SkipClass)

## create links json out of correlations between groups
cor.data<-data.frame(cor(data))
temp<-cor.data[upper.tri(cor.data)]
# Drop the three pairs among the GPA bands themselves (low/med/high, columns
# 7-9, are mutually exclusive recodings of one variable). In column-major
# upper-triangle order those are entries 28, 35 and 36, i.e. the pairs
# (7,8), (7,9) and (8,9).
d<-data.frame(temp)[-c(35,36,28),]
# Rescale the remaining correlations to [0, 1] for use as link weights.
upper<-(d-min(d))/(max(d)-min(d))
# Enumerate column pairs in the same column-major order as upper.tri(), drop
# the three GPA-band pairs, and emit one link per remaining pair. This
# replaces the previous hand-written 33-element list (the original comment
# itself suggested automating it); sources/targets are 0-based for d3.
pairs <- which(upper.tri(cor.data), arr.ind = TRUE)
pairs <- pairs[!(pairs[, 1] >= 7 & pairs[, 2] >= 7), , drop = FALSE]
links <- lapply(seq_len(nrow(pairs)), function(k) {
  c(cor = upper[k], source = pairs[k, 1] - 1, target = pairs[k, 2] - 1)
})
json<-toJSON(links,.escapeEscapes=FALSE,pretty=TRUE)
write.table(json,file='links.txt')

## create nodes size: number of respondents answering > 1 on each item
count.data<-apply(data,2,function(x){ifelse(x>1,1,0)})
size<-apply(count.data,2,sum)
# Build the node list in one pass instead of growing it with append() in a
# loop (quadratic copying).
nodes <- lapply(seq_along(size), function(i) {
  c(group = names(size)[i], size = size[[i]])
})
json<-toJSON(nodes,.escapeEscapes=FALSE,pretty=TRUE)
write.table(json,file='nodes.txt')
|
#' This function estimates a multiplicative mixed-frequency GARCH model. For the sake of numerical stability, it is best to multiply log returns by 100.
#' @param data data frame containing a column named date of type 'Date'.
#' @param y name of high frequency dependent variable in df.
#' @param x covariate employed in mfGARCH.
#' @param K an integer specifying lag length K in the long-term component.
#' @param low.freq a string of the low frequency variable in the df.
#' @param var.ratio.freq specify a frequency column on which the variance ratio should be calculated.
#' @param gamma if TRUE, an asymmetric GJR-GARCH is used as the short-term component. If FALSE, a simple GARCH(1,1) is employed.
#' @param weighting specifies the weighting scheme employed in the long-term component. Options are "beta.restricted" (default) or "beta.unrestricted"
#' @param x.two optional second covariate
#' @param K.two lag lgenth of optional second covariate
#' @param low.freq.two low frequency of optional second covariate
#' @param weighting.two specifies the weighting scheme employed in the optional second long-term component. Currently, the only option is "beta.restricted"
#' @param multi.start if TRUE, optimization is carried out with multiple starting values
#' @param control a list
#' @keywords fit_mfgarch
#' @export
#'
#' @return A list of class mfGARCH with letters and numbers.
#' \itemize{
#' \item par - vector of estimated parameters
#' \item rob.std.err - sandwich/HAC-type standard errors
#' \item broom.mgarch - a broom-like data.frame with entries
#' 1) estimate: column of estimated parameters
#' 2) rob.std.err - sandwich/HAC-type standard errors
#' 3) p.value - p-values derived from sandwich/HAC-type standard errors
#' 4) opg.std.err - Bollerslev-Wooldrige/OPG standard errors for GARCH processes
#' 5) opg.p.value - corresponding alternative p-values
#' \item tau - fitted long-term component
#' \item g - fitted short-term component
#' \item df.fitted - data frame with fitted values and residuals
#' \item K - chosen lag-length in the long-term component
#' \item weighting.scheme - chosen weighting scheme
#' \item llh - log-likelihood value at estimated parameter vector
#' \item bic - corresponding BIC value
#' \item y - dependent variable y
#' \item optim - output of the optimization routine
#' \item K.two - lag-lenth of x.two if two covariates are employed
#' \item weighting.scheme.two - chosen weighting scheme of x.two (if K.two != NULL)
#' \item tau.forecast - one-step ahead forecast of the long-term component
#' \item variance.ratio - calculated variance ratio
#' \item est.weighting - estimated weighting scheme
#' \item est.weighting.two - estimated weighting scheme of x.two (if K.two != NULL)
#' }
#'
#' @importFrom numDeriv jacobian
#' @importFrom stats nlminb
#' @importFrom numDeriv hessian
#' @importFrom stats constrOptim
#' @importFrom stats na.exclude
#' @importFrom stats optim
#' @importFrom stats pnorm
#' @importFrom stats var
#' @importFrom stats aggregate
#' @importFrom numDeriv jacobian
#' @importFrom maxLik maxLik
#' @importFrom utils tail
#' @examples
#' \dontrun{
#' fit_mfgarch(data = df_financial, y = "return", x = "nfci", low.freq = "week", K = 52)
#' fit_mfgarch(data = df_mfgarch, y = "return", x = "nfci", low.freq = "year_week", K = 52,
#' x.two = "dindpro", K.two = 12, low.freq.two = "year_month", weighting.two = "beta.restricted")
#' }
fit_mfgarch <- function(data, y, x = NULL, K = NULL, low.freq = "date", var.ratio.freq = NULL, gamma = TRUE, weighting = "beta.restricted", x.two = NULL, K.two = NULL, low.freq.two = NULL, weighting.two = NULL, multi.start = FALSE, control = list(par.start = NULL)) {
print("For ensuring numerical stability of the parameter optimization and inversion of the Hessian, it is best to multiply log returns by 100.")
if (is.null(weighting.two) == FALSE) {
if (weighting.two != "beta.restricted") {
stop("Right now, only beta.restricted weighting scheme is employed for the second covariate.")
}
}
if (is.null(x.two) == FALSE) {
weighting.two <- "beta.restricted"
}
if (is.null(x.two) == FALSE && gamma == FALSE) {
stop("Regarding two covariates, only asymmetric GJR-GARCH component is implemented.")
}
# if (K == 1 && is.null(x.two) == FALSE && K.two != 1) {
# stop("Regarding two covariates, only K.two = 1 is implemented if K = 1.")
# }
if (is.null(K.two) == FALSE) {
if (K == 1 & K.two > 1) {
stop("Regarding two covariates with one of them being equal to one, only K.two = 1 is implemented.")
}
}
if (is.null(x.two) == FALSE) {
print("Specifying two covariates may lead to long estimation times.")
}
if (weighting %in% c("beta.restricted", "beta.unrestricted") == FALSE) {
stop("Incorrect weighting scheme specified - options are \"beta.restricted\" and \"beta.unrestricted\".")
}
if (gamma %in% c(TRUE, FALSE) == FALSE) {
stop("Gamma can't be anything different than TRUE or FALSE.")
}
if ("date" %in% colnames(data) == FALSE) {
stop("No date column.")
}
if (inherits(data$date, 'Date') == FALSE) {
stop(paste0("Supplied date column is not of format 'Date'. It is of class '", class(data$date), "'."))
}
if (inherits(data[[low.freq]], 'Date') == FALSE) {
stop(paste0("Supplied low.freq column is not of format 'Date'. It is of class '", class(data[[low.freq]]), "'."))
}
if (is.null(x) == FALSE && K == 0) {
warning("You specified an external covariate x but chose K = 0 - simple GARCH is estimated (K = 0).")
}
if (is.null(x) == TRUE) {
warning("No external covariate x is specified - simple GARCH is estimated (K = 0).")
x <- "date"
K <- 0
}
if (is.null(K) == TRUE) {
warning("No K is specified - simple GARCH is estimated (K = 0).")
x <- "date"
K <- 0
}
if (K < 0 || K %% 1 != 0) {
stop("K can't be smaller than 0 and has to be an integer.")
}
if (dim(unique(data[c(x, low.freq)]))[1] > dim(unique(data[c(low.freq)]))[1]) {
stop("There is more than one unique observation per low frequency entry.")
}
# if ((is.null(x) == TRUE && (is.null(K) == TRUE)) || K == 0) {
# K <- 0
# }
if (y %in% colnames(data) == FALSE) {
stop(paste("There is no variable in your data frame with name ", y, "."))
}
if (x %in% colnames(data) == FALSE && is.null(x) != FALSE) {
stop(paste("There is no variable in your data frame with name ", x, "."))
}
if (low.freq %in% colnames(data) == FALSE) {
stop(paste("There is no low freq. variable in your data frame with name ", low.freq, "."))
}
if ("tau" %in% colnames(data) == TRUE) {
stop("There may not be a column named tau - it will be part of df.fitted")
}
if ("g" %in% colnames(data) == TRUE) {
stop("There may not be a column named g - it will be part of df.fitted")
}
if (is.null(x) == TRUE) {
if (sum(is.na(data[[y]]) == TRUE) > 0) {
stop(paste0("Column ", y, "contains NAs."))
}
} else {
if (sum(is.na(data[[y]]) == TRUE) > 0 | sum(is.na(data[[x]]) == TRUE) > 0) {
stop(paste0("Either column ", y, " or column ", x, "includes NAs."))
}
}
if (length(unlist(unique(data[["date"]]))) != dim(data)[1]) {
stop("There is more than one observation per high frequency (presumably date).")
}
if (is.null(var.ratio.freq) == FALSE) {
if (var.ratio.freq %in% colnames(data) == FALSE) {
stop(paste0("There is no var.ratio.freq column with name ", var.ratio.freq, "."))
}
}
# Order by high frequency variable
data <- data[order(data$date), ]
# Deprecated dplyr version
#data <- dplyr::arrange_(data, "date")
# We store date in new variable because computation on integerized date seemed to be faster
date_backup <- data[["date"]]
data["date"] <- as.numeric(unlist(data["date"]))
if (is.null(var.ratio.freq) == TRUE) {
var.ratio.freq <- low.freq
print(paste0("No frequency specified for calculating the variance ratio - default: low.freq = ", low.freq))
}
low_freq_backup <- data[, low.freq]
if (x != "date") {
if (is.null(x.two) == TRUE) {
df_llh <- data[, c(y, x, low.freq)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
} else {
low_freq.two_backup <- data[, low.freq.two]
if (low.freq != low.freq.two) { # if they are different, both have to be included in df_llh
df_llh <- data[, c(y, x, low.freq, x.two, low.freq.two)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
df_llh[, low.freq.two] <- as.integer(unlist(df_llh[ , low.freq.two]))
} else { # else, the low.freq column is needed only once
df_llh <- data[, c(y, x, low.freq, x.two)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
}
}
}
g_zero <- var(unlist(data[[y]]))
ret <- data[[y]]
# Parameter estimation
if (K == 0) {
if (gamma == TRUE) {
lf <- function(p) {
llh_simple(y = ret,
mu = p["mu"],
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
g_zero = g_zero)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0), c(0, 1, 0, 0, 0), c(0, 0, 1, 0, 0))
ci.opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_simple(y = ret,
mu = p["mu"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
g_zero = g_zero)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0)
ui.opt <- rbind(c(0, -1, -1, 0), c(0, 1, 0, 0), c(0, 0, 1, 0))
ci.opt <- c(-0.99999, 0, 0)
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par.start,
f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui.opt, ci = ci.opt, hessian = FALSE)
if (multi.start == TRUE && gamma == TRUE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) == "try-error") {
print("Second-step BFGS optimization failed. Fallback: First-stage Nelder-Mead estimate.")
} else {
if (p.e.nlminb.two$value < p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
}
}
}
p.e.nlminb$value <- -p.e.nlminb$value
par <- p.e.nlminb$par
returns <- as.numeric(unlist(data[[y]]))
tau <- rep(exp(par["m"]), times = length(returns))
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"],
beta = par["beta"],
gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
tau <- rep(exp(par["m"]), times = length(g))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"],
beta = par["beta"],
gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
tau <- rep(exp(par["m"]), times = length(g))
}
if ((var.ratio.freq %in% c("date", "low.freq")) == FALSE) {
df.fitted <- cbind(data[c("date", y, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y)], g = g, tau = tau)
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
} else { # if K > 0 we get the covariate series
covariate <- unlist(unique(data[c(low.freq, x)])[x])
if (is.null(x.two) == FALSE) {
covariate.two <- unlist(unique(data[c(low.freq.two, x.two)])[x.two])
}
}
if (K == 1) {
if (is.null(K.two) == FALSE) {
if (gamma == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh, y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0, theta = 0, theta.two = 0)
ui_opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0), c(0, 1, 0, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, theta.two = 0)
ui_opt <- rbind(c(0, -1, -1, 0, 0, 0), c(0, 1, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
}
} else {
if (gamma == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh, y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0, theta = 0)
ui_opt <- rbind(c(0, -1, -1, -1/2, 0, 0), c(0, 1, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0)
ui_opt <- rbind(c(0, -1, -1, 0, 0), c(0, 1, 0, 0, 0), c(0, 0, 1, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
}
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par_start, f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui_opt, ci = ci_opt, hessian = FALSE)
par <- p.e.nlminb$par
p.e.nlminb$value <- -p.e.nlminb$value
if (is.null(x.two) == FALSE) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = 1, theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = 1)$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
theta = par["theta"], m = par["m"], w1 = 1, w2 = 1, K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
returns <- unlist(data[y])
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"], beta = par["beta"], gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))), g0 = g_zero))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"], beta = par["beta"], gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))), g0 = g_zero))
}
if ((var.ratio.freq %in% c("date", "low.freq")) == FALSE) {
df.fitted <- cbind(data[c("date", y, low.freq, x, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x)], g = g, tau = tau)
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
}
if (K > 1) {
if (gamma == TRUE) {
if (weighting == "beta.restricted" & is.null(K.two) == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1,
w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (weighting == "beta.restricted" & is.null(K.two) == FALSE) {
if (K.two == 1) {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = 1, w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = 1, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3, theta.two = 0)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0),
c(0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (K.two > 1) {
if (weighting.two == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = 1, w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = p["w2.two"])
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3, theta.two = 0, w2.two = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0, 0),
c(0, 1, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 0, 1))
ci.opt <- c(-0.99999999, 1, 0, 0, 1)
}
if (weighting.two != "beta.restricted") {
stop("Weighting scheme for second variable can only be beta.restricted.")
}
}
}
if (weighting == "beta.unrestricted" & is.null(K.two) == TRUE){
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"], w1 = p["w1"], w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w1 = 1.0000001, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0),
c(0, 0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 1, 0, 0)
}
if (weighting == "beta.unrestricted" & is.null(weighting.two) == FALSE) {
if (weighting.two == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = p["w1"], w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = p["w2.two"])
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w1 = 1.00000001, w2 = 3, theta.two = 0, w2.two = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 1, 0, 0),
c(0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
ci.opt <- c(-0.99999999, 1, 1, 0, 0, 1)
}
}
}
if (gamma == FALSE) {
if (weighting == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"], omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"], beta = p["beta"], gamma = 0,
m = p["m"], theta = p["theta"], w1 = 1, w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, 0, 0, 0),
c(0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (weighting == "beta.unrestricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = p["w1"],
w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, w1 = 1.00000001, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 1, 0),
c(0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 1, 0, 0)
}
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par.start, f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui.opt, ci = ci.opt, hessian = FALSE)
p.e.nlminb$value <- -p.e.nlminb$value
if (multi.start == TRUE && gamma == TRUE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] + theta["gamma"]/2 >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) != "try-error" && -p.e.nlminb.two$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
p.e.nlminb$value <- -p.e.nlminb$value
}
par.max.lik.nr <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NR")}, silent = TRUE)
if (class(par.max.lik.nr) != "try-error" && par.max.lik.nr$maximum > p.e.nlminb$value &&
par.max.lik.nr$estimate["w2"] >= 1 &&
par.max.lik.nr$estimate["alpha"] + par.max.lik.nr$estimate["beta"] + par.max.lik.nr$estimate["gamma"] / 2 < 1 &&
par.max.lik.nr$estimate["alpha"] >= 0 && par.max.lik.nr$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nr$estimate
p.e.nlminb$value <- par.max.lik.nr$maximum
}
par.max.lik.nm <- try({maxLik(logLik = function(x) -lf(x), start = par.start, method = "NM")}, silent = TRUE)
if (class(par.max.lik.nm) != "try-error" && par.max.lik.nm$maximum > p.e.nlminb$value &&
par.max.lik.nm$estimate["w2"] >= 1 &&
par.max.lik.nm$estimate["alpha"] + par.max.lik.nm$estimate["beta"] + par.max.lik.nm$estimate["gamma"] / 2 < 1 &&
par.max.lik.nm$estimate["alpha"] >= 0 && par.max.lik.nm$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nm$estimate
p.e.nlminb$value <- par.max.lik.nm$maximum
}
p.e.nlminb.three <- try({
suppressWarnings(optim(par = par.start, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] + theta["gamma"]/2 >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.three) != "try-error" && -p.e.nlminb.three$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.three
p.e.nlminb$value <- -p.e.nlminb$value
}
}
if (multi.start == TRUE && gamma == FALSE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) != "try-error" && -p.e.nlminb.two$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
p.e.nlminb$value <- -p.e.nlminb$value
}
par.max.lik.nr <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NR")}, silent = TRUE)
if (class(par.max.lik.nr) != "try-error" && par.max.lik.nr$maximum > p.e.nlminb$value &&
par.max.lik.nr$estimate["w2"] >= 1 &&
par.max.lik.nr$estimate["alpha"] + par.max.lik.nr$estimate["beta"] < 1 &&
par.max.lik.nr$estimate["alpha"] >= 0 && par.max.lik.nr$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nr$estimate
p.e.nlminb$value <- par.max.lik.nr$maximum
}
par.max.lik.nm <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NM")}, silent = TRUE)
if (class(par.max.lik.nm) != "try-error" && par.max.lik.nm$maximum > p.e.nlminb$value &&
par.max.lik.nm$estimate["w2"] >= 1 &&
par.max.lik.nm$estimate["alpha"] + par.max.lik.nm$estimate["beta"] < 1 &&
par.max.lik.nm$estimate["alpha"] >= 0 && par.max.lik.nm$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nm$estimate
p.e.nlminb$value <- par.max.lik.nm$maximum
}
p.e.nlminb.three <- try({
suppressWarnings(optim(par = par.start, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.three) != "try-error" && -p.e.nlminb.three$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.three
p.e.nlminb$value <- -p.e.nlminb$value
}
}
par <- p.e.nlminb$par
if (weighting == "beta.restricted") {
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = par["w2.two"])$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = 1)$tau
}
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"],
theta = par["theta"],
m = par["m"], K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = 1, w2 = par["w2"], K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
} else {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
}
}
if (weighting == "beta.unrestricted") {
if (is.null(x.two) == FALSE) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = par["w1"], w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = par["w2.two"])$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = par["w1"], w2 = par["w2"],
theta = par["theta"],
m = par["m"], K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = par["w1"], w2 = par["w2"], K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
}
returns <- unlist(data[y])
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"],
beta = par["beta"],
gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"],
beta = par["beta"],
gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
}
if ((var.ratio.freq %in% c("date", low.freq)) == FALSE) {
if (is.null(x.two) == TRUE) {
df.fitted <- cbind(data[c("date", y, low.freq, x, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x, low.freq.two, x.two, var.ratio.freq)], g = g, tau = tau)
}
} else {
if (is.null(x.two) == TRUE) {
df.fitted <- cbind(data[c("date", y, low.freq, x)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x, low.freq.two, x.two)], g = g, tau = tau)
}
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
}
df.fitted$date <- as.Date(date_backup)
# Standard errors --------------------------------------------------------------------------------
# inv_hessian <- try({
# solve(-optimHess(par = par, fn = function (theta) {
# if( is.na(sum(lf(theta))) == TRUE) {
# 10000000
# } else {
# sum(lf(theta))
# }
# }))
# }, silent = TRUE)
inv_hessian <- try({
solve(-suppressWarnings(hessian(x = par, func = function (theta) {
if( is.na(sum(lf(theta))) == TRUE) {
0
} else {
-sum(lf(theta))
}
})))
}, silent = TRUE)
opg.std.err <- try({sqrt(diag(solve(crossprod(jacobian(func = function(theta) -lf(theta), x = par)))))},
silent = TRUE)
if (class(opg.std.err)[1] == "try-error") {
warning("Inverting the OPG matrix failed. No OPG standard errors calculated.")
opg.std.err <- NA
} else {
opg.std.err <- opg.std.err * sqrt((mean(df.fitted$residuals^4, na.rm = TRUE) - 1) / 2)
}
if (class(inv_hessian)[1] == "try-error") {
warning("Inverting the Hessian matrix failed. No robust standard errors calculated. Possible workaround: Multiply returns by 100.")
rob.std.err <- NA
} else {
rob.std.err <- sqrt(diag(inv_hessian %*% crossprod(jacobian(func = lf, x = par)) %*% inv_hessian))
}
# Output -----------------------------------------------------------------------------------------
output <-
list(par = par,
std.err = rob.std.err,
broom.mgarch = data.frame(term = names(par),
estimate = par,
rob.std.err = rob.std.err,
p.value = 2 * (1 - pnorm(unlist(abs(par/rob.std.err)))),
opg.std.err = opg.std.err,
opg.p.value = 2 * (1 - pnorm(unlist(abs(par/opg.std.err))))),
tau = tau,
g = g,
df.fitted = df.fitted,
K = K,
weighting.scheme = weighting,
llh = p.e.nlminb$value,
bic = log(sum(!is.na(tau))) * length(par) - 2 * (p.e.nlminb$value),
y = y,
optim = p.e.nlminb)
if (is.null(x.two) == FALSE) {
output$K.two <- K.two
output$weighting.scheme.two <- weighting.two
}
if (K == 0) {
output$tau.forecast <- exp(par["m"])
}
# Additional output if there is a long-term component (K > 0) -------------------------------------
if (K > 0) {
output$variance.ratio <- 100 *
var(log(aggregate(df.fitted$tau, by = df.fitted[var.ratio.freq],
FUN = mean)[,2]),
na.rm = TRUE) /
var(log(aggregate(df.fitted$tau * df.fitted$g, by = df.fitted[var.ratio.freq],
FUN = mean)[,2]),
na.rm = TRUE)
output$tau.forecast <- tau_forecast
if (weighting == "beta.restricted") {
output$est.weighting <- calculate_phi(1, w2 = par["w2"], K = K)
}
if (weighting == "beta.unrestricted") {
output$est.weighting <- calculate_phi(w1 = par["w1"], w2 = par["w2"], K = K)
}
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
output$est.weighting.two <- calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two)
}
}
}
# Add class mfGARCH for employing generic functions
class(output) <- "mfGARCH"
output
}
|
/mfGARCH/R/fit_mfgarch.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 40,402
|
r
|
#' This function estimates a multiplicative mixed-frequency GARCH model. For the sake of numerical stability, it is best to multiply log returns by 100.
#' @param data data frame containing a column named date of type 'Date'.
#' @param y name of high frequency dependent variable in df.
#' @param x covariate employed in mfGARCH.
#' @param K an integer specifying lag length K in the long-term component.
#' @param low.freq a string of the low frequency variable in the df.
#' @param var.ratio.freq specify a frequency column on which the variance ratio should be calculated.
#' @param gamma if TRUE, an asymmetric GJR-GARCH is used as the short-term component. If FALSE, a simple GARCH(1,1) is employed.
#' @param weighting specifies the weighting scheme employed in the long-term component. Options are "beta.restricted" (default) or "beta.unrestricted"
#' @param x.two optional second covariate
#' @param K.two lag length of optional second covariate
#' @param low.freq.two low frequency of optional second covariate
#' @param weighting.two specifies the weighting scheme employed in the optional second long-term component. Currently, the only option is "beta.restricted"
#' @param multi.start if TRUE, optimization is carried out with multiple starting values
#' @param control a list
#' @keywords fit_mfgarch
#' @export
#'
#' @return A list of class mfGARCH with the following components:
#' \itemize{
#' \item par - vector of estimated parameters
#' \item rob.std.err - sandwich/HAC-type standard errors
#' \item broom.mgarch - a broom-like data.frame with entries
#' 1) estimate: column of estimated parameters
#' 2) rob.std.err - sandwich/HAC-type standard errors
#' 3) p.value - p-values derived from sandwich/HAC-type standard errors
#' 4) opg.std.err - Bollerslev-Wooldridge/OPG standard errors for GARCH processes
#' 5) opg.p.value - corresponding alternative p-values
#' \item tau - fitted long-term component
#' \item g - fitted short-term component
#' \item df.fitted - data frame with fitted values and residuals
#' \item K - chosen lag-length in the long-term component
#' \item weighting.scheme - chosen weighting scheme
#' \item llh - log-likelihood value at estimated parameter vector
#' \item bic - corresponding BIC value
#' \item y - dependent variable y
#' \item optim - output of the optimization routine
#' \item K.two - lag length of x.two if two covariates are employed
#' \item weighting.scheme.two - chosen weighting scheme of x.two (if K.two != NULL)
#' \item tau.forecast - one-step ahead forecast of the long-term component
#' \item variance.ratio - calculated variance ratio
#' \item est.weighting - estimated weighting scheme
#' \item est.weighting.two - estimated weighting scheme of x.two (if K.two != NULL)
#' }
#'
#' @importFrom numDeriv jacobian
#' @importFrom stats nlminb
#' @importFrom numDeriv hessian
#' @importFrom stats constrOptim
#' @importFrom stats na.exclude
#' @importFrom stats optim
#' @importFrom stats pnorm
#' @importFrom stats var
#' @importFrom stats aggregate
#' @importFrom numDeriv jacobian
#' @importFrom maxLik maxLik
#' @importFrom utils tail
#' @examples
#' \dontrun{
#' fit_mfgarch(data = df_financial, y = "return", x = "nfci", low.freq = "week", K = 52)
#' fit_mfgarch(data = df_mfgarch, y = "return", x = "nfci", low.freq = "year_week", K = 52,
#' x.two = "dindpro", K.two = 12, low.freq.two = "year_month", weighting.two = "beta.restricted")
#' }
fit_mfgarch <- function(data, y, x = NULL, K = NULL, low.freq = "date", var.ratio.freq = NULL, gamma = TRUE, weighting = "beta.restricted", x.two = NULL, K.two = NULL, low.freq.two = NULL, weighting.two = NULL, multi.start = FALSE, control = list(par.start = NULL)) {
print("For ensuring numerical stability of the parameter optimization and inversion of the Hessian, it is best to multiply log returns by 100.")
if (is.null(weighting.two) == FALSE) {
if (weighting.two != "beta.restricted") {
stop("Right now, only beta.restricted weighting scheme is employed for the second covariate.")
}
}
if (is.null(x.two) == FALSE) {
weighting.two <- "beta.restricted"
}
if (is.null(x.two) == FALSE && gamma == FALSE) {
stop("Regarding two covariates, only asymmetric GJR-GARCH component is implemented.")
}
# if (K == 1 && is.null(x.two) == FALSE && K.two != 1) {
# stop("Regarding two covariates, only K.two = 1 is implemented if K = 1.")
# }
if (is.null(K.two) == FALSE) {
if (K == 1 & K.two > 1) {
stop("Regarding two covariates with one of them being equal to one, only K.two = 1 is implemented.")
}
}
if (is.null(x.two) == FALSE) {
print("Specifying two covariates may lead to long estimation times.")
}
if (weighting %in% c("beta.restricted", "beta.unrestricted") == FALSE) {
stop("Incorrect weighting scheme specified - options are \"beta.restricted\" and \"beta.unrestricted\".")
}
if (gamma %in% c(TRUE, FALSE) == FALSE) {
stop("Gamma can't be anything different than TRUE or FALSE.")
}
if ("date" %in% colnames(data) == FALSE) {
stop("No date column.")
}
if (inherits(data$date, 'Date') == FALSE) {
stop(paste0("Supplied date column is not of format 'Date'. It is of class '", class(data$date), "'."))
}
if (inherits(data[[low.freq]], 'Date') == FALSE) {
stop(paste0("Supplied low.freq column is not of format 'Date'. It is of class '", class(data[[low.freq]]), "'."))
}
if (is.null(x) == FALSE && K == 0) {
warning("You specified an external covariate x but chose K = 0 - simple GARCH is estimated (K = 0).")
}
if (is.null(x) == TRUE) {
warning("No external covariate x is specified - simple GARCH is estimated (K = 0).")
x <- "date"
K <- 0
}
if (is.null(K) == TRUE) {
warning("No K is specified - simple GARCH is estimated (K = 0).")
x <- "date"
K <- 0
}
if (K < 0 || K %% 1 != 0) {
stop("K can't be smaller than 0 and has to be an integer.")
}
if (dim(unique(data[c(x, low.freq)]))[1] > dim(unique(data[c(low.freq)]))[1]) {
stop("There is more than one unique observation per low frequency entry.")
}
# if ((is.null(x) == TRUE && (is.null(K) == TRUE)) || K == 0) {
# K <- 0
# }
if (y %in% colnames(data) == FALSE) {
stop(paste("There is no variable in your data frame with name ", y, "."))
}
if (x %in% colnames(data) == FALSE && is.null(x) != FALSE) {
stop(paste("There is no variable in your data frame with name ", x, "."))
}
if (low.freq %in% colnames(data) == FALSE) {
stop(paste("There is no low freq. variable in your data frame with name ", low.freq, "."))
}
if ("tau" %in% colnames(data) == TRUE) {
stop("There may not be a column named tau - it will be part of df.fitted")
}
if ("g" %in% colnames(data) == TRUE) {
stop("There may not be a column named g - it will be part of df.fitted")
}
if (is.null(x) == TRUE) {
if (sum(is.na(data[[y]]) == TRUE) > 0) {
stop(paste0("Column ", y, "contains NAs."))
}
} else {
if (sum(is.na(data[[y]]) == TRUE) > 0 | sum(is.na(data[[x]]) == TRUE) > 0) {
stop(paste0("Either column ", y, " or column ", x, "includes NAs."))
}
}
if (length(unlist(unique(data[["date"]]))) != dim(data)[1]) {
stop("There is more than one observation per high frequency (presumably date).")
}
if (is.null(var.ratio.freq) == FALSE) {
if (var.ratio.freq %in% colnames(data) == FALSE) {
stop(paste0("There is no var.ratio.freq column with name ", var.ratio.freq, "."))
}
}
# Order by high frequency variable
data <- data[order(data$date), ]
# Deprecated dplyr version
#data <- dplyr::arrange_(data, "date")
# We store date in new variable because computation on integerized date seemed to be faster
date_backup <- data[["date"]]
data["date"] <- as.numeric(unlist(data["date"]))
if (is.null(var.ratio.freq) == TRUE) {
var.ratio.freq <- low.freq
print(paste0("No frequency specified for calculating the variance ratio - default: low.freq = ", low.freq))
}
low_freq_backup <- data[, low.freq]
if (x != "date") {
if (is.null(x.two) == TRUE) {
df_llh <- data[, c(y, x, low.freq)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
} else {
low_freq.two_backup <- data[, low.freq.two]
if (low.freq != low.freq.two) { # if they are different, both have to be included in df_llh
df_llh <- data[, c(y, x, low.freq, x.two, low.freq.two)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
df_llh[, low.freq.two] <- as.integer(unlist(df_llh[ , low.freq.two]))
} else { # else, the low.freq column is needed only once
df_llh <- data[, c(y, x, low.freq, x.two)]
df_llh[, low.freq] <- as.integer(unlist(df_llh[ , low.freq]))
}
}
}
g_zero <- var(unlist(data[[y]]))
ret <- data[[y]]
# Parameter estimation
if (K == 0) {
if (gamma == TRUE) {
lf <- function(p) {
llh_simple(y = ret,
mu = p["mu"],
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
g_zero = g_zero)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0), c(0, 1, 0, 0, 0), c(0, 0, 1, 0, 0))
ci.opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_simple(y = ret,
mu = p["mu"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
g_zero = g_zero)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0)
ui.opt <- rbind(c(0, -1, -1, 0), c(0, 1, 0, 0), c(0, 0, 1, 0))
ci.opt <- c(-0.99999, 0, 0)
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par.start,
f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui.opt, ci = ci.opt, hessian = FALSE)
if (multi.start == TRUE && gamma == TRUE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) == "try-error") {
print("Second-step BFGS optimization failed. Fallback: First-stage Nelder-Mead estimate.")
} else {
if (p.e.nlminb.two$value < p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
}
}
}
p.e.nlminb$value <- -p.e.nlminb$value
par <- p.e.nlminb$par
returns <- as.numeric(unlist(data[[y]]))
tau <- rep(exp(par["m"]), times = length(returns))
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"],
beta = par["beta"],
gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
tau <- rep(exp(par["m"]), times = length(g))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"],
beta = par["beta"],
gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
tau <- rep(exp(par["m"]), times = length(g))
}
if ((var.ratio.freq %in% c("date", "low.freq")) == FALSE) {
df.fitted <- cbind(data[c("date", y, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y)], g = g, tau = tau)
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
} else { # if K > 0 we get the covariate series
covariate <- unlist(unique(data[c(low.freq, x)])[x])
if (is.null(x.two) == FALSE) {
covariate.two <- unlist(unique(data[c(low.freq.two, x.two)])[x.two])
}
}
if (K == 1) {
if (is.null(K.two) == FALSE) {
if (gamma == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh, y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0, theta = 0, theta.two = 0)
ui_opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0), c(0, 1, 0, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, theta.two = 0)
ui_opt <- rbind(c(0, -1, -1, 0, 0, 0), c(0, 1, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
}
} else {
if (gamma == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh, y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04, m = 0, theta = 0)
ui_opt <- rbind(c(0, -1, -1, -1/2, 0, 0), c(0, 1, 0, 0, 0, 0), c(0, 0, 1, 0, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
} else {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret, x = covariate, low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = 1, w2 = 1, g_zero = g_zero, K = K)
}
par_start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0)
ui_opt <- rbind(c(0, -1, -1, 0, 0), c(0, 1, 0, 0, 0), c(0, 0, 1, 0, 0))
ci_opt <- c(-0.99999, 0, 0)
}
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par_start, f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui_opt, ci = ci_opt, hessian = FALSE)
par <- p.e.nlminb$par
p.e.nlminb$value <- -p.e.nlminb$value
if (is.null(x.two) == FALSE) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = 1, theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = 1)$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
theta = par["theta"], m = par["m"], w1 = 1, w2 = 1, K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
returns <- unlist(data[y])
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"], beta = par["beta"], gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))), g0 = g_zero))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"], beta = par["beta"], gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))), g0 = g_zero))
}
if ((var.ratio.freq %in% c("date", "low.freq")) == FALSE) {
df.fitted <- cbind(data[c("date", y, low.freq, x, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x)], g = g, tau = tau)
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
}
if (K > 1) {
if (gamma == TRUE) {
if (weighting == "beta.restricted" & is.null(K.two) == TRUE) {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"],
beta = p["beta"],
gamma = p["gamma"],
m = p["m"],
theta = p["theta"],
w1 = 1,
w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (weighting == "beta.restricted" & is.null(K.two) == FALSE) {
if (K.two == 1) {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = 1, w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = 1, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = 1)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3, theta.two = 0)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0),
c(0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (K.two > 1) {
if (weighting.two == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = 1, w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = p["w2.two"])
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w2 = 3, theta.two = 0, w2.two = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0, 0),
c(0, 1, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 0, 1))
ci.opt <- c(-0.99999999, 1, 0, 0, 1)
}
if (weighting.two != "beta.restricted") {
stop("Weighting scheme for second variable can only be beta.restricted.")
}
}
}
if (weighting == "beta.unrestricted" & is.null(K.two) == TRUE){
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"], w1 = p["w1"], w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w1 = 1.0000001, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0),
c(0, 0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 1, 0, 0)
}
if (weighting == "beta.unrestricted" & is.null(weighting.two) == FALSE) {
if (weighting.two == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"] - p["gamma"]/2,
alpha = p["alpha"], beta = p["beta"], gamma = p["gamma"],
m = p["m"], theta = p["theta"],
w1 = p["w1"], w2 = p["w2"], g_zero = g_zero, K = K,
x.two = covariate.two,
K.two = K.two, low.freq.two = low.freq.two,
theta.two = p["theta.two"], w1.two = 1, w2.two = p["w2.two"])
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, gamma = 0.04,
m = 0, theta = 0, w1 = 1.00000001, w2 = 3, theta.two = 0, w2.two = 3)
ui.opt <- rbind(c(0, -1, -1, -1/2, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 1, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 1, 0, 0),
c(0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
ci.opt <- c(-0.99999999, 1, 1, 0, 0, 1)
}
}
}
if (gamma == FALSE) {
if (weighting == "beta.restricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"], omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"], beta = p["beta"], gamma = 0,
m = p["m"], theta = p["theta"], w1 = 1, w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, 0, 0, 0),
c(0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 0, 0)
}
if (weighting == "beta.unrestricted") {
lf <- function(p) {
llh_mf(df = df_llh,
y = ret,
x = covariate,
low.freq = low.freq,
mu = p["mu"],
omega = 1 - p["alpha"] - p["beta"],
alpha = p["alpha"],
beta = p["beta"],
gamma = 0,
m = p["m"],
theta = p["theta"],
w1 = p["w1"],
w2 = p["w2"],
g_zero = g_zero,
K = K)
}
par.start <- c(mu = 0, alpha = 0.02, beta = 0.85, m = 0, theta = 0, w1 = 1.00000001, w2 = 3)
ui.opt <- rbind(c(0, -1, -1, 0, 0, 0, 0),
c(0, 0, 0, 0, 0, 1, 0),
c(0, 0, 0, 0, 0, 0, 1),
c(0, 1, 0, 0, 0, 0, 0),
c(0, 0, 1, 0, 0, 0, 0))
ci.opt <- c(-0.99999999, 1, 1, 0, 0)
}
}
if(is.null(control$par.start) == FALSE) {
par.start <- control$par.start
}
p.e.nlminb <- constrOptim(theta = par.start, f = function(theta) { sum(lf(theta)) },
grad = NULL, ui = ui.opt, ci = ci.opt, hessian = FALSE)
p.e.nlminb$value <- -p.e.nlminb$value
if (multi.start == TRUE && gamma == TRUE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] + theta["gamma"]/2 >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) != "try-error" && -p.e.nlminb.two$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
p.e.nlminb$value <- -p.e.nlminb$value
}
par.max.lik.nr <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NR")}, silent = TRUE)
if (class(par.max.lik.nr) != "try-error" && par.max.lik.nr$maximum > p.e.nlminb$value &&
par.max.lik.nr$estimate["w2"] >= 1 &&
par.max.lik.nr$estimate["alpha"] + par.max.lik.nr$estimate["beta"] + par.max.lik.nr$estimate["gamma"] / 2 < 1 &&
par.max.lik.nr$estimate["alpha"] >= 0 && par.max.lik.nr$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nr$estimate
p.e.nlminb$value <- par.max.lik.nr$maximum
}
par.max.lik.nm <- try({maxLik(logLik = function(x) -lf(x), start = par.start, method = "NM")}, silent = TRUE)
if (class(par.max.lik.nm) != "try-error" && par.max.lik.nm$maximum > p.e.nlminb$value &&
par.max.lik.nm$estimate["w2"] >= 1 &&
par.max.lik.nm$estimate["alpha"] + par.max.lik.nm$estimate["beta"] + par.max.lik.nm$estimate["gamma"] / 2 < 1 &&
par.max.lik.nm$estimate["alpha"] >= 0 && par.max.lik.nm$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nm$estimate
p.e.nlminb$value <- par.max.lik.nm$maximum
}
p.e.nlminb.three <- try({
suppressWarnings(optim(par = par.start, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] + theta["gamma"]/2 >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.three) != "try-error" && -p.e.nlminb.three$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.three
p.e.nlminb$value <- -p.e.nlminb$value
}
}
if (multi.start == TRUE && gamma == FALSE) {
p.e.nlminb.two <- try({
suppressWarnings(optim(par = p.e.nlminb$par, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.two) != "try-error" && -p.e.nlminb.two$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.two
p.e.nlminb$value <- -p.e.nlminb$value
}
par.max.lik.nr <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NR")}, silent = TRUE)
if (class(par.max.lik.nr) != "try-error" && par.max.lik.nr$maximum > p.e.nlminb$value &&
par.max.lik.nr$estimate["w2"] >= 1 &&
par.max.lik.nr$estimate["alpha"] + par.max.lik.nr$estimate["beta"] < 1 &&
par.max.lik.nr$estimate["alpha"] >= 0 && par.max.lik.nr$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nr$estimate
p.e.nlminb$value <- par.max.lik.nr$maximum
}
par.max.lik.nm <- try({maxLik(logLik = function(x) - lf(x), start = par.start, method = "NM")}, silent = TRUE)
if (class(par.max.lik.nm) != "try-error" && par.max.lik.nm$maximum > p.e.nlminb$value &&
par.max.lik.nm$estimate["w2"] >= 1 &&
par.max.lik.nm$estimate["alpha"] + par.max.lik.nm$estimate["beta"] < 1 &&
par.max.lik.nm$estimate["alpha"] >= 0 && par.max.lik.nm$estimate["beta"] >= 0) {
p.e.nlminb$par <- par.max.lik.nm$estimate
p.e.nlminb$value <- par.max.lik.nm$maximum
}
p.e.nlminb.three <- try({
suppressWarnings(optim(par = par.start, fn = function (theta) {
if( is.na(sum(lf(theta))) == TRUE | theta["alpha"] < 0 | theta["alpha"] + theta["beta"] >= 1 | theta["w2"] < 1) {
NA
} else {
sum(lf(theta))
}
}, method = "BFGS"))}, silent = TRUE)
if (class(p.e.nlminb.three) != "try-error" && -p.e.nlminb.three$value > p.e.nlminb$value) {
p.e.nlminb <- p.e.nlminb.three
p.e.nlminb$value <- -p.e.nlminb$value
}
}
par <- p.e.nlminb$par
if (weighting == "beta.restricted") {
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = par["w2.two"])$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = 1)$tau
}
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = 1, w2 = par["w2"],
theta = par["theta"],
m = par["m"], K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = 1, w2 = par["w2"], K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
} else {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = 1, K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
}
}
if (weighting == "beta.unrestricted") {
if (is.null(x.two) == FALSE) {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = par["w1"], w2 = par["w2"], theta = par["theta"], m = par["m"], K = K,
x.two = covariate.two, K.two = K.two, theta.two = par["theta.two"],
low.freq.two = low.freq.two,
w1.two = 1, w2.two = par["w2.two"])$tau
} else {
tau <- calculate_tau_mf(df = data, x = covariate, low.freq = low.freq,
w1 = par["w1"], w2 = par["w2"],
theta = par["theta"],
m = par["m"], K = K)$tau
}
tau_forecast <-
exp(sum_tau_fcts(m = par["m"],
i = K + 1,
theta = par["theta"],
phivar = calculate_phi(w1 = par["w1"], w2 = par["w2"], K = K),
covariate = c(tail(unlist(unique(data[c(x, low.freq)])[x]), K), NA),
K = K))
if (is.null(x.two) == FALSE) {
tau_forecast <-
tau_forecast *
exp(sum_tau_fcts(m = 0,
i = K.two + 1,
theta = par["theta.two"],
phivar = calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two),
covariate = c(tail(unlist(unique(data[c(x.two, low.freq.two)])[x.two]), K.two), NA),
K = K.two))
}
}
returns <- unlist(data[y])
if (gamma == TRUE) {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"] - par["gamma"]/2,
alpha = par["alpha"],
beta = par["beta"],
gamma = par["gamma"],
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
} else {
g <- c(rep(NA, times = sum(is.na((returns - par["mu"])/sqrt(tau)))),
calculate_g(omega = 1 - par["alpha"] - par["beta"],
alpha = par["alpha"],
beta = par["beta"],
gamma = 0,
as.numeric(na.exclude((returns - par["mu"])/sqrt(tau))),
g0 = g_zero))
}
if ((var.ratio.freq %in% c("date", low.freq)) == FALSE) {
if (is.null(x.two) == TRUE) {
df.fitted <- cbind(data[c("date", y, low.freq, x, var.ratio.freq)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x, low.freq.two, x.two, var.ratio.freq)], g = g, tau = tau)
}
} else {
if (is.null(x.two) == TRUE) {
df.fitted <- cbind(data[c("date", y, low.freq, x)], g = g, tau = tau)
} else {
df.fitted <- cbind(data[c("date", y, low.freq, x, low.freq.two, x.two)], g = g, tau = tau)
}
}
df.fitted$residuals <- unlist((df.fitted[y] - par["mu"]) / sqrt(df.fitted$g * df.fitted$tau))
}
df.fitted$date <- as.Date(date_backup)
# Standard errors --------------------------------------------------------------------------------
# inv_hessian <- try({
# solve(-optimHess(par = par, fn = function (theta) {
# if( is.na(sum(lf(theta))) == TRUE) {
# 10000000
# } else {
# sum(lf(theta))
# }
# }))
# }, silent = TRUE)
inv_hessian <- try({
solve(-suppressWarnings(hessian(x = par, func = function (theta) {
if( is.na(sum(lf(theta))) == TRUE) {
0
} else {
-sum(lf(theta))
}
})))
}, silent = TRUE)
opg.std.err <- try({sqrt(diag(solve(crossprod(jacobian(func = function(theta) -lf(theta), x = par)))))},
silent = TRUE)
if (class(opg.std.err)[1] == "try-error") {
warning("Inverting the OPG matrix failed. No OPG standard errors calculated.")
opg.std.err <- NA
} else {
opg.std.err <- opg.std.err * sqrt((mean(df.fitted$residuals^4, na.rm = TRUE) - 1) / 2)
}
if (class(inv_hessian)[1] == "try-error") {
warning("Inverting the Hessian matrix failed. No robust standard errors calculated. Possible workaround: Multiply returns by 100.")
rob.std.err <- NA
} else {
rob.std.err <- sqrt(diag(inv_hessian %*% crossprod(jacobian(func = lf, x = par)) %*% inv_hessian))
}
# Output -----------------------------------------------------------------------------------------
output <-
list(par = par,
std.err = rob.std.err,
broom.mgarch = data.frame(term = names(par),
estimate = par,
rob.std.err = rob.std.err,
p.value = 2 * (1 - pnorm(unlist(abs(par/rob.std.err)))),
opg.std.err = opg.std.err,
opg.p.value = 2 * (1 - pnorm(unlist(abs(par/opg.std.err))))),
tau = tau,
g = g,
df.fitted = df.fitted,
K = K,
weighting.scheme = weighting,
llh = p.e.nlminb$value,
bic = log(sum(!is.na(tau))) * length(par) - 2 * (p.e.nlminb$value),
y = y,
optim = p.e.nlminb)
if (is.null(x.two) == FALSE) {
output$K.two <- K.two
output$weighting.scheme.two <- weighting.two
}
if (K == 0) {
output$tau.forecast <- exp(par["m"])
}
# Additional output if there is a long-term component (K > 0) -------------------------------------
if (K > 0) {
output$variance.ratio <- 100 *
var(log(aggregate(df.fitted$tau, by = df.fitted[var.ratio.freq],
FUN = mean)[,2]),
na.rm = TRUE) /
var(log(aggregate(df.fitted$tau * df.fitted$g, by = df.fitted[var.ratio.freq],
FUN = mean)[,2]),
na.rm = TRUE)
output$tau.forecast <- tau_forecast
if (weighting == "beta.restricted") {
output$est.weighting <- calculate_phi(1, w2 = par["w2"], K = K)
}
if (weighting == "beta.unrestricted") {
output$est.weighting <- calculate_phi(w1 = par["w1"], w2 = par["w2"], K = K)
}
if (is.null(x.two) == FALSE) {
if (K.two > 1) {
output$est.weighting.two <- calculate_phi(w1 = 1, w2 = par["w2.two"], K = K.two)
}
}
}
# Add class mfGARCH for employing generic functions
class(output) <- "mfGARCH"
output
}
|
library(testthat)
library(parsnip)
# ------------------------------------------------------------------------------
# Shared fixtures for the xgboost engine tests below.
context("boosted tree execution with xgboost")

# Loads shared helper objects (e.g. `ctrl`, `hpc_data`, `wa_churn`) — defined
# in the helper file, not here.
source(test_path("helper-objects.R"))

# Subset of the HPC data: four numeric predictors plus the `class` outcome.
hpc <- hpc_data[1:150, c(2:5, 8)]
num_pred <- names(hpc)[1:4]

# Small classification spec (2 trees keeps the tests fast).
hpc_xgboost <-
  boost_tree(trees = 2, mode = "classification") %>%
  set_engine("xgboost")
test_that('xgboost execution, classification', {
  skip_if_not_installed("xgboost")

  # The formula interface should fit without raising an error.
  expect_error(
    cls_fit <- parsnip::fit(
      hpc_xgboost,
      class ~ compounds + input_fields,
      data = hpc,
      control = ctrl
    ),
    regexp = NA
  )

  # The x/y interface should also fit cleanly.
  expect_error(
    cls_fit <- parsnip::fit_xy(
      hpc_xgboost,
      x = hpc[, num_pred],
      y = hpc$class,
      control = ctrl
    ),
    regexp = NA
  )

  # xgboost fits expose submodel predictions over the number of trees.
  expect_true(has_multi_predict(cls_fit))
  expect_equal(multi_predict_args(cls_fit), "trees")

  # A formula referencing a nonexistent column must fail.
  expect_error(
    cls_fit <- parsnip::fit(
      hpc_xgboost,
      class ~ novar,
      data = hpc,
      control = ctrl
    )
  )
})
test_that('xgboost classification prediction', {
  skip_if_not_installed("xgboost")
  library(xgboost)

  fit_from_xy <- fit_xy(
    hpc_xgboost,
    x = hpc[, num_pred],
    y = hpc$class,
    control = ctrl
  )

  # Decode the raw engine output into factor predictions by hand and compare
  # against parsnip's class predictions.
  raw_xy <- predict(
    fit_from_xy$fit,
    newdata = xgb.DMatrix(data = as.matrix(hpc[1:8, num_pred])),
    type = "class"
  )
  prob_mat_xy <- matrix(raw_xy, ncol = 4, byrow = TRUE)
  class_xy <- factor(
    levels(hpc$class)[apply(prob_mat_xy, 1, which.max)],
    levels = levels(hpc$class)
  )
  expect_equal(
    class_xy,
    predict(fit_from_xy, new_data = hpc[1:8, num_pred], type = "class")$.pred_class
  )

  # Same comparison through the formula interface.
  fit_from_formula <- fit(
    hpc_xgboost,
    class ~ .,
    data = hpc,
    control = ctrl
  )
  raw_form <- predict(
    fit_from_formula$fit,
    newdata = xgb.DMatrix(data = as.matrix(hpc[1:8, num_pred])),
    type = "class"
  )
  prob_mat_form <- matrix(raw_form, ncol = 4, byrow = TRUE)
  class_form <- factor(
    levels(hpc$class)[apply(prob_mat_form, 1, which.max)],
    levels = levels(hpc$class)
  )
  expect_equal(
    class_form,
    predict(fit_from_formula, new_data = hpc[1:8, num_pred], type = "class")$.pred_class
  )
})
# ------------------------------------------------------------------------------
# Regression fixtures based on mtcars.
num_pred <- names(mtcars)[3:6]

# Baseline regression spec with default engine arguments.
car_basic <-
  boost_tree(mode = "regression") %>%
  set_engine("xgboost")

# Specs with intentionally invalid engine arguments.
# NOTE(review): neither object is referenced in this chunk — presumably used
# by tests elsewhere in the file; verify before removing.
bad_xgboost_reg <-
  boost_tree(mode = "regression") %>%
  set_engine("xgboost", min.node.size = -10)
bad_rf_reg <-
  boost_tree(mode = "regression") %>%
  set_engine("xgboost", sampsize = -10)
test_that('xgboost execution, regression', {
  skip_if_not_installed("xgboost")

  # Formula interface fits cleanly.
  expect_error(
    reg_res <- parsnip::fit(
      car_basic,
      mpg ~ .,
      data = mtcars,
      control = ctrl
    ),
    regexp = NA
  )

  # x/y interface fits cleanly as well.
  expect_error(
    reg_res <- parsnip::fit_xy(
      car_basic,
      x = mtcars[, num_pred],
      y = mtcars$mpg,
      control = ctrl
    ),
    regexp = NA
  )
})
test_that('xgboost regression prediction', {
  skip_if_not_installed("xgboost")

  xy_fit <- fit_xy(
    car_basic,
    x = mtcars[, -1],
    y = mtcars$mpg,
    control = ctrl
  )
  # Fix: use the namespaced constructor so this test does not depend on a
  # previous test having attached xgboost (tests must be order-independent).
  xy_pred <- predict(
    xy_fit$fit,
    newdata = xgboost::xgb.DMatrix(data = as.matrix(mtcars[1:8, -1]))
  )
  # parsnip predictions must match the raw engine predictions.
  expect_equal(xy_pred, predict(xy_fit, new_data = mtcars[1:8, -1])$.pred)

  form_fit <- fit(
    car_basic,
    mpg ~ .,
    data = mtcars,
    control = ctrl
  )
  form_pred <- predict(
    form_fit$fit,
    newdata = xgboost::xgb.DMatrix(data = as.matrix(mtcars[1:8, -1]))
  )
  expect_equal(form_pred, predict(form_fit, new_data = mtcars[1:8, -1])$.pred)
})
test_that('submodel prediction', {
  skip_if_not_installed("xgboost")
  library(xgboost)

  # Regression: predictions truncated to 5 trees must match multi_predict().
  reg_fit <-
    boost_tree(trees = 20, mode = "regression") %>%
    set_engine("xgboost") %>%
    fit(mpg ~ ., data = mtcars[-(1:4), ])
  reg_dmat <- xgboost::xgb.DMatrix(as.matrix(mtcars[1:4, -1]))
  pruned_pred <- predict(reg_fit$fit, reg_dmat, ntreelimit = 5)
  reg_multi <- multi_predict(reg_fit, new_data = mtcars[1:4, -1], trees = 5)
  reg_multi <- do.call("rbind", reg_multi$.pred)
  expect_equal(reg_multi[[".pred"]], pruned_pred)

  # Classification: same check on class probabilities.
  vars <- c("female", "tenure", "total_charges", "phone_service", "monthly_charges")
  class_fit <-
    boost_tree(trees = 20, mode = "classification") %>%
    set_engine("xgboost") %>%
    fit(churn ~ ., data = wa_churn[-(1:4), c("churn", vars)])
  cls_dmat <- xgboost::xgb.DMatrix(as.matrix(wa_churn[1:4, vars]))
  pred_class <- predict(class_fit$fit, cls_dmat, ntreelimit = 5)
  cls_multi <- multi_predict(class_fit, new_data = wa_churn[1:4, vars], trees = 5, type = "prob")
  cls_multi <- do.call("rbind", cls_multi$.pred)
  expect_equal(cls_multi[[".pred_No"]], pred_class)

  # A misspelled argument name should trigger a helpful error.
  expect_error(
    multi_predict(class_fit, newdata = wa_churn[1:4, vars], trees = 5, type = "prob"),
    "Did you mean"
  )
})
test_that('default engine', {
  skip_if_not_installed("xgboost")
  # Fix: assign to `default_fit` rather than `fit` so the parsnip::fit()
  # generic is not shadowed by a model object inside this test's environment.
  expect_warning(
    default_fit <- boost_tree(mode = "regression") %>% fit(mpg ~ ., data = mtcars),
    "Engine set to"
  )
  # With no engine set, boost_tree() should default to xgboost.
  expect_true(inherits(default_fit$fit, "xgb.Booster"))
})
# ------------------------------------------------------------------------------
test_that('validation sets', {
  skip_if_not_installed("xgboost")

  # Holding out 10% of rows should report the metric on the validation split.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, mode = "regression") %>%
      set_engine("xgboost", validation = .1) %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = NA
  )
  expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "validation_rmse")

  # A custom eval_metric propagates into the validation log column name.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, mode = "regression") %>%
      set_engine("xgboost", validation = .1, eval_metric = "mae") %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = NA
  )
  expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "validation_mae")

  # Without a validation split the metric is computed on the training data.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, mode = "regression") %>%
      set_engine("xgboost", eval_metric = "mae") %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = NA
  )
  expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "training_mae")

  # Out-of-range validation proportions are rejected.
  # Fix: spell out `regexp` — the previous `regex =` relied on partial
  # argument matching.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, mode = "regression") %>%
      set_engine("xgboost", validation = 3) %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = "`validation` should be on"
  )
})
# ------------------------------------------------------------------------------
test_that('early stopping', {
  skip_if_not_installed("xgboost")
  set.seed(233456)

  # With stop_iter = 5, training should halt 5 rounds after the best
  # iteration, well before the 200-tree budget.
  # Fix throughout: spell out `regexp` — the previous `regex =` relied on
  # partial argument matching.
  expect_error(
    reg_fit <-
      boost_tree(trees = 200, stop_iter = 5, mode = "regression") %>%
      set_engine("xgboost", validation = .1) %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = NA
  )
  expect_equal(reg_fit$fit$niter - reg_fit$fit$best_iteration, 5)
  expect_true(reg_fit$fit$niter < 200)

  # Early stopping with a custom metric still fits cleanly.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, mode = "regression") %>%
      set_engine("xgboost", validation = .1, eval_metric = "mae") %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = NA
  )

  # stop_iter larger than the tree budget is capped with a warning.
  expect_warning(
    reg_fit <-
      boost_tree(trees = 20, stop_iter = 30, mode = "regression") %>%
      set_engine("xgboost", validation = .1) %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = "`early_stop` was reduced to 19"
  )

  # stop_iter = 0 is out of range and must error.
  expect_error(
    reg_fit <-
      boost_tree(trees = 20, stop_iter = 0, mode = "regression") %>%
      set_engine("xgboost", validation = .1) %>%
      fit(mpg ~ ., data = mtcars[-(1:4), ]),
    regexp = "`early_stop` should be on"
  )
})
## -----------------------------------------------------------------------------
test_that('xgboost data conversion', {
  skip_if_not_installed("xgboost")
  mtcar_x <- mtcars[, -1]
  mtcar_mat <- as.matrix(mtcar_x)
  mtcar_smat <- Matrix::Matrix(mtcar_mat, sparse = TRUE)

  # Data frame, matrix, and sparse matrix inputs all convert to xgb.DMatrix.
  expect_error(from_df <- parsnip:::as_xgb_data(mtcar_x, mtcars$mpg), regexp = NA)
  expect_true(inherits(from_df$data, "xgb.DMatrix"))
  expect_true(inherits(from_df$watchlist$training, "xgb.DMatrix"))

  expect_error(from_mat <- parsnip:::as_xgb_data(mtcar_mat, mtcars$mpg), regexp = NA)
  expect_true(inherits(from_mat$data, "xgb.DMatrix"))
  expect_true(inherits(from_mat$watchlist$training, "xgb.DMatrix"))

  expect_error(from_sparse <- parsnip:::as_xgb_data(mtcar_smat, mtcars$mpg), regexp = NA)
  # Fix: these assertions previously re-checked `from_mat` (copy-paste);
  # they must check the sparse-matrix result.
  expect_true(inherits(from_sparse$data, "xgb.DMatrix"))
  expect_true(inherits(from_sparse$watchlist$training, "xgb.DMatrix"))

  # With a validation proportion, a smaller validation DMatrix is split off.
  expect_error(from_df <- parsnip:::as_xgb_data(mtcar_x, mtcars$mpg, validation = .1), regexp = NA)
  expect_true(inherits(from_df$data, "xgb.DMatrix"))
  expect_true(inherits(from_df$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_df$data) > nrow(from_df$watchlist$validation))

  expect_error(from_mat <- parsnip:::as_xgb_data(mtcar_mat, mtcars$mpg, validation = .1), regexp = NA)
  expect_true(inherits(from_mat$data, "xgb.DMatrix"))
  expect_true(inherits(from_mat$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_mat$data) > nrow(from_mat$watchlist$validation))

  expect_error(from_sparse <- parsnip:::as_xgb_data(mtcar_smat, mtcars$mpg, validation = .1), regexp = NA)
  # Fix: same copy-paste issue — check `from_sparse`, not `from_mat`.
  expect_true(inherits(from_sparse$data, "xgb.DMatrix"))
  expect_true(inherits(from_sparse$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_sparse$data) > nrow(from_sparse$watchlist$validation))
})
test_that('xgboost data and sparse matrices', {
  skip_if_not_installed("xgboost")

  pred_df <- mtcars[, -1]
  pred_mat <- as.matrix(pred_df)
  pred_sparse <- Matrix::Matrix(pred_mat, sparse = TRUE)

  xgb_spec <-
    boost_tree(trees = 10) %>%
    set_engine("xgboost") %>%
    set_mode("regression")

  # With identical seeds, all three input representations must produce the
  # same fitted engine object.
  set.seed(1)
  fit_df <- xgb_spec %>% fit_xy(pred_df, mtcars$mpg)
  set.seed(1)
  fit_mat <- xgb_spec %>% fit_xy(pred_mat, mtcars$mpg)
  set.seed(1)
  fit_sparse <- xgb_spec %>% fit_xy(pred_sparse, mtcars$mpg)

  expect_equal(fit_df$fit, fit_mat$fit)
  expect_equal(fit_df$fit, fit_sparse$fit)
})
|
/tests/testthat/test_boost_tree_xgboost.R
|
no_license
|
jas0nwhite/parsnip
|
R
| false
| false
| 9,789
|
r
|
library(testthat)
library(parsnip)
# ------------------------------------------------------------------------------
# Shared fixtures for the xgboost engine tests below.
# NOTE(review): this appears to be a duplicated copy of the same test file —
# confirm whether the duplication is intentional.
context("boosted tree execution with xgboost")

# Loads shared helper objects (e.g. `ctrl`, `hpc_data`) defined elsewhere.
source(test_path("helper-objects.R"))

# Subset of the HPC data: four numeric predictors plus the `class` outcome.
hpc <- hpc_data[1:150, c(2:5, 8)]
num_pred <- names(hpc)[1:4]

# Small classification spec (2 trees keeps the tests fast).
hpc_xgboost <-
  boost_tree(trees = 2, mode = "classification") %>%
  set_engine("xgboost")
test_that('xgboost execution, classification', {
  skip_if_not_installed("xgboost")

  # Fitting via the formula interface should not error.
  expect_error(
    fitted_model <- parsnip::fit(
      hpc_xgboost,
      class ~ compounds + input_fields,
      data = hpc,
      control = ctrl
    ),
    regexp = NA
  )

  # Fitting via the x/y interface should not error either.
  expect_error(
    fitted_model <- parsnip::fit_xy(
      hpc_xgboost,
      x = hpc[, num_pred],
      y = hpc$class,
      control = ctrl
    ),
    regexp = NA
  )

  # Submodel prediction over `trees` is available for xgboost fits.
  expect_true(has_multi_predict(fitted_model))
  expect_equal(multi_predict_args(fitted_model), "trees")

  # An unknown predictor in the formula must raise an error.
  expect_error(
    fitted_model <- parsnip::fit(
      hpc_xgboost,
      class ~ novar,
      data = hpc,
      control = ctrl
    )
  )
})
test_that('xgboost classification prediction', {
skip_if_not_installed("xgboost")
library(xgboost)
xy_fit <- fit_xy(
hpc_xgboost,
x = hpc[, num_pred],
y = hpc$class,
control = ctrl
)
# Predict with the underlying booster directly, then reshape the flat
# probability vector into one row per observation.
# NOTE(review): ncol = 4 assumes four outcome classes -- confirm against
# levels(hpc$class).
xy_pred <- predict(xy_fit$fit, newdata = xgb.DMatrix(data = as.matrix(hpc[1:8, num_pred])), type = "class")
xy_pred <- matrix(xy_pred, ncol = 4, byrow = TRUE)
# Arg-max over the per-class probabilities, mapped back to factor levels.
xy_pred <- factor(levels(hpc$class)[apply(xy_pred, 1, which.max)], levels = levels(hpc$class))
# parsnip's predict() must agree with the manual computation.
expect_equal(xy_pred, predict(xy_fit, new_data = hpc[1:8, num_pred], type = "class")$.pred_class)
# Repeat the same comparison via the formula interface.
form_fit <- fit(
hpc_xgboost,
class ~ .,
data = hpc,
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = xgb.DMatrix(data = as.matrix(hpc[1:8, num_pred])), type = "class")
form_pred <- matrix(form_pred, ncol = 4, byrow = TRUE)
form_pred <- factor(levels(hpc$class)[apply(form_pred, 1, which.max)], levels = levels(hpc$class))
expect_equal(form_pred, predict(form_fit, new_data = hpc[1:8, num_pred], type = "class")$.pred_class)
})
# ------------------------------------------------------------------------------
# Predictors for the mtcars regression tests (reassigns `num_pred` from
# the classification section above).
num_pred <- names(mtcars)[3:6]
car_basic <-
boost_tree(mode = "regression") %>%
set_engine("xgboost")
# Specs carrying intentionally invalid engine arguments.
# NOTE(review): not referenced by any test visible in this file --
# possibly used elsewhere or left over; confirm before removing.
bad_xgboost_reg <-
boost_tree(mode = "regression") %>%
set_engine("xgboost", min.node.size = -10)
bad_rf_reg <-
boost_tree(mode = "regression") %>%
set_engine("xgboost", sampsize = -10)
test_that('xgboost execution, regression', {
skip_if_not_installed("xgboost")
# Both the formula and x/y interfaces should fit without error
# (regexp = NA = "expect no error").
expect_error(
res <- parsnip::fit(
car_basic,
mpg ~ .,
data = mtcars,
control = ctrl
),
regexp = NA
)
expect_error(
res <- parsnip::fit_xy(
car_basic,
x = mtcars[, num_pred],
y = mtcars$mpg,
control = ctrl
),
regexp = NA
)
})
test_that('xgboost regression prediction', {
skip_if_not_installed("xgboost")
xy_fit <- fit_xy(
car_basic,
x = mtcars[, -1],
y = mtcars$mpg,
control = ctrl
)
# parsnip predictions must match the raw booster fed an xgb.DMatrix.
xy_pred <- predict(xy_fit$fit, newdata = xgb.DMatrix(data = as.matrix(mtcars[1:8, -1])))
expect_equal(xy_pred, predict(xy_fit, new_data = mtcars[1:8, -1])$.pred)
# Same check via the formula interface.
form_fit <- fit(
car_basic,
mpg ~ .,
data = mtcars,
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = xgb.DMatrix(data = as.matrix(mtcars[1:8, -1])))
expect_equal(form_pred, predict(form_fit, new_data = mtcars[1:8, -1])$.pred)
})
test_that('submodel prediction', {
skip_if_not_installed("xgboost")
library(xgboost)
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost") %>%
fit(mpg ~ ., data = mtcars[-(1:4), ])
x <- xgboost::xgb.DMatrix(as.matrix(mtcars[1:4, -1]))
# Reference prediction: truncate the 20-tree ensemble to its first 5 trees.
pruned_pred <- predict(reg_fit$fit, x, ntreelimit = 5)
# multi_predict() at trees = 5 must reproduce the truncated prediction.
mp_res <- multi_predict(reg_fit, new_data = mtcars[1:4, -1], trees = 5)
mp_res <- do.call("rbind", mp_res$.pred)
expect_equal(mp_res[[".pred"]], pruned_pred)
# Classification analogue on the churn data (wa_churn comes from
# helper-objects.R).
vars <- c("female", "tenure", "total_charges", "phone_service", "monthly_charges")
class_fit <-
boost_tree(trees = 20, mode = "classification") %>%
set_engine("xgboost") %>%
fit(churn ~ ., data = wa_churn[-(1:4), c("churn", vars)])
x <- xgboost::xgb.DMatrix(as.matrix(wa_churn[1:4, vars]))
pred_class <- predict(class_fit$fit, x, ntreelimit = 5)
mp_res <- multi_predict(class_fit, new_data = wa_churn[1:4, vars], trees = 5, type = "prob")
mp_res <- do.call("rbind", mp_res$.pred)
expect_equal(mp_res[[".pred_No"]], pred_class)
# Misspelled `newdata` (should be `new_data`) must raise a helpful error.
expect_error(
multi_predict(class_fit, newdata = wa_churn[1:4, vars], trees = 5, type = "prob"),
"Did you mean"
)
})
test_that('default engine', {
skip_if_not_installed("xgboost")
# Fitting a boost_tree() with no engine set should warn ("Engine set to")
# and fall back to xgboost.
expect_warning(
fit <- boost_tree(mode = "regression") %>% fit(mpg ~ ., data = mtcars),
"Engine set to"
)
expect_true(inherits(fit$fit, "xgb.Booster"))
})
# ------------------------------------------------------------------------------
test_that('validation sets', {
skip_if_not_installed("xgboost")
# NOTE(review): `regex = NA` relies on partial matching of expect_error's
# `regexp` argument and asserts "no error"; spelling it out would be
# clearer.
expect_error(
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost", validation = .1) %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = NA
)
# With a validation split, the eval log tracks the validation metric.
expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "validation_rmse")
expect_error(
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost", validation = .1, eval_metric = "mae") %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = NA
)
# A user-supplied eval_metric shows up in the log column name.
expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "validation_mae")
expect_error(
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost", eval_metric = "mae") %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = NA
)
# Without a validation split the metric is computed on the training set.
expect_equal(colnames(reg_fit$fit$evaluation_log)[2], "training_mae")
# validation must be a proportion; 3 is out of range and should error.
expect_error(
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost", validation = 3) %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = "`validation` should be on"
)
})
# ------------------------------------------------------------------------------
test_that('early stopping', {
skip_if_not_installed("xgboost")
set.seed(233456)
expect_error(
reg_fit <-
boost_tree(trees = 200, stop_iter = 5, mode = "regression") %>%
set_engine("xgboost", validation = .1) %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = NA
)
# Training stopped exactly 5 rounds after the best iteration, well
# before the 200-tree cap.
expect_equal(reg_fit$fit$niter - reg_fit$fit$best_iteration, 5)
expect_true(reg_fit$fit$niter < 200)
# A validation set without stop_iter should still fit cleanly.
expect_error(
reg_fit <-
boost_tree(trees = 20, mode = "regression") %>%
set_engine("xgboost", validation = .1, eval_metric = "mae") %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = NA
)
# stop_iter larger than the number of trees gets clipped, with a warning.
expect_warning(
reg_fit <-
boost_tree(trees = 20, stop_iter = 30, mode = "regression") %>%
set_engine("xgboost", validation = .1) %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = "`early_stop` was reduced to 19"
)
# stop_iter must be positive.
expect_error(
reg_fit <-
boost_tree(trees = 20, stop_iter = 0, mode = "regression") %>%
set_engine("xgboost", validation = .1) %>%
fit(mpg ~ ., data = mtcars[-(1:4), ]),
regex = "`early_stop` should be on"
)
})
## -----------------------------------------------------------------------------
# Verifies parsnip:::as_xgb_data() for the three supported predictor
# containers (data frame, dense matrix, sparse Matrix), with and without
# a validation split. regexp = NA asserts the call raises no error.
test_that('xgboost data conversion', {
  skip_if_not_installed("xgboost")
  mtcar_x <- mtcars[, -1]
  mtcar_mat <- as.matrix(mtcar_x)
  mtcar_smat <- Matrix::Matrix(mtcar_mat, sparse = TRUE)

  # -- no validation split: the watchlist tracks the training data --------
  expect_error(from_df <- parsnip:::as_xgb_data(mtcar_x, mtcars$mpg), regexp = NA)
  expect_true(inherits(from_df$data, "xgb.DMatrix"))
  expect_true(inherits(from_df$watchlist$training, "xgb.DMatrix"))

  expect_error(from_mat <- parsnip:::as_xgb_data(mtcar_mat, mtcars$mpg), regexp = NA)
  expect_true(inherits(from_mat$data, "xgb.DMatrix"))
  expect_true(inherits(from_mat$watchlist$training, "xgb.DMatrix"))

  expect_error(from_sparse <- parsnip:::as_xgb_data(mtcar_smat, mtcars$mpg), regexp = NA)
  # Bug fix: these two assertions previously re-checked `from_mat`,
  # leaving the sparse result untested.
  expect_true(inherits(from_sparse$data, "xgb.DMatrix"))
  expect_true(inherits(from_sparse$watchlist$training, "xgb.DMatrix"))

  # -- 10% validation split: the watchlist holds a smaller validation set --
  expect_error(from_df <- parsnip:::as_xgb_data(mtcar_x, mtcars$mpg, validation = .1), regexp = NA)
  expect_true(inherits(from_df$data, "xgb.DMatrix"))
  expect_true(inherits(from_df$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_df$data) > nrow(from_df$watchlist$validation))

  expect_error(from_mat <- parsnip:::as_xgb_data(mtcar_mat, mtcars$mpg, validation = .1), regexp = NA)
  expect_true(inherits(from_mat$data, "xgb.DMatrix"))
  expect_true(inherits(from_mat$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_mat$data) > nrow(from_mat$watchlist$validation))

  expect_error(from_sparse <- parsnip:::as_xgb_data(mtcar_smat, mtcars$mpg, validation = .1), regexp = NA)
  # Bug fix: same copy/paste issue as above -- check `from_sparse`.
  expect_true(inherits(from_sparse$data, "xgb.DMatrix"))
  expect_true(inherits(from_sparse$watchlist$validation, "xgb.DMatrix"))
  expect_true(nrow(from_sparse$data) > nrow(from_sparse$watchlist$validation))
})
# The same regression fit should be reproducible regardless of whether the
# predictors arrive as a data frame, a dense matrix, or a sparse Matrix.
test_that('xgboost data and sparse matrices', {
skip_if_not_installed("xgboost")
mtcar_x <- mtcars[, -1]
mtcar_mat <- as.matrix(mtcar_x)
mtcar_smat <- Matrix::Matrix(mtcar_mat, sparse = TRUE)
xgb_spec <-
boost_tree(trees = 10) %>%
set_engine("xgboost") %>%
set_mode("regression")
# Reset the seed before each fit so any internal randomness is identical.
set.seed(1)
from_df <- xgb_spec %>% fit_xy(mtcar_x, mtcars$mpg)
set.seed(1)
from_mat <- xgb_spec %>% fit_xy(mtcar_mat, mtcars$mpg)
set.seed(1)
from_sparse <- xgb_spec %>% fit_xy(mtcar_smat, mtcars$mpg)
expect_equal(from_df$fit, from_mat$fit)
expect_equal(from_df$fit, from_sparse$fit)
})
|
# Problem 7.15: yields from a design with four two-level factors run in
# two blocks (16 runs). The regular factor-level patterns are generated
# with rep() instead of spelling out all 16 values per column; the block
# assignment has no simple repeating pattern, so it stays explicit.
Problem7.15 <- data.frame(
  "Block" = c(
    'Block 2', 'Block 1', 'Block 1', 'Block 2',
    'Block 1', 'Block 2', 'Block 2', 'Block 1',
    'Block 1', 'Block 2', 'Block 2', 'Block 1',
    'Block 2', 'Block 1', 'Block 1', 'Block 2'
  ),
  "AcidStrength" = rep(c(87, 93), times = 8),
  "ReactionTime" = rep(c(15, 30), each = 2, times = 4),
  "AmountOfAcid" = rep(c(35, 45), each = 4, times = 2),
  "ReactionTemperature" = rep(c(60, 70), each = 8),
  "Yield" = c(
    6.08, 6.04, 6.53, 6.43,
    6.31, 6.09, 6.12, 6.36,
    6.79, 6.68, 6.73, 6.08,
    6.77, 6.38, 6.49, 6.23
  )
)
|
/data/Problem7.15.R
|
no_license
|
ehassler/MontgomeryDAE
|
R
| false
| false
| 884
|
r
|
# Problem 7.15: yields from a design with four two-level factors run in
# two blocks (16 runs). Regular level patterns are built with rep(); the
# block assignment has no simple repeat and stays explicit.
Problem7.15 <- data.frame(
  "Block" = c(
    'Block 2', 'Block 1', 'Block 1', 'Block 2',
    'Block 1', 'Block 2', 'Block 2', 'Block 1',
    'Block 1', 'Block 2', 'Block 2', 'Block 1',
    'Block 2', 'Block 1', 'Block 1', 'Block 2'
  ),
  "AcidStrength" = rep(c(87, 93), times = 8),
  "ReactionTime" = rep(c(15, 30), each = 2, times = 4),
  "AmountOfAcid" = rep(c(35, 45), each = 4, times = 2),
  "ReactionTemperature" = rep(c(60, 70), each = 8),
  "Yield" = c(
    6.08, 6.04, 6.53, 6.43,
    6.31, 6.09, 6.12, 6.36,
    6.79, 6.68, 6.73, 6.08,
    6.77, 6.38, 6.49, 6.23
  )
)
|
library(readr)
# Read the full GTEx expression table (tab-separated, large).
gtex=read_tsv("/proj/milovelab/mccabe/proj/GTEx/data/gtex.txt")
# Keep only the column names (presumably sample identifiers -- confirm
# against the gtex.txt header).
cn=colnames(gtex)
# Write one name per line; quote=F / row.names=F keep the file a plain list.
write.table(cn,"/proj/milovelab/mccabe/proj/GTEx/data/colNames.txt",quote=F,row.names=F)
|
/code/referencePanel/getColNames.R
|
no_license
|
mccabes292/actorPaper
|
R
| false
| false
| 186
|
r
|
library(readr)
# Load the GTEx expression table just to get at its header row.
gtex=read_tsv("/proj/milovelab/mccabe/proj/GTEx/data/gtex.txt")
# Column names (presumably sample identifiers -- confirm against the file).
cn=colnames(gtex)
# Dump the names, one per line, unquoted and without row numbers.
write.table(cn,"/proj/milovelab/mccabe/proj/GTEx/data/colNames.txt",quote=F,row.names=F)
|
############################################################
# Combine individual human and automated sentiment codings
# of 1,500 random sentences from plenary Bundestag speeches
#
# Project: Validation of sentiment dictionaries
#
# Author: christian.rauh@wzb.eu / christian-rauh.eu
# Date: 06.08.2015
###########################################################
# Packages
library(stringr)
# Set YOUR working directory here (root of replication package)
# NOTE(review): two setwd() calls in a row -- the second overrides the
# first; when sourcing non-interactively a missing path will error, so
# keep only the line for your machine.
setwd("M:/user/rauh/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
setwd("C:/Users/CUZ/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
# Load automatically scored data
load("./2_Bundestag/Data/ValidationSampleScored.Rdata")
# Load human coded data
load("./2_Bundestag/Data/HumanCoding/HumanSentimentCodings.Rdata")
# Merge by sentence id (Qnum)
#----------------------------
# all = TRUE keeps sentences present in only one of the two sources
# (full outer join on Qnum).
data <- merge(sent.sample, human.codes, by = "Qnum", all = TRUE)
# Drop the raw sentence text before saving the final validation set.
data$text <- NULL
# Save final data
#----------------
save(data, file = "./2_Bundestag/Data/ValidationSampleFinal.Rdata")
|
/Dictionary/Rauh/JITP-Replication-Final/2_Bundestag/4_MergeHumanCodes.R
|
no_license
|
msaeltzer/scrape
|
R
| false
| false
| 1,053
|
r
|
############################################################
# Combine individual human and automated sentiment codings
# of 1,500 random sentences from plenary Bundestag speeches
#
# Project: Validation of sentiment dictionaries
#
# Author: christian.rauh@wzb.eu / christian-rauh.eu
# Date: 06.08.2015
###########################################################
# Packages
library(stringr)
# Set YOUR working directory here (root of replication package)
# NOTE(review): both setwd() calls execute in sequence; the second wins
# when both paths exist, and a missing path errors. Keep only the line
# for your machine.
setwd("M:/user/rauh/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
setwd("C:/Users/CUZ/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
# Load automatically scored data
load("./2_Bundestag/Data/ValidationSampleScored.Rdata")
# Load human coded data
load("./2_Bundestag/Data/HumanCoding/HumanSentimentCodings.Rdata")
# Merge by sentence id (Qnum)
#----------------------------
# Full outer join on Qnum: all = TRUE retains unmatched sentences.
data <- merge(sent.sample, human.codes, by = "Qnum", all = TRUE)
# Remove the raw sentence text column before saving.
data$text <- NULL
# Save final data
#----------------
save(data, file = "./2_Bundestag/Data/ValidationSampleFinal.Rdata")
|
## builds strongly off of 'mapplots' package, but now incorporates more general worldwide mapping abilities by combining with existing mapdata
library(maps)
library(mapdata)
library(maptools)
library(mapplots)
library(scales)
#dummy data
#my field data, smaller geographic area
dat1 <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_1.csv")
dat11 <- read.table("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_1.txt", header=TRUE) #need the header specifier unlike the default for reading .csv
#illingworth field data, larger geographic area
dat2 <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_2.csv")
dat22 <- read.table("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_2.txt", header=TRUE)
## NOTE - must not have spaces in column names! otherwise text file messes up, while .csv doesn't
# # # # # # # # # # MAKE THE MAP # # # # # # # # # # # # # # #
# Quick demo: mapplots::basemap sized to the smaller dataset's extent,
# with the 'maps' world outlines layered on top.
basemap(xlim=range(dat1$lon), ylim=range(dat1$lat))
map("world", add=TRUE)
## EDIT THE BASEMAP FUNCTION FROM mapplots
# Draw a blank map canvas sized to the data's lon/lat extent, paint the
# background, then overlay world (and optionally US state) outlines
# from the 'maps' package.
extent.map <- function(dat, bg = "white", col = "gray90", fill = TRUE,
                       axes = FALSE, xlab = "", ylab = "", main = "",
                       usstates = FALSE) { #, ...)
  lon_range <- range(dat$lon)
  lat_range <- range(dat$lat)
  # Aspect-ratio correction for the window's mid-latitude.
  aspect <- 1 / cos(sum(lat_range) * pi / 360)
  plot(NA, xlim = lon_range, ylim = lat_range, asp = aspect,
       axes = axes, xlab = xlab, ylab = ylab, main = main) #, ...)
  # Fill the entire plotting region with the background colour.
  usr <- par("usr")
  rect(usr[1], usr[3], usr[2], usr[4], col = bg)
  # World outlines on top; state borders only when requested.
  map("world", add = TRUE, col = col, fill = fill)
  if (usstates == TRUE) {
    map("state", add = TRUE, col = col, fill = fill)
  }
  box()
}
## NEED TO FIGURE OUT HOW TO LEAVE THE ... in there, it calls standard options from base graphics
#the above map extent changes based on resizing of quartz window; might be okay once I figure out how to export to file, but try something else for now below
### just plot a map based on lat/lon in file
# Plot a world map window padded slightly beyond the data's lat/lon range.
# NOTE(review): the padding arithmetic assumes negative longitudes and
# positive latitudes (N. America), per the author's own comment -- confirm
# before reusing elsewhere.
bmap <- function(dat, col = "gray90", fill = TRUE, axes = FALSE,
                 xlab = "", ylab = "", main = "", usstates = FALSE,
                 zoom = 0.005) { #, ...) increasing zoom widens the map window
  lon <- range(dat$lon)
  lat <- range(dat$lat)
  # Pad each edge by a fraction (zoom) of the coordinate value itself.
  x_window <- c(lon[1] + lon[1] * zoom, lon[2] - lon[2] * zoom)
  y_window <- c(lat[1] - lat[1] * zoom, lat[2] + lat[2] * zoom)
  # Draw the padded world map, optionally with US state borders.
  map("world", xlim = x_window, ylim = y_window, col = col, fill = fill)
  if (usstates == TRUE) {
    map("state", add = TRUE, col = col, fill = fill)
  }
  box()
}
### plot points
# Overlay a two-slice pie on an existing map for each row of `dat`.
# Slice sizes come from columns `c1` and `c2`; pies are drawn at
# (lon, lat). All pies share one radius: a fixed fraction (`zoom`) of
# the larger of the map's lon/lat spans.
#   dat    - data frame with columns c1, c2, tot, lat, lon
#   labels - passed through to mapplots::add.pie()
#   zoom   - scales the common pie radius (larger = bigger pies)
map.pie <- function(dat, labels = "", zoom = 0.01) {
  # Radius is loop-invariant, so compute it once outside the loop.
  lon_span <- abs(diff(range(dat$lon)))
  lat_span <- abs(diff(range(dat$lat)))
  r <- zoom * max(lon_span, lat_span)
  for (i in seq_along(dat$lat)) {
    slice1 <- dat[i, which(colnames(dat) == "c1")]
    slice2 <- dat[i, which(colnames(dat) == "c2")]
    # Bug fix: the original read dat[i, dat[i, which(colnames(dat)=="tot")]],
    # i.e. used the *value* of `tot` as a column number, which errors (or
    # silently grabs the wrong column) whenever tot exceeds ncol(dat).
    # Currently unused; kept for future radius-scaling work.
    total <- dat[i, which(colnames(dat) == "tot")]
    latitude <- dat[i, which(colnames(dat) == "lat")]
    longitude <- dat[i, which(colnames(dat) == "lon")]
    z <- c(slice1, slice2)          # extend here for more than two slices
    cols <- c("orange", "blue")     # one colour per slice
    add.pie(z = z, x = longitude, y = latitude, radius = r,
            col = cols, labels = labels)
  }
}
## OLD STUFF BELOW
# Earlier map: one pie per population from the cpDNA file, slice sizes
# from columns 2 and 3 ('east'/'west'), radius scaled by the column-4
# total, drawn over a fixed N. America / Atlantic window.
gps <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/MAPS/latifolia_cpDNA_data.csv")
map("world", xlim=c(-130,30),ylim=c(34,59.5), resolution=0.01, col="gray90", fill=TRUE)#, projection="conic", param=30)
map("state", add=TRUE)
# seq_along() is safe for zero-row data (seq(1, length(...)) is not).
for(i in seq_along(gps$POP)){ #run through all data points
east <- gps[i,2]
west <- gps[i,3]
tot <- gps[i,4]
lat <- gps[i,5]
lon <- gps[i,6]
add.pie(z=c(east, west), x=lon, y=lat, radius=sqrt(tot)/2, col=c(alpha("orange", 0.6), alpha("blue", 0.6)), labels="")
#draw.pie((z=c(east, west), x=lon, y=lat, radius=1, col=c(alpha("orange", 0.6), alpha("blue", 0.6))))
}
map.scale(-50, 55, ratio=FALSE, relwidth=0.14, cex=1.5)
box()
#should try to get an apply funtion to work, but can't remember how to do it right now. it would likely be better than a loop!!
pie <- function(dat, scaled, col, z){
apply(dat, FUN=add.pie, MARGIN=1)
}
# NOTE(review): the draft below was not valid R syntax (`dat, FUN=...`
# inside a function body) and prevented this file from being source()d;
# kept for reference but commented out.
# pie <- apply(function(dat){
#   dat, FUN=add.pie(z=c(dat$c1,dat$c2), x=dat$lon, y=dat$lat, radius=dat$tot, col=c("orange", "blue"), labels="")
# })
#MARGIN 1 = rows, 2 = columns
|
/WorkingCode.R
|
no_license
|
kjgilbert/geostRuct
|
R
| false
| false
| 4,765
|
r
|
## builds strongly off of 'mapplots' package, but now incorporates more general worldwide mapping abilities by combining with existing mapdata
library(maps)
library(mapdata)
library(maptools)
library(mapplots)
library(scales)
#dummy data
#my field data, smaller geographic area
dat1 <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_1.csv")
dat11 <- read.table("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_1.txt", header=TRUE) #need the header specifier unlike the default for reading .csv
#illingworth field data, larger geographic area
dat2 <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_2.csv")
dat22 <- read.table("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyData_2.txt", header=TRUE)
## NOTE - must not have spaces in column names! otherwise text file messes up, while .csv doesn't
# # # # # # # # # # MAKE THE MAP # # # # # # # # # # # # # # #
# Demo plot: basemap window from the data extent, world outlines on top.
basemap(xlim=range(dat1$lon), ylim=range(dat1$lat))
map("world", add=TRUE)
## EDIT THE BASEMAP FUNCTION FROM mapplots
# Draw a blank canvas sized to the data's lon/lat extent, paint the
# background, then overlay world (and optionally US state) outlines.
extent.map <- function(dat, bg = "white", col="gray90", fill=TRUE, axes=FALSE, xlab="", ylab="", main="", usstates=FALSE)#, ...)
{
xlim <- range(dat$lon)
ylim <- range(dat$lat)
# Aspect-ratio correction for the window's mid-latitude.
asp <- 1/cos(sum(ylim) * pi/360)
plot(NA, xlim = xlim, ylim = ylim, asp = asp, axes=axes, xlab=xlab, ylab=ylab, main=main)#, ...)
# Fill the whole plotting region with the background colour.
rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col = bg)
#add world map plotted on top
map("world", add=TRUE, col=col, fill=fill)
if(usstates==TRUE){map("state", add=TRUE, col=col, fill=fill)}
box()
}
## NEED TO FIGURE OUT HOW TO LEAVE THE ... in there, it calls standard options from base graphics
#the above map extent changes based on resizing of quartz window; might be okay once I figure out how to export to file, but try something else for now below
### just plot a map based on lat/lon in file
# Plot a world map window padded slightly beyond the data's lat/lon range.
# NOTE(review): the padding arithmetic assumes negative longitudes and
# positive latitudes (N. America), per the author's own comment in the line
# below -- confirm before reusing elsewhere.
bmap <- function(dat, col="gray90", fill=TRUE, axes=FALSE, xlab="", ylab="", main="", usstates=FALSE, zoom=0.005)#, ...) #increasing zoom increases the amount of map space plotted
{
xlim <- c(range(dat$lon)[1]+range(dat$lon)[1]*zoom, range(dat$lon)[2]-range(dat$lon)[2]*zoom) ## NEED TO PUT IF STATEMENTS IN HERE FOR DATA THAT IS NOT IN N.AMERICA, because their longitudes in europe will be positive, or latitude in southern hemisphere negative
ylim <- c(range(dat$lat)[1]-range(dat$lat)[1]*zoom, range(dat$lat)[2]+range(dat$lat)[2]*zoom)
#plot map
map("world", xlim = xlim, ylim = ylim, col=col, fill=fill)
if(usstates==TRUE){map("state", add=TRUE, col=col, fill=fill)}
box()
}
### plot points
# Overlay a two-slice pie on an existing map for each row of `dat`.
# Slice sizes come from columns `c1` and `c2`; pies are drawn at
# (lon, lat) with one shared radius: a fraction (`zoom`) of the larger
# of the map's lon/lat spans.
map.pie <- function(dat, labels = "", zoom = 0.01) {
  # Radius does not change inside the loop; compute it once.
  lon_span <- abs(diff(range(dat$lon)))
  lat_span <- abs(diff(range(dat$lat)))
  r <- zoom * max(lon_span, lat_span)
  for (i in seq_along(dat$lat)) {
    slice1 <- dat[i, which(colnames(dat) == "c1")]
    slice2 <- dat[i, which(colnames(dat) == "c2")]
    # Bug fix: the original double-indexed dat[i, dat[i, ...]], using the
    # `tot` *value* as a column number -- that errors (or reads the wrong
    # column) whenever tot exceeds ncol(dat). Unused for now.
    total <- dat[i, which(colnames(dat) == "tot")]
    latitude <- dat[i, which(colnames(dat) == "lat")]
    longitude <- dat[i, which(colnames(dat) == "lon")]
    z <- c(slice1, slice2)          # extend here for more than two slices
    cols <- c("orange", "blue")     # one colour per slice
    add.pie(z = z, x = longitude, y = latitude, radius = r,
            col = cols, labels = labels)
  }
}
## OLD STUFF BELOW
# Earlier map: one pie per population from the cpDNA file, slice sizes
# from columns 2 and 3 ('east'/'west'), radius scaled by the column-4
# total, drawn over a fixed N. America / Atlantic window.
gps <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/MAPS/latifolia_cpDNA_data.csv")
map("world", xlim=c(-130,30),ylim=c(34,59.5), resolution=0.01, col="gray90", fill=TRUE)#, projection="conic", param=30)
map("state", add=TRUE)
# seq_along() is safe for zero-row data (seq(1, length(...)) is not).
for(i in seq_along(gps$POP)){ #run through all data points
east <- gps[i,2]
west <- gps[i,3]
tot <- gps[i,4]
lat <- gps[i,5]
lon <- gps[i,6]
add.pie(z=c(east, west), x=lon, y=lat, radius=sqrt(tot)/2, col=c(alpha("orange", 0.6), alpha("blue", 0.6)), labels="")
#draw.pie((z=c(east, west), x=lon, y=lat, radius=1, col=c(alpha("orange", 0.6), alpha("blue", 0.6))))
}
map.scale(-50, 55, ratio=FALSE, relwidth=0.14, cex=1.5)
box()
#should try to get an apply funtion to work, but can't remember how to do it right now. it would likely be better than a loop!!
pie <- function(dat, scaled, col, z){
apply(dat, FUN=add.pie, MARGIN=1)
}
# NOTE(review): the draft below was not valid R syntax (`dat, FUN=...`
# inside a function body) and prevented this file from being source()d;
# kept for reference but commented out.
# pie <- apply(function(dat){
#   dat, FUN=add.pie(z=c(dat$c1,dat$c2), x=dat$lon, y=dat$lat, radius=dat$tot, col=c("orange", "blue"), labels="")
# })
#MARGIN 1 = rows, 2 = columns
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnSignature.R
\name{summarizeSignatures}
\alias{summarizeSignatures}
\title{cnSignature: Summarizes the CN signature list}
\usage{
summarizeSignatures(sig, ids = NULL, decompose = TRUE,
sig.metric.values = c("mean", "sd", "skew", "kurtosis", "modality"))
}
\arguments{
\item{sig}{[List]: The returned value from runCnSignatures() with numeric.return=TRUE}
\item{ids}{[Char. Vector]: Vector of all sample names}
\item{decompose}{[Logical]: Whether to decompose the signatures before summarizing (listed in usage but previously undocumented; confirm description against R/cnSignature.R)}
\item{sig.metric.values}{[Char. Vector]: All values to be returned from describe and mclust}
}
\description{
Works with the return list from runCnSignatures() when numeric.return=TRUE. Will summarize this into a matrix
}
|
/PLTK/man/summarizeSignatures.Rd
|
no_license
|
pughlab/PLTK
|
R
| false
| true
| 715
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnSignature.R
\name{summarizeSignatures}
\alias{summarizeSignatures}
\title{cnSignature: Summarizes the CN signature list}
\usage{
summarizeSignatures(sig, ids = NULL, decompose = TRUE,
sig.metric.values = c("mean", "sd", "skew", "kurtosis", "modality"))
}
\arguments{
\item{sig}{[List]: The returned value from runCnSignatures() with numeric.return=TRUE}
\item{ids}{[Char. Vector]: Vector of all sample names}
\item{decompose}{[Logical]: Whether to decompose the signatures before summarizing (listed in usage but previously undocumented; confirm description against R/cnSignature.R)}
\item{sig.metric.values}{[Char. Vector]: All values to be returned from describe and mclust}
}
\description{
Works with the return list from runCnSignatures() when numeric.return=TRUE. Will summarize this into a matrix
}
|
# --- Setup: libraries, plot output directory, figure dimensions ---
library('dplyr')
library('tidyr')
library('ggplot2')
plot_out_dir = "C:/Users/Cob/index/educational/usask/research/masters/graphics/thesis_graphics/validation/ray_sampling_validation/"
p_width = 8 # inches
p_height = 5.7 # inches
dpi = 100
# --- Hemispherical-photo LAI products (active path = snow-off batch; the
# commented path is the snow-on alternative) ---
photos_lai_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/19_149/clean/sized/thresholded/LAI_parsed.dat"
# photos_lai_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/045_052_050/LAI_045_050_052_parsed.dat"
photos_lai = read.csv(photos_lai_in, header=TRUE, na.strings = c("NA",""), sep=",")
# Map processed names ("*_r.jpg") back to the original "*.JPG" names so they
# can be joined against the metadata lookup table.
photos_lai$original_file = toupper(gsub("_r.jpg", ".JPG", photos_lai$picture, ignore.case=TRUE))
photos_meta_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/hemi_lookup_cleaned.csv"
photos_meta = read.csv(photos_meta_in, header=TRUE, na.strings = c("NA",""), sep=",")
# 0-based row index used as the photo id.
photos_meta$id <- as.numeric(rownames(photos_meta)) - 1
photos = merge(photos_lai, photos_meta, by.x='original_file', by.y='filename', all.x=TRUE)
# photos = photos[, c("original_file", "contactnum_1", "contactnum_2", "contactnum_3", "contactnum_4", "contactnum_5")]
# photos = photos[, c("original_file", "transmission_1", "transmission_2", "transmission_3", "transmission_4", "transmission_5", "transmission_s_1", "transmission_s_2", "transmission_s_3", "transmission_s_4", "transmission_s_5", "contactnum_1", "contactnum_2", "contactnum_3", "contactnum_4", "contactnum_5")]
# Keep only the scaled transmittance columns, one per 15-degree zenith ring.
photos = photos[, c("original_file", "transmission_s_1", "transmission_s_2", "transmission_s_3", "transmission_s_4", "transmission_s_5")]
# --- Ray-sampling hemisphere products: five processing variants are loaded,
# subset to contact-number (lrs_cn_*) and, where present, transmittance
# (lrs_tx_*) ring columns, and renamed with a variant suffix so they can all
# be merged on sample id.
# "big": px1000 run — presumably finer angular resolution; TODO confirm.
rsm_big_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px1000_snow_off/outputs/rshmetalog_footprint_products.csv"
# rsm_big_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px1000_snow_on/outputs/rshmetalog_footprint_products.csv"
rsm_big = read.csv(rsm_big_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_big$id = as.character(rsm_big$id)
rsm_big = rsm_big[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5")]
colnames(rsm_big) = c("id", "lrs_cn_big_1", "lrs_cn_big_2", "lrs_cn_big_3", "lrs_cn_big_4", "lrs_cn_big_5")
# "far": 150 m maximum ray distance (vs the default batch).
rsm_far_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off_max150m/outputs/rshmetalog_footprint_products.csv"
# rsm_far_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on_max150m/outputs/rshmetalog_footprint_products.csv"
rsm_far = read.csv(rsm_far_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_far$id = as.character(rsm_far$id)
rsm_far = rsm_far[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5")]
colnames(rsm_far) = c("id", "lrs_cn_far_1", "lrs_cn_far_2", "lrs_cn_far_3", "lrs_cn_far_4", "lrs_cn_far_5")
# "bin": thresholded (binarized) products ("_opt_thresh" outputs).
rsm_bin_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products_opt_thresh.csv"
# rsm_bin_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products_opt_thresh.csv"
rsm_bin = read.csv(rsm_bin_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_bin$id = as.character(rsm_bin$id)
rsm_bin = rsm_bin[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_bin) = c("id", "lrs_cn_bin_1", "lrs_cn_bin_2", "lrs_cn_bin_3", "lrs_cn_bin_4", "lrs_cn_bin_5", "lrs_tx_bin_1", "lrs_tx_bin_2", "lrs_tx_bin_3", "lrs_tx_bin_4", "lrs_tx_bin_5")
# "raw": baseline px181 products, unmodified.
rsm_raw_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products.csv"
# rsm_raw_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products.csv"
rsm_raw = read.csv(rsm_raw_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_raw$id = as.character(rsm_raw$id)
rsm_raw = rsm_raw[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_raw) = c("id", "lrs_cn_raw_1", "lrs_cn_raw_2", "lrs_cn_raw_3", "lrs_cn_raw_4", "lrs_cn_raw_5", "lrs_tx_raw_1", "lrs_tx_raw_2", "lrs_tx_raw_3", "lrs_tx_raw_4", "lrs_tx_raw_5")
# "opt": optimized products ("_opt" outputs).
rsm_opt_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products_opt.csv"
# rsm_opt_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products_opt.csv"
rsm_opt = read.csv(rsm_opt_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_opt$id = as.character(rsm_opt$id)
rsm_opt = rsm_opt[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_opt) = c("id", "lrs_cn_opt_1", "lrs_cn_opt_2", "lrs_cn_opt_3", "lrs_cn_opt_4", "lrs_cn_opt_5", "lrs_tx_opt_1", "lrs_tx_opt_2", "lrs_tx_opt_3", "lrs_tx_opt_4", "lrs_tx_opt_5")
# Inner-join all five variants on sample id (rows present in every variant).
rsm_merge = merge(rsm_bin, rsm_far, by='id')
rsm_merge = merge(rsm_big, rsm_merge, by='id')
rsm_merge = merge(rsm_raw, rsm_merge, by='id')
rsm_merge = merge(rsm_opt, rsm_merge, by='id')
# Reshape wide ring columns (e.g. lrs_cn_raw_3) into long form: one row per
# (id, ring_number), one column per variant (lrs_cn_raw, lrs_cn_big, ...).
# NOTE(review): gather/spread are superseded by pivot_longer/pivot_wider.
rsm_df = rsm_merge %>%
gather(key, value, -id) %>%
extract(key, c("val_type", "ring_number"), "(\\D+)_(\\d)") %>%
spread(val_type, value) %>%
mutate(angle_mean = (as.numeric(ring_number) * 15 - 15/2) * pi / 180)
# Resolution sensitivity: raw (px181) vs big (px1000) against the 1:1 line.
# NOTE(review): title says "snow-on" while the active input paths above are
# the snow_off batches — confirm which run this reflects.
ggplot(rsm_df, aes(x=lrs_cn_raw, y=lrs_cn_big, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise expected returns E[<u>] resolution sensitivity (snow-on)", x='E[<u>] (1-degree resolution)', y='E[<u>] (0.18-degree resolution)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_res_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_res_eval.png"), width=p_width, height=p_height, dpi=dpi)
size_lm = lm(lrs_cn_big ~ lrs_cn_raw, data = rsm_df)
summary(size_lm)
# Maximum-ray-distance sensitivity: raw (50 m) vs far (150 m).
ggplot(rsm_df, aes(x=lrs_cn_raw, y=lrs_cn_far, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise expected returns <u> max distance sensitivity (snow-on)", x='E[<u>] (max 50m)', y='E[<u>] (max 150m)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_max_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_max_eval.png"), width=p_width, height=p_height, dpi=dpi)
# Zero-intercept fit over rings 1-4 only (ring 5 excluded).
rsm_df_band = rsm_df %>%
filter(ring_number != 5)
max_lm = lm(lrs_cn_far ~ 0 + lrs_cn_raw, data = rsm_df_band)
summary(max_lm)
# Per-ring mean bias and RMSE of the far vs raw difference.
max_band = rsm_df %>%
group_by(ring_number) %>%
summarize(mb = mean(lrs_cn_far - lrs_cn_raw), rmse = sqrt(mean((lrs_cn_far - lrs_cn_raw)^2)))
# Commutation check: does -ln(E[exp(-X)]) equal E[X]? (Jensen's inequality
# says it should not, in general.)
ggplot(rsm_df, aes(x=-log(lrs_tx_opt), y=lrs_cn_opt, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise contact number E(X) commutation response (snow-on)", x='-ln(E[exp(-X)])', y='E[X]', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_bin_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_bin_eval.png"), width=p_width, height=p_height, dpi=dpi)
# NOTE(review): `lrs_cn` is not a column of rsm_df (the long columns are
# lrs_cn_raw/_big/_far/_bin/_opt), so this lm will error or silently pick up
# a stray variable — confirm which variant was intended as the predictor.
bin_lm = lm(lrs_cn_far ~ 0 + lrs_cn, data = rsm_df)
summary(bin_lm)
# Threshold response: thresholded (-ln E[H(T - E[T])]) vs optimized (-ln E[T]).
ggplot(rsm_df, aes(x=-log(lrs_tx_bin), y=-log(lrs_tx_opt), color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise contact number -ln(E[T]) threshold response (snow-off)", x='-ln(E[H(T - E[T])])', y='-ln(E[T])', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_thresh_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_thresh_eval.png"), width=p_width, height=p_height, dpi=dpi)
# NOTE(review): same undefined `lrs_cn` predictor as bin_lm above — confirm.
thresh_lm = lm(lrs_cn_far ~ 0 + lrs_cn, data = rsm_df)
summary(thresh_lm)
# Join ray-sampling (opt + bin variants) with photo transmittance, then
# reshape to one row per (id, ring_number).
# df = merge(rsm_raw, photos, by.x='id', by.y='original_file')
df = merge(rsm_opt, photos, by.x='id', by.y='original_file')
df = merge(rsm_bin, df, by='id', suffixes = c("_bin", ""))
df = df %>%
gather(key, value, -id) %>%
extract(key, c("val_type", "ring_number"), "(\\D+)_(\\d)") %>%
spread(val_type, value) %>%
mutate(angle_mean = (as.numeric(ring_number) * 15 - 15/2) * pi / 180) %>%
# Solid angle of each 15-degree zenith annulus: 2*pi*(cos(theta1) - cos(theta2)),
# later used to weight statistics toward rings covering more sky.
mutate(solid_angle = 2* pi * (cos((as.numeric(ring_number) - 1) * 15 * pi / 180) - cos(as.numeric(ring_number) * 15 * pi / 180)))
# Drop ring 5 (per the note below: removed due to horizon clipping).
df_drop = df[df$ring_number != "5",]
# calculate error
# ggplot(df, aes(x=lrs_cn, y=-log(transmission_s), color=ring_number)) +
# geom_point()
#
# ggplot(df, aes(x=lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
# geom_point()
## many models to choose from
# least squares contact number
# equal weight
# lm_rsm_mean_cn = lm(-log(df$transmission_s) ~ 0 + df$lrs_cn)
# summary(lm_rsm_mean_cn)
# cn_lm = predict(lm_rsm_mean_cn, df)
# solid angle weight
# lmw_rsm_mean_cn = lm(-log(df$transmission_s) ~ 0 + df$lrs_cn, weights=df$solid_angle)
# summary(lmw_rsm_mean_cn)
# cn_lmw = predict(lmw_rsm_mean_cn, df)
#
# # solid angle weight drop ring 5
# lmw_rsm_mean_cn_drop = lm(-log(df_drop$transmission_s) ~ 0 + df_drop$lrs_cn, weights=df_drop$solid_angle)
# summary(lmw_rsm_mean_cn_drop)
# cn_lmw_drop = predict(lmw_rsm_mean_cn_drop, df_drop)
#
# # solid angle weight drop ring 5 thresholded
# # lmw_rsm_mean_cn_drop_bin = lm(-log(df_drop$transmission_s) ~ 0 + df_drop$lrs_cn_bin, weights=df_drop$solid_angle)
# lmw_rsm_mean_cn_drop_bin = lm(-log(df_drop$transmission_s[is.finite(df_drop$lrs_cn_bin)]) ~ 0 + df_drop$lrs_cn_bin[is.finite(df_drop$lrs_cn_bin)], weights=df_drop$solid_angle[is.finite(df_drop$lrs_cn_bin)])
# summary(lmw_rsm_mean_cn_drop_bin)
# # cn_lmw_drop_bin = predict(lmw_rsm_mean_cn_drop_bin, df_drop)
# cn_lmw_drop_bin = df_drop$lrs_cn_bin * summary(lmw_rsm_mean_cn_drop_bin)$coefficients[1]
# bin_lm = lm(-log(transmission_s) ~ lrs_cn_bin, data = df)
# summary(bin_lm)
# least squares transmittance
# # equal weight
# nls_rsm_mean_cn = nls(transmission_s ~ exp(-a * lrs_cn), data=df, start=c(a=0.5))
# summary(nls_rsm_mean_cn)
# tx_nls = predict(nls_rsm_mean_cn, newdata=df)
#
# # solid angle weight
# nlsw_rsm_mean_tx = nls(transmission_s ~ exp(-a * lrs_cn), data=df, start=c(a=0.5), weights=solid_angle)
# summary(nlsw_rsm_mean_tx)
# tx_nlsw = predict(nlsw_rsm_mean_tx, newdata=df)
# summary(nlsw_rsm_mean_tx)$parameters[1]
#
# # solid angle weight drop ring 5
# df_drop = df[df$ring_number != "5",]
# lmw_rsm_mean_tx_drop = lm(df_drop$transmission_s ~ 0 + df_drop$lrs_cn, weights=df_drop$solid_angle)
# summary(lmw_rsm_mean_tx_drop)
# tx_lmw_drop = predict(lmw_rsm_mean_tx_drop, df_drop)
# #
# # solid angle weight, quadratic cn -- both terms are significant... could be good!
# nlsw2_rsm_mean_cn = nls(-log(transmission_s) ~ a * lrs_cn ^ 2 + b * lrs_cn, data=df, start=c(a=0.5, b=0), weights=solid_angle)
# summary(nlsw2_rsm_mean_cn)
# cn_nlsw2 = predict(nlsw2_rsm_mean_cn, newdata=df)
# # solid angle weight, quadratic tx
# nlsw2_rsm_mean_tx = nls(transmission_s ~ exp(-a * lrs_cn * ( 1 + b * lrs_cn)), data=df, start=c(a=0, b=0.5), weights=solid_angle)
# summary(nlsw2_rsm_mean_tx)
# tx_nlsw2 = predict(nlsw2_rsm_mean_tx, newdata=df)
#
# test_nls = nls(transmission_s ~ a * exp(-lrs_cn), data = df, start=c(a = 1), weights=solid_angle)
# summary(test_nls)
# tx_test = predict(test_nls, newdata=df)
#
# wmae_tx = function(cx){
# tx_error = exp(-cx * df$lrs_cn) - df$transmission_s
# weights = df$solid_angle / sum(df$solid_angle, na.rm=TRUE)
# wmae = abs(sum(weights * tx_error, na.rm=TRUE))
# }
#
# wmae_cn = function(cx){
# cn_error = cx * df$lrs_cn - -log(df$transmission_s)
# weights = df$solid_angle / sum(df$solid_angle, na.rm=TRUE)
# wmae = abs(sum(weights * cn_error, na.rm=TRUE))
# }
#
# opt_tx = optimize(wmae_tx, lower=0, upper=1)
# opt_tx$minimum
# tx_wmae = exp(-opt_tx$minimum * df$lrs_cn)
#
# opt_cn = optimize(wmae_cn, lower=0, upper=1)
# opt_cn$minimum
# cn_wmae = opt_cn$minimum * df$lrs_cn
# cn vs cn
# ggplot(df_drop, aes(x=lrs_cn)) +
# geom_point(aes(y=-log(transmission_s), color=ring_number)) +
# geom_line(aes(y=cn_lmw_drop))
# geom_line(aes(y=cn_lmw), color="orange") +
# geom_line(aes(y=-log(tx_test)), color="blue") +
# geom_line(aes(y=cn_nlsw), color="red") +
# geom_line(aes(y=-log(tx_nlsw2)), color="brown")
# geom_line(aes(y=cn_lm), color="red") +
# geom_line(aes(y=cn_wmae), color="yellow") +
# geom_line(aes(y=-log(tx_nls)), color="green") +
# geom_line(aes(y=-log(tx_wmae)), color="purple") +
# geom_line(aes(y=-log(tx_nlsw2)), color="brown")
# tx vs tx
# ggplot(df, aes(y=transmission_s, x=tx_nlsw2, color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
# cn vs tx
# ggplot(df_drop, aes(x=lrs_cn)) +
# geom_point(aes(y=transmission_s, color=ring_number)) +
# geom_line(aes(y=tx_test), color="red")
# geom_line(aes(y=exp(-cn_lm)), color="red") +
# geom_line(aes(y=exp(-cn_lmw)), color="orange") +
# geom_line(aes(y=exp(-cn_wmae)), color="yellow") +
# geom_line(aes(y=tx_nls), color="green") +
# geom_line(aes(y=tx_nlsw), color="blue") +
# geom_line(aes(y=tx_wmae), color="purple") +
# geom_line(aes(y=tx_nlsw2), color="brown")
# --- Error-analysis plots: ray sampling vs hemispherical photography,
# ring 5 excluded (df_drop). NOTE(review): some titles/axis labels say
# "snow-on" while the active inputs above are snow_off — confirm.
# tx
ggplot(df_drop, aes(x=lrs_tx_opt, y=transmission_s, color=ring_number)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
xlim(0, 1) +
ylim(0, 1) +
labs(title="Light transmittance (T) error analysis", x='T (ray sampling snow-on)', y='T (hemispherical photography)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_tx_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_tx_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# tx_bin — same comparison using the thresholded transmittance product.
ggplot(df_drop, aes(x=lrs_tx_bin, y=transmission_s, color=ring_number)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
xlim(0, 1) +
ylim(0, 1) +
labs(title="Light transmittance (T) error analysis", x='T (ray sampling snow-on)', y='T (hemispherical photography)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# cn — contact number: photo side is -log(transmittance).
ggplot(df_drop, aes(x=lrs_cn_opt, y=-log(transmission_s), color=ring_number)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
labs(title="Mean band-wise contact number E[X] methods comparison (snow-off)", x='E[X] (ray sampling)', y='E[X] (thresholded hemispherical photography)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# cn bin — thresholded contact-number comparison.
# ggplot(df_drop, aes(x=summary(lmw_rsm_mean_cn_drop_bin)$coefficients[1] * lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
ggplot(df_drop, aes(x=lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
geom_point() +
geom_abline(intercept = 0, slope = 1) +
labs(title="Mean band-wise contact number E[X] thresholded methods comparison (snow-off)", x='E[X] (thresholded ray sampling)', y='E[X] (thresholded hemispherical photography)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_bin_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_bin_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# # # remove 5th ring due to horizon clipping
# df_anal = df[df$ring_number != 5,]
# # lm_rsm_mean_tx = lm(df_anal$contactnum ~ 0 + df_anal$rsm_mean)
# # lm_rsm_mean_cn = lm(-log(df_anal$transmission_s) ~ 0 + df_anal$lrs_cn)
# nls_rsm_mean_cn = nls(transmission_s ~ exp(-a * lrs_cn), data=df_anal, start=c(a=0.5))
# summary(nls_rsm_mean_cn)
#
# fo = paste0("hat(y) == ", sprintf("%.5f",summary(lmw_rsm_mean_cn_drop)$coefficients[1]), " * x")
# r2 = paste0("R^2 == ", sprintf("%.5f",summary(lmw_rsm_mean_cn_drop)$adj.r.squared))
#
#
# ggplot(df_drop, aes(x=lrs_cn, y=-log(transmission_s), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = summary(lmw_rsm_mean_cn_drop)$coefficients[1]) +
# annotate("text", x=5, y=2, label=fo, parse=TRUE) +
# annotate("text", x=5, y=1.9, label=r2, parse=TRUE) +
# labs(title="", x='Lidar returns', y='-log(transmission)', color='Ring')
# # ggsave(paste0(plot_out_dir, "snow_off_returns_to_tx_optimization.png"), width=p_width, height=p_height, dpi=dpi)
# # ggsave(paste0(plot_out_dir, "snow-on_returns_to_tx_optimization.png"), width=p_width, height=p_height, dpi=dpi)
#
#
# stats
model_eval = function(nn = 0){
  # Agreement statistics between ray-sampling estimates and hemispherical
  # photo "truth", for transmittance (tx) and contact number (cn = -ln tx).
  #
  # nn: zenith ring number to evaluate; 0 (default) pools all rings present
  #     in the global df_drop (which already excludes ring 5).
  # Returns: c(tx_r2_adj, tx_wrmse, tx_wmb, cn_r2_adj, cn_wrmse, cn_wmb) —
  #   adjusted R^2 of a solid-angle-weighted lm, weighted RMSE, and weighted
  #   mean bias, for tx then cn.
  # Depends on global df_drop with columns transmission_s, lrs_tx_opt,
  # lrs_cn_opt, solid_angle, ring_number.
  df_sub = df_drop
  if(nn > 0){
    df_sub = df_sub %>%
      filter(ring_number == nn)
  }
  # Photo-derived truth; Beer-Lambert: contact number is -log(transmittance).
  tx = df_sub$transmission_s
  cn = -log(tx)
  # Ray-sampling estimates (threshold-optimized "_opt" products).
  lrs_tx = df_sub$lrs_tx_opt
  lrs_cn = df_sub$lrs_cn_opt
  # Solid-angle weights normalized to sum to 1, so weighted sums are means.
  weights = df_sub$solid_angle
  weights = weights / sum(weights)
  # Shared helper: weighted-lm adjusted R^2 plus weighted RMSE / mean bias of
  # (estimate - observation). The original duplicated this logic for tx and
  # cn and also computed model p-values that were never returned; the dead
  # p-value computation has been dropped.
  wstats = function(obs, est){
    fit = lm(obs ~ est, weights = weights)
    err = est - obs
    c(fit_r2_adj = summary(fit)$adj.r.squared,
      wrmse = sqrt(sum(weights * (err^2))),
      wmb = sum(err * weights))
  }
  unname(c(wstats(tx, lrs_tx), wstats(cn, lrs_cn)))
}
# Agreement stats pooled over rings 1-4 (nn = 0), then per ring. Each call
# returns c(tx_r2_adj, tx_wrmse, tx_wmb, cn_r2_adj, cn_wrmse, cn_wmb); see
# the return statement of model_eval above.
model_eval(0)
model_eval(1)
model_eval(2)
model_eval(3)
model_eval(4)
# model_eval(5)
#
#
# ggplot(df, aes(x=exp(-0.195 * rsm_mean), y=exp(-contactnum), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
#
# ggplot(df, aes(x=exp(-0.166 * rsm_mean), y=exp(-contactnum), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
##
#
# # plot linear against nb
# rsm_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/synthetic_hemis/batches/lrs_hemi_optimization_r.25_px361_linear/outputs/contact_number_optimization.csv"
# rsm = read.csv(rsm_in, header=TRUE, na.strings = c("NA",""), sep=",")
# rsm$id = as.character(rsm$id)
# rsm_linear = rsm[, c("id", "rsm_mean_1", "rsm_mean_2", "rsm_mean_3", "rsm_mean_4", "rsm_mean_5", "rsm_med_1", "rsm_med_2", "rsm_med_3", "rsm_med_4", "rsm_med_5")]
# df_l = rsm_linear %>%
# gather(key, value, -id) %>%
# extract(key, c("cn_type", "ring_number"), "(\\D+)_(\\d)") %>%
# spread(cn_type, value)
#
# rsm_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/synthetic_hemis/batches/lrs_hemi_optimization_r.25_px100_experimental/outputs/contact_number_optimization.csv"
# rsm = read.csv(rsm_in, header=TRUE, na.strings = c("NA",""), sep=",")
# rsm$id = as.character(rsm$id)
# rsm_nb = rsm[, c("id", "rsm_mean_1", "rsm_mean_2", "rsm_mean_3", "rsm_mean_4", "rsm_mean_5", "rsm_med_1", "rsm_med_2", "rsm_med_3", "rsm_med_4", "rsm_med_5")]
# df_nb = rsm_nb %>%
# gather(key, value, -id) %>%
# extract(key, c("cn_type", "ring_number"), "(\\D+)_(\\d)") %>%
# spread(cn_type, value)
#
# df = merge(df_l, df_nb, by=c('id', 'ring_number'), suffixes = c('_l', '_nb'))
#
#
# l_coef = .11992
# nb_coef = .18734
# ggplot(df, aes(x=rsm_mean_l * l_coef, y=rsm_mean_nb * nb_coef, color=ring_number)) +
# geom_point()
|
/r/optimization/lrs_hemi_optimization.r
|
no_license
|
jstaines/upper-clearing-lidar
|
R
| false
| false
| 21,327
|
r
|
library('dplyr')
library('tidyr')
library('ggplot2')
plot_out_dir = "C:/Users/Cob/index/educational/usask/research/masters/graphics/thesis_graphics/validation/ray_sampling_validation/"
p_width = 8 # inches
p_height = 5.7 # inches
dpi = 100
photos_lai_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/19_149/clean/sized/thresholded/LAI_parsed.dat"
# photos_lai_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/045_052_050/LAI_045_050_052_parsed.dat"
photos_lai = read.csv(photos_lai_in, header=TRUE, na.strings = c("NA",""), sep=",")
photos_lai$original_file = toupper(gsub("_r.jpg", ".JPG", photos_lai$picture, ignore.case=TRUE))
photos_meta_in = "C:/Users/Cob/index/educational/usask/research/masters/data/hemispheres/hemi_lookup_cleaned.csv"
photos_meta = read.csv(photos_meta_in, header=TRUE, na.strings = c("NA",""), sep=",")
photos_meta$id <- as.numeric(rownames(photos_meta)) - 1
photos = merge(photos_lai, photos_meta, by.x='original_file', by.y='filename', all.x=TRUE)
# photos = photos[, c("original_file", "contactnum_1", "contactnum_2", "contactnum_3", "contactnum_4", "contactnum_5")]
# photos = photos[, c("original_file", "transmission_1", "transmission_2", "transmission_3", "transmission_4", "transmission_5", "transmission_s_1", "transmission_s_2", "transmission_s_3", "transmission_s_4", "transmission_s_5", "contactnum_1", "contactnum_2", "contactnum_3", "contactnum_4", "contactnum_5")]
photos = photos[, c("original_file", "transmission_s_1", "transmission_s_2", "transmission_s_3", "transmission_s_4", "transmission_s_5")]
rsm_big_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px1000_snow_off/outputs/rshmetalog_footprint_products.csv"
# rsm_big_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px1000_snow_on/outputs/rshmetalog_footprint_products.csv"
rsm_big = read.csv(rsm_big_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_big$id = as.character(rsm_big$id)
rsm_big = rsm_big[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5")]
colnames(rsm_big) = c("id", "lrs_cn_big_1", "lrs_cn_big_2", "lrs_cn_big_3", "lrs_cn_big_4", "lrs_cn_big_5")
rsm_far_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off_max150m/outputs/rshmetalog_footprint_products.csv"
# rsm_far_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on_max150m/outputs/rshmetalog_footprint_products.csv"
rsm_far = read.csv(rsm_far_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_far$id = as.character(rsm_far$id)
rsm_far = rsm_far[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5")]
colnames(rsm_far) = c("id", "lrs_cn_far_1", "lrs_cn_far_2", "lrs_cn_far_3", "lrs_cn_far_4", "lrs_cn_far_5")
rsm_bin_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products_opt_thresh.csv"
# rsm_bin_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products_opt_thresh.csv"
rsm_bin = read.csv(rsm_bin_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_bin$id = as.character(rsm_bin$id)
rsm_bin = rsm_bin[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_bin) = c("id", "lrs_cn_bin_1", "lrs_cn_bin_2", "lrs_cn_bin_3", "lrs_cn_bin_4", "lrs_cn_bin_5", "lrs_tx_bin_1", "lrs_tx_bin_2", "lrs_tx_bin_3", "lrs_tx_bin_4", "lrs_tx_bin_5")
rsm_raw_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products.csv"
# rsm_raw_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products.csv"
rsm_raw = read.csv(rsm_raw_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_raw$id = as.character(rsm_raw$id)
rsm_raw = rsm_raw[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_raw) = c("id", "lrs_cn_raw_1", "lrs_cn_raw_2", "lrs_cn_raw_3", "lrs_cn_raw_4", "lrs_cn_raw_5", "lrs_tx_raw_1", "lrs_tx_raw_2", "lrs_tx_raw_3", "lrs_tx_raw_4", "lrs_tx_raw_5")
rsm_opt_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_off/outputs/rshmetalog_footprint_products_opt.csv"
# rsm_opt_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/ray_sampling/batches/lrs_hemi_optimization_r.25_px181_snow_on/outputs/rshmetalog_footprint_products_opt.csv"
rsm_opt = read.csv(rsm_opt_in, header=TRUE, na.strings = c("NA",""), sep=",")
rsm_opt$id = as.character(rsm_opt$id)
rsm_opt = rsm_opt[, c("id", "lrs_cn_1", "lrs_cn_2", "lrs_cn_3", "lrs_cn_4", "lrs_cn_5", "lrs_tx_1", "lrs_tx_2", "lrs_tx_3", "lrs_tx_4", "lrs_tx_5")]
colnames(rsm_opt) = c("id", "lrs_cn_opt_1", "lrs_cn_opt_2", "lrs_cn_opt_3", "lrs_cn_opt_4", "lrs_cn_opt_5", "lrs_tx_opt_1", "lrs_tx_opt_2", "lrs_tx_opt_3", "lrs_tx_opt_4", "lrs_tx_opt_5")
rsm_merge = merge(rsm_bin, rsm_far, by='id')
rsm_merge = merge(rsm_big, rsm_merge, by='id')
rsm_merge = merge(rsm_raw, rsm_merge, by='id')
rsm_merge = merge(rsm_opt, rsm_merge, by='id')
rsm_df = rsm_merge %>%
gather(key, value, -id) %>%
extract(key, c("val_type", "ring_number"), "(\\D+)_(\\d)") %>%
spread(val_type, value) %>%
mutate(angle_mean = (as.numeric(ring_number) * 15 - 15/2) * pi / 180)
ggplot(rsm_df, aes(x=lrs_cn_raw, y=lrs_cn_big, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise expected returns E[<u>] resolution sensitivity (snow-on)", x='E[<u>] (1-degree resolution)', y='E[<u>] (0.18-degree resolution)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_res_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_res_eval.png"), width=p_width, height=p_height, dpi=dpi)
size_lm = lm(lrs_cn_big ~ lrs_cn_raw, data = rsm_df)
summary(size_lm)
ggplot(rsm_df, aes(x=lrs_cn_raw, y=lrs_cn_far, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise expected returns <u> max distance sensitivity (snow-on)", x='E[<u>] (max 50m)', y='E[<u>] (max 150m)', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_max_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_max_eval.png"), width=p_width, height=p_height, dpi=dpi)
rsm_df_band = rsm_df %>%
filter(ring_number != 5)
max_lm = lm(lrs_cn_far ~ 0 + lrs_cn_raw, data = rsm_df_band)
summary(max_lm)
max_band = rsm_df %>%
group_by(ring_number) %>%
summarize(mb = mean(lrs_cn_far - lrs_cn_raw), rmse = sqrt(mean((lrs_cn_far - lrs_cn_raw)^2)))
ggplot(rsm_df, aes(x=-log(lrs_tx_opt), y=lrs_cn_opt, color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise contact number E(X) commutation response (snow-on)", x='-ln(E[exp(-X)])', y='E[X]', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_bin_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_bin_eval.png"), width=p_width, height=p_height, dpi=dpi)
bin_lm = lm(lrs_cn_far ~ 0 + lrs_cn, data = rsm_df)
summary(bin_lm)
ggplot(rsm_df, aes(x=-log(lrs_tx_bin), y=-log(lrs_tx_opt), color=ring_number)) +
geom_abline(intercept = 0, slope = 1) +
geom_point() +
labs(title="Mean band-wise contact number -ln(E[T]) threshold response (snow-off)", x='-ln(E[H(T - E[T])])', y='-ln(E[T])', color='Zenith angle\nband [deg]') +
scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_thresh_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_thresh_eval.png"), width=p_width, height=p_height, dpi=dpi)
thresh_lm = lm(lrs_cn_far ~ 0 + lrs_cn, data = rsm_df)
summary(thresh_lm)
# df = merge(rsm_raw, photos, by.x='id', by.y='original_file')
df = merge(rsm_opt, photos, by.x='id', by.y='original_file')
df = merge(rsm_bin, df, by='id', suffixes = c("_bin", ""))
df = df %>%
gather(key, value, -id) %>%
extract(key, c("val_type", "ring_number"), "(\\D+)_(\\d)") %>%
spread(val_type, value) %>%
mutate(angle_mean = (as.numeric(ring_number) * 15 - 15/2) * pi / 180) %>%
mutate(solid_angle = 2* pi * (cos((as.numeric(ring_number) - 1) * 15 * pi / 180) - cos(as.numeric(ring_number) * 15 * pi / 180)))
df_drop = df[df$ring_number != "5",]
# calculate error
# ggplot(df, aes(x=lrs_cn, y=-log(transmission_s), color=ring_number)) +
# geom_point()
#
# ggplot(df, aes(x=lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
# geom_point()
## many models to choose from
# least squares contact number
# equal weight
# lm_rsm_mean_cn = lm(-log(df$transmission_s) ~ 0 + df$lrs_cn)
# summary(lm_rsm_mean_cn)
# cn_lm = predict(lm_rsm_mean_cn, df)
# solid angle weight
# lmw_rsm_mean_cn = lm(-log(df$transmission_s) ~ 0 + df$lrs_cn, weights=df$solid_angle)
# summary(lmw_rsm_mean_cn)
# cn_lmw = predict(lmw_rsm_mean_cn, df)
#
# # solid anlge weight drop ring 5
# lmw_rsm_mean_cn_drop = lm(-log(df_drop$transmission_s) ~ 0 + df_drop$lrs_cn, weights=df_drop$solid_angle)
# summary(lmw_rsm_mean_cn_drop)
# cn_lmw_drop = predict(lmw_rsm_mean_cn_drop, df_drop)
#
# # solid angle weight drop ring 5 thresholded
# # lmw_rsm_mean_cn_drop_bin = lm(-log(df_drop$transmission_s) ~ 0 + df_drop$lrs_cn_bin, weights=df_drop$solid_angle)
# lmw_rsm_mean_cn_drop_bin = lm(-log(df_drop$transmission_s[is.finite(df_drop$lrs_cn_bin)]) ~ 0 + df_drop$lrs_cn_bin[is.finite(df_drop$lrs_cn_bin)], weights=df_drop$solid_angle[is.finite(df_drop$lrs_cn_bin)])
# summary(lmw_rsm_mean_cn_drop_bin)
# # cn_lmw_drop_bin = predict(lmw_rsm_mean_cn_drop_bin, df_drop)
# cn_lmw_drop_bin = df_drop$lrs_cn_bin * summary(lmw_rsm_mean_cn_drop_bin)$coefficients[1]
# bin_lm = lm(-log(transmission_s) ~ lrs_cn_bin, data = df)
# summary(bin_lm)
# least squares transmittance
# # equal weight
# nls_rsm_mean_cn = nls(transmission_s ~ exp(-a * lrs_cn), data=df, start=c(a=0.5))
# summary(nls_rsm_mean_cn)
# tx_nls = predict(nls_rsm_mean_cn, newdata=df)
#
# # solid angle weight
# nlsw_rsm_mean_tx = nls(transmission_s ~ exp(-a * lrs_cn), data=df, start=c(a=0.5), weights=solid_angle)
# summary(nlsw_rsm_mean_tx)
# tx_nlsw = predict(nlsw_rsm_mean_tx, newdata=df)
# summary(nlsw_rsm_mean_tx)$parameters[1]
#
# # solid anlge weight drop ring 5
# df_drop = df[df$ring_number != "5",]
# lmw_rsm_mean_tx_drop = lm(df_drop$transmission_s ~ 0 + df_drop$lrs_cn, weights=df_drop$solid_angle)
# summary(lmw_rsm_mean_tx_drop)
# tx_lmw_drop = predict(lmw_rsm_mean_tx_drop, df_drop)
# #
# # solid angle weight, quadratic cn -- both terms are significant... could be good!
# nlsw2_rsm_mean_cn = nls(-log(transmission_s) ~ a * lrs_cn ^ 2 + b * lrs_cn, data=df, start=c(a=0.5, b=0), weights=solid_angle)
# summary(nlsw2_rsm_mean_cn)
# cn_nlsw2 = predict(nlsw2_rsm_mean_cn, newdata=df)
# # solid angle weight, quadratic tx
# nlsw2_rsm_mean_tx = nls(transmission_s ~ exp(-a * lrs_cn * ( 1 + b * lrs_cn)), data=df, start=c(a=0, b=0.5), weights=solid_angle)
# summary(nlsw2_rsm_mean_tx)
# tx_nlsw2 = predict(nlsw2_rsm_mean_tx, newdata=df)
#
# test_nls = nls(transmission_s ~ a * exp(-lrs_cn), data = df, start=c(a = 1), weights=solid_angle)
# summary(test_nls)
# tx_test = predict(test_nls, newdata=df)
#
# wmae_tx = function(cx){
# tx_error = exp(-cx * df$lrs_cn) - df$transmission_s
# weights = df$solid_angle / sum(df$solid_angle, na.rm=TRUE)
# wmae = abs(sum(weights * tx_error, na.rm=TRUE))
# }
#
# wmae_cn = function(cx){
# cn_error = cx * df$lrs_cn - -log(df$transmission_s)
# weights = df$solid_angle / sum(df$solid_angle, na.rm=TRUE)
# wmae = abs(sum(weights * cn_error, na.rm=TRUE))
# }
#
# opt_tx = optimize(wmae_tx, lower=0, upper=1)
# opt_tx$minimum
# tx_wmae = exp(-opt_tx$minimum * df$lrs_cn)
#
# opt_cn = optimize(wmae_cn, lower=0, upper=1)
# opt_cn$minimum
# cn_wmae = opt_cn$minimum * df$lrs_cn
# cn vs cn
# ggplot(df_drop, aes(x=lrs_cn)) +
# geom_point(aes(y=-log(transmission_s), color=ring_number)) +
# geom_line(aes(y=cn_lmw_drop))
# geom_line(aes(y=cn_lmw), color="orange") +
# geom_line(aes(y=-log(tx_test)), color="blue") +
# geom_line(aes(y=cn_nlsw), color="red") +
# geom_line(aes(y=-log(tx_nlsw2)), color="brown")
# geom_line(aes(y=cn_lm), color="red") +
# geom_line(aes(y=cn_wmae), color="yellow") +
# geom_line(aes(y=-log(tx_nls)), color="green") +
# geom_line(aes(y=-log(tx_wmae)), color="purple") +
# geom_line(aes(y=-log(tx_nlsw2)), color="brown")
# tx vs tx
# ggplot(df, aes(y=transmission_s, x=tx_nlsw2, color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
# cn vs tx
# ggplot(df_drop, aes(x=lrs_cn)) +
# geom_point(aes(y=transmission_s, color=ring_number)) +
# geom_line(aes(y=tx_test), color="red")
# geom_line(aes(y=exp(-cn_lm)), color="red") +
# geom_line(aes(y=exp(-cn_lmw)), color="orange") +
# geom_line(aes(y=exp(-cn_wmae)), color="yellow") +
# geom_line(aes(y=tx_nls), color="green") +
# geom_line(aes(y=tx_nlsw), color="blue") +
# geom_line(aes(y=tx_wmae), color="purple") +
# geom_line(aes(y=tx_nlsw2), color="brown")
# tx
# Optimized ray-sampling transmittance vs photo-derived transmittance,
# with a 1:1 reference line (band 5 excluded via df_drop).
ggplot(df_drop, aes(x=lrs_tx_opt, y=transmission_s, color=ring_number)) +
  geom_point() +
  geom_abline(intercept = 0, slope = 1) +
  xlim(0, 1) +
  ylim(0, 1) +
  labs(title="Light transmittance (T) error analysis", x='T (ray sampling snow-on)', y='T (hemispherical photography)', color='Zenith angle\nband [deg]') +
  scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_tx_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_tx_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# tx_bin
# Same comparison for the thresholded (binary) transmittance estimate.
# NOTE(review): xlab hard-codes "snow-on" while both snow-off/snow-on ggsave
# calls exist above -- confirm the label matches the batch being plotted.
ggplot(df_drop, aes(x=lrs_tx_bin, y=transmission_s, color=ring_number)) +
  geom_point() +
  geom_abline(intercept = 0, slope = 1) +
  xlim(0, 1) +
  ylim(0, 1) +
  labs(title="Light transmittance (T) error analysis", x='T (ray sampling snow-on)', y='T (hemispherical photography)', color='Zenith angle\nband [deg]') +
  scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# cn
# Contact-number comparison: optimized ray sampling vs -ln(photo transmittance).
ggplot(df_drop, aes(x=lrs_cn_opt, y=-log(transmission_s), color=ring_number)) +
  geom_point() +
  geom_abline(intercept = 0, slope = 1) +
  labs(title="Mean band-wise contact number E[X] methods comparison (snow-off)", x='E[X] (ray sampling)', y='E[X] (thresholded hemispherical photography)', color='Zenith angle\nband [deg]') +
  scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# cn bin
# Thresholded contact-number comparison (uncalibrated; the scaled variant is
# kept commented out below for reference).
# ggplot(df_drop, aes(x=summary(lmw_rsm_mean_cn_drop_bin)$coefficients[1] * lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
ggplot(df_drop, aes(x=lrs_cn_bin, y=-log(transmission_s), color=ring_number)) +
  geom_point() +
  geom_abline(intercept = 0, slope = 1) +
  labs(title="Mean band-wise contact number E[X] thresholded methods comparison (snow-off)", x='E[X] (thresholded ray sampling)', y='E[X] (thresholded hemispherical photography)', color='Zenith angle\nband [deg]') +
  scale_color_discrete(labels = c("0-15", "15-30", "30-45", "45-60", "60-75"), breaks=c(1, 2, 3, 4, 5))
# ggsave(paste0(plot_out_dir, "snow_off_cn_bin_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# ggsave(paste0(plot_out_dir, "snow_on_cn_bin_photo_error_eval.png"), width=p_width, height=p_height, dpi=dpi)
# # # remove 5th ring due to horizon clipping
# df_anal = df[df$ring_number != 5,]
# # lm_rsm_mean_tx = lm(df_anal$contactnum ~ 0 + df_anal$rsm_mean)
# # lm_rsm_mean_cn = lm(-log(df_anal$transmission_s) ~ 0 + df_anal$lrs_cn)
# nls_rsm_mean_cn = nls(transmission_s ~ exp(-a * lrs_cn), data=df_anal, start=c(a=0.5))
# summary(nls_rsm_mean_cn)
#
# fo = paste0("hat(y) == ", sprintf("%.5f",summary(lmw_rsm_mean_cn_drop)$coefficients[1]), " * x")
# r2 = paste0("R^2 == ", sprintf("%.5f",summary(lmw_rsm_mean_cn_drop)$adj.r.squared))
#
#
# ggplot(df_drop, aes(x=lrs_cn, y=-log(transmission_s), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = summary(lmw_rsm_mean_cn_drop)$coefficients[1]) +
# annotate("text", x=5, y=2, label=fo, parse=TRUE) +
# annotate("text", x=5, y=1.9, label=r2, parse=TRUE) +
# labs(title="", x='Lidar returns', y='-log(transmission)', color='Ring')
# # ggsave(paste0(plot_out_dir, "snow_off_returns_to_tx_optimization.png"), width=p_width, height=p_height, dpi=dpi)
# # ggsave(paste0(plot_out_dir, "snow-on_returns_to_tx_optimization.png"), width=p_width, height=p_height, dpi=dpi)
#
#
# stats
# Fit-quality summary comparing hemispherical-photo observations with the
# optimized ray-sampling estimates, for all zenith bands pooled (nn = 0) or
# a single band (nn in 1..4). Reads the global comparison table `df_drop`.
#
# Returns c(tx_r2_adj, tx_wrmse, tx_wmb, cn_r2_adj, cn_wrmse, cn_wmb):
# solid-angle-weighted adjusted R^2, weighted RMSE and weighted mean bias,
# first for transmittance (tx) and then for contact number (cn = -ln tx).
# (An alternative "cn model" using the lmw_* calibration coefficients was
# previously explored here; see the commented blocks above in history.)
model_eval <- function(nn = 0) {
  dat <- df_drop
  if (nn > 0) {
    dat <- dat %>% filter(ring_number == nn)
  }

  obs_tx <- dat$transmission_s   # observed transmittance (photos)
  obs_cn <- -log(obs_tx)         # observed contact number
  est_tx <- dat$lrs_tx_opt       # ray-sampling transmittance estimate
  est_cn <- dat$lrs_cn_opt       # ray-sampling contact-number estimate

  # Solid-angle weights, normalized to sum to one over the subset.
  w <- dat$solid_angle / sum(dat$solid_angle)

  # Weighted fit statistics for one observed/estimated pair:
  # adjusted R^2 of obs ~ est, weighted RMSE, weighted mean bias.
  fit_stats <- function(obs, est) {
    r2_adj <- summary(lm(obs ~ est, weights = w))$adj.r.squared
    err <- est - obs
    c(r2_adj, sqrt(sum(w * err^2)), sum(w * err))
  }

  c(fit_stats(obs_tx, est_tx), fit_stats(obs_cn, est_cn))
}
# Evaluate fit statistics for all bands pooled (0) and per band 1-4
# (band 5 is excluded from df_drop).
model_eval(0)
model_eval(1)
model_eval(2)
model_eval(3)
model_eval(4)
# model_eval(5)
#
#
# ggplot(df, aes(x=exp(-0.195 * rsm_mean), y=exp(-contactnum), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
#
# ggplot(df, aes(x=exp(-0.166 * rsm_mean), y=exp(-contactnum), color=ring_number)) +
# geom_point() +
# geom_abline(intercept = 0, slope = 1)
##
#
# # plot linear against nb
# rsm_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/synthetic_hemis/batches/lrs_hemi_optimization_r.25_px361_linear/outputs/contact_number_optimization.csv"
# rsm = read.csv(rsm_in, header=TRUE, na.strings = c("NA",""), sep=",")
# rsm$id = as.character(rsm$id)
# rsm_linear = rsm[, c("id", "rsm_mean_1", "rsm_mean_2", "rsm_mean_3", "rsm_mean_4", "rsm_mean_5", "rsm_med_1", "rsm_med_2", "rsm_med_3", "rsm_med_4", "rsm_med_5")]
# df_l = rsm_linear %>%
# gather(key, value, -id) %>%
# extract(key, c("cn_type", "ring_number"), "(\\D+)_(\\d)") %>%
# spread(cn_type, value)
#
# rsm_in = "C:/Users/Cob/index/educational/usask/research/masters/data/lidar/synthetic_hemis/batches/lrs_hemi_optimization_r.25_px100_experimental/outputs/contact_number_optimization.csv"
# rsm = read.csv(rsm_in, header=TRUE, na.strings = c("NA",""), sep=",")
# rsm$id = as.character(rsm$id)
# rsm_nb = rsm[, c("id", "rsm_mean_1", "rsm_mean_2", "rsm_mean_3", "rsm_mean_4", "rsm_mean_5", "rsm_med_1", "rsm_med_2", "rsm_med_3", "rsm_med_4", "rsm_med_5")]
# df_nb = rsm_nb %>%
# gather(key, value, -id) %>%
# extract(key, c("cn_type", "ring_number"), "(\\D+)_(\\d)") %>%
# spread(cn_type, value)
#
# df = merge(df_l, df_nb, by=c('id', 'ring_number'), suffixes = c('_l', '_nb'))
#
#
# l_coef = .11992
# nb_coef = .18734
# ggplot(df, aes(x=rsm_mean_l * l_coef, y=rsm_mean_nb * nb_coef, color=ring_number)) +
# geom_point()
|
# Load the dataset and derive the log10 of mean household income.
data <- read.table("new_zeta_nodupes.csv", sep = ",", header = TRUE)
data$X <- NULL  # drop the exported row-index column
data$log_income <- log10(data$meanhouseholdincome)

library(plotly)

# Interactive 3D scatter of age vs education vs employment, colored by education.
plot_ly(data, x = ~age, y = ~education, z = ~employment, color = ~education,
        colors = c('#4AC6B7', '#1972A4', '#965F8A', '#FF7070', '#C61951'))

y <- data$log_income

# Scatter plot showing the effect age has on log_income.
x <- data$age
plot(x, y)

# Simple linear regression of log_income on age: model summary, diagnostic
# plots, then predicted vs true (line = 1:1 reference, points = fitted values).
d <- lm(y ~ x)
str(d)
print(d)
par(mfrow = c(2, 2))
plot(d)
ypred <- predict(d)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(d)
print(d1)

# Repeat the same analysis with education as the single predictor.
x <- data$education
d <- lm(y ~ x)
str(d)
print(d)
par(mfrow = c(2, 2))
plot(d)
ypred <- predict(d)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(d)
print(d1)

# Multiple regression: log_income on age, education and employment together.
x1 <- data$age
x2 <- data$education
x3 <- data$employment
m <- lm(y ~ x1 + x2 + x3)
str(m)
print(m)
par(mfrow = c(2, 2))
plot(m)
ypred <- predict(m)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(m)
print(d1)
|
/Lab06.R
|
no_license
|
cptAngo/Big_Data_SBT
|
R
| false
| false
| 1,396
|
r
|
# Load the dataset and derive the log10 of mean household income.
data <- read.table("new_zeta_nodupes.csv", sep = ",", header = TRUE)
data$X <- NULL  # drop the exported row-index column
data$log_income <- log10(data$meanhouseholdincome)

library(plotly)

# Interactive 3D scatter of age vs education vs employment, colored by education.
plot_ly(data, x = ~age, y = ~education, z = ~employment, color = ~education,
        colors = c('#4AC6B7', '#1972A4', '#965F8A', '#FF7070', '#C61951'))

y <- data$log_income

# Scatter plot showing the effect age has on log_income.
x <- data$age
plot(x, y)

# Simple linear regression of log_income on age: model summary, diagnostic
# plots, then predicted vs true (line = 1:1 reference, points = fitted values).
d <- lm(y ~ x)
str(d)
print(d)
par(mfrow = c(2, 2))
plot(d)
ypred <- predict(d)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(d)
print(d1)

# Repeat the same analysis with education as the single predictor.
x <- data$education
d <- lm(y ~ x)
str(d)
print(d)
par(mfrow = c(2, 2))
plot(d)
ypred <- predict(d)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(d)
print(d1)

# Multiple regression: log_income on age, education and employment together.
x1 <- data$age
x2 <- data$education
x3 <- data$employment
m <- lm(y ~ x1 + x2 + x3)
str(m)
print(m)
par(mfrow = c(2, 2))
plot(m)
ypred <- predict(m)
par(mfrow = c(1, 1))
plot(y, y, type = "l", xlab = "true y", ylab = "predicted y")
points(y, ypred)
d1 <- summary(m)
print(d1)
|
################################################
# Interactive network viz from bibtex file
# Author: Damien Jacques
# Last update: November 29, 2018
################################################
library(bib2df)
library(igraph)
library(networkD3)
# Load bibliography
bib <- bib2df("/path/Biblio")

# Correct author list: keep the first two whitespace-separated tokens of each
# author string (typically "Last, F."), then deduplicate.
unique.author <- unique(sub("^(\\S*\\s+\\S+).*", "\\1", unlist(bib$AUTHOR)))
# Drop NA entries and the bibtex "others" placeholder.
# FIX: the previous x[-which(cond)] form silently returns an EMPTY vector
# when no element matches; logical subsetting is safe in that case.
unique.author <- unique.author[!is.na(unique.author) & unique.author != "others"]

# Build adjacency matrix of pairwise co-authorship counts, initialized to 0
all.pairs <- matrix(0,
                    nrow = length(unique.author),
                    ncol = length(unique.author),
                    dimnames = list(unique.author, unique.author))

# Count co-authorships entry by entry (could be more efficient).
# seq_len/seq_along avoid the 1:0 trap when bib is empty or an entry
# has no parsed authors.
for (i in seq_len(nrow(bib))) {
  authors <- sub("^(\\S*\\s+\\S+).*", "\\1", bib$AUTHOR[[i]])
  for (k in seq_along(authors)) {
    row.idx <- which(row.names(all.pairs) == authors[k])
    col.idx <- which(colnames(all.pairs) %in% authors[-k])
    all.pairs[row.idx, col.idx] <- all.pairs[row.idx, col.idx] + 1
  }
}

# Make undirected weighted graph from the co-authorship matrix
author.adj <- graph.adjacency(all.pairs, mode = 'undirected', weighted = TRUE)

# Use igraph to detect communities (walktrap) and get node membership
wc <- cluster_walktrap(author.adj)
members <- membership(wc)

# Convert to object suitable for networkD3
author.adj_d3 <- igraph_to_networkD3(author.adj, group = members)

# Create force directed network plot
forceNetwork(Links = author.adj_d3$links, Nodes = author.adj_d3$nodes,
             Source = 'source', Target = 'target', NodeID = 'name',
             Group = 'group', fontSize = 24,
             zoom = TRUE, Value = "value", radiusCalculation = "d.nodesize", opacity = 1)
|
/ResearchNetworkViz.R
|
permissive
|
damienjacques/ResearchNetwork
|
R
| false
| false
| 1,742
|
r
|
################################################
# Interactive network viz from bibtex file
# Author: Damien Jacques
# Last update: November 29, 2018
################################################
library(bib2df)
library(igraph)
library(networkD3)
# Load bibliography
bib <- bib2df("/path/Biblio")

# Correct author list: keep the first two whitespace-separated tokens of each
# author string (typically "Last, F."), then deduplicate.
unique.author <- unique(sub("^(\\S*\\s+\\S+).*", "\\1", unlist(bib$AUTHOR)))
# Drop NA entries and the bibtex "others" placeholder.
# FIX: the previous x[-which(cond)] form silently returns an EMPTY vector
# when no element matches; logical subsetting is safe in that case.
unique.author <- unique.author[!is.na(unique.author) & unique.author != "others"]

# Build adjacency matrix of pairwise co-authorship counts, initialized to 0
all.pairs <- matrix(0,
                    nrow = length(unique.author),
                    ncol = length(unique.author),
                    dimnames = list(unique.author, unique.author))

# Count co-authorships entry by entry (could be more efficient).
# seq_len/seq_along avoid the 1:0 trap when bib is empty or an entry
# has no parsed authors.
for (i in seq_len(nrow(bib))) {
  authors <- sub("^(\\S*\\s+\\S+).*", "\\1", bib$AUTHOR[[i]])
  for (k in seq_along(authors)) {
    row.idx <- which(row.names(all.pairs) == authors[k])
    col.idx <- which(colnames(all.pairs) %in% authors[-k])
    all.pairs[row.idx, col.idx] <- all.pairs[row.idx, col.idx] + 1
  }
}

# Make undirected weighted graph from the co-authorship matrix
author.adj <- graph.adjacency(all.pairs, mode = 'undirected', weighted = TRUE)

# Use igraph to detect communities (walktrap) and get node membership
wc <- cluster_walktrap(author.adj)
members <- membership(wc)

# Convert to object suitable for networkD3
author.adj_d3 <- igraph_to_networkD3(author.adj, group = members)

# Create force directed network plot
forceNetwork(Links = author.adj_d3$links, Nodes = author.adj_d3$nodes,
             Source = 'source', Target = 'target', NodeID = 'name',
             Group = 'group', fontSize = 24,
             zoom = TRUE, Value = "value", radiusCalculation = "d.nodesize", opacity = 1)
|
########################################################################################################################"
# #
# Tables and figures of the posterior distributions in the Supplementary Information #
# #
# Juliette Archambeau #
# 18/03/2022 #
# #
########################################################################################################################"
library(broom) # CRAN v0.5.2
library(latex2exp) # CRAN v0.4.0
library(ggplot2) # CRAN v3.3.1
library(ggpubr) # CRAN v0.2.1
library(tidybayes) # CRAN v2.0.1
library(dplyr) # CRAN v1.0.0
library(bayesplot) # CRAN v1.7.1
library(xtable) # CRAN v1.8-4
library(ggridges) # CRAN v0.5.1
library(tidyverse) # CRAN v1.3.0
library(tibble) # CRAN v2.1.3
library(brms) # CRAN v2.11.1
# Functions used in the script:
source("scripts/Functions/vir_lite.R") # available here: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/scripts/Functions/vir_lite.R
# Elementwise square of a numeric input (vectorized).
# `x * x` is kept (rather than x^2) so integer input stays integer.
square <- function(x) {
  x * x
}
# Desaturation parameter passed to the vir_lite() palette helper
ds=0.7 # Parameter of the function vir_lite
# In the Supplementary Information, we report the medians and the 95% credible intervals of the posterior distributions.
prob=0.95
# Lower and upper tail probabilities bounding the 95% credible interval
probs <- c((1 - prob) / 2, 1 - (1 - prob) / 2)
# Load the train dataset of the P1 partition
#data <- readRDS(file="../../data/TrainP1.RDS")
# Data partition
part <- "P1" # choose between P1, P2 and P3
# Model M0 ####
# ======== "
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD0.rds"))
# >> Tables S14 and S36. ####
# ---------------------- "
# Posterior summaries (median, SD, 95% CI) of the variance components and
# fixed effects of model M0, exported as a LaTeX table.
# extract the standard deviations:
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
# One row per parameter; columns filled in the loop below.
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i]) # we want the variances and not the standard deviations
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
# Map parameter names to the LaTeX symbols used in the paper.
df <- df %>% mutate(Parameter = recode_factor(rownames(df),'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Extract the coefficients of the fixed effects
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2),'b_age.sc'="$\\beta_{age}$",
                                   'b_Iage.scE2'= '$\\beta_{age2}$',
                                   'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Stack variance components and fixed effects and export as LaTeX.
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M0_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M1 ####
# ======== "
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD1.rds"))
# >> Tables S15 and S37. ####
# ---------------------- "
# Same extraction as for M0: posterior summaries of the variance components
# (squared to report variances) and fixed effects, exported as LaTeX.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])  # variances, not standard deviations
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed-effect coefficients
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M1_MainVarPost.tex"), include.rownames=FALSE,sanitize.text.function = function(x) {x})
# >> Table S16. ####
# ------------- "
# only for the P1 partition
# Site varying intercepts of model M1: posterior medians with 95% credible
# intervals, recoded to LaTeX symbols and exported as a table.
df <- mod %>% broom::tidyMCMC(estimate.method = "median",conf.int = T,conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_site\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter = recode_factor(Term,
                                   'r_site[asturias,Intercept]'="$S_{Asturias}$",
                                   'r_site[bordeaux,Intercept]'= '$S_{Bordeaux}$',
                                   'r_site[caceres,Intercept]'='$S_{Caceres}$',
                                   'r_site[madrid,Intercept]'='$S_{Madrid}$',
                                   'r_site[portugal,Intercept]'="$S_{Portugal}$")) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
print(xtable(df, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M1_SiteInterceptsPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S17. ####
# ------------- "
# only for the P1 partition
# Provenance varying intercepts of model M1: posterior medians with 95%
# credible intervals, recoded to LaTeX symbols and exported as a table.
# FIX: estimate.method was "mean" although the output column is labelled
# "Median" and the script header states that medians are reported; use
# "median" and make the 95% level explicit, matching Table S16 above.
df <- mod %>% broom::tidyMCMC(estimate.method = "median",conf.int = T,conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_prov\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter= recode_factor(Term,
                                  'r_prov[MIM,Intercept]' = '$P_{MIM}$',
                                  'r_prov[CEN,Intercept]' = '$P_{CEN}$',
                                  'r_prov[ORI,Intercept]' = '$P_{ORI}$',
                                  'r_prov[STJ,Intercept]' = '$P_{STJ}$',
                                  'r_prov[HOU,Intercept]' = '$P_{HOU}$',
                                  'r_prov[CUE,Intercept]' = '$P_{CUE}$',
                                  'r_prov[TAM,Intercept]' = '$P_{TAM}$',
                                  'r_prov[LEI,Intercept]' = '$P_{LEI}$',
                                  'r_prov[VER,Intercept]' = '$P_{VER}$',
                                  'r_prov[CAS,Intercept]' = '$P_{CAS}$',
                                  'r_prov[PIA,Intercept]' = '$P_{PIA}$',
                                  'r_prov[ARM,Intercept]' = '$P_{ARM}$',
                                  'r_prov[PET,Intercept]' = '$P_{PET}$',
                                  'r_prov[VAL,Intercept]' = '$P_{VAL}$',
                                  'r_prov[SAL,Intercept]' = '$P_{SAL}$',
                                  'r_prov[OLO,Intercept]' = '$P_{OLO}$',
                                  'r_prov[CAD,Intercept]' = '$P_{CAD}$',
                                  'r_prov[ARN,Intercept]' = '$P_{ARN}$',
                                  'r_prov[BAY,Intercept]' = '$P_{BAY}$',
                                  'r_prov[SIE,Intercept]' = '$P_{SIE}$',
                                  'r_prov[SEG,Intercept]' = '$P_{SEG}$',
                                  'r_prov[PLE,Intercept]' = '$P_{PLE}$',
                                  'r_prov[BON,Intercept]' = '$P_{BON}$',
                                  'r_prov[COC,Intercept]' = '$P_{COC}$',
                                  'r_prov[SAC,Intercept]' = '$P_{SAC}$',
                                  'r_prov[QUA,Intercept]' = '$P_{QUA}$',
                                  'r_prov[CAR,Intercept]' = '$P_{CAR}$',
                                  'r_prov[OLB,Intercept]' = '$P_{OLB}$',
                                  'r_prov[PIE,Intercept]' = '$P_{PIE}$',
                                  'r_prov[PUE,Intercept]' = '$P_{PUE}$',
                                  'r_prov[ALT,Intercept]' = '$P_{ALT}$',
                                  'r_prov[LAM,Intercept]' = '$P_{LAM}$',
                                  'r_prov[MAD,Intercept]' = '$P_{MAD}$',
                                  'r_prov[COM,Intercept]' = '$P_{COM}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
print(xtable(df, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M1_ProvInterceptsPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Figure S11. ####
# ------------- "
# only for the P1 partition
# Conditional effect of (mean-centered) age on log-height in model M1,
# saved as a standalone figure.
p <- plot(conditional_effects(mod,"age.sc"),plot=FALSE)[[1]] +
  xlab("Mean-centered age") +
  ylab("Logarithm of height (mm)") +
  theme_bw()
ggsave(p,file="figs/SuppInfo/M1_CondEffectAge.png",height=6,width=6)
# Model M2 ####
# ======== "
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD2.rds"))
# >> Tables S18 and S38. ####
# ---------------------- "
# Same extraction as for M0/M1; M2 adds a provenance-by-site interaction
# variance ($\sigma^{2}_{Inter}$).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])  # variances, not standard deviations
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_prov:site__Intercept'='$\\sigma^{2}_{Inter}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed-effect coefficients
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M2_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Figure S12. ####
# ------------- "
# only for the P1 partition
# >>>> Panel All sites ####
# Ridgeline plot of the provenance intercept posteriors (all sites pooled),
# colored by each provenance's dominant gene pool.
# Draws of the provenance intercepts, one column per provenance.
POST <- posterior_samples(mod,pars = "^r_prov\\[")
# Trim the "r_prov[" prefix and ",Intercept]" suffix from the column names.
colnames(POST) <- str_sub(colnames(POST),8,-12)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
# Training data of the P1 partition, used to assign each provenance its
# dominant gene pool (highest mean ancestry among Q1..Q6).
data <- read_csv("data_DRYAD/HeightClimateSoilData_33121obs_32variables.csv") %>% dplyr::filter(P1=="train")
data <- droplevels(data)
ps <- data %>%
  group_by(prov) %>%
  summarise_at(vars(paste0(rep("Q",6),1:6)), mean)
ps$max.Q.prov <- colnames(ps[,2:7])[apply(ps[,2:7],1,which.max)]
ps$prov <- as.factor(ps$prov)
# Long format: one row per (provenance, posterior draw), with the
# per-provenance posterior mean used to order the ridges.
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value)) %>%
  ungroup()
pm2_all <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  # Provenance codes rendered as LaTeX subscripts on the y axis.
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  coord_cartesian(c(-0.35,0.3))+
  # Quantile lines: blue for the 2.5/97.5 percentiles, red for the median.
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "All sites",
       y = "",
       x = TeX("Intercepts P_{p}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Portugal ####
# Same ridgeline plot for the provenance-by-site intercepts at the Portugal
# site; reuses the gene-pool table `ps` built for the All-sites panel.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*portugal")
# Trim the "r_prov:site[" prefix and the "_portugal,Intercept]" suffix.
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov)%>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value))%>%
  ungroup()
pm2_portugal <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  # Provenance codes rendered as LaTeX subscripts on the y axis.
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  coord_cartesian(c(-0.35,0.3))+
  # Quantile lines: blue for the 2.5/97.5 percentiles, red for the median.
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Portugal",
       y = "",
       x = TeX("Intercepts P_{p,Portugal}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Caceres ####
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*caceres")
colnames(POST) <- str_sub(colnames(POST),13,-20)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
as_tibble() %>%
gather(key = "key", value = "value", -prov,-max.Q.prov)%>%
group_by(prov) %>%
dplyr::mutate(meanperprov = mean(value))%>%
ungroup()
pm2_caceres <- ggplot()+
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = reorder(as.factor(prov), meanperprov),
height = ..density..,
fill = as.factor(max.Q.prov),
vline_color = ..quantile..),
scale = 3,
alpha = .6,
rel_min_height=c(.01),
size=0.2,
quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
"ARM"=parse(text = TeX("$P_{ARM}$")),
"ARN"=parse(text = TeX("$P_{ARN}$")),
"BAY"=parse(text = TeX("$P_{BAY}$")),
"BON"=parse(text = TeX("$P_{BON}$")),
"CAD"=parse(text = TeX("$P_{CAD}$")),
"CAR"=parse(text = TeX("$P_{CAR}$")),
"CAS"=parse(text = TeX("$P_{CAS}$")),
"CEN"=parse(text = TeX("$P_{CEN}$")),
"COC"=parse(text = TeX("$P_{COC}$")),
"COM"=parse(text = TeX("$P_{COM}$")),
"CUE"=parse(text = TeX("$P_{CUE}$")),
"HOU"=parse(text = TeX("$P_{HOU}$")),
"LAM"=parse(text = TeX("$P_{LAM}$")),
"LEI"=parse(text = TeX("$P_{LEI}$")),
"MAD"=parse(text = TeX("$P_{MAD}$")),
"MIM"=parse(text = TeX("$P_{MIM}$")),
"OLB"=parse(text = TeX("$P_{OLB}$")),
"OLO"=parse(text = TeX("$P_{OLO}$")),
"ORI"=parse(text = TeX("$P_{ORI}$")),
"PET"=parse(text = TeX("$P_{PET}$")),
"PIA"=parse(text = TeX("$P_{PIA}$")),
"PIE"=parse(text = TeX("$P_{PIE}$")),
"PLE"=parse(text = TeX("$P_{PLE}$")),
"PUE"=parse(text = TeX("$P_{PUE}$")),
"QUA"=parse(text = TeX("$P_{QUA}$")),
"SAC"=parse(text = TeX("$P_{SAC}$")),
"SAL"=parse(text = TeX("$P_{SAL}$")),
"SEG"=parse(text = TeX("$P_{SEG}$")),
"SIE"=parse(text = TeX("$P_{SIE}$")),
"STJ"=parse(text = TeX("$P_{STJ}$")),
"TAM"=parse(text = TeX("$P_{TAM}$")),
"VAL"=parse(text = TeX("$P_{VAL}$")),
"VER"=parse(text = TeX("$P_{VER}$")))) +
coord_cartesian(c(-0.35,0.3))+
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("2.5 and 97.5th percentiles", "Mean"),
name = NULL) +
scale_fill_manual(values=c("orangered3",
"gold2","darkorchid3",
"navyblue",
"turquoise2",
"green3"), labels = c("Q1: Northern Africa",
"Q2: Corsica",
"Q3: Central Spain",
"Q4: French Atlantic",
"Q5: Iberian Atlantic",
"Q6: South-eastern Spain"),name="Gene pools") +
labs(title = "Caceres",
y = "",
x = TeX("Intercepts P_{p,Caceres}")
) +
theme_bw() +
theme(axis.text = element_text(size=12),
axis.title = element_text(size=14),
legend.text = element_text(size=18),
legend.title = element_text(size=20))
# >>>> Panel Madrid ####
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*madrid")
colnames(POST) <- str_sub(colnames(POST),13,-19)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
as_tibble() %>%
gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
group_by(prov) %>%
dplyr::mutate(meanperprov = mean(value)) %>%
ungroup()
pm2_madrid <- ggplot()+
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = reorder(as.factor(prov), meanperprov),
height = ..density..,
fill = as.factor(max.Q.prov),
vline_color = ..quantile..),
scale = 3,
alpha = .6,
rel_min_height=c(.01),
size=0.2,
quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
coord_cartesian(c(-0.35,0.3))+
scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
"ARM"=parse(text = TeX("$P_{ARM}$")),
"ARN"=parse(text = TeX("$P_{ARN}$")),
"BAY"=parse(text = TeX("$P_{BAY}$")),
"BON"=parse(text = TeX("$P_{BON}$")),
"CAD"=parse(text = TeX("$P_{CAD}$")),
"CAR"=parse(text = TeX("$P_{CAR}$")),
"CAS"=parse(text = TeX("$P_{CAS}$")),
"CEN"=parse(text = TeX("$P_{CEN}$")),
"COC"=parse(text = TeX("$P_{COC}$")),
"COM"=parse(text = TeX("$P_{COM}$")),
"CUE"=parse(text = TeX("$P_{CUE}$")),
"HOU"=parse(text = TeX("$P_{HOU}$")),
"LAM"=parse(text = TeX("$P_{LAM}$")),
"LEI"=parse(text = TeX("$P_{LEI}$")),
"MAD"=parse(text = TeX("$P_{MAD}$")),
"MIM"=parse(text = TeX("$P_{MIM}$")),
"OLB"=parse(text = TeX("$P_{OLB}$")),
"OLO"=parse(text = TeX("$P_{OLO}$")),
"ORI"=parse(text = TeX("$P_{ORI}$")),
"PET"=parse(text = TeX("$P_{PET}$")),
"PIA"=parse(text = TeX("$P_{PIA}$")),
"PIE"=parse(text = TeX("$P_{PIE}$")),
"PLE"=parse(text = TeX("$P_{PLE}$")),
"PUE"=parse(text = TeX("$P_{PUE}$")),
"QUA"=parse(text = TeX("$P_{QUA}$")),
"SAC"=parse(text = TeX("$P_{SAC}$")),
"SAL"=parse(text = TeX("$P_{SAL}$")),
"SEG"=parse(text = TeX("$P_{SEG}$")),
"SIE"=parse(text = TeX("$P_{SIE}$")),
"STJ"=parse(text = TeX("$P_{STJ}$")),
"TAM"=parse(text = TeX("$P_{TAM}$")),
"VAL"=parse(text = TeX("$P_{VAL}$")),
"VER"=parse(text = TeX("$P_{VER}$")))) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("2.5 and 97.5th percentiles", "Mean"),
name = NULL) +
scale_fill_manual(values=c("orangered3",
"gold2","darkorchid3",
"navyblue",
"turquoise2",
"green3"), labels = c("Q1: Northern Africa",
"Q2: Corsica",
"Q3: Central Spain",
"Q4: French Atlantic",
"Q5: Iberian Atlantic",
"Q6: South-eastern Spain"),name="Gene pools") +
labs(title = "Madrid",
y = "",
x = TeX("Intercepts P_{p,Madrid}")
) +
theme_bw() +
theme(axis.text = element_text(size=12),
axis.title = element_text(size=14),
legend.text = element_text(size=18),
legend.title = element_text(size=20))
# >>>> Panel Bordeaux ####
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*bordeaux")
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
as_tibble() %>%
gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
group_by(prov) %>%
dplyr::mutate(meanperprov = mean(value)) %>%
ungroup()
pm2_bordeaux <- ggplot() +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = reorder(as.factor(prov), meanperprov),
height = ..density..,
fill = as.factor(max.Q.prov),
vline_color = ..quantile..),
scale = 3,
alpha = .6,
rel_min_height=c(.01),
size=0.2,
quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
coord_cartesian(c(-0.35,0.3))+
scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
"ARM"=parse(text = TeX("$P_{ARM}$")),
"ARN"=parse(text = TeX("$P_{ARN}$")),
"BAY"=parse(text = TeX("$P_{BAY}$")),
"BON"=parse(text = TeX("$P_{BON}$")),
"CAD"=parse(text = TeX("$P_{CAD}$")),
"CAR"=parse(text = TeX("$P_{CAR}$")),
"CAS"=parse(text = TeX("$P_{CAS}$")),
"CEN"=parse(text = TeX("$P_{CEN}$")),
"COC"=parse(text = TeX("$P_{COC}$")),
"COM"=parse(text = TeX("$P_{COM}$")),
"CUE"=parse(text = TeX("$P_{CUE}$")),
"HOU"=parse(text = TeX("$P_{HOU}$")),
"LAM"=parse(text = TeX("$P_{LAM}$")),
"LEI"=parse(text = TeX("$P_{LEI}$")),
"MAD"=parse(text = TeX("$P_{MAD}$")),
"MIM"=parse(text = TeX("$P_{MIM}$")),
"OLB"=parse(text = TeX("$P_{OLB}$")),
"OLO"=parse(text = TeX("$P_{OLO}$")),
"ORI"=parse(text = TeX("$P_{ORI}$")),
"PET"=parse(text = TeX("$P_{PET}$")),
"PIA"=parse(text = TeX("$P_{PIA}$")),
"PIE"=parse(text = TeX("$P_{PIE}$")),
"PLE"=parse(text = TeX("$P_{PLE}$")),
"PUE"=parse(text = TeX("$P_{PUE}$")),
"QUA"=parse(text = TeX("$P_{QUA}$")),
"SAC"=parse(text = TeX("$P_{SAC}$")),
"SAL"=parse(text = TeX("$P_{SAL}$")),
"SEG"=parse(text = TeX("$P_{SEG}$")),
"SIE"=parse(text = TeX("$P_{SIE}$")),
"STJ"=parse(text = TeX("$P_{STJ}$")),
"TAM"=parse(text = TeX("$P_{TAM}$")),
"VAL"=parse(text = TeX("$P_{VAL}$")),
"VER"=parse(text = TeX("$P_{VER}$")))) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("2.5 and 97.5th percentiles", "Mean"),
name = NULL) +
scale_fill_manual(values=c("orangered3",
"gold2","darkorchid3",
"navyblue",
"turquoise2",
"green3"), labels = c("Q1: Northern Africa",
"Q2: Corsica",
"Q3: Central Spain",
"Q4: French Atlantic",
"Q5: Iberian Atlantic",
"Q6: South-eastern Spain"),name="Gene pools") +
labs(title = "Bordeaux",
y = "",
x = TeX("Intercepts P_{p,Bordeaux}")
) +
theme_bw() +
theme(axis.text = element_text(size=12),
axis.title = element_text(size=14),
legend.text = element_text(size=18),
legend.title = element_text(size=20))
# >>>> Panel Asturias ####
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*asturias")
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
as_tibble() %>%
gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
group_by(prov) %>%
dplyr::mutate(meanperprov = mean(value)) %>%
ungroup()
pm2_asturias <- ggplot() +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = reorder(as.factor(prov), meanperprov),
height = ..density..,
fill = as.factor(max.Q.prov),
vline_color = ..quantile..),
scale = 3,
alpha = .6,
rel_min_height=c(.01),
size=0.2,
quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
coord_cartesian(c(-0.35,0.3))+
scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
"ARM"=parse(text = TeX("$P_{ARM}$")),
"ARN"=parse(text = TeX("$P_{ARN}$")),
"BAY"=parse(text = TeX("$P_{BAY}$")),
"BON"=parse(text = TeX("$P_{BON}$")),
"CAD"=parse(text = TeX("$P_{CAD}$")),
"CAR"=parse(text = TeX("$P_{CAR}$")),
"CAS"=parse(text = TeX("$P_{CAS}$")),
"CEN"=parse(text = TeX("$P_{CEN}$")),
"COC"=parse(text = TeX("$P_{COC}$")),
"COM"=parse(text = TeX("$P_{COM}$")),
"CUE"=parse(text = TeX("$P_{CUE}$")),
"HOU"=parse(text = TeX("$P_{HOU}$")),
"LAM"=parse(text = TeX("$P_{LAM}$")),
"LEI"=parse(text = TeX("$P_{LEI}$")),
"MAD"=parse(text = TeX("$P_{MAD}$")),
"MIM"=parse(text = TeX("$P_{MIM}$")),
"OLB"=parse(text = TeX("$P_{OLB}$")),
"OLO"=parse(text = TeX("$P_{OLO}$")),
"ORI"=parse(text = TeX("$P_{ORI}$")),
"PET"=parse(text = TeX("$P_{PET}$")),
"PIA"=parse(text = TeX("$P_{PIA}$")),
"PIE"=parse(text = TeX("$P_{PIE}$")),
"PLE"=parse(text = TeX("$P_{PLE}$")),
"PUE"=parse(text = TeX("$P_{PUE}$")),
"QUA"=parse(text = TeX("$P_{QUA}$")),
"SAC"=parse(text = TeX("$P_{SAC}$")),
"SAL"=parse(text = TeX("$P_{SAL}$")),
"SEG"=parse(text = TeX("$P_{SEG}$")),
"SIE"=parse(text = TeX("$P_{SIE}$")),
"STJ"=parse(text = TeX("$P_{STJ}$")),
"TAM"=parse(text = TeX("$P_{TAM}$")),
"VAL"=parse(text = TeX("$P_{VAL}$")),
"VER"=parse(text = TeX("$P_{VER}$")))) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("2.5 and 97.5th percentiles", "Mean"),
name = NULL) +
scale_fill_manual(values=c("orangered3",
"gold2","darkorchid3",
"navyblue",
"turquoise2",
"green3"), labels = c("Q1: Northern Africa",
"Q2: Corsica",
"Q3: Central Spain",
"Q4: French Atlantic",
"Q5: Iberian Atlantic",
"Q6: South-eastern Spain"),name="Gene pools") +
labs(title = "Asturias",
y = "",
x = TeX("Intercepts P_{p,Asturias}")
) +
theme_bw() +
theme(axis.text = element_text(size=12),
axis.title = element_text(size=14),
legend.text = element_text(size=18),
legend.title = element_text(size=20))
# >>>> Merging the panels ####
# Drop the (identical) legends from every panel, then arrange the six
# panels side by side and write the figure to disk.
strip_legend <- function(p) p + theme(legend.position = "none")
pp2_all      <- strip_legend(pm2_all)
pp2_asturias <- strip_legend(pm2_asturias)
pp2_bordeaux <- strip_legend(pm2_bordeaux)
pp2_caceres  <- strip_legend(pm2_caceres)
pp2_portugal <- strip_legend(pm2_portugal)
pp2_madrid   <- strip_legend(pm2_madrid)
g <- ggarrange(pp2_all, pp2_asturias, pp2_bordeaux,
               pp2_caceres, pp2_portugal, pp2_madrid,
               nrow = 1)
ggsave(g, file = "figs/SuppInfo/M2_SiteProvIntercepts.png", width = 20, height = 12)
# Model M3 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file = "outputs/models/P1/MOD3.rds")
# >> Table S19. ####
# ------------- "
# Posterior summaries (median, SD and the credible-interval bounds given by
# the global 'probs') of the variance components (squared sd/sigma draws),
# followed by the fixed effects, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
# LaTeX labels for the variance components.
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_site_age__Intercept' = '$\\sigma^{2}_{cs_{is}}$',
               'sd_clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_site:block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M3_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S20. ####
# ------------- "
# Posterior summaries of the site intercepts (r_site terms) of model M3.
site.labels <- c('r_site[asturias,Intercept]' = "$S_{Asturias}$",
                 'r_site[bordeaux,Intercept]' = '$S_{Bordeaux}$',
                 'r_site[caceres,Intercept]' = '$S_{Caceres}$',
                 'r_site[madrid,Intercept]' = '$S_{Madrid}$',
                 'r_site[portugal,Intercept]' = "$S_{Portugal}$")
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median", conf.int = TRUE, conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_site\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename(Median = Estimate, SD = Std.error, InfCI = Conf.low, SupCI = Conf.high) %>%
  mutate(Parameter = recode_factor(Term, !!!site.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
print(xtable(df, type = "latex", digits = 3),
      file = "tables/Posteriors/M3_SiteInterceptsPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S21. ####
# ------------- "
# Posterior summaries of the site-by-climatic-similarity intercepts
# (r_site_age terms) of model M3.
site_age.labels <- c('r_site_age[asturias10,Intercept]' = '$cs_{1,Asturias}$',
                     'r_site_age[portugal27,Intercept]' = '$cs_{4,Portugal}$',
                     'r_site_age[portugal20,Intercept]' = '$cs_{3,Portugal}$',
                     'r_site_age[asturias21,Intercept]' = '$cs_{2,Asturias}$',
                     'r_site_age[portugal11,Intercept]' = '$cs_{1,Portugal}$',
                     'r_site_age[madrid13,Intercept]' = '$cs_{1,Madrid}$',
                     'r_site_age[asturias37,Intercept]' = '$cs_{3,Asturias}$',
                     'r_site_age[portugal15,Intercept]' = '$cs_{2,Portugal}$',
                     'r_site_age[bordeaux25,Intercept]' = '$cs_{1,Bordeaux}$',
                     'r_site_age[bordeaux37,Intercept]' = '$cs_{2,Bordeaux}$',
                     'r_site_age[caceres8,Intercept]' = '$cs_{1,Caceres}$')
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median", conf.int = TRUE, conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_site_age\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename(Median = Estimate, SD = Std.error, InfCI = Conf.low, SupCI = Conf.high) %>%
  mutate(Parameter = recode_factor(Term, !!!site_age.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
print(xtable(df, type = "latex", digits = 3),
      file = "tables/Posteriors/M3_SiteClimSimInterceptsPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M3bis ####
# =========== "
# only for the P1 partition
mod <- readRDS(file = "outputs/models/P1/MOD13.rds")
# >> Table S23. ####
# ------------- "
# Posterior summaries of the variance components (squared sd/sigma draws)
# and fixed effects of model M3bis, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_site_age__Intercept' = '$\\sigma^{2}_{cs_{is}}$',
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M3bis_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S24. ####
# ------------- "
# Posterior summaries of the site-by-climatic-similarity intercepts
# (r_site_age terms) of model M3bis.
site_age.labels <- c('r_site_age[asturias10,Intercept]' = '$cs_{1,Asturias}$',
                     'r_site_age[portugal27,Intercept]' = '$cs_{4,Portugal}$',
                     'r_site_age[portugal20,Intercept]' = '$cs_{3,Portugal}$',
                     'r_site_age[asturias21,Intercept]' = '$cs_{2,Asturias}$',
                     'r_site_age[portugal11,Intercept]' = '$cs_{1,Portugal}$',
                     'r_site_age[madrid13,Intercept]' = '$cs_{1,Madrid}$',
                     'r_site_age[asturias37,Intercept]' = '$cs_{3,Asturias}$',
                     'r_site_age[portugal15,Intercept]' = '$cs_{2,Portugal}$',
                     'r_site_age[bordeaux25,Intercept]' = '$cs_{1,Bordeaux}$',
                     'r_site_age[bordeaux37,Intercept]' = '$cs_{2,Bordeaux}$',
                     'r_site_age[caceres8,Intercept]' = '$cs_{1,Caceres}$')
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median", conf.int = TRUE, conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_site_age\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename(Median = Estimate, SD = Std.error, InfCI = Conf.low, SupCI = Conf.high) %>%
  mutate(Parameter = recode_factor(Term, !!!site_age.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
print(xtable(df, type = "latex", digits = 3),
      file = "tables/Posteriors/M3bis_SiteClimSimInterceptsPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M4 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file = "outputs/models/P1/MOD4.rds")
# >> Table S25. ####
# ------------- "
# Posterior summaries of the variance components (squared sd/sigma draws)
# and fixed effects of model M4, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_site_age__Intercept' = '$\\sigma^{2}_{cs_{is}}$',
               'sd_mmQ1Q2Q3Q4Q5Q6__Intercept' = '$\\sigma^{2}_{g_{j}}$',
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_site:block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M4_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M5 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file = "outputs/models/P1/MOD5.rds")
# >> Table S26. ####
# ------------- "
# Posterior summaries of the variance components (squared sd/sigma draws,
# including the six per-gene-pool additive variances clon1..clon6) and the
# fixed effects of model M5, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_site_age__Intercept' = '$\\sigma^{2}_{cs_{is}}$',
               'sd_clon1__Intercept' = '$\\sigma^{2}_{A_{NA}}$',
               'sd_clon2__Intercept' = '$\\sigma^{2}_{A_{C}}$',
               'sd_clon3__Intercept' = '$\\sigma^{2}_{A_{CS}}$',
               'sd_clon4__Intercept' = '$\\sigma^{2}_{A_{FA}}$',
               'sd_clon5__Intercept' = '$\\sigma^{2}_{A_{IA}}$',
               'sd_clon6__Intercept' = '$\\sigma^{2}_{A_{SES}}$',
               'sd_mmQ1Q2Q3Q4Q5Q6__Intercept' = '$\\sigma^{2}_{g_{j}}$',
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_site:block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M5_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M6 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file = "outputs/models/P1/MOD6.rds")
# >> Table S29. ####
# ------------- "
# Posterior summaries of the variance components (squared sd/sigma draws)
# and fixed effects of model M6, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_site_age__Intercept' = '$\\sigma^{2}_{cs_{is}}$',
               'sd_prov_clim__Intercept' = '$\\sigma^{2}_{cp_{p}}$',
               'sd_mmQ1Q2Q3Q4Q5Q6__Intercept' = '$\\sigma^{2}_{g_{j}}$',
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_site:block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M6_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M7 ####
# ======== "
# only for the P1 and P2 partition.
mod <- readRDS(file = paste0("outputs/models/", part, "/MOD7.rds"))
# >> Tables S30 and S39. ####
# --------------------- "
# Posterior summaries of the variance components (squared sd/sigma draws,
# including the per-site slopes of the provenance climate variables and
# gPEA) and fixed effects of model M7, exported as a LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_mmQ1Q2Q3Q4Q5Q6__Intercept' = '$\\sigma^{2}_{g_{j}}$',
               'sd_site__bio5_prov.sc' = "$\\sigma^{2}_{\\beta_{max.temp,s}}$",
               'sd_site__bio14_prov.sc' = "$\\sigma^{2}_{\\beta_{min.pre,s}}$",
               'sd_site__gPEA.sc' = "$\\sigma^{2}_{\\beta_{gPEA,s}}$",
               'sd_block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M7_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M8 ####
# ======== "
# only for the P1 and P2 partition.
mod <- readRDS(file = paste0("outputs/models/", part, "/MOD8.rds"))
# >> Tables S31 and S40. ####
# ---------------------- "
# Posterior summaries of the variance components (squared sd/sigma draws,
# rPEA instead of M7's gPEA) and fixed effects of model M8, exported as a
# LaTeX table.
pars <- str_subset(get_variables(mod), "^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- square(sims[, , par])   # sd draws -> variance draws
  ci <- unname(quantile(draws, probs = probs))
  df[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
vc.labels <- c('sigma' = '$\\sigma^{2}$',
               'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
               'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
               'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
               'sd_mmQ1Q2Q3Q4Q5Q6__Intercept' = '$\\sigma^{2}_{g_{j}}$',
               'sd_site__bio5_prov.sc' = "$\\sigma^{2}_{\\beta_{max.temp,s}}$",
               'sd_site__bio14_prov.sc' = "$\\sigma^{2}_{\\beta_{min.pre,s}}$",
               'sd_site__rPEA.sc' = "$\\sigma^{2}_{\\beta_{rPEA,s}}$",
               'sd_block__Intercept' = '$\\sigma^{2}_{B}$')
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df), !!!vc.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Fixed effects (not squared).
pars <- str_subset(get_variables(mod), "^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (par in pars) {
  draws <- sims[, , par]
  ci <- unname(quantile(draws, probs = probs))
  df2[par, ] <- c(median(draws), sd(draws), ci[1], ci[2])
}
fx.labels <- c('b_age.sc' = "$\\beta_{age}$",
               'b_Iage.scE2' = '$\\beta_{age2}$',
               'b_Intercept' = '$\\beta_{0}$')
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2), !!!fx.labels)) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M8_MainVarPost.tex",
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# >> Figures S15, S17 and S19. ####
# ---------------------------- "
# for the three partitions
# Load the two extended models (M7 and M8) of the current partition 'part'.
models <- list()
models[[1]] <- readRDS(file = paste0("outputs/models/", part, "/MOD7.rds"))
models[[2]] <- readRDS(file = paste0("outputs/models/", part, "/MOD8.rds"))
names(models) <- c("M7", "M8")
figs.GP <- list()
figs.beta <- list()
# Panels a. Population structure
# One ridge plot per model showing the posterior densities of the
# gene-pool intercepts (r_mmQ1Q2Q3Q4Q5Q6 terms).
for (i in 1:2) {
  POST <- posterior_samples(models[[i]], pars = "^r_mmQ1Q2Q3Q4Q5Q6\\[")
  # keep only the gene-pool id, i.e. strip the "r_mmQ1Q2Q3Q4Q5Q6[" prefix
  # and the ",Intercept]" suffix
  colnames(POST) <- str_sub(colnames(POST), 18, -12)
  POST <- as.data.frame(t(POST))
  POST$genepool <- as.factor(rownames(POST))
  posteriorsimpelmodellong <- POST %>%
    as_tibble() %>%
    gather(key = "key", value = "value", -genepool) %>%
    group_by(genepool) %>%
    dplyr::mutate(meanpergenepool = mean(value)) %>%
    ungroup()
  figs.GP[[i]] <- ggplot() +
    geom_vline(xintercept = 0, col = "grey70") +
    stat_density_ridges(data = posteriorsimpelmodellong,
                        aes(x = value,
                            y = reorder(as.factor(genepool), meanpergenepool),
                            fill = as.factor(genepool),
                            vline_color = ..quantile..),
                        scale = 2,
                        alpha = .6,
                        rel_min_height = c(.0044),
                        size = 0.5,
                        quantile_lines = TRUE, quantiles = c(0.025, 0.5, 0.975)) +
    scale_discrete_manual("vline_color",
                          values = c("blue", "red", "blue", "black"),
                          breaks = c(1, 2),
                          labels = c("2.5 and 97.5th percentiles", "Median"),
                          name = NULL) +
    scale_y_discrete(labels = c("Q1" = parse(text = TeX("$g_{NA}$")),
                                "Q2" = parse(text = TeX("$g_{C}$")),
                                "Q3" = parse(text = TeX("$g_{CS}$")),
                                "Q4" = parse(text = TeX("$g_{FA}$")),
                                "Q5" = parse(text = TeX("$g_{IA}$")),
                                "Q6" = parse(text = TeX("$g_{SES}$")))) +
    coord_cartesian(c(-0.5, 0.6)) +
    scale_fill_manual(values = c("orangered3",
                                 "gold2",
                                 "darkorchid3",
                                 "navyblue",
                                 "turquoise2",
                                 "green3"),
                      labels = c("Northern Africa (NA)",
                                 "Corsica (C)",
                                 "Central Spain (CS)",
                                 "French Atlantic (FA)",
                                 "Iberian Atlantic (IA)",
                                 "South-eastern Spain (SES)")) +
    labs(fill = "Gene pools",
         y = "",
         x = "") +
    theme_bw() +
    theme(axis.text = element_text(size = 22),
          axis.title = element_text(size = 22),
          legend.text = element_text(size = 16),
          legend.title = element_text(size = 17))
}
# The two panels share the same legend, so keep it on the second only.
figs.GP[[1]] <- figs.GP[[1]] + theme(legend.position = "none")
pGP <- ggarrange(figs.GP[[1]], figs.GP[[2]],
                 labels = c("M7. a)", "M8. a)"),
                 font.label = list(size = 20),
                 hjust = c(-0.1, -0.1),
                 vjust = c(1.6, 1.6),
                 nrow = 1,
                 widths = c(1, 1.3))
# Panels b. Provenance climates and PEAs
for(i in 1:2){
if(i==1){
pea="gPEA"
variable ="gPEA.sc"
} else if (i==2){
pea="rPEA"
variable ="rPEA.sc"
}
POST <- posterior_samples(models[[i]],pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
beta_PEA_Caceres = paste0('r_site[caceres,',variable,']'),
beta_MinPre_Portugal = 'r_site[portugal,bio14_prov.sc]',
beta_MinPre_Bordeaux = 'r_site[bordeaux,bio14_prov.sc]',
beta_MinPre_Asturias = 'r_site[asturias,bio14_prov.sc]',
beta_MinPre_Madrid = 'r_site[madrid,bio14_prov.sc]',
beta_MinPre_Caceres = 'r_site[caceres,bio14_prov.sc]',
beta_MaxTemp_Portugal = 'r_site[portugal,bio5_prov.sc]',
beta_MaxTemp_Bordeaux = 'r_site[bordeaux,bio5_prov.sc]',
beta_MaxTemp_Asturias = 'r_site[asturias,bio5_prov.sc]',
beta_MaxTemp_Madrid = 'r_site[madrid,bio5_prov.sc]',
beta_MaxTemp_Caceres = 'r_site[caceres,bio5_prov.sc]'
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>% as_tibble() %>%
gather(key = "key", value = "value", -var)
figs.beta[[i]] <- ggplot()+
geom_vline(xintercept = 0, col="grey70") +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = var,
fill = as.factor(var),
vline_color = ..quantile..),
scale = 1.8,
alpha = .8,
size=0.5,
rel_min_height=.01,
quantile_lines = TRUE,
quantiles = c(0.025,0.5,0.975)) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("5% & 95% quantiles", "mean"),
name = NULL) +
scale_y_discrete(labels=c("beta_MaxTemp_Caceres"=parse(text = TeX("$\\beta_{max.temp,Caceres}$")),
"beta_MaxTemp_Bordeaux"=parse(text = TeX("$\\beta_{max.temp,Bordeaux}$")),
"beta_MaxTemp_Portugal"=parse(text = TeX("$\\beta_{max.temp,Portugal}$")),
"beta_MaxTemp_Madrid"=parse(text = TeX("$\\beta_{max.temp,Madrid}$")),
"beta_MaxTemp_Asturias"=parse(text = TeX("$\\beta_{max.temp,Asturias}$")),
"beta_MinPre_Caceres"=parse(text = TeX("$\\beta_{min.pre,Caceres}$")),
"beta_MinPre_Bordeaux"=parse(text = TeX("$\\beta_{min.pre,Bordeaux}$")),
"beta_MinPre_Portugal"=parse(text = TeX("$\\beta_{min.pre,Portugal}$")),
"beta_MinPre_Madrid"=parse(text = TeX("$\\beta_{min.pre,Madrid}$")),
"beta_MinPre_Asturias"=parse(text = TeX("$\\beta_{min.pre,Asturias}$")),
"beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
"beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
"beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
"beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
"beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
)) +
labs(y = "",
x = "") +
scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds),
vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds),
vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds))) +
theme_bw() + theme(axis.text = element_text(size=22),
axis.title = element_text(size=22),
legend.position = "none",
legend.text = element_text(size=16),
legend.title = element_text(size=17))
}
pbeta <- ggarrange(figs.beta[[1]],figs.beta[[2]],
labels=c("M7. b)","M8. b)"),
font.label = list(size = 20),
hjust=c(-0.1,-0.1),
vjust=c(1.6,1.6),
nrow=1)
# Merge the panels:
figtot <- ggarrange(pGP,pbeta,nrow=2,heights=c(1,2))
# Save the figure:
ggsave(figtot,file=paste0("figs/SuppInfo/M7M8Posteriors",part,".png"),height=12,width=20)
# Model M9 ####
# ======== "
# only for the P1 and P2 partition.
# Load the brms fit for model M9 estimated on the chosen data partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD9.rds"))
# >> Tables S32 and S41. ####
# ---------------------- "
# Variance components: keep the group-level SDs ("sd_*") and the residual SD ("sigma").
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
# One row per parameter: posterior median, SD and 95% credible-interval bounds.
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i]) # squared: the tables report variances, not standard deviations
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
# Map raw brms parameter names to the LaTeX symbols used in the tables.
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects: coefficients whose names start with "b".
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i] # fixed effects stay on their original scale (no squaring)
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Variance components first, then fixed effects; exported as a LaTeX table.
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M9_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M10 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD10.rds"))
# >> Tables S33 and S42. ####
# ---------------------- "
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- square(sims[, , i])
quan <- unname(quantile(sims_i, probs = probs))
df[i,"InfCI"] <- quan[1]
df[i,"SupCI"] <- quan[2]
df[i,"Median"] <- median(sims_i)
df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
'sigma'='$\\sigma^{2}$',
'sd_site__Intercept'='$\\sigma^{2}_{S}$',
'sd_site__bio5_prov.sc'="$\\sigma^{2}_{\\beta_{max.temp,s}}$",
'sd_site__bio14_prov.sc'="$\\sigma^{2}_{\\beta_{min.pre,s}}$",
'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- sims[, , i]
quan <- unname(quantile(sims_i, probs = probs))
df2[i,"InfCI"] <- quan[1]
df2[i,"SupCI"] <- quan[2]
df2[i,"Median"] <- median(sims_i)
df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
'b_age.sc'="$\\beta_{age}$",
'b_Iage.scE2'= '$\\beta_{age2}$',
'b_Intercept'='$\\beta_{0}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
file = paste0("tables/Posteriors/M10_MainVarPost.tex"),
include.rownames=FALSE,
sanitize.text.function = function(x) {x})
# Model M11 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD11.rds"))
# >> Tables S34 and S43. ####
# ---------------------- "
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- square(sims[, , i])
quan <- unname(quantile(sims_i, probs = probs))
df[i,"InfCI"] <- quan[1]
df[i,"SupCI"] <- quan[2]
df[i,"Median"] <- median(sims_i)
df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
'sigma'='$\\sigma^{2}$',
'sd_site__Intercept'='$\\sigma^{2}_{S}$',
'sd_site__gPEA.sc'="$\\sigma^{2}_{\\beta_{gPEA,s}}$",
'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- sims[, , i]
quan <- unname(quantile(sims_i, probs = probs))
df2[i,"InfCI"] <- quan[1]
df2[i,"SupCI"] <- quan[2]
df2[i,"Median"] <- median(sims_i)
df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
'b_age.sc'="$\\beta_{age}$",
'b_Iage.scE2'= '$\\beta_{age2}$',
'b_Intercept'='$\\beta_{0}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
file = paste0("tables/Posteriors/M11_MainVarPost.tex"),
include.rownames=FALSE,
sanitize.text.function = function(x) {x})
# Model M12 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD12.rds"))
# >> Tables S35 and S44. ####
# ---------------------- "
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- square(sims[, , i])
quan <- unname(quantile(sims_i, probs = probs))
df[i,"InfCI"] <- quan[1]
df[i,"SupCI"] <- quan[2]
df[i,"Median"] <- median(sims_i)
df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
'sigma'='$\\sigma^{2}$',
'sd_site__Intercept'='$\\sigma^{2}_{S}$',
'sd_site__rPEA.sc'="$\\sigma^{2}_{\\beta_{rPEA,s}}$",
'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- sims[, , i]
quan <- unname(quantile(sims_i, probs = probs))
df2[i,"InfCI"] <- quan[1]
df2[i,"SupCI"] <- quan[2]
df2[i,"Median"] <- median(sims_i)
df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
'b_age.sc'="$\\beta_{age}$",
'b_Iage.scE2'= '$\\beta_{age2}$',
'b_Intercept'='$\\beta_{0}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
file = paste0("tables/Posteriors/M12_MainVarPost.tex"),
include.rownames=FALSE,
sanitize.text.function = function(x) {x})
# >> Figures S16, S18 and S20. ####
# ---------------------------- "
# for the three partitions
# >>>> Panel M9 => Population structure
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD9.rds"))
POST <- posterior_samples(mod,pars = "^r_mmQ1Q2Q3Q4Q5Q6\\[")
colnames(POST) <- str_sub(colnames(POST),18,-12)
POST <- as.data.frame(t(POST))
POST$genepool <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
as_tibble() %>%
gather(key = "key", value = "value", -genepool)%>%
group_by(genepool) %>%
dplyr::mutate(meanpergenepool = mean(value)) %>%
ungroup()
pGP <- ggplot()+
geom_vline(xintercept = 0,
col = "grey70") +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = reorder(as.factor(genepool), meanpergenepool),
fill = as.factor(genepool),
vline_color = ..quantile..),
scale = 2,
alpha = .6,
rel_min_height=c(.001),
size=0.5,
quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("2.5 and 97.5th percentiles", "Median"),
name = NULL) +
coord_cartesian(c(-0.5,0.4))+
scale_fill_manual(values=c("orangered3",
"gold2",
"darkorchid3",
"navyblue",
"turquoise2",
"green3"), labels = c("Northern Africa (NA)",
"Corsica (C)",
"Central Spain (CS)",
"French Atlantic (FA)",
"Iberian Atlantic (IA)",
"South-eastern Spain (SES)")) +
scale_y_discrete(labels=c("Q1"=parse(text = TeX("$g_{NA}$")),
"Q2"=parse(text = TeX("$g_{C}$")),
"Q3"=parse(text = TeX("$g_{CS}$")),
"Q4"=parse(text = TeX("$g_{FA}$")),
"Q5"=parse(text = TeX("$g_{IA}$")),
"Q6"=parse(text = TeX("$g_{SES}$")))) +
labs(fill = "Gene pools",
y = "",
x="") +
theme_bw() + theme(axis.text = element_text(size=18),
axis.title = element_text(size=18),
legend.text = element_text(size=16),
legend.title = element_text(size=17))
# >>>> Panel M10 => Climate in the provenances
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD10.rds"))
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
beta_MinPre_Portugal = 'r_site[portugal,bio14_prov.sc]',
beta_MinPre_Bordeaux = 'r_site[bordeaux,bio14_prov.sc]',
beta_MinPre_Asturias = 'r_site[asturias,bio14_prov.sc]',
beta_MinPre_Madrid = 'r_site[madrid,bio14_prov.sc]',
beta_MinPre_Caceres = 'r_site[caceres,bio14_prov.sc]',
beta_MaxTemp_Portugal = 'r_site[portugal,bio5_prov.sc]',
beta_MaxTemp_Bordeaux = 'r_site[bordeaux,bio5_prov.sc]',
beta_MaxTemp_Asturias = 'r_site[asturias,bio5_prov.sc]',
beta_MaxTemp_Madrid = 'r_site[madrid,bio5_prov.sc]',
beta_MaxTemp_Caceres = 'r_site[caceres,bio5_prov.sc]'
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
as_tibble() %>%
gather(key = "key", value = "value", -var)
pCP <- ggplot()+
geom_vline(xintercept = 0, col="grey70") +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = var,
fill = as.factor(var),
vline_color = ..quantile..),
scale = 1.8,
alpha = .8,
size=0.5,
rel_min_height=.01,
quantile_lines = TRUE,
quantiles = c(0.025,0.5,0.975)) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("5% & 95% quantiles", "mean"),
name = NULL) +
scale_y_discrete(labels=c("beta_MaxTemp_Caceres"=parse(text = TeX("$\\beta_{max.temp,Caceres}$")),
"beta_MaxTemp_Bordeaux"=parse(text = TeX("$\\beta_{max.temp,Bordeaux}$")),
"beta_MaxTemp_Portugal"=parse(text = TeX("$\\beta_{max.temp,Portugal}$")),
"beta_MaxTemp_Madrid"=parse(text = TeX("$\\beta_{max.temp,Madrid}$")),
"beta_MaxTemp_Asturias"=parse(text = TeX("$\\beta_{max.temp,Asturias}$")),
"beta_MinPre_Caceres"=parse(text = TeX("$\\beta_{min.pre,Caceres}$")),
"beta_MinPre_Bordeaux"=parse(text = TeX("$\\beta_{min.pre,Bordeaux}$")),
"beta_MinPre_Portugal"=parse(text = TeX("$\\beta_{min.pre,Portugal}$")),
"beta_MinPre_Madrid"=parse(text = TeX("$\\beta_{min.pre,Madrid}$")),
"beta_MinPre_Asturias"=parse(text = TeX("$\\beta_{min.pre,Asturias}$")))) +
labs(y = "",
x="") +
scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds),
vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds))) +
theme_bw() + theme(axis.text = element_text(size=22),
axis.title = element_text(size=22),
legend.position = "none",
plot.title = element_text(size=22))
# >>>> Panel M11 => gPEAs
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD11.rds"))
pea="gPEA"
variable ="gPEA.sc"
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
beta_PEA_Caceres = paste0('r_site[caceres,',variable,']')
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
as_tibble() %>%
gather(key = "key", value = "value", -var)
pgpea <- ggplot()+
geom_vline(xintercept = 0, col="grey70") +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = var,
fill = as.factor(var),
vline_color = ..quantile..),
scale = 1.8,
alpha = .8,
size=0.5,
rel_min_height=.01,
quantile_lines = TRUE,
quantiles = c(0.025,0.5,0.975)) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("5% & 95% quantiles", "mean"),
name = NULL) +
scale_y_discrete(labels=c("beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
"beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
"beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
"beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
"beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
)) +
labs(fill = "Sites",
y = "",
x="") +
scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds)),
labels = c("Asturias",
"Bordeaux",
"Caceres",
"Madrid",
"Portugal")) +
theme_bw() + theme(axis.text = element_text(size=22),
axis.title = element_text(size=22),
legend.text = element_text(size=16),
legend.title = element_text(size=17),
plot.title = element_text(size=22)) +
guides(vline_color = FALSE)
# >>>> Panel M12 => rPEAs
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD12.rds"))
pea="rPEA"
variable ="rPEA.sc"
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
beta_PEA_Caceres = paste0('r_site[caceres,',variable,']')
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
as_tibble() %>%
gather(key = "key", value = "value", -var)
prpea <- ggplot()+
geom_vline(xintercept = 0, col="grey70") +
stat_density_ridges(data = posteriorsimpelmodellong,
aes(x = value,
y = var,
fill = as.factor(var),
vline_color = ..quantile..),
scale = 1.8,
alpha = .8,
size=0.5,
rel_min_height=.01,
quantile_lines = TRUE,
quantiles = c(0.025,0.5,0.975)) +
scale_discrete_manual("vline_color",
values = c("blue", "red", "blue", "black"),
breaks = c(1, 2),
labels = c("5% & 95% quantiles", "mean"),
name = NULL) +
scale_y_discrete(labels=c("beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
"beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
"beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
"beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
"beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
)) +
labs(title="",
y = "",
x="") +
scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
vir_lite("navyblue",ds=ds),
vir_lite("pink",ds=ds),
vir_lite("deeppink",ds=ds),
vir_lite("dodgerblue2",ds=ds))) +
theme_bw() + theme(axis.text = element_text(size=22),
axis.title = element_text(size=22),
legend.position = "none",
plot.title = element_text(size=22))
p1 <- ggarrange(pGP,pCP,labels=c("M9","M10"),font.label = list(size = 20),nrow=1,widths = c(1.2,1))
p2 <- ggarrange(pgpea,prpea,labels=c("M11","M12"),font.label = list(size = 20),nrow=1,widths = c(1.2,1))
fig <- ggarrange(p1,p2,nrow=2)
if(part=="P1"){
ggsave(fig, file=paste0("figs/manuscript/",part,"M9toM12PosteriorDistri.png"),width=20,height=12)
} else{
ggsave(fig, file=paste0("figs/SuppInfo/",part,"M9toM12PosteriorDistri.png"),width=20,height=12)
}
|
/scripts_DRYAD/13_CreateTablesFiguresPosteriorDistributions.R
|
no_license
|
JulietteArchambeau/HeightPinpinClonapin
|
R
| false
| false
| 93,717
|
r
|
########################################################################################################################"
# #
# Tables and figures of the posterior distributions in the Supplementary Information #
# #
# Juliette Archambeau #
# 18/03/2022 #
# #
########################################################################################################################"
library(broom) # CRAN v0.5.2
library(latex2exp) # CRAN v0.4.0
library(ggplot2) # CRAN v3.3.1
library(ggpubr) # CRAN v0.2.1
library(tidybayes) # CRAN v2.0.1
library(dplyr) # CRAN v1.0.0
library(bayesplot) # CRAN v1.7.1
library(xtable) # CRAN v1.8-4
library(ggridges) # CRAN v0.5.1
library(tidyverse) # CRAN v1.3.0
library(tibble) # CRAN v2.1.3
library(brms) # CRAN v2.11.1
# Functions used in the script:
source("scripts/Functions/vir_lite.R") # available here: https://github.com/JulietteArchambeau/HeightPinpinClonapin/blob/master/scripts/Functions/vir_lite.R
square <- function(x) (x*x)
# Darkening parameter passed to the function vir_lite() when building colour palettes.
ds <- 0.7
# In the Supplementary Information, we report the medians and the 95% credible intervals of the posterior distributions.
prob <- 0.95
# Lower and upper tail probabilities for the credible interval: c(0.025, 0.975).
probs <- c((1 - prob) / 2, 1 - (1 - prob) / 2)
# Load the train dataset of the P1 partition
#data <- readRDS(file="../../data/TrainP1.RDS")
# Data partition
part <- "P1" # choose between P1, P2 and P3
# Model M0 ####
# ======== "
# Load the brms fit for the baseline model M0 on the chosen data partition.
mod <- readRDS(file = paste0("outputs/models/", part, "/MOD0.rds"))
# >> Tables S14 and S36. ####
# ---------------------- "
# Extract the standard deviations (group-level "sd_*" terms and the residual "sigma"):
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
# One row per parameter: posterior median, SD and 95% credible-interval bounds.
df <- as.data.frame(matrix(NA, length(pars), 4,
                           dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (i in pars) {
  sims_i <- square(sims[, , i]) # we want the variances and not the standard deviations
  quan <- unname(quantile(sims_i, probs = probs))
  df[i, "InfCI"] <- quan[1]
  df[i, "SupCI"] <- quan[2]
  df[i, "Median"] <- median(sims_i)
  df[i, "SD"] <- sd(sims_i)
}
# Map raw brms parameter names to the LaTeX symbols used in the tables.
df <- df %>%
  mutate(Parameter = recode_factor(rownames(df),
                                   'sigma' = '$\\sigma^{2}$',
                                   'sd_prov__Intercept' = "$\\sigma^{2}_{P}$",
                                   'sd_prov:clon__Intercept' = '$\\sigma^{2}_{G}$',
                                   'sd_site__Intercept' = '$\\sigma^{2}_{S}$',
                                   'sd_site:block__Intercept' = '$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Extract the coefficients of the fixed effects ("b_*"):
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA, length(pars), 4,
                            dimnames = list(pars, c("Median", "SD", "InfCI", "SupCI"))))
for (i in pars) {
  sims_i <- sims[, , i] # fixed effects stay on their original scale (no squaring)
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i, "InfCI"] <- quan[1]
  df2[i, "SupCI"] <- quan[2]
  df2[i, "Median"] <- median(sims_i)
  df2[i, "SD"] <- sd(sims_i)
}
df2 <- df2 %>%
  mutate(Parameter = recode_factor(rownames(df2),
                                   'b_age.sc' = "$\\beta_{age}$",
                                   'b_Iage.scE2' = '$\\beta_{age2}$',
                                   'b_Intercept' = '$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter, Median, SD, InfCI, SupCI)
# Variance components first, then fixed effects; exported as a LaTeX table.
tab <- bind_rows(df, df2)
print(xtable(tab, type = "latex", digits = 3),
      file = "tables/Posteriors/M0_MainVarPost.tex", # constant path: paste0() was redundant
      include.rownames = FALSE,
      sanitize.text.function = function(x) {x})
# Model M1 ####
# ======== "
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD1.rds"))
# >> Tables S15 and S37. ####
# ---------------------- "
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- square(sims[, , i])
quan <- unname(quantile(sims_i, probs = probs))
df[i,"InfCI"] <- quan[1]
df[i,"SupCI"] <- quan[2]
df[i,"Median"] <- median(sims_i)
df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
'sigma'='$\\sigma^{2}$',
'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
'sd_site__Intercept'='$\\sigma^{2}_{S}$',
'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- sims[, , i]
quan <- unname(quantile(sims_i, probs = probs))
df2[i,"InfCI"] <- quan[1]
df2[i,"SupCI"] <- quan[2]
df2[i,"Median"] <- median(sims_i)
df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
'b_age.sc'="$\\beta_{age}$",
'b_Iage.scE2'= '$\\beta_{age2}$',
'b_Intercept'='$\\beta_{0}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
file = paste0("tables/Posteriors/M1_MainVarPost.tex"), include.rownames=FALSE,sanitize.text.function = function(x) {x})
# >> Table S16. ####
# ------------- "
# only for the P1 partition
# Posterior summaries of the site intercepts (r_site): posterior median and 95% CI.
df <- mod %>% broom::tidyMCMC(estimate.method = "median",conf.int = T,conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_site\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter = recode_factor(Term,
                                   'r_site[asturias,Intercept]'="$S_{Asturias}$",
                                   'r_site[bordeaux,Intercept]'= '$S_{Bordeaux}$',
                                   'r_site[caceres,Intercept]'='$S_{Caceres}$',
                                   'r_site[madrid,Intercept]'='$S_{Madrid}$',
                                   'r_site[portugal,Intercept]'="$S_{Portugal}$")) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Export as a LaTeX table; sanitize.text.function = identity keeps the LaTeX math intact.
print(xtable(df, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M1_SiteInterceptsPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S17. ####
# ------------- "
# only for the P1 partition
# Posterior summaries of the provenance intercepts (r_prov).
# FIX: the table column is labelled "Median" and the script reports medians with
# 95% credible intervals throughout (see header comment above), so the point
# estimate must be the posterior median — consistent with Table S16. The
# original call used estimate.method = "mean"; conf.level is now explicit.
df <- mod %>% broom::tidyMCMC(estimate.method = "median", conf.int = TRUE, conf.level = 0.95) %>%
  filter(str_detect(term, "^(r_prov\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  # Map each provenance intercept to its LaTeX symbol; the recode order fixes
  # the row order of the final table.
  mutate(Parameter= recode_factor(Term,
                                  'r_prov[MIM,Intercept]' = '$P_{MIM}$',
                                  'r_prov[CEN,Intercept]' = '$P_{CEN}$',
                                  'r_prov[ORI,Intercept]' = '$P_{ORI}$',
                                  'r_prov[STJ,Intercept]' = '$P_{STJ}$',
                                  'r_prov[HOU,Intercept]' = '$P_{HOU}$',
                                  'r_prov[CUE,Intercept]' = '$P_{CUE}$',
                                  'r_prov[TAM,Intercept]' = '$P_{TAM}$',
                                  'r_prov[LEI,Intercept]' = '$P_{LEI}$',
                                  'r_prov[VER,Intercept]' = '$P_{VER}$',
                                  'r_prov[CAS,Intercept]' = '$P_{CAS}$',
                                  'r_prov[PIA,Intercept]' = '$P_{PIA}$',
                                  'r_prov[ARM,Intercept]' = '$P_{ARM}$',
                                  'r_prov[PET,Intercept]' = '$P_{PET}$',
                                  'r_prov[VAL,Intercept]' = '$P_{VAL}$',
                                  'r_prov[SAL,Intercept]' = '$P_{SAL}$',
                                  'r_prov[OLO,Intercept]' = '$P_{OLO}$',
                                  'r_prov[CAD,Intercept]' = '$P_{CAD}$',
                                  'r_prov[ARN,Intercept]' = '$P_{ARN}$',
                                  'r_prov[BAY,Intercept]' = '$P_{BAY}$',
                                  'r_prov[SIE,Intercept]' = '$P_{SIE}$',
                                  'r_prov[SEG,Intercept]' = '$P_{SEG}$',
                                  'r_prov[PLE,Intercept]' = '$P_{PLE}$',
                                  'r_prov[BON,Intercept]' = '$P_{BON}$',
                                  'r_prov[COC,Intercept]' = '$P_{COC}$',
                                  'r_prov[SAC,Intercept]' = '$P_{SAC}$',
                                  'r_prov[QUA,Intercept]' = '$P_{QUA}$',
                                  'r_prov[CAR,Intercept]' = '$P_{CAR}$',
                                  'r_prov[OLB,Intercept]' = '$P_{OLB}$',
                                  'r_prov[PIE,Intercept]' = '$P_{PIE}$',
                                  'r_prov[PUE,Intercept]' = '$P_{PUE}$',
                                  'r_prov[ALT,Intercept]' = '$P_{ALT}$',
                                  'r_prov[LAM,Intercept]' = '$P_{LAM}$',
                                  'r_prov[MAD,Intercept]' = '$P_{MAD}$',
                                  'r_prov[COM,Intercept]' = '$P_{COM}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Export as a LaTeX table; sanitize.text.function = identity keeps the LaTeX math intact.
print(xtable(df, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M1_ProvInterceptsPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Figure S11. ####
# ------------- "
# only for the P1 partition
# Conditional effect of (mean-centered, scaled) age on log-height from model M1;
# plot = FALSE returns the ggplot object so it can be themed before saving.
p <- plot(conditional_effects(mod,"age.sc"),plot=FALSE)[[1]] +
  xlab("Mean-centered age") +
  ylab("Logarithm of height (mm)") +
  theme_bw()
ggsave(p,file="figs/SuppInfo/M1_CondEffectAge.png",height=6,width=6)
# Model M2 ####
# ======== "
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD2.rds"))
# >> Tables S18 and S38. ####
# ---------------------- "
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- square(sims[, , i])
quan <- unname(quantile(sims_i, probs = probs))
df[i,"InfCI"] <- quan[1]
df[i,"SupCI"] <- quan[2]
df[i,"Median"] <- median(sims_i)
df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
'sigma'='$\\sigma^{2}$',
'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
'sd_prov:site__Intercept'='$\\sigma^{2}_{Inter}$',
'sd_site__Intercept'='$\\sigma^{2}_{S}$',
'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
sims_i <- sims[, , i]
quan <- unname(quantile(sims_i, probs = probs))
df2[i,"InfCI"] <- quan[1]
df2[i,"SupCI"] <- quan[2]
df2[i,"Median"] <- median(sims_i)
df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
'b_age.sc'="$\\beta_{age}$",
'b_Iage.scE2'= '$\\beta_{age2}$',
'b_Intercept'='$\\beta_{0}$')) %>%
remove_rownames() %>%
dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
file = paste0("tables/Posteriors/M2_MainVarPost.tex"),
include.rownames=FALSE,
sanitize.text.function = function(x) {x})
# >> Figure S12. ####
# ------------- "
# only for the P1 partition
# >>>> Panel All sites ####
POST <- posterior_samples(mod,pars = "^r_prov\\[")
colnames(POST) <- str_sub(colnames(POST),8,-12)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
data <- read_csv("data_DRYAD/HeightClimateSoilData_33121obs_32variables.csv") %>% dplyr::filter(P1=="train")
data <- droplevels(data)
ps <- data %>%
group_by(prov) %>%
summarise_at(vars(paste0(rep("Q",6),1:6)), mean)
ps$max.Q.prov <- colnames(ps[,2:7])[apply(ps[,2:7],1,which.max)]
ps$prov <- as.factor(ps$prov)
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
as_tibble() %>%
gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
group_by(prov) %>%
dplyr::mutate(meanperprov = mean(value)) %>%
ungroup()
# Ridgeline plot of the provenance-intercept posteriors across all sites,
# ordered by posterior mean and coloured by the provenance's dominant gene pool.
# NOTE(review): `..density..` / `..quantile..` are deprecated in ggplot2 >= 3.4
# (use after_stat()); left unchanged here.
# NOTE(review): the red 0.5-quantile line is the median, but the legend labels
# it "Mean" (it is labelled "Median" later in this script) — confirm wording.
pm2_all <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  # LaTeX-rendered y-axis labels, one per provenance code.
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  # Same x-range across all six panels so they are directly comparable.
  coord_cartesian(c(-0.35,0.3))+
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "All sites",
       y = "",
       x = TeX("Intercepts P_{p}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Portugal ####
# Provenance-by-site intercepts for the Portugal site.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*portugal")
# Strip "r_prov:site[" (12 chars) and ",portugal,Intercept]" (20 chars).
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
# `ps` (dominant gene pool per provenance) was built for the all-sites panel.
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov)%>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value))%>%
  ungroup()
# Same ridgeline layout as pm2_all; only the data, title and x label differ.
pm2_portugal <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  coord_cartesian(c(-0.35,0.3))+
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Portugal",
       y = "",
       x = TeX("Intercepts P_{p,Portugal}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Caceres ####
# Provenance-by-site intercepts for the Caceres site.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*caceres")
# Strip "r_prov:site[" (12 chars) and ",caceres,Intercept]" (19 chars).
colnames(POST) <- str_sub(colnames(POST),13,-20)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov)%>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value))%>%
  ungroup()
# Same ridgeline layout as pm2_all; only the data, title and x label differ.
pm2_caceres <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  coord_cartesian(c(-0.35,0.3))+
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Caceres",
       y = "",
       x = TeX("Intercepts P_{p,Caceres}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Madrid ####
# Provenance-by-site intercepts for the Madrid site.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*madrid")
# Strip "r_prov:site[" (12 chars) and ",madrid,Intercept]" (18 chars).
colnames(POST) <- str_sub(colnames(POST),13,-19)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value)) %>%
  ungroup()
# Same ridgeline layout as pm2_all (coord_cartesian happens to precede
# scale_y_discrete here; layer order does not change the result).
pm2_madrid <- ggplot()+
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  coord_cartesian(c(-0.35,0.3))+
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Madrid",
       y = "",
       x = TeX("Intercepts P_{p,Madrid}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Bordeaux ####
# Provenance-by-site intercepts for the Bordeaux site.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*bordeaux")
# Strip "r_prov:site[" (12 chars) and ",bordeaux,Intercept]" (20 chars).
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value)) %>%
  ungroup()
# Same ridgeline layout as pm2_all; only the data, title and x label differ.
pm2_bordeaux <- ggplot() +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  coord_cartesian(c(-0.35,0.3))+
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Bordeaux",
       y = "",
       x = TeX("Intercepts P_{p,Bordeaux}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Panel Asturias ####
# Provenance-by-site intercepts for the Asturias site.
POST <- posterior_samples(mod,pars = "^r_prov:site\\[.*asturias")
# Strip "r_prov:site[" (12 chars) and ",asturias,Intercept]" (20 chars).
colnames(POST) <- str_sub(colnames(POST),13,-21)
POST <- as.data.frame(t(POST))
POST$prov <- as.factor(rownames(POST))
posteriorsimpelmodellong <- inner_join(POST, ps[,c("prov","max.Q.prov")],by="prov") %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -prov,-max.Q.prov) %>%
  group_by(prov) %>%
  dplyr::mutate(meanperprov = mean(value)) %>%
  ungroup()
# Same ridgeline layout as pm2_all; only the data, title and x label differ.
pm2_asturias <- ggplot() +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(prov), meanperprov),
                          height = ..density..,
                          fill = as.factor(max.Q.prov),
                          vline_color = ..quantile..),
                      scale = 3,
                      alpha = .6,
                      rel_min_height=c(.01),
                      size=0.2,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  coord_cartesian(c(-0.35,0.3))+
  scale_y_discrete(labels=c("ALT"=parse(text = TeX("$P_{ALT}$")),
                            "ARM"=parse(text = TeX("$P_{ARM}$")),
                            "ARN"=parse(text = TeX("$P_{ARN}$")),
                            "BAY"=parse(text = TeX("$P_{BAY}$")),
                            "BON"=parse(text = TeX("$P_{BON}$")),
                            "CAD"=parse(text = TeX("$P_{CAD}$")),
                            "CAR"=parse(text = TeX("$P_{CAR}$")),
                            "CAS"=parse(text = TeX("$P_{CAS}$")),
                            "CEN"=parse(text = TeX("$P_{CEN}$")),
                            "COC"=parse(text = TeX("$P_{COC}$")),
                            "COM"=parse(text = TeX("$P_{COM}$")),
                            "CUE"=parse(text = TeX("$P_{CUE}$")),
                            "HOU"=parse(text = TeX("$P_{HOU}$")),
                            "LAM"=parse(text = TeX("$P_{LAM}$")),
                            "LEI"=parse(text = TeX("$P_{LEI}$")),
                            "MAD"=parse(text = TeX("$P_{MAD}$")),
                            "MIM"=parse(text = TeX("$P_{MIM}$")),
                            "OLB"=parse(text = TeX("$P_{OLB}$")),
                            "OLO"=parse(text = TeX("$P_{OLO}$")),
                            "ORI"=parse(text = TeX("$P_{ORI}$")),
                            "PET"=parse(text = TeX("$P_{PET}$")),
                            "PIA"=parse(text = TeX("$P_{PIA}$")),
                            "PIE"=parse(text = TeX("$P_{PIE}$")),
                            "PLE"=parse(text = TeX("$P_{PLE}$")),
                            "PUE"=parse(text = TeX("$P_{PUE}$")),
                            "QUA"=parse(text = TeX("$P_{QUA}$")),
                            "SAC"=parse(text = TeX("$P_{SAC}$")),
                            "SAL"=parse(text = TeX("$P_{SAL}$")),
                            "SEG"=parse(text = TeX("$P_{SEG}$")),
                            "SIE"=parse(text = TeX("$P_{SIE}$")),
                            "STJ"=parse(text = TeX("$P_{STJ}$")),
                            "TAM"=parse(text = TeX("$P_{TAM}$")),
                            "VAL"=parse(text = TeX("$P_{VAL}$")),
                            "VER"=parse(text = TeX("$P_{VER}$")))) +
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Mean"),
                        name = NULL) +
  scale_fill_manual(values=c("orangered3",
                             "gold2","darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Q1: Northern Africa",
                                                   "Q2: Corsica",
                                                   "Q3: Central Spain",
                                                   "Q4: French Atlantic",
                                                   "Q5: Iberian Atlantic",
                                                   "Q6: South-eastern Spain"),name="Gene pools") +
  labs(title = "Asturias",
       y = "",
       x = TeX("Intercepts P_{p,Asturias}")
  ) +
  theme_bw() +
  theme(axis.text = element_text(size=12),
        axis.title = element_text(size=14),
        legend.text = element_text(size=18),
        legend.title = element_text(size=20))
# >>>> Merging the panels ####
# Drop the (identical) legends from each panel, then arrange the six panels
# side by side and write the figure to disk.
no_legend <- theme(legend.position = "none")
pp2_all      <- pm2_all      + no_legend
pp2_asturias <- pm2_asturias + no_legend
pp2_bordeaux <- pm2_bordeaux + no_legend
pp2_caceres  <- pm2_caceres  + no_legend
pp2_portugal <- pm2_portugal + no_legend
pp2_madrid   <- pm2_madrid   + no_legend
g <- ggarrange(pp2_all, pp2_asturias, pp2_bordeaux, pp2_caceres, pp2_portugal, pp2_madrid,
               nrow = 1)
ggsave(g, file = "figs/SuppInfo/M2_SiteProvIntercepts.png", width = 20, height = 12)
# Model M3 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file="outputs/models/P1/MOD3.rds")
# >> Table S19. ####
# ------------- "
# Posterior summaries (median, SD, credible interval) of the M3 variance
# components; sd/sigma draws are squared so variances are reported.
# NOTE(review): `square()` and `probs` come from earlier in the script — confirm.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
# recode_factor also fixes the row order of the LaTeX table.
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_site_age__Intercept'='$\\sigma^{2}_{cs_{is}}$',
                                              'sd_clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) are summarised on their original (not squared) scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M3_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S20. ####
# ------------- "
# Posterior medians and 95% CIs of the M3 site intercepts (r_site[...]).
# NOTE(review): tidyMCMC has moved from broom to broom.mixed in recent
# versions — confirm against the installed package versions.
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median",conf.int = TRUE,conf.level = 0.95) %>% # fix: TRUE instead of reassignable T
  filter(str_detect(term, "^(r_site\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter = recode_factor(Term,
                                   'r_site[asturias,Intercept]'="$S_{Asturias}$",
                                   'r_site[bordeaux,Intercept]'= '$S_{Bordeaux}$',
                                   'r_site[caceres,Intercept]'='$S_{Caceres}$',
                                   'r_site[madrid,Intercept]'='$S_{Madrid}$',
                                   'r_site[portugal,Intercept]'="$S_{Portugal}$")) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
print(xtable(df, type = "latex",digits=3),
      file = "tables/Posteriors/M3_SiteInterceptsPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S21. ####
# ------------- "
# Posterior medians and 95% CIs of the M3 site-by-age (climate-similarity)
# intercepts (r_site_age[...]).
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median",conf.int = TRUE,conf.level = 0.95) %>% # fix: TRUE instead of reassignable T
  filter(str_detect(term, "^(r_site_age\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter = recode_factor(Term,
                                   'r_site_age[asturias10,Intercept]' = '$cs_{1,Asturias}$',
                                   'r_site_age[portugal27,Intercept]' = '$cs_{4,Portugal}$',
                                   'r_site_age[portugal20,Intercept]' = '$cs_{3,Portugal}$',
                                   'r_site_age[asturias21,Intercept]' = '$cs_{2,Asturias}$',
                                   'r_site_age[portugal11,Intercept]' = '$cs_{1,Portugal}$',
                                   'r_site_age[madrid13,Intercept]' = '$cs_{1,Madrid}$',
                                   'r_site_age[asturias37,Intercept]' = '$cs_{3,Asturias}$',
                                   'r_site_age[portugal15,Intercept]' = '$cs_{2,Portugal}$',
                                   'r_site_age[bordeaux25,Intercept]' = '$cs_{1,Bordeaux}$',
                                   'r_site_age[bordeaux37,Intercept]' = '$cs_{2,Bordeaux}$',
                                   'r_site_age[caceres8,Intercept]' = '$cs_{1,Caceres}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
print(xtable(df, type = "latex",digits=3),
      file = "tables/Posteriors/M3_SiteClimSimInterceptsPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M3bis ####
# =========== "
# only for the P1 partition
mod <- readRDS(file="outputs/models/P1/MOD13.rds")
# >> Table S23. ####
# ------------- "
# Posterior summaries of the M3bis variance components (sd/sigma draws squared).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_site_age__Intercept'='$\\sigma^{2}_{cs_{is}}$',
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M3bis_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Table S24. ####
# ------------- "
# Posterior medians and 95% CIs of the M3bis site-by-age intercepts.
df <- mod %>%
  broom::tidyMCMC(estimate.method = "median",conf.int = TRUE,conf.level = 0.95) %>% # fix: TRUE instead of reassignable T
  filter(str_detect(term, "^(r_site_age\\[)")) %>%
  rename_all(str_to_title) %>%
  dplyr::rename("Median"=Estimate,"SD"=Std.error,"InfCI"=Conf.low,"SupCI"=Conf.high) %>%
  mutate(Parameter = recode_factor(Term,
                                   'r_site_age[asturias10,Intercept]' = '$cs_{1,Asturias}$',
                                   'r_site_age[portugal27,Intercept]' = '$cs_{4,Portugal}$',
                                   'r_site_age[portugal20,Intercept]' = '$cs_{3,Portugal}$',
                                   'r_site_age[asturias21,Intercept]' = '$cs_{2,Asturias}$',
                                   'r_site_age[portugal11,Intercept]' = '$cs_{1,Portugal}$',
                                   'r_site_age[madrid13,Intercept]' = '$cs_{1,Madrid}$',
                                   'r_site_age[asturias37,Intercept]' = '$cs_{3,Asturias}$',
                                   'r_site_age[portugal15,Intercept]' = '$cs_{2,Portugal}$',
                                   'r_site_age[bordeaux25,Intercept]' = '$cs_{1,Bordeaux}$',
                                   'r_site_age[bordeaux37,Intercept]' = '$cs_{2,Bordeaux}$',
                                   'r_site_age[caceres8,Intercept]' = '$cs_{1,Caceres}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
print(xtable(df, type = "latex",digits=3),
      file = "tables/Posteriors/M3bis_SiteClimSimInterceptsPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M4 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file="outputs/models/P1/MOD4.rds")
# >> Table S25. ####
# ------------- "
# Posterior summaries of the M4 variance components (sd/sigma draws squared).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_site_age__Intercept'='$\\sigma^{2}_{cs_{is}}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M4_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M5 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file="outputs/models/P1/MOD5.rds")
# >> Table S26. ####
# ------------- "
# Posterior summaries of the M5 variance components, including the six
# gene-pool-specific additive variances (sd_clon1..sd_clon6).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_site_age__Intercept'='$\\sigma^{2}_{cs_{is}}$',
                                              'sd_clon1__Intercept'='$\\sigma^{2}_{A_{NA}}$',
                                              'sd_clon2__Intercept'='$\\sigma^{2}_{A_{C}}$',
                                              'sd_clon3__Intercept'='$\\sigma^{2}_{A_{CS}}$',
                                              'sd_clon4__Intercept'='$\\sigma^{2}_{A_{FA}}$',
                                              'sd_clon5__Intercept'='$\\sigma^{2}_{A_{IA}}$',
                                              'sd_clon6__Intercept'='$\\sigma^{2}_{A_{SES}}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M5_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M6 ####
# ======== "
# only for the P1 partition
mod <- readRDS(file="outputs/models/P1/MOD6.rds")
# >> Table S29. ####
# ------------- "
# Posterior summaries of the M6 variance components (sd/sigma draws squared).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_site_age__Intercept'='$\\sigma^{2}_{cs_{is}}$',
                                              'sd_prov_clim__Intercept'='$\\sigma^{2}_{cp_{p}}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M6_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M7 ####
# ======== "
# only for the P1 and P2 partition.
# `part` (the current partition, e.g. "P1"/"P2") is set earlier in the script.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD7.rds"))
# >> Tables S30 and S39. ####
# --------------------- "
# Posterior summaries of the M7 variance components, including the
# site-level random slopes on provenance climate (bio5/bio14/gPEA).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_site__bio5_prov.sc'="$\\sigma^{2}_{\\beta_{max.temp,s}}$",
                                              'sd_site__bio14_prov.sc'="$\\sigma^{2}_{\\beta_{min.pre,s}}$",
                                              'sd_site__gPEA.sc'="$\\sigma^{2}_{\\beta_{gPEA,s}}$",
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
# NOTE(review): the output file name does not include `part`, so a second
# partition overwrites the first — confirm this is intended.
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M7_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M8 ####
# ======== "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD8.rds"))
# >> Tables S31 and S40. ####
# ---------------------- "
# Posterior summaries of the M8 variance components (same structure as M7,
# with rPEA replacing gPEA among the site-level random slopes).
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)
}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_site__bio5_prov.sc'="$\\sigma^{2}_{\\beta_{max.temp,s}}$",
                                              'sd_site__bio14_prov.sc'="$\\sigma^{2}_{\\beta_{min.pre,s}}$",
                                              'sd_site__rPEA.sc'="$\\sigma^{2}_{\\beta_{rPEA,s}}$",
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
# Fixed effects (b_*) on their original scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)
}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = "tables/Posteriors/M8_MainVarPost.tex", # fix: single-argument paste0() was redundant
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Figures S15, S17 and S19. ####
# ---------------------------- "
# for the three partitions
models <- list()
models[[1]] <- readRDS(file=paste0("outputs/models/",part,"/MOD7.rds"))
models[[2]] <- readRDS(file=paste0("outputs/models/",part,"/MOD8.rds"))
names(models) <- c("M7","M8")
figs.GP <- list()
figs.beta <- list()
# Panels a. Population structure
# One ridge plot per model showing the posterior of each gene-pool
# intercept (group-level effects r_mmQ1Q2Q3Q4Q5Q6[Qk,Intercept]).
# NOTE(review): posterior_samples() is deprecated in recent brms
# (as_draws_df() is the replacement); ..quantile.. is the pre-ggplot2-3.4
# spelling of after_stat(quantile).
for (i in 1:2){
  POST <- posterior_samples(models[[i]],pars = "^r_mmQ1Q2Q3Q4Q5Q6\\[")
  # str_sub(…, 18, -12): drop the 17-char prefix "r_mmQ1Q2Q3Q4Q5Q6[" and
  # the 11-char suffix ",Intercept]", leaving just the gene-pool code (Qk).
  colnames(POST) <- str_sub(colnames(POST),18,-12)
  POST <- as.data.frame(t(POST))
  POST$genepool <- as.factor(rownames(POST))
  # Long format: one row per (gene pool, draw); mean per gene pool is kept
  # so the ridges can be ordered by posterior mean.
  posteriorsimpelmodellong <- POST %>% as_tibble() %>%
    gather(key = "key", value = "value", -genepool)%>%
    group_by(genepool) %>%
    dplyr::mutate(meanpergenepool = mean(value))%>%
    ungroup()
  figs.GP[[i]] <- ggplot()+
    geom_vline(xintercept = 0,
               col = "grey70") +
    stat_density_ridges(data = posteriorsimpelmodellong,
                        aes(x = value,
                            y = reorder(as.factor(genepool), meanpergenepool),
                            fill = as.factor(genepool),
                            vline_color = ..quantile..),
                        scale = 2,
                        alpha = .6,
                        rel_min_height=c(.0044),
                        size=0.5,
                        quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
    scale_discrete_manual("vline_color",
                          values = c("blue", "red", "blue", "black"),
                          breaks = c(1, 2),
                          labels = c("2.5 and 97.5th percentiles", "Median"),
                          name = NULL) +
    scale_y_discrete(labels=c("Q1"=parse(text = TeX("$g_{NA}$")),
                              "Q2"=parse(text = TeX("$g_{C}$")),
                              "Q3"=parse(text = TeX("$g_{CS}$")),
                              "Q4"=parse(text = TeX("$g_{FA}$")),
                              "Q5"=parse(text = TeX("$g_{IA}$")),
                              "Q6"=parse(text = TeX("$g_{SES}$")))) +
    coord_cartesian(c(-0.5,0.6))+
    scale_fill_manual(values=c("orangered3",
                               "gold2",
                               "darkorchid3",
                               "navyblue",
                               "turquoise2",
                               "green3"), labels = c("Northern Africa (NA)",
                                                     "Corsica (C)",
                                                     "Central Spain (CS)",
                                                     "French Atlantic (FA)",
                                                     "Iberian Atlantic (IA)",
                                                     "South-eastern Spain (SES)")) +
    labs(fill = "Gene pools",
         y = "",
         x = "") +
    theme_bw() + theme(axis.text = element_text(size=22),
                       axis.title = element_text(size=22),
                       legend.text = element_text(size=16),
                       legend.title = element_text(size=17))
}
# Single shared legend: drop it from the left (M7) panel only.
figs.GP[[1]] <- figs.GP[[1]] + theme(legend.position = "none")
pGP <- ggarrange(figs.GP[[1]],figs.GP[[2]],
                 labels=c("M7. a)","M8. a)"),
                 font.label = list(size = 20),
                 hjust=c(-0.1,-0.1),
                 vjust=c(1.6,1.6),
                 nrow=1,
                 widths = c(1,1.3))
# Panels b. Provenance climates and PEAs
# One ridge plot per model showing the site-specific slopes for the
# provenance climate covariates (bio5 = max temp, bio14 = min precipitation)
# and the PEA covariate (gPEA for M7, rPEA for M8).
for(i in 1:2){
  if(i==1){
    pea="gPEA"
    variable ="gPEA.sc"
  } else if (i==2){
    pea="rPEA"
    variable ="rPEA.sc"
  }
  # Pull all site-level slope draws and give them readable column names.
  POST <- posterior_samples(models[[i]],pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
    beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
    beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
    beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
    beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
    beta_PEA_Caceres = paste0('r_site[caceres,',variable,']'),
    beta_MinPre_Portugal = 'r_site[portugal,bio14_prov.sc]',
    beta_MinPre_Bordeaux = 'r_site[bordeaux,bio14_prov.sc]',
    beta_MinPre_Asturias = 'r_site[asturias,bio14_prov.sc]',
    beta_MinPre_Madrid = 'r_site[madrid,bio14_prov.sc]',
    beta_MinPre_Caceres = 'r_site[caceres,bio14_prov.sc]',
    beta_MaxTemp_Portugal = 'r_site[portugal,bio5_prov.sc]',
    beta_MaxTemp_Bordeaux = 'r_site[bordeaux,bio5_prov.sc]',
    beta_MaxTemp_Asturias = 'r_site[asturias,bio5_prov.sc]',
    beta_MaxTemp_Madrid = 'r_site[madrid,bio5_prov.sc]',
    beta_MaxTemp_Caceres = 'r_site[caceres,bio5_prov.sc]'
  )
  POST <- as.data.frame(t(POST))
  POST$var <- as.factor(rownames(POST))
  posteriorsimpelmodellong <- POST %>% as_tibble() %>%
    gather(key = "key", value = "value", -var)
  figs.beta[[i]] <- ggplot()+
    geom_vline(xintercept = 0, col="grey70") +
    stat_density_ridges(data = posteriorsimpelmodellong,
                        aes(x = value,
                            y = var,
                            fill = as.factor(var),
                            vline_color = ..quantile..),
                        scale = 1.8,
                        alpha = .8,
                        size=0.5,
                        rel_min_height=.01,
                        quantile_lines = TRUE,
                        quantiles = c(0.025,0.5,0.975)) +
    # NOTE(review): these legend labels say "5% & 95% quantiles"/"mean"
    # but the lines drawn are the 2.5/97.5 percentiles and the median
    # (quantiles = c(0.025, 0.5, 0.975)) — labels look wrong; confirm.
    scale_discrete_manual("vline_color",
                          values = c("blue", "red", "blue", "black"),
                          breaks = c(1, 2),
                          labels = c("5% & 95% quantiles", "mean"),
                          name = NULL) +
    scale_y_discrete(labels=c("beta_MaxTemp_Caceres"=parse(text = TeX("$\\beta_{max.temp,Caceres}$")),
                              "beta_MaxTemp_Bordeaux"=parse(text = TeX("$\\beta_{max.temp,Bordeaux}$")),
                              "beta_MaxTemp_Portugal"=parse(text = TeX("$\\beta_{max.temp,Portugal}$")),
                              "beta_MaxTemp_Madrid"=parse(text = TeX("$\\beta_{max.temp,Madrid}$")),
                              "beta_MaxTemp_Asturias"=parse(text = TeX("$\\beta_{max.temp,Asturias}$")),
                              "beta_MinPre_Caceres"=parse(text = TeX("$\\beta_{min.pre,Caceres}$")),
                              "beta_MinPre_Bordeaux"=parse(text = TeX("$\\beta_{min.pre,Bordeaux}$")),
                              "beta_MinPre_Portugal"=parse(text = TeX("$\\beta_{min.pre,Portugal}$")),
                              "beta_MinPre_Madrid"=parse(text = TeX("$\\beta_{min.pre,Madrid}$")),
                              "beta_MinPre_Asturias"=parse(text = TeX("$\\beta_{min.pre,Asturias}$")),
                              "beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
                              "beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
                              "beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
                              "beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
                              "beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
    )) +
    labs(y = "",
         x = "") +
    # vir_lite() and ds are project-defined (presumably a colour-lightening
    # helper and its desaturation level — TODO confirm).
    scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
                               vir_lite("navyblue",ds=ds),
                               vir_lite("pink",ds=ds),
                               vir_lite("deeppink",ds=ds),
                               vir_lite("dodgerblue2",ds=ds),
                               vir_lite("cyan2",ds=ds),
                               vir_lite("navyblue",ds=ds),
                               vir_lite("pink",ds=ds),
                               vir_lite("deeppink",ds=ds),
                               vir_lite("dodgerblue2",ds=ds),
                               vir_lite("cyan2",ds=ds),
                               vir_lite("navyblue",ds=ds),
                               vir_lite("pink",ds=ds),
                               vir_lite("deeppink",ds=ds),
                               vir_lite("dodgerblue2",ds=ds))) +
    theme_bw() + theme(axis.text = element_text(size=22),
                       axis.title = element_text(size=22),
                       legend.position = "none",
                       legend.text = element_text(size=16),
                       legend.title = element_text(size=17))
}
pbeta <- ggarrange(figs.beta[[1]],figs.beta[[2]],
                   labels=c("M7. b)","M8. b)"),
                   font.label = list(size = 20),
                   hjust=c(-0.1,-0.1),
                   vjust=c(1.6,1.6),
                   nrow=1)
# Merge the panels:
figtot <- ggarrange(pGP,pbeta,nrow=2,heights=c(1,2))
# Save the figure:
ggsave(figtot,file=paste0("figs/SuppInfo/M7M8Posteriors",part,".png"),height=12,width=20)
# Model M9 ####
# ======== "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD9.rds"))
# >> Tables S32 and S41. ####
# ---------------------- "
# Same table-building pattern as for M7/M8 above: variance components
# first (SD draws squared via the project helper square() — presumably
# x^2; TODO confirm), then fixed effects on the raw scale.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_prov__Intercept'="$\\sigma^{2}_{P}$",
                                              'sd_prov:clon__Intercept'='$\\sigma^{2}_{G}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_mmQ1Q2Q3Q4Q5Q6__Intercept'='$\\sigma^{2}_{g_{j}}$',
                                              'sd_site:block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M9_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M10 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD10.rds"))
# >> Tables S33 and S42. ####
# ---------------------- "
# Same pattern as M9 above; M10 has site-specific climate slopes
# (bio5 = max temp, bio14 = min precipitation) instead of genetic terms.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site__bio5_prov.sc'="$\\sigma^{2}_{\\beta_{max.temp,s}}$",
                                              'sd_site__bio14_prov.sc'="$\\sigma^{2}_{\\beta_{min.pre,s}}$",
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M10_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M11 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD11.rds"))
# >> Tables S34 and S43. ####
# ---------------------- "
# Same pattern as M9/M10 above; M11's only site-level slope is gPEA.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site__gPEA.sc'="$\\sigma^{2}_{\\beta_{gPEA,s}}$",
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M11_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# Model M12 ####
# ========= "
# only for the P1 and P2 partition.
mod <- readRDS(file= paste0("outputs/models/",part,"/MOD12.rds"))
# >> Tables S35 and S44. ####
# ---------------------- "
# Same pattern as M9–M11 above; M12's only site-level slope is rPEA.
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(sd|sigma)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- square(sims[, , i])
  quan <- unname(quantile(sims_i, probs = probs))
  df[i,"InfCI"] <- quan[1]
  df[i,"SupCI"] <- quan[2]
  df[i,"Median"] <- median(sims_i)
  df[i,"SD"] <- sd(sims_i)}
df <- df %>% mutate(Parameter = recode_factor(rownames(df),
                                              'sigma'='$\\sigma^{2}$',
                                              'sd_site__Intercept'='$\\sigma^{2}_{S}$',
                                              'sd_site__rPEA.sc'="$\\sigma^{2}_{\\beta_{rPEA,s}}$",
                                              'sd_block__Intercept'='$\\sigma^{2}_{B}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
pars <- mod %>% get_variables() %>% as.vector() %>% str_subset("^(b)")
sims <- as.array(mod, pars = pars, fixed = TRUE)
df2 <- as.data.frame(matrix(NA,length(pars),4,dimnames = list(pars,c("Median","SD","InfCI","SupCI"))))
for(i in pars){
  sims_i <- sims[, , i]
  quan <- unname(quantile(sims_i, probs = probs))
  df2[i,"InfCI"] <- quan[1]
  df2[i,"SupCI"] <- quan[2]
  df2[i,"Median"] <- median(sims_i)
  df2[i,"SD"] <- sd(sims_i)}
df2 <- df2 %>% mutate(Parameter = recode_factor(rownames(df2),
                                                'b_age.sc'="$\\beta_{age}$",
                                                'b_Iage.scE2'= '$\\beta_{age2}$',
                                                'b_Intercept'='$\\beta_{0}$')) %>%
  remove_rownames() %>%
  dplyr::select(Parameter,Median,SD,InfCI,SupCI)
tab <- bind_rows(df,df2)
print(xtable(tab, type = "latex",digits=3),
      file = paste0("tables/Posteriors/M12_MainVarPost.tex"),
      include.rownames=FALSE,
      sanitize.text.function = function(x) {x})
# >> Figures S16, S18 and S20. ####
# ---------------------------- "
# for the three partitions
# >>>> Panel M9 => Population structure
# Ridge plot of the gene-pool intercept posteriors for model M9.
# NOTE(review): posterior_samples() is deprecated in recent brms;
# ..quantile.. is the pre-ggplot2-3.4 spelling of after_stat(quantile).
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD9.rds"))
POST <- posterior_samples(mod,pars = "^r_mmQ1Q2Q3Q4Q5Q6\\[")
# str_sub(…, 18, -12): strip "r_mmQ1Q2Q3Q4Q5Q6[" (17 chars) and
# ",Intercept]" (11 chars), leaving the gene-pool code (Qk).
colnames(POST) <- str_sub(colnames(POST),18,-12)
POST <- as.data.frame(t(POST))
POST$genepool <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -genepool)%>%
  group_by(genepool) %>%
  dplyr::mutate(meanpergenepool = mean(value)) %>%
  ungroup()
pGP <- ggplot()+
  geom_vline(xintercept = 0,
             col = "grey70") +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = reorder(as.factor(genepool), meanpergenepool),
                          fill = as.factor(genepool),
                          vline_color = ..quantile..),
                      scale = 2,
                      alpha = .6,
                      rel_min_height=c(.001),
                      size=0.5,
                      quantile_lines = TRUE, quantiles = c(0.025,0.5,0.975)) +
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("2.5 and 97.5th percentiles", "Median"),
                        name = NULL) +
  coord_cartesian(c(-0.5,0.4))+
  scale_fill_manual(values=c("orangered3",
                             "gold2",
                             "darkorchid3",
                             "navyblue",
                             "turquoise2",
                             "green3"), labels = c("Northern Africa (NA)",
                                                   "Corsica (C)",
                                                   "Central Spain (CS)",
                                                   "French Atlantic (FA)",
                                                   "Iberian Atlantic (IA)",
                                                   "South-eastern Spain (SES)")) +
  scale_y_discrete(labels=c("Q1"=parse(text = TeX("$g_{NA}$")),
                            "Q2"=parse(text = TeX("$g_{C}$")),
                            "Q3"=parse(text = TeX("$g_{CS}$")),
                            "Q4"=parse(text = TeX("$g_{FA}$")),
                            "Q5"=parse(text = TeX("$g_{IA}$")),
                            "Q6"=parse(text = TeX("$g_{SES}$")))) +
  labs(fill = "Gene pools",
       y = "",
       x="") +
  theme_bw() + theme(axis.text = element_text(size=18),
                     axis.title = element_text(size=18),
                     legend.text = element_text(size=16),
                     legend.title = element_text(size=17))
# >>>> Panel M10 => Climate in the provenances
# Ridge plot of M10's site-specific slopes for the provenance climate
# covariates (bio5 = max temp, bio14 = min precipitation).
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD10.rds"))
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
  beta_MinPre_Portugal = 'r_site[portugal,bio14_prov.sc]',
  beta_MinPre_Bordeaux = 'r_site[bordeaux,bio14_prov.sc]',
  beta_MinPre_Asturias = 'r_site[asturias,bio14_prov.sc]',
  beta_MinPre_Madrid = 'r_site[madrid,bio14_prov.sc]',
  beta_MinPre_Caceres = 'r_site[caceres,bio14_prov.sc]',
  beta_MaxTemp_Portugal = 'r_site[portugal,bio5_prov.sc]',
  beta_MaxTemp_Bordeaux = 'r_site[bordeaux,bio5_prov.sc]',
  beta_MaxTemp_Asturias = 'r_site[asturias,bio5_prov.sc]',
  beta_MaxTemp_Madrid = 'r_site[madrid,bio5_prov.sc]',
  beta_MaxTemp_Caceres = 'r_site[caceres,bio5_prov.sc]'
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -var)
pCP <- ggplot()+
  geom_vline(xintercept = 0, col="grey70") +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = var,
                          fill = as.factor(var),
                          vline_color = ..quantile..),
                      scale = 1.8,
                      alpha = .8,
                      size=0.5,
                      rel_min_height=.01,
                      quantile_lines = TRUE,
                      quantiles = c(0.025,0.5,0.975)) +
  # NOTE(review): labels say "5% & 95% quantiles"/"mean" but the drawn
  # lines are the 2.5/97.5 percentiles and the median — confirm.
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("5% & 95% quantiles", "mean"),
                        name = NULL) +
  scale_y_discrete(labels=c("beta_MaxTemp_Caceres"=parse(text = TeX("$\\beta_{max.temp,Caceres}$")),
                            "beta_MaxTemp_Bordeaux"=parse(text = TeX("$\\beta_{max.temp,Bordeaux}$")),
                            "beta_MaxTemp_Portugal"=parse(text = TeX("$\\beta_{max.temp,Portugal}$")),
                            "beta_MaxTemp_Madrid"=parse(text = TeX("$\\beta_{max.temp,Madrid}$")),
                            "beta_MaxTemp_Asturias"=parse(text = TeX("$\\beta_{max.temp,Asturias}$")),
                            "beta_MinPre_Caceres"=parse(text = TeX("$\\beta_{min.pre,Caceres}$")),
                            "beta_MinPre_Bordeaux"=parse(text = TeX("$\\beta_{min.pre,Bordeaux}$")),
                            "beta_MinPre_Portugal"=parse(text = TeX("$\\beta_{min.pre,Portugal}$")),
                            "beta_MinPre_Madrid"=parse(text = TeX("$\\beta_{min.pre,Madrid}$")),
                            "beta_MinPre_Asturias"=parse(text = TeX("$\\beta_{min.pre,Asturias}$")))) +
  labs(y = "",
       x="") +
  scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
                             vir_lite("navyblue",ds=ds),
                             vir_lite("pink",ds=ds),
                             vir_lite("deeppink",ds=ds),
                             vir_lite("dodgerblue2",ds=ds),
                             vir_lite("cyan2",ds=ds),
                             vir_lite("navyblue",ds=ds),
                             vir_lite("pink",ds=ds),
                             vir_lite("deeppink",ds=ds),
                             vir_lite("dodgerblue2",ds=ds))) +
  theme_bw() + theme(axis.text = element_text(size=22),
                     axis.title = element_text(size=22),
                     legend.position = "none",
                     plot.title = element_text(size=22))
# >>>> Panel M11 => gPEAs
# Ridge plot of M11's site-specific gPEA slopes; this is the only panel
# that keeps its fill legend (shared across the assembled figure).
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD11.rds"))
pea="gPEA"
variable ="gPEA.sc"
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
  beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
  beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
  beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
  beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
  beta_PEA_Caceres = paste0('r_site[caceres,',variable,']')
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -var)
pgpea <- ggplot()+
  geom_vline(xintercept = 0, col="grey70") +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = var,
                          fill = as.factor(var),
                          vline_color = ..quantile..),
                      scale = 1.8,
                      alpha = .8,
                      size=0.5,
                      rel_min_height=.01,
                      quantile_lines = TRUE,
                      quantiles = c(0.025,0.5,0.975)) +
  # NOTE(review): labels say "5% & 95% quantiles"/"mean" but the drawn
  # lines are the 2.5/97.5 percentiles and the median — confirm.
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("5% & 95% quantiles", "mean"),
                        name = NULL) +
  scale_y_discrete(labels=c("beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
                            "beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
                            "beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
                            "beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
                            "beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
  )) +
  labs(fill = "Sites",
       y = "",
       x="") +
  scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
                             vir_lite("navyblue",ds=ds),
                             vir_lite("pink",ds=ds),
                             vir_lite("deeppink",ds=ds),
                             vir_lite("dodgerblue2",ds=ds)),
                    labels = c("Asturias",
                               "Bordeaux",
                               "Caceres",
                               "Madrid",
                               "Portugal")) +
  theme_bw() + theme(axis.text = element_text(size=22),
                     axis.title = element_text(size=22),
                     legend.text = element_text(size=16),
                     legend.title = element_text(size=17),
                     plot.title = element_text(size=22)) +
  guides(vline_color = FALSE)
# >>>> Panel M12 => rPEAs
# Ridge plot of M12's site-specific rPEA slopes, then assembly of the
# four panels (M9–M12) and export of the figure.
mod <- readRDS(file=paste0("outputs/models/",part,"/MOD12.rds"))
pea="rPEA"
variable ="rPEA.sc"
POST <- posterior_samples(mod,pars = "^r_site\\[.*sc\\]") %>% dplyr::rename(
  beta_PEA_Portugal = paste0('r_site[portugal,',variable,']'),
  beta_PEA_Bordeaux = paste0('r_site[bordeaux,',variable,']'),
  beta_PEA_Asturias = paste0('r_site[asturias,',variable,']'),
  beta_PEA_Madrid = paste0('r_site[madrid,',variable,']'),
  beta_PEA_Caceres = paste0('r_site[caceres,',variable,']')
)
POST <- as.data.frame(t(POST))
POST$var <- as.factor(rownames(POST))
posteriorsimpelmodellong <- POST %>%
  as_tibble() %>%
  gather(key = "key", value = "value", -var)
prpea <- ggplot()+
  geom_vline(xintercept = 0, col="grey70") +
  stat_density_ridges(data = posteriorsimpelmodellong,
                      aes(x = value,
                          y = var,
                          fill = as.factor(var),
                          vline_color = ..quantile..),
                      scale = 1.8,
                      alpha = .8,
                      size=0.5,
                      rel_min_height=.01,
                      quantile_lines = TRUE,
                      quantiles = c(0.025,0.5,0.975)) +
  # NOTE(review): labels say "5% & 95% quantiles"/"mean" but the drawn
  # lines are the 2.5/97.5 percentiles and the median — confirm.
  scale_discrete_manual("vline_color",
                        values = c("blue", "red", "blue", "black"),
                        breaks = c(1, 2),
                        labels = c("5% & 95% quantiles", "mean"),
                        name = NULL) +
  scale_y_discrete(labels=c("beta_PEA_Caceres"=parse(text = TeX(paste0("$\\beta_{",pea,",Caceres}$"))),
                            "beta_PEA_Madrid"=parse(text = TeX(paste0("$\\beta_{",pea,",Madrid}$"))),
                            "beta_PEA_Portugal"=parse(text = TeX(paste0("$\\beta_{",pea,",Portugal}$"))),
                            "beta_PEA_Asturias"=parse(text = TeX(paste0("$\\beta_{",pea,",Asturias}$"))),
                            "beta_PEA_Bordeaux"=parse(text = TeX(paste0("$\\beta_{",pea,",Bordeaux}$")))
  )) +
  labs(title="",
       y = "",
       x="") +
  scale_fill_manual(values=c(vir_lite("cyan2",ds=ds),
                             vir_lite("navyblue",ds=ds),
                             vir_lite("pink",ds=ds),
                             vir_lite("deeppink",ds=ds),
                             vir_lite("dodgerblue2",ds=ds))) +
  theme_bw() + theme(axis.text = element_text(size=22),
                     axis.title = element_text(size=22),
                     legend.position = "none",
                     plot.title = element_text(size=22))
# Assemble the 2x2 figure: top row M9/M10, bottom row M11/M12.
p1 <- ggarrange(pGP,pCP,labels=c("M9","M10"),font.label = list(size = 20),nrow=1,widths = c(1.2,1))
p2 <- ggarrange(pgpea,prpea,labels=c("M11","M12"),font.label = list(size = 20),nrow=1,widths = c(1.2,1))
fig <- ggarrange(p1,p2,nrow=2)
# P1 goes to the manuscript folder, other partitions to the SI folder.
if(part=="P1"){
  ggsave(fig, file=paste0("figs/manuscript/",part,"M9toM12PosteriorDistri.png"),width=20,height=12)
} else{
  ggsave(fig, file=paste0("figs/SuppInfo/",part,"M9toM12PosteriorDistri.png"),width=20,height=12)
}
# (removed stray '|' — a non-parsing artifact left over from file concatenation)
#
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/4/09
# filename: Mowers_Ch6_Exercise
#############################################################################
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: a
# other files:
##########################################################################/--
# Eigenvalues of a 2x2 matrix via the characteristic polynomial:
#   lambda = (tr(M) +/- sqrt(tr(M)^2 - 4*det(M))) / 2
# Entries are coerced to complex first, so a negative discriminant
# yields a complex-conjugate pair instead of NaN.
# Returns a complex vector of length two, "+" root first.
ErinEigen <- function(M) {
  # as.complex() flattens column-major: M[1,1], M[2,1], M[1,2], M[2,2]
  entry <- as.complex(M)
  tr <- entry[1] + entry[4]
  det <- (entry[1] * entry[4]) - (entry[3] * entry[2])
  disc <- sqrt(tr^2 - 4 * det)
  c((tr + disc) / 2, (tr - disc) / 2)
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: b
# other files:
##########################################################################/--
# Check a user-supplied 2x2 eigenvalue routine against base eigen().
#
# fun:    function taking a 2x2 matrix and returning its two eigenvalues.
# nTests: number of random integer matrices to try.
#
# Results are compared as complex with all.equal() tolerance, accepting
# either ordering of the pair. On the first mismatch a diagnostic is
# printed and a list(testMatrix, systemResult, testResult) is returned;
# otherwise a success message is printed (return value NULL, from cat()).
testEigenFunction <- function(fun, nTests=50) {
  # seq_len() (not 1:nTests) so nTests = 0 performs no iterations
  # instead of looping over c(1, 0) and failing spuriously.
  for (ii in seq_len(nTests)) {
    A <- matrix(floor(runif(n=4, min=-100, max=100)), nrow=2);
    sysResult <- as.complex(eigen(A)$values);
    testResult <- as.complex(fun(A));
    if (! (isTRUE(all.equal(sysResult, testResult)) ||
           isTRUE(all.equal(sysResult, testResult[c(2, 1)])) ) ) {
      cat(paste('Error encountered in test #', ii,
                ', further tests aborted.\n', sep='') );
      return(list(testMatrix=A, systemResult=sysResult,
                  testResult=testResult));
    }
  }
  cat(paste('Success. All ', nTests,
            ' tests completed successfully.\n', sep=''));
}
#To test:
# Validates ErinEigen() against base eigen() on 50 random 2x2 matrices;
# prints a success message (or the first failing matrix) to the console.
testEigenFunction(ErinEigen, nTests=50)
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: c
# other files: NA
##########################################################################/--
# Four test systems. matrix() fills column-major, so c(a, c, b, d)
# produces [[a, b], [c, d]].
M1 <- matrix(c(0,1,-2,0), nrow=2)
M2 <- matrix(c(3,4,2,1), nrow=2)
M3 <- matrix(c(-4,3,-2,-1), nrow=2)
M4 <- matrix(c(2,1,1,2), nrow=2)
#To find the eigenvalues:
ErinEigen(M1)
#Zero real part, nonzero imaginary part
ErinEigen(M2)
#Positive real part, zero imaginary part
#Negative real part, zero imaginary part
# (the two lines above describe M2's two eigenvalues, 5 and -1: a saddle)
ErinEigen(M3)
#Negative real part, nonzero imaginary part
ErinEigen(M4)
#Positive real part, zero imaginary part
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: a
# other files: NA
##########################################################################/--
# Rescale `vec` so its Euclidean norm equals `total` (default: unit length).
# A zero vector yields NaN (division by zero norm).
normalizeVector <- function(vec, total=1.0) {
  euclidean <- sqrt(sum(vec^2))
  vec * total / euclidean
}
#For the below, M is a 2x2 matrix,
#xRange and yRange are vectors of length two.
phase_plane <- function(M, xRange, yRange) {
  # Direction-field plot for the linear system d/dt (x, y) = M %*% (x, y):
  # a 21 x 21 grid of unit-length arrows over the requested window.
  # At a fixed point (e.g. the origin) the normalized direction is NaN
  # and arrows() silently draws nothing there.
  a <- M[1, 1]
  b <- M[1, 2]
  cc <- M[2, 1]
  d <- M[2, 2]
  # Empty canvas spanning the requested window.
  plot(c(xRange[1], xRange[2]), c(yRange[1], yRange[2]),
       type='n', main='Mowers 2a: Phase Plane',
       xlab='x', ylab='y')
  xstep <- abs(xRange[1] - xRange[2]) / 20
  ystep <- abs(yRange[1] - yRange[2]) / 20
  # Grid coordinates built by cumulative addition from the lower bounds.
  xVec <- yVec <- numeric(length = 21)
  xVec[1] <- xRange[1]
  yVec[1] <- yRange[1]
  for (k in 2:21) {
    xVec[k] <- xVec[k - 1] + xstep
    yVec[k] <- yVec[k - 1] + ystep
  }
  # One unit arrow per grid point, pointing along (xdot, ydot).
  for (ix in 1:21) {
    for (iy in 1:21) {
      xdot <- (a * xVec[ix]) + (b * yVec[iy])
      ydot <- (cc * xVec[ix]) + (d * yVec[iy])
      dir <- normalizeVector(c(xdot, ydot), total = 1.0)
      arrows(xVec[ix], yVec[iy],
             xVec[ix] + dir[1], yVec[iy] + dir[2], length = 0.05)
    }
  }
  'See graph'
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: b
# other files: NA
##########################################################################/--
# Scale a vector to Euclidean length `total` (duplicate of the
# definition earlier in this file; kept so this section is standalone).
normalizeVector <- function(vec, total=1.0) {
  vec * total / sqrt(sum(vec * vec))
}
# Closed-form solution of the linear system d/dt (x, y)' = mat %*% (x, y)'
# with initial condition (x, y) at t = 0, evaluated on t in [0, 100] with
# step 0.01. The initial state is decomposed in the eigenbasis and the two
# exponential modes are recombined. Complex eigenvalues produce complex
# trajectories. Returns list(x = <vector>, y = <vector>).
solveOdeSystem <- function(mat, x, y) {
  decomp <- eigen(mat)
  vecs <- decomp$vectors
  vals <- decomp$values
  # Coefficients of the initial condition in the eigenbasis.
  coef <- solve(vecs, matrix(c(x, y), nrow = 2))
  tGrid <- seq(0, 100, by = 0.01)
  mode1 <- exp(vals[1] * tGrid)
  mode2 <- exp(vals[2] * tGrid)
  list(x = (coef[1] * vecs[1, 1]) * mode1 + (coef[2] * vecs[1, 2]) * mode2,
       y = (coef[1] * vecs[2, 1]) * mode1 + (coef[2] * vecs[2, 2]) * mode2)
}
#For the below, M is a 2x2 matrix,
#xRange and yRange are vectors of length two, and
#initialVals is a list of paired x & y values.
phase_plane2 <- function(M, xRange, yRange, initialVals=NULL) {
  # Direction-field plot for d/dt (x, y) = M %*% (x, y) (21 x 21 grid of
  # unit arrows), optionally overlaid with exact solution curves.
  #
  # M:           2x2 coefficient matrix.
  # xRange/yRange: length-2 vectors giving the plotting window.
  # initialVals: optional list of c(x0, y0) starting points; one curve
  #              per starting point, drawn via solveOdeSystem() in a
  #              distinct colour. NOTE: for complex eigenvalues the
  #              trajectories are complex and lines() discards the
  #              imaginary parts (with a warning).
  a <- (M[1,1])
  b <- (M[1,2])
  c <- (M[2,1])
  d <- (M[2,2])
  plot(c(xRange[1], xRange[2]), c(yRange[1], yRange[2]),
       type='n', main='Mowers: Phase Plane with Solution Curves',
       xlab='x', ylab='y')
  xstep <- abs(xRange[1] - xRange[2]) / 20
  ystep <- abs(yRange[1] - yRange[2]) / 20
  # Grid coordinates built by cumulative addition from the lower bounds.
  xVec <- yVec <- numeric(length=21)
  xVec[1] <- xRange[1]
  yVec[1] <- yRange[1]
  for(ii in 2:21) {
    xVec[ii] <- xVec[ii - 1] + xstep
    yVec[ii] <- yVec[ii - 1] + ystep
  }
  # One unit-length arrow per grid point, pointing along (xdot, ydot).
  for(ii in 1:21) {
    for(iii in 1:21) {
      xdot <- (a * xVec[ii]) + (b * yVec[iii])
      ydot <- (c * xVec[ii]) + (d * yVec[iii])
      derVec <- (normalizeVector(c(xdot, ydot), total=1.0))
      arrows(xVec[ii], yVec[iii],
             xVec[ii] + derVec[1], yVec[iii] + derVec[2], length=0.05)
    }
  }
  if(!is.null(initialVals)) {
    # seq_along() (not 1:length(initialVals)): an empty list now draws
    # no curves instead of erroring on initialVals[[1]].
    for(ii in seq_along(initialVals)) {
      solutions <- solveOdeSystem(M, initialVals[[ii]][1], initialVals[[ii]][2])
      lines(solutions$x, solutions$y, col=ii+1, lwd=3)
    }
  }
  return('See graph')
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: c
# other files: NA
##########################################################################/--
# The same four systems as in question 1c (column-major fill).
mat1 <- matrix(c(0,1,-2,0), nrow=2)
mat2 <- matrix(c(3,4,2,1), nrow=2)
mat3 <- matrix(c(-4,3,-2,-1), nrow=2)
mat4 <- matrix(c(2,1,1,2), nrow=2)
# Two starting points; each draws one solution curve per phase plane.
initialValues <- list(c(3,7), c(-4, 0.5))
#To produce the plots requested:
# NOTE(review): mat1 and mat3 have complex eigenvalues, so
# solveOdeSystem() returns complex x/y and lines() discards the
# imaginary parts with a warning — the drawn curves are the real parts.
phase_plane2(mat1, c(-12,12), c(-10,10), initialValues)
phase_plane2(mat2, c(-20,20), c(-20,20), initialValues)
phase_plane2(mat3, c(-10,10), c(-10,10), initialValues)
phase_plane2(mat4, c(-10,10), c(-10,10), initialValues)
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: a
# other files: See handwritten pages.
##########################################################################/--
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: b
# other files: See handwritten pages.
##########################################################################/--
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: c
# other files: See print outs of graphs for explanation.
##########################################################################/--
# Question 3c: phase planes for four 2-species interaction matrices
# (no solution curves — initialVals omitted).
relationship1 <- matrix(c(2,1,1,2), nrow=2)
phase_plane2(relationship1, c(-10,10), c(-10,10))
relationship2 <- matrix(c(1,-1,-1,1), nrow=2)
phase_plane2(relationship2, c(-10,10), c(-10,10))
relationship3 <- matrix(c(1,-2,-2,1), nrow=2)
phase_plane2(relationship3, c(-10,10), c(-10,10))
relationship4 <- matrix(c(-3,-2,-2,-3), nrow=2)
phase_plane2(relationship4, c(-10,10), c(-10,10))
##############
#Provided functions
##############
# Rescale `vec` so that its Euclidean length equals `total` (default 1).
# Returns a vector the same length as `vec`.
normalizeVector <- function(vec, total=1.0) {
  vecLength <- sqrt(sum(vec ^ 2))
  vec * total / vecLength
}
# Analytic solution of the linear system v' = mat %*% v with initial
# condition (x, y), via eigen-decomposition:
#   v(t) = A*u1*exp(l1*t) + B*u2*exp(l2*t)
# Evaluated on t in [0, 100] with step 0.01; returns list(x = ..., y = ...).
solveOdeSystem <- function(mat, x, y) {
  decomp <- eigen(mat)
  vecs <- decomp$vectors
  vals <- decomp$values
  # Coefficients (A, B) solving vecs %*% c(A, B) == (x, y).
  coefs <- solve(vecs, matrix(c(x, y), nrow = 2))
  cA <- coefs[1]
  cB <- coefs[2]
  times <- seq(0, 100, by = 0.01)
  growth1 <- exp(vals[1] * times)
  growth2 <- exp(vals[2] * times)
  xx <- (cA * vecs[1, 1]) * growth1 + (cB * vecs[1, 2]) * growth2
  yy <- (cA * vecs[2, 1]) * growth1 + (cB * vecs[2, 2]) * growth2
  list(x = xx, y = yy)
}
# Validate a user-supplied eigenvalue routine `fun` against base eigen()
# on random integer 2x2 matrices.  Eigenvalues may be returned in either
# order.  Prints a success message when all trials pass; on the first
# mismatch prints an error and returns the offending matrix plus both
# results.
testEigenFunction <- function(fun, nTests=50) {
  for (ii in 1:nTests) {
    testMat <- matrix(floor(runif(n = 4, min = -100, max = 100)), nrow = 2)
    expected <- as.complex(eigen(testMat)$values)
    actual <- as.complex(fun(testMat))
    sameOrder <- isTRUE(all.equal(expected, actual))
    swapped <- isTRUE(all.equal(expected, actual[c(2, 1)]))
    if (!sameOrder && !swapped) {
      cat(paste('Error encountered in test #', ii,
                ', further tests aborted.\n', sep=''))
      return(list(testMatrix = testMat, systemResult = expected,
                  testResult = actual))
    }
  }
  cat(paste('Success. All ', nTests,
            ' tests completed successfully.\n', sep=''))
}
# Stronger validation of a user-supplied 2x2 eigen routine `fun`: compares
# both eigenvalues and unit-normalised eigenvectors against base eigen() on
# random integer matrices.  Eigenvalues may come back in either order, and
# eigenvectors are accepted up to an overall sign flip.
# On success prints a message (returns NULL); on the first mismatch prints
# an error and returns list(matrix, reference solution, candidate solution).
# NOTE(review): relies on normalizeVector() defined earlier in this file,
# and assumes `fun` returns a list with $values and $vectors — confirm.
testEigenFunction2 <- function(fun, nTests=50) {
  for (ii in 1:nTests) {
    mat <- matrix(floor(runif(n=4, min=-100, max=100)), nrow=2);
    # Reference solution with columns scaled to unit Euclidean length.
    sSol <- eigen(mat);
    sSol$vectors[, 1] <- as.complex(normalizeVector(sSol$vectors[, 1]));
    sSol$vectors[, 2] <- as.complex(normalizeVector(sSol$vectors[, 2]));
    sSol$values <- as.complex(sSol$values);
    # Candidate solution, normalised the same way.
    mySol <- fun(mat);
    mySol$vectors[, 1] <- as.complex(normalizeVector(mySol$vectors[, 1]));
    mySol$vectors[, 2] <- as.complex(normalizeVector(mySol$vectors[, 2]));
    mySol$values <- as.complex(mySol$values);
    # Eigenvalues must match in the given or the swapped order.
    if (!( isTRUE(all.equal(sSol$values, mySol$values)) ||
           isTRUE(all.equal(sSol$values, mySol$values[c(2, 1)] )) )) {
      cat(sprintf('Error: eigenvalue result mismatch on iteration %d.\n',
                  ii));
      return(list(matrix=mat, sSolution=sSol, mySolution=mySol));
    }
    # If the values only matched in swapped order, swap the reference
    # eigenvector columns so the columns line up for the comparison below.
    if (!(isTRUE(all.equal(sSol$values, mySol$values)))) {
      sSol$vectors <- sSol$vectors[, c(2, 1)];
    }
    # Eigenvectors are direction-only: accept v or -v for each column.
    if ( (!isTRUE(all.equal(sSol$vectors[,1], mySol$vectors[,1]))) &&
         (!isTRUE(all.equal(-1 * sSol$vectors[,1], mySol$vectors[,1]))) ) {
      cat(sprintf('Error: eigenvector result mismatch on iteration %d.\n',
                  ii));
      return(list(matrix=mat, sSolution=sSol, mySolution=mySol));
    } else if ( (!isTRUE(all.equal(sSol$vectors[,2], mySol$vectors[,2]))) &&
                (!isTRUE(all.equal(-1 * sSol$vectors[,2],
                                   mySol$vectors[,2])))) {
      cat(sprintf('Error: eigenvector result mismatch on iteration %d.\n',
                  ii));
      # NOTE(review): this branch names the element `systemSolution` while
      # the branch above uses `sSolution` — confirm the inconsistency.
      return(list(matrix=mat, systemSolution=sSol, mySolution=mySol));
    }
  }
  cat(sprintf('Success over %d trials.\n', nTests));
}
|
/homework/emowers/mowers.ch6.exercise.ver3.R
|
no_license
|
rosenbergdm/dynamicsystemslabs
|
R
| false
| false
| 10,844
|
r
|
#
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/4/09
# filename: Mowers_Ch6_Exercise
#############################################################################
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: a
# other files:
##########################################################################/--
# Eigenvalues of a 2x2 matrix via the characteristic-polynomial (quadratic)
# formula.  All arithmetic is done on complex numbers so that complex
# eigenvalue pairs are returned instead of NaN.
# M: a 2x2 numeric (or complex) matrix.
# Returns: complex vector c(lambda1, lambda2); lambda1 uses the '+' root.
ErinEigen <- function(M) {
  tr <- as.complex(M[1,1]) + as.complex(M[2,2])
  detM <- (as.complex(M[1,1]) * as.complex(M[2,2])) -
    (as.complex(M[1,2]) * as.complex(M[2,1]))
  discRoot <- sqrt(tr^2 - (4 * detM))
  c((tr + discRoot) / 2, (tr - discRoot) / 2)
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: b
# other files:
##########################################################################/--
# Validate a user-supplied eigenvalue routine `fun` against base eigen()
# on random integer 2x2 matrices.  Eigenvalues may be returned in either
# order.  Prints a success message when all trials pass; on the first
# mismatch prints an error and returns the offending matrix plus both
# results.
testEigenFunction <- function(fun, nTests=50) {
  for (ii in 1:nTests) {
    testMat <- matrix(floor(runif(n = 4, min = -100, max = 100)), nrow = 2)
    expected <- as.complex(eigen(testMat)$values)
    actual <- as.complex(fun(testMat))
    sameOrder <- isTRUE(all.equal(expected, actual))
    swapped <- isTRUE(all.equal(expected, actual[c(2, 1)]))
    if (!sameOrder && !swapped) {
      cat(paste('Error encountered in test #', ii,
                ', further tests aborted.\n', sep=''))
      return(list(testMatrix = testMat, systemResult = expected,
                  testResult = actual))
    }
  }
  cat(paste('Success. All ', nTests,
            ' tests completed successfully.\n', sep=''))
}
#To test:
# Runs the provided harness against ErinEigen (defined above); prints the
# success message when all 50 random matrices agree with base eigen().
testEigenFunction(ErinEigen, nTests=50)
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 1
# subquestion: c
# other files: NA
##########################################################################/--
# Question 1c: classify the eigenvalues of four example 2x2 systems using
# ErinEigen() defined above.  The comment after each call records the
# classification of the printed pair.
M1 <- matrix(c(0,1,-2,0), nrow=2)
M2 <- matrix(c(3,4,2,1), nrow=2)
M3 <- matrix(c(-4,3,-2,-1), nrow=2)
M4 <- matrix(c(2,1,1,2), nrow=2)
#To find the eigenvalues:
ErinEigen(M1)
#Zero real part, nonzero imaginary part
ErinEigen(M2)
#Positive real part, zero imaginary part
#Negative real part, zero imaginary part
ErinEigen(M3)
#Negative real part, nonzero imaginary part
ErinEigen(M4)
#Positive real part, zero imaginary part
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: a
# other files: NA
##########################################################################/--
# Rescale `vec` so that its Euclidean length equals `total` (default 1).
# Returns a vector the same length as `vec`.
normalizeVector <- function(vec, total=1.0) {
  vecLength <- sqrt(sum(vec ^ 2))
  vec * total / vecLength
}
#For the below, M is a 2x2 matrix,
#xRange and yRange are vectors of length two.
# Draws the direction field (phase plane) of the linear system
#   xdot = a*x + b*y ;  ydot = c*x + d*y
# on a 21 x 21 grid over xRange x yRange, one unit-length arrow per point.
# Side effect: produces a plot; returns the string 'See graph'.
# NOTE(review): relies on normalizeVector() defined elsewhere in this file;
# a grid point where xdot = ydot = 0 normalises to NaN — confirm acceptable.
phase_plane <- function(M, xRange, yRange) {
  a <- M[1,1]
  b <- M[1,2]
  c <- M[2,1]
  d <- M[2,2]
  # Empty canvas covering the requested ranges.
  plot(c(xRange[1], xRange[2]), c(yRange[1], yRange[2]),
       type='n', main='Mowers 2a: Phase Plane',
       xlab='x', ylab='y')
  # 21 evenly spaced grid coordinates per axis (20 steps).
  xstep <- abs(xRange[1] - xRange[2]) / 20
  ystep <- abs(yRange[1] - yRange[2]) / 20
  xVec <- yVec <- numeric(length=21)
  xVec[1] <- xRange[1]
  yVec[1] <- yRange[1]
  for(ii in 2:21) {
    xVec[ii] <- xVec[ii - 1] + xstep
    yVec[ii] <- yVec[ii - 1] + ystep
  }
  # One arrow per grid point, pointing along the normalised derivative.
  for(ii in 1:21) {
    for(iii in 1:21) {
      xdot <- (a * xVec[ii]) + (b * yVec[iii])
      ydot <- (c * xVec[ii]) + (d * yVec[iii])
      derVec <- normalizeVector(c(xdot, ydot), total=1.0)
      arrows(xVec[ii], yVec[iii],
             xVec[ii] + derVec[1], yVec[iii] + derVec[2], length=0.05)
    }
  }
  return('See graph')
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: b
# other files: NA
##########################################################################/--
# Rescale `vec` so that its Euclidean length equals `total` (default 1).
# Returns a vector the same length as `vec`.
normalizeVector <- function(vec, total=1.0) {
  vecLength <- sqrt(sum(vec ^ 2))
  vec * total / vecLength
}
# Analytic solution of the linear system v' = mat %*% v with initial
# condition (x, y), via eigen-decomposition:
#   v(t) = A*u1*exp(l1*t) + B*u2*exp(l2*t)
# Evaluated on t in [0, 100] with step 0.01; returns list(x = ..., y = ...).
solveOdeSystem <- function(mat, x, y) {
  decomp <- eigen(mat)
  vecs <- decomp$vectors
  vals <- decomp$values
  # Coefficients (A, B) solving vecs %*% c(A, B) == (x, y).
  coefs <- solve(vecs, matrix(c(x, y), nrow = 2))
  cA <- coefs[1]
  cB <- coefs[2]
  times <- seq(0, 100, by = 0.01)
  growth1 <- exp(vals[1] * times)
  growth2 <- exp(vals[2] * times)
  xx <- (cA * vecs[1, 1]) * growth1 + (cB * vecs[1, 2]) * growth2
  yy <- (cA * vecs[2, 1]) * growth1 + (cB * vecs[2, 2]) * growth2
  list(x = xx, y = yy)
}
#For the below, M is a 2x2 matrix,
#xRange and yRange are vectors of length two, and
#initialVals is a list of paired x & y values.
# Like phase_plane(), but additionally overlays the analytic solution curve
# (via solveOdeSystem) for each starting point in initialVals.
# Side effect: produces a plot; returns the string 'See graph'.
# NOTE(review): relies on normalizeVector() and solveOdeSystem() defined
# elsewhere in this file.
phase_plane2 <- function(M, xRange, yRange, initialVals=NULL) {
  a <- (M[1,1])
  b <- (M[1,2])
  c <- (M[2,1])
  d <- (M[2,2])
  # Empty canvas covering the requested ranges.
  plot(c(xRange[1], xRange[2]), c(yRange[1], yRange[2]),
       type='n', main='Mowers: Phase Plane with Solution Curves',
       xlab='x', ylab='y')
  # 21 x 21 grid of sample points (20 steps per axis).
  xstep <- abs(xRange[1] - xRange[2]) / 20
  ystep <- abs(yRange[1] - yRange[2]) / 20
  xVec <- yVec <- numeric(length=21)
  xVec[1] <- xRange[1]
  yVec[1] <- yRange[1]
  for(ii in 2:21) {
    xVec[ii] <- xVec[ii - 1] + xstep
    yVec[ii] <- yVec[ii - 1] + ystep
  }
  # Unit-length arrow along the derivative at each grid point.
  for(ii in 1:21) {
    for(iii in 1:21) {
      xdot <- (a * xVec[ii]) + (b * yVec[iii])
      ydot <- (c * xVec[ii]) + (d * yVec[iii])
      derVec <- (normalizeVector(c(xdot, ydot), total=1.0))
      arrows(xVec[ii], yVec[iii],
             xVec[ii] + derVec[1], yVec[iii] + derVec[2], length=0.05)
    }
  }
  # Overlay one coloured trajectory per initial condition.
  if(!is.null(initialVals)) {
    iter2 <- length(initialVals)
    for(ii in 1:iter2) {
      solutions <- solveOdeSystem(M, initialVals[[ii]][1], initialVals[[ii]][2])
      lines(solutions$x, solutions$y, col=ii+1, lwd=3)
    }
  }
  return('See graph')
}
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 2
# subquestion: c
# other files: NA
##########################################################################/--
# Question 2c: direction fields with solution curves for four example systems.
# (Eigenvalue classifications per question 1c above.)
mat1 <- matrix(c(0,1,-2,0), nrow=2)    # purely imaginary eigenvalues (center)
mat2 <- matrix(c(3,4,2,1), nrow=2)     # real eigenvalues of opposite sign (saddle)
mat3 <- matrix(c(-4,3,-2,-1), nrow=2)  # complex, negative real part (spiral sink)
mat4 <- matrix(c(2,1,1,2), nrow=2)     # real positive eigenvalues (source)
# Two (x, y) starting points for the overlaid trajectories.
initialValues <- list(c(3,7), c(-4, 0.5))
#To produce the plots requested:
phase_plane2(mat1, c(-12,12), c(-10,10), initialValues)
phase_plane2(mat2, c(-20,20), c(-20,20), initialValues)
phase_plane2(mat3, c(-10,10), c(-10,10), initialValues)
phase_plane2(mat4, c(-10,10), c(-10,10), initialValues)
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: a
# other files: See handwritten pages.
##########################################################################/--
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: b
# other files: See handwritten pages.
##########################################################################/--
#/--#########################################################################
# name: Erin Mowers
# assignment: Chapter 6
# date: 12/04/09
# question: 3
# subquestion: c
# other files: See print outs of graphs for explanation.
##########################################################################/--
# Question 3c: phase portraits for four 2x2 "relationship" models.
# No initial values are supplied, so only the direction field is drawn.
relationship1 <- matrix(c(2,1,1,2), nrow=2)
phase_plane2(relationship1, c(-10,10), c(-10,10))
relationship2 <- matrix(c(1,-1,-1,1), nrow=2)
phase_plane2(relationship2, c(-10,10), c(-10,10))
relationship3 <- matrix(c(1,-2,-2,1), nrow=2)
phase_plane2(relationship3, c(-10,10), c(-10,10))
relationship4 <- matrix(c(-3,-2,-2,-3), nrow=2)
phase_plane2(relationship4, c(-10,10), c(-10,10))
##############
#Provided functions
##############
# Rescale `vec` so that its Euclidean length equals `total` (default 1).
# Returns a vector the same length as `vec`.
normalizeVector <- function(vec, total=1.0) {
  vecLength <- sqrt(sum(vec ^ 2))
  vec * total / vecLength
}
# Analytic solution of the linear system v' = mat %*% v with initial
# condition (x, y), via eigen-decomposition:
#   v(t) = A*u1*exp(l1*t) + B*u2*exp(l2*t)
# Evaluated on t in [0, 100] with step 0.01; returns list(x = ..., y = ...).
solveOdeSystem <- function(mat, x, y) {
  decomp <- eigen(mat)
  vecs <- decomp$vectors
  vals <- decomp$values
  # Coefficients (A, B) solving vecs %*% c(A, B) == (x, y).
  coefs <- solve(vecs, matrix(c(x, y), nrow = 2))
  cA <- coefs[1]
  cB <- coefs[2]
  times <- seq(0, 100, by = 0.01)
  growth1 <- exp(vals[1] * times)
  growth2 <- exp(vals[2] * times)
  xx <- (cA * vecs[1, 1]) * growth1 + (cB * vecs[1, 2]) * growth2
  yy <- (cA * vecs[2, 1]) * growth1 + (cB * vecs[2, 2]) * growth2
  list(x = xx, y = yy)
}
# Validate a user-supplied eigenvalue routine `fun` against base eigen()
# on random integer 2x2 matrices.  Eigenvalues may be returned in either
# order.  Prints a success message when all trials pass; on the first
# mismatch prints an error and returns the offending matrix plus both
# results.
testEigenFunction <- function(fun, nTests=50) {
  for (ii in 1:nTests) {
    testMat <- matrix(floor(runif(n = 4, min = -100, max = 100)), nrow = 2)
    expected <- as.complex(eigen(testMat)$values)
    actual <- as.complex(fun(testMat))
    sameOrder <- isTRUE(all.equal(expected, actual))
    swapped <- isTRUE(all.equal(expected, actual[c(2, 1)]))
    if (!sameOrder && !swapped) {
      cat(paste('Error encountered in test #', ii,
                ', further tests aborted.\n', sep=''))
      return(list(testMatrix = testMat, systemResult = expected,
                  testResult = actual))
    }
  }
  cat(paste('Success. All ', nTests,
            ' tests completed successfully.\n', sep=''))
}
# Stronger validation of a user-supplied 2x2 eigen routine `fun`: compares
# both eigenvalues and unit-normalised eigenvectors against base eigen() on
# random integer matrices.  Eigenvalues may come back in either order, and
# eigenvectors are accepted up to an overall sign flip.
# On success prints a message (returns NULL); on the first mismatch prints
# an error and returns list(matrix, reference solution, candidate solution).
# NOTE(review): relies on normalizeVector() defined earlier in this file,
# and assumes `fun` returns a list with $values and $vectors — confirm.
testEigenFunction2 <- function(fun, nTests=50) {
  for (ii in 1:nTests) {
    mat <- matrix(floor(runif(n=4, min=-100, max=100)), nrow=2);
    # Reference solution with columns scaled to unit Euclidean length.
    sSol <- eigen(mat);
    sSol$vectors[, 1] <- as.complex(normalizeVector(sSol$vectors[, 1]));
    sSol$vectors[, 2] <- as.complex(normalizeVector(sSol$vectors[, 2]));
    sSol$values <- as.complex(sSol$values);
    # Candidate solution, normalised the same way.
    mySol <- fun(mat);
    mySol$vectors[, 1] <- as.complex(normalizeVector(mySol$vectors[, 1]));
    mySol$vectors[, 2] <- as.complex(normalizeVector(mySol$vectors[, 2]));
    mySol$values <- as.complex(mySol$values);
    # Eigenvalues must match in the given or the swapped order.
    if (!( isTRUE(all.equal(sSol$values, mySol$values)) ||
           isTRUE(all.equal(sSol$values, mySol$values[c(2, 1)] )) )) {
      cat(sprintf('Error: eigenvalue result mismatch on iteration %d.\n',
                  ii));
      return(list(matrix=mat, sSolution=sSol, mySolution=mySol));
    }
    # If the values only matched in swapped order, swap the reference
    # eigenvector columns so the columns line up for the comparison below.
    if (!(isTRUE(all.equal(sSol$values, mySol$values)))) {
      sSol$vectors <- sSol$vectors[, c(2, 1)];
    }
    # Eigenvectors are direction-only: accept v or -v for each column.
    if ( (!isTRUE(all.equal(sSol$vectors[,1], mySol$vectors[,1]))) &&
         (!isTRUE(all.equal(-1 * sSol$vectors[,1], mySol$vectors[,1]))) ) {
      cat(sprintf('Error: eigenvector result mismatch on iteration %d.\n',
                  ii));
      return(list(matrix=mat, sSolution=sSol, mySolution=mySol));
    } else if ( (!isTRUE(all.equal(sSol$vectors[,2], mySol$vectors[,2]))) &&
                (!isTRUE(all.equal(-1 * sSol$vectors[,2],
                                   mySol$vectors[,2])))) {
      cat(sprintf('Error: eigenvector result mismatch on iteration %d.\n',
                  ii));
      # NOTE(review): this branch names the element `systemSolution` while
      # the branch above uses `sSolution` — confirm the inconsistency.
      return(list(matrix=mat, systemSolution=sSol, mySolution=mySol));
    }
  }
  cat(sprintf('Success over %d trials.\n', nTests));
}
|
# Author: Robert J. Hijmans
# Date : December 2011
# Version 1.0
# Licence GPL v3
# S4 method: cover() for two (or more) SpatialPolygons objects.
# Converts the sp objects to terra SpatVectors and delegates the actual
# cover operation to terra; the commented-out code below is the retired
# rgeos/sp implementation kept for reference.
# NOTE(review): the result is returned as a terra SpatVector and is not
# converted back to a Spatial* object — confirm callers expect this.
setMethod('cover', signature(x='SpatialPolygons', y='SpatialPolygons'),
  function(x, y, ..., identity=FALSE){
    # warning("this method will be removed. You can use 'terra::cover<SpatVector,SpatVector>' instead")
    # valgeos <- .checkGEOS(); on.exit(rgeos::set_RGEOS_CheckValidity(valgeos))
    # prj <- x@proj4string
    # if (is.na(prj)) prj <- y@proj4string
    # x@proj4string <- sp::CRS(as.character(NA))
    # All trailing arguments must themselves be SpatialPolygons; any others
    # are dropped with a warning.
    yy <- list(y, ...)
    i <- which(sapply(yy, function(x) inherits(x, 'SpatialPolygons')))
    if (length(i)==0) {
      stop('additional arguments should be of class SpatialPolygons')
    } else if (length(i) < length(yy)) {
      warning('additional arguments that are not of class SpatialPolygons are ignored')
      yy <- yy[i]
    }
    # Delegate to terra, covering x by each remaining layer in turn.
    x <- vect(x)
    for (y in yy) {
      x <- cover(x, vect(y), identity=identity, expand=FALSE)
    }
    x
    # if (identity) {
    # x <- .coverIdentity(x, yy)
    # if (inherits(x, "Spatial")) { x@proj4string <- prj }
    # return(x)
    # }
    # for (y in yy) {
    # y@proj4string <- sp::CRS(as.character(NA))
    # subs <- rgeos::gIntersects(x, y, byid=TRUE)
    # if (!any(subs)) {
    # next
    # } else {
    # int <- crop(y, x)
    # x <- erase(x, int)
    # x <- bind(x, int)
    # }
    # }
    # x@proj4string <- prj
    # x
  }
)
# .coverIdentity <- function(x, yy) {
# for (y in yy) {
# y@proj4string <- sp::CRS(as.character(NA))
# i <- rgeos::gIntersects(x, y)
# if (!i) {
# next
# }
# x <- sp::spChFIDs(x, as.character(1:length(x)))
# y <- sp::spChFIDs(y, as.character(1:length(y)))
# if (.hasSlot(x, 'data')) {
# xnames <- colnames(x@data)
# } else {
# xnames <-NULL
# }
# if (.hasSlot(y, 'data')) {
# ynames <- colnames(y@data)
# } else {
# ynames <-NULL
# }
# if (is.null(xnames) & !is.null(ynames)) {
# dat <- y@data[NULL, ,drop=FALSE]
# dat[1:length(x), ] <- NA
# x <- sp::SpatialPolygonsDataFrame(x, dat)
# xnames <- ynames
# }
# yinx <- which(ynames %in% xnames)
# doAtt <- TRUE
# if (length(yinx) == 0) {
# doAtt <- FALSE
# }
# subs <- rgeos::gIntersects(x, y, byid=TRUE)
# subsx <- apply(subs, 2, any)
# subsy <- apply(subs, 1, any)
# int <- rgeos::gIntersection(x[subsx,], y[subsy,], byid=TRUE, drop_lower_td=TRUE)
# #if (inherits(int, "SpatialCollections")) {
# # if (is.null(int@polyobj)) { # ??
# # warning('polygons do not intersect')
# # next
# # }
# # int <- int@polyobj
# #}
# if (!inherits(int, 'SpatialPolygons')) {
# warning('polygons do not intersect')
# next
# }
# if (doAtt) {
# ids <- do.call(rbind, strsplit(row.names(int), ' '))
# idsy <- match(ids[,2], rownames(y@data))
# rows <- 1:length(idsy)
# dat <- x@data[NULL, ,drop=FALSE]
# dat[rows, yinx] <- y@data[idsy, yinx]
# int <- sp::SpatialPolygonsDataFrame(int, dat, match.ID=FALSE)
# }
# x <- erase(x, int)
# if (is.null(x)) {
# x <- int
# } else {
# x <- bind(x, int)
# }
# }
# x
# }
|
/R/coverPolygons.R
|
no_license
|
cran/raster
|
R
| false
| false
| 3,142
|
r
|
# Author: Robert J. Hijmans
# Date : December 2011
# Version 1.0
# Licence GPL v3
# S4 method: cover() for two (or more) SpatialPolygons objects.
# Converts the sp objects to terra SpatVectors and delegates the actual
# cover operation to terra; the commented-out code below is the retired
# rgeos/sp implementation kept for reference.
# NOTE(review): the result is returned as a terra SpatVector and is not
# converted back to a Spatial* object — confirm callers expect this.
setMethod('cover', signature(x='SpatialPolygons', y='SpatialPolygons'),
  function(x, y, ..., identity=FALSE){
    # warning("this method will be removed. You can use 'terra::cover<SpatVector,SpatVector>' instead")
    # valgeos <- .checkGEOS(); on.exit(rgeos::set_RGEOS_CheckValidity(valgeos))
    # prj <- x@proj4string
    # if (is.na(prj)) prj <- y@proj4string
    # x@proj4string <- sp::CRS(as.character(NA))
    # All trailing arguments must themselves be SpatialPolygons; any others
    # are dropped with a warning.
    yy <- list(y, ...)
    i <- which(sapply(yy, function(x) inherits(x, 'SpatialPolygons')))
    if (length(i)==0) {
      stop('additional arguments should be of class SpatialPolygons')
    } else if (length(i) < length(yy)) {
      warning('additional arguments that are not of class SpatialPolygons are ignored')
      yy <- yy[i]
    }
    # Delegate to terra, covering x by each remaining layer in turn.
    x <- vect(x)
    for (y in yy) {
      x <- cover(x, vect(y), identity=identity, expand=FALSE)
    }
    x
    # if (identity) {
    # x <- .coverIdentity(x, yy)
    # if (inherits(x, "Spatial")) { x@proj4string <- prj }
    # return(x)
    # }
    # for (y in yy) {
    # y@proj4string <- sp::CRS(as.character(NA))
    # subs <- rgeos::gIntersects(x, y, byid=TRUE)
    # if (!any(subs)) {
    # next
    # } else {
    # int <- crop(y, x)
    # x <- erase(x, int)
    # x <- bind(x, int)
    # }
    # }
    # x@proj4string <- prj
    # x
  }
)
# .coverIdentity <- function(x, yy) {
# for (y in yy) {
# y@proj4string <- sp::CRS(as.character(NA))
# i <- rgeos::gIntersects(x, y)
# if (!i) {
# next
# }
# x <- sp::spChFIDs(x, as.character(1:length(x)))
# y <- sp::spChFIDs(y, as.character(1:length(y)))
# if (.hasSlot(x, 'data')) {
# xnames <- colnames(x@data)
# } else {
# xnames <-NULL
# }
# if (.hasSlot(y, 'data')) {
# ynames <- colnames(y@data)
# } else {
# ynames <-NULL
# }
# if (is.null(xnames) & !is.null(ynames)) {
# dat <- y@data[NULL, ,drop=FALSE]
# dat[1:length(x), ] <- NA
# x <- sp::SpatialPolygonsDataFrame(x, dat)
# xnames <- ynames
# }
# yinx <- which(ynames %in% xnames)
# doAtt <- TRUE
# if (length(yinx) == 0) {
# doAtt <- FALSE
# }
# subs <- rgeos::gIntersects(x, y, byid=TRUE)
# subsx <- apply(subs, 2, any)
# subsy <- apply(subs, 1, any)
# int <- rgeos::gIntersection(x[subsx,], y[subsy,], byid=TRUE, drop_lower_td=TRUE)
# #if (inherits(int, "SpatialCollections")) {
# # if (is.null(int@polyobj)) { # ??
# # warning('polygons do not intersect')
# # next
# # }
# # int <- int@polyobj
# #}
# if (!inherits(int, 'SpatialPolygons')) {
# warning('polygons do not intersect')
# next
# }
# if (doAtt) {
# ids <- do.call(rbind, strsplit(row.names(int), ' '))
# idsy <- match(ids[,2], rownames(y@data))
# rows <- 1:length(idsy)
# dat <- x@data[NULL, ,drop=FALSE]
# dat[rows, yinx] <- y@data[idsy, yinx]
# int <- sp::SpatialPolygonsDataFrame(int, dat, match.ID=FALSE)
# }
# x <- erase(x, int)
# if (is.null(x)) {
# x <- int
# } else {
# x <- bind(x, int)
# }
# }
# x
# }
|
## "INFOF422 Statistical foundations of machine learning" course
## R package gbcode
## Author: G. Bontempi
#### plotStu ####
#' Plot Student distribution
#' @author Gianluca Bontempi \email{gbonte@@ulb.ac.be}
#' @references \url{mlg.ulb.ac.be}
#' @title Plot the Student density and cumulative distribution
#' @param N degrees of freedom of the Student t distribution (default 10).
#' @return Invisibly \code{NULL}; called for its side effect of drawing two
#'   plots (density, then cumulative distribution) on a grid over [-5, 5].
plotStu<-function(N=10){
  x<-seq(-5,5,by=.1)
  # Prompt before each new page, but restore the caller's previous "ask"
  # setting on exit (the original leaked ask=TRUE into the session).
  oldpar <- par(ask=TRUE)
  on.exit(par(oldpar), add = TRUE)
  plot(x,dt(x,N),main=paste("Student (N=" ,N,") density"),type="l")
  plot(x,pt(x,N),main=paste("Student (N=" ,N,") cumulative distribution"),type="l")
  invisible(NULL)
}
|
/inst/scripts/Probability/plotStu.R
|
no_license
|
gbonte/gbcode
|
R
| false
| false
| 533
|
r
|
## "INFOF422 Statistical foundations of machine learning" course
## R package gbcode
## Author: G. Bontempi
#### plotStu ####
#' Plot Student distribution
#' @author Gianluca Bontempi \email{gbonte@@ulb.ac.be}
#' @references \url{mlg.ulb.ac.be}
#' @title Plot the Student density and cumulative distribution
#' @param N degrees of freedom of the Student t distribution (default 10).
#' @return Invisibly \code{NULL}; called for its side effect of drawing two
#'   plots (density, then cumulative distribution) on a grid over [-5, 5].
plotStu<-function(N=10){
  x<-seq(-5,5,by=.1)
  # Prompt before each new page, but restore the caller's previous "ask"
  # setting on exit (the original leaked ask=TRUE into the session).
  oldpar <- par(ask=TRUE)
  on.exit(par(oldpar), add = TRUE)
  plot(x,dt(x,N),main=paste("Student (N=" ,N,") density"),type="l")
  plot(x,pt(x,N),main=paste("Student (N=" ,N,") cumulative distribution"),type="l")
  invisible(NULL)
}
|
#' Sort Inequalities by Acceptance Rate
#'
#' Uses samples from the prior/posterior to order the inequalities by the acceptance rate.
#'
#' @inheritParams inside
#' @param k optional: number of observed frequencies (only for posterior sampling).
#' @param options optional: number of options per item type/category system.
#' Uniform sampling on [0,1] for each parameter is used if omitted.
#' @param M number of samples.
#' @param drop_irrelevant whether to drop irrelevant constraints for probabilities such as
#' \code{theta[1] >= 0}, \code{theta[1] <= 1}, or \code{sum(theta) <= 1}.
#' @details
#'
#' Those constraints that are rejected most often are placed at the first positions.
#' This can help when computing the encompassing Bayes factor and counting how many samples
#' satisfy the constraints (e.g., \code{\link{count_binom}} or \code{\link{bf_multinom}}).
#' Essentially, it becomes more likely that the while-loop for testing
#' whether the inequalities hold can stop earlier, thus making the computation faster.
#'
#' The function could also be helpful to improve the efficiency of the stepwise
#' sampling implemented in \code{\link{count_binom}} and \code{\link{count_multinom}}.
#' First, one can use accept-reject sampling to test the first few, rejected
#' inequalities. Next, one can use a Gibbs sampler to draw samples conditional on the
#' first constraints.
#'
#'
#' @examples
#' ### Binomial probabilities
#' b <- c(0,0,.30,.70, 1)
#' A <- matrix(c(-1,1,0, # p1 >= p2
#' 0,1,-1, # p2 <= p3
#' 1,0,0, # p1 <=.30
#' 0,1,0, # p2 <= .70
#' 0,0,1), # p3 <= 1 (redundant)
#' ncol = 3, byrow = TRUE)
#' Ab_sort(A, b)
#'
#'
#' ### Multinomial probabilities
#' # prior sampling:
#' Ab_sort(A, b, options = 4)
#' # posterior sampling:
#' Ab_sort(A, b, k = c(10,3, 2, 14), options = 4)
#'
#' @export
Ab_sort <- function (A, b, k = 0, options, M = 1000, drop_irrelevant = TRUE){
  check_Ab(A, b)
  S <- ncol(A)
  # Draw M parameter samples: uniform on [0,1]^S when no category structure
  # is given, otherwise from the (product-)Dirichlet prior/posterior.
  if (missing(options)){
    x <- matrix(runif(M * S), M, S)
  } else {
    if (length(k) == 1)
      k <- rep(k, sum(options))
    check_ko(k, options)
    x <- rpdirichlet(M, k + 1, options)
  }
  if (drop_irrelevant){
    Ab <- Ab_drop_irrelevant(A, b, options)
    A <- Ab$A
    b <- Ab$b
  }
  # Acceptance indicator: rows = constraints, columns = samples.
  accept <- A %*% t(x) <= b
  accept_rate <- rowMeans(accept)
  # Most-often-rejected constraints first.
  o <- order(accept_rate, decreasing = FALSE)
  # drop = FALSE keeps A a matrix even when only one constraint remains
  # (the original `A[o,]` silently collapsed it to a vector).
  list("A" = A[o, , drop = FALSE], "b" = b[o], "accept_rate" = accept_rate[o])
}
### drop constraints: 0<p<1
# Remove inequality rows that every probability vector satisfies anyway:
#   * theta[j] <= 1   (single +1 coefficient, b == 1)
#   * theta[j] >= 0   (single -1 coefficient, b == 0)
#   * sum over one category system <= 1 (all-ones on that block, b == 1;
#     only detected when `options` is supplied)
Ab_drop_irrelevant <- function(A, b, options){
  n_par <- ncol(A)
  is_upper <- apply(A, 1, function(row) sum(row == 0) == n_par - 1 && sum(row == 1) == 1) & b == 1
  is_lower <- apply(A, 1, function(row) sum(row == 0) == n_par - 1 && sum(row == -1) == 1) & b == 0
  is_sum1 <- rep(FALSE, nrow(A))
  if (!missing(options)){
    for (i in seq_along(options)){
      # Columns belonging to category system i (each system contributes
      # options[i] - 1 free parameters).
      offset <- sum(options[seq(0, i - 1)] - 1)
      cols <- offset + seq(options[i] - 1)
      hit <- b == 1 &
        apply(A[, cols, drop = FALSE] == 1, 1, all) &
        apply(A[, -cols, drop = FALSE] == 0, 1, all)
      is_sum1 <- is_sum1 | hit
    }
  }
  keep <- !is_upper & !is_lower & !is_sum1
  list(A = A[keep, , drop = FALSE], b = b[keep])
}
# ### split A into block-diagonal matrices => allows to compute encompassing BF independently
# Ab_split <- function(A, b){
#
# order1 <- order(-abs(A1[,1]))
# A2 <- A1[order1,]
# b2 <- b1[order1]
# for (i in 2:ncol(A2) ){
# sel <- apply(A2[,seq(1, i - 1), drop = FALSE] == 0, 1, all)
# if (any(sel)){
# orderi <- order(-abs(A2[sel,i]))
# A2[sel,] <- A2[sel,,drop=FALSE][orderi,]
# b2[sel] <- b2[sel][orderi]
# }
# }
# list(cbind(A2, .....b2 = b2))
# }
|
/multinomineq/R/sort_inequalities.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 3,767
|
r
|
#' Sort Inequalities by Acceptance Rate
#'
#' Uses samples from the prior/posterior to order the inequalities by the acceptance rate.
#'
#' @inheritParams inside
#' @param k optional: number of observed frequencies (only for posterior sampling).
#' @param options optional: number of options per item type/category system.
#' Uniform sampling on [0,1] for each parameter is used if omitted.
#' @param M number of samples.
#' @param drop_irrelevant whether to drop irrelevant constraints for probabilities such as
#' \code{theta[1] >= 0}, \code{theta[1] <= 1}, or \code{sum(theta) <= 1}.
#' @details
#'
#' Those constraints that are rejected most often are placed at the first positions.
#' This can help when computing the encompassing Bayes factor and counting how many samples
#' satisfy the constraints (e.g., \code{\link{count_binom}} or \code{\link{bf_multinom}}).
#' Essentially, it becomes more likely that the while-loop for testing
#' whether the inequalities hold can stop earlier, thus making the computation faster.
#'
#' The function could also be helpful to improve the efficiency of the stepwise
#' sampling implemented in \code{\link{count_binom}} and \code{\link{count_multinom}}.
#' First, one can use accept-reject sampling to test the first few, rejected
#' inequalities. Next, one can use a Gibbs sampler to draw samples conditional on the
#' first constraints.
#'
#'
#' @examples
#' ### Binomial probabilities
#' b <- c(0,0,.30,.70, 1)
#' A <- matrix(c(-1,1,0, # p1 >= p2
#' 0,1,-1, # p2 <= p3
#' 1,0,0, # p1 <=.30
#' 0,1,0, # p2 <= .70
#' 0,0,1), # p3 <= 1 (redundant)
#' ncol = 3, byrow = TRUE)
#' Ab_sort(A, b)
#'
#'
#' ### Multinomial probabilities
#' # prior sampling:
#' Ab_sort(A, b, options = 4)
#' # posterior sampling:
#' Ab_sort(A, b, k = c(10,3, 2, 14), options = 4)
#'
#' @export
Ab_sort <- function (A, b, k = 0, options, M = 1000, drop_irrelevant = TRUE){
  check_Ab(A, b)
  S <- ncol(A)
  # Draw M parameter samples: uniform on [0,1]^S when no category structure
  # is given, otherwise from the (product-)Dirichlet prior/posterior.
  if (missing(options)){
    x <- matrix(runif(M * S), M, S)
  } else {
    if (length(k) == 1)
      k <- rep(k, sum(options))
    check_ko(k, options)
    x <- rpdirichlet(M, k + 1, options)
  }
  if (drop_irrelevant){
    Ab <- Ab_drop_irrelevant(A, b, options)
    A <- Ab$A
    b <- Ab$b
  }
  # Acceptance indicator: rows = constraints, columns = samples.
  accept <- A %*% t(x) <= b
  accept_rate <- rowMeans(accept)
  # Most-often-rejected constraints first.
  o <- order(accept_rate, decreasing = FALSE)
  # drop = FALSE keeps A a matrix even when only one constraint remains
  # (the original `A[o,]` silently collapsed it to a vector).
  list("A" = A[o, , drop = FALSE], "b" = b[o], "accept_rate" = accept_rate[o])
}
### drop constraints: 0<p<1
# Remove inequality rows that every probability vector satisfies anyway:
#   * theta[j] <= 1   (single +1 coefficient, b == 1)
#   * theta[j] >= 0   (single -1 coefficient, b == 0)
#   * sum over one category system <= 1 (all-ones on that block, b == 1;
#     only detected when `options` is supplied)
Ab_drop_irrelevant <- function(A, b, options){
  n_par <- ncol(A)
  is_upper <- apply(A, 1, function(row) sum(row == 0) == n_par - 1 && sum(row == 1) == 1) & b == 1
  is_lower <- apply(A, 1, function(row) sum(row == 0) == n_par - 1 && sum(row == -1) == 1) & b == 0
  is_sum1 <- rep(FALSE, nrow(A))
  if (!missing(options)){
    for (i in seq_along(options)){
      # Columns belonging to category system i (each system contributes
      # options[i] - 1 free parameters).
      offset <- sum(options[seq(0, i - 1)] - 1)
      cols <- offset + seq(options[i] - 1)
      hit <- b == 1 &
        apply(A[, cols, drop = FALSE] == 1, 1, all) &
        apply(A[, -cols, drop = FALSE] == 0, 1, all)
      is_sum1 <- is_sum1 | hit
    }
  }
  keep <- !is_upper & !is_lower & !is_sum1
  list(A = A[keep, , drop = FALSE], b = b[keep])
}
# ### split A into block-diagonal matrices => allows to compute encompassing BF independently
# Ab_split <- function(A, b){
#
# order1 <- order(-abs(A1[,1]))
# A2 <- A1[order1,]
# b2 <- b1[order1]
# for (i in 2:ncol(A2) ){
# sel <- apply(A2[,seq(1, i - 1), drop = FALSE] == 0, 1, all)
# if (any(sel)){
# orderi <- order(-abs(A2[sel,i]))
# A2[sel,] <- A2[sel,,drop=FALSE][orderi,]
# b2[sel] <- b2[sel][orderi]
# }
# }
# list(cbind(A2, .....b2 = b2))
# }
|
# Multi-step (1..10-step-ahead) ARIMA prediction by repeated substitution:
# a model is fit with fixed `coefficients`, then the 1-step design matrix is
# iteratively rewritten so that observed lags are replaced by earlier
# predictions (and residual terms by 0/NA) for steps 2..10.
#
# order:              c(p, d, q) as in stats::arima (p = order[1],
#                     d = order[2], q = order[3]).
# coefficients:       fixed coefficient vector passed to arima(); layout is
#                     (AR..., MA..., intercept[, regressor]) depending on
#                     p, q, d and external_regressor.
# external_regressor: TRUE to include the global `Regressor` series as an
#                     xreg term.  No default — must always be supplied.
#
# Returns list(residuals = <list of 10 series>, predictions = <list of 10>).
#
# NOTE(review): reads the globals `ts`, `observed` and `Regressor` rather
# than taking them as arguments — confirm they are set before calling.
# NOTE(review): `lag(observed, 1)` is stats::lag, which on a plain numeric
# vector only shifts the time attribute, not the values — verify that the
# intended one-step shift actually happens in the d > 0 branches.
multistepARIMA <- function(order, coefficients, external_regressor){
  # Abort to top level via a non-API .Internal call (discouraged; used when
  # there is nothing to optimize).
  exit <- function() {
    .Internal(.invokeRestart(list(NULL, NULL), NULL))
  }
  # Build the 1-step design matrix X (intercept, p AR lags, q MA residual
  # lags[, regressor]) and compute 1-step predictions/residuals.
  step1 <- function(ts, step1res){
    ######## If mean is not zero
    # NOTE(review): this branch tests mean(...) > tol while step_n() tests
    # abs(mean(...)) > tol — confirm the asymmetry is intended.
    if (mean(ts, na.rm = T) > tol){
      ts <- ts - mean(ts, na.rm = TRUE)
    }
    X <- matrix(ncol = (p + q + 1 + length(REG)), ## Number of terms
                nrow = length(observed)
    )
    if (external_regressor == FALSE){
      if ((p > 0) & (q > 0)){
        for (i in ((r+nstep+d):length(observed))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          eps <- step1res[(i - 1):(i-q)]
          X[i, ] <- c(1, y, eps) #X[(i-r-d), ]
        }
      } else if ((p > 0) & (q == 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          X[i, ] <- c(1, y)
        }
      } else if ((p == 0) & (q > 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          eps <- step1res[(i - 1 + d + d):(i-q + d + d)]
          X[i, ] <- c(1, eps)
        }
      } else{
        # Pure intercept model.
        X[] <- 1
      }
    } else {
      #### With regressor
      # Same three cases as above, with the regressor value appended.
      if ((p > 0) & (q > 0)){
        for (i in ((r+nstep+d):length(observed))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          eps <- step1res[(i - 1):(i-q)]
          X[i, ] <- c(1, y, eps, Regressor[i])
        }
      } else if ((p > 0) & (q == 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          X[i, ] <- c(1, y, Regressor[i])
        }
      } else if ((p == 0) & (q > 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          eps <- step1res[(i - 1 + d + d):(i-q + d + d)]
          X[i, ] <- c(1, eps, Regressor[i])
        }
      } else{
        X[] <- 1
      }
    }
    # Linear prediction; for d > 0 the model predicts differences, so the
    # lagged observation is added back (see lag() NOTE above).
    pred <- X %*% delta
    if (d > 0){
      pred_ <- pred
      pred <- pred + lag(observed, 1)
      res <- observed - pred
    } else{
      res <- observed - pred
      pred_ <- NULL
    }
    return(list(pred,
                res,
                X,
                pred_))
  }
  # Rewrite the design matrix for step `nstep` >= 2: AR columns are fed the
  # previous step's predictions, MA residual columns become 0 (unknown
  # future residuals), and the leading rows without enough history become NA.
  step_n <- function(priorPred, priorRes, nstep, X){## might not need ts or step1res
    ######## If mean is not zero
    if (abs(mean(priorPred, na.rm = TRUE)) > tol){
      priorPred <- priorPred - mean(priorPred, na.rm = TRUE)
    }
    if (nstep == 2){
      # Shift predictions one step; residual terms are no longer observed.
      priorPred <-append(NA, priorPred[(1):(length(priorPred)-1)])
      priorRes <- rep(0, length(priorPred))
      temp <- nstep + r - 1 + d
      priorRes[1:temp] <- NA
      if (p > 0){
        X[,2] <- priorPred
        X[temp, ] <- NA
      }
      if (q > 0){
        X[,2+p] <- priorRes
      }
      # NOTE(review): row `temp` is set to NA twice when p > 0 — likely
      # redundant; confirm.
      X[temp, ] <- NA
    }
    else if (nstep > 2){
      # Split X into AR-lag columns (P) and MA-residual columns (Q),
      # each shifted down one row.
      if ((p > 0) && (q > 0)){
        P <- as.matrix(X[,(2:(p+1))])
        Q <- as.matrix(X[,((p+2):(ncol(X)-length(REG)))])
        P <- rbind(rep(NA, ncol(P)), P)
        Q <- rbind(rep(NA, ncol(Q)), Q)
      } else if ((p == 0) && (q > 0)){
        P <- NULL
        Q <- as.matrix(X[,((p+2):(ncol(X)-length(REG)))])
        Q <- rbind(rep(NA, ncol(Q)), Q)
      } else if ((p > 0) && (q == 0)){
        P <- as.matrix(X[,(2:(p+1))])
        P <- rbind(rep(NA, ncol(P)), P)
        Q <- NULL
      }
      if (p >= 1){
        # Newest AR lag column comes from the previous step's predictions.
        priorPred <- priorPred[1:(length(priorPred)-1)]
        priorPred <- append(NA, priorPred)
        if (p == 1){
          P1 <- NULL
          P2 <- NULL
        } else if (nstep >= p){
          P1 <- as.matrix(P[(1:(nrow(P)-1)),(1:(p-1))])
          P2 <- NULL
        } else {
          P1 <- as.matrix(P[(1:(nrow(P)-1)),(1:(nstep-2))]) ## This will be shifted
          P2 <- as.matrix(P[2:nrow(P), (nstep-2+1):(ncol(P)-1)])
        }
      } else{
        P1 <- NULL
        P2 <- NULL
        priorPred <- NULL
      }
      if (q >= 1){
        # Future residuals are unknown: zeros, NA where history is short.
        priorRes <- rep(0, nrow(X))#priorRes[1:(length(priorRes)-1)]
        priorRes[1:(r+nstep-1+d)] <- NA
        if (q == 1){
          Q1 <- NULL
          Q2 <- NULL
        } else if (nstep >= q){
          Q1 <- as.matrix(Q[(1:(nrow(Q)-1)),1:(q - 1)]) ## This will be shifted
          Q2 <- NULL
        } else{
          Q1 <- as.matrix(Q[(1:(nrow(Q)-1)),(1:(nstep-2))]) ## This will be shifted
          Q2 <- as.matrix(Q[2:nrow(Q), (nstep-2+1):(ncol(Q)-1)])
        }
      } else{
        Q1 <- NULL
        Q2 <- NULL
        priorRes <- NULL
      }
      # Reassemble the design matrix (NULL pieces drop out of cbind).
      X <- cbind(1,
                 priorPred,
                 P1,
                 P2,
                 priorRes,
                 Q1,
                 Q2,
                 Regressor)
      X[1:(nstep+r-1+d),] <- NA
    }
    pred <- X %*% delta
    if (d > 0){
      pred_ <- pred
      pred <- pred + lag(observed,1)
      res <- observed-pred
    } else{
      res <- observed-pred
      pred_ <- NULL
    }
    return(list(pred,
                res,
                X,
                pred_))
  }
  # ---- main body -----------------------------------------------------
  tol <- 1e-4
  p <- order[1]
  q <- order[3]
  d <- order[2]
  r <- max(p, q)
  # Fit with fixed coefficients just to obtain the 1-step residual series.
  if(external_regressor == FALSE){
    model <- arima(x = ts,order = order, fixed = coefficients)
    REG <- NULL
    Regressor <- NULL
  }else{
    model <- arima(x = ts,order = order, fixed = coefficients, xreg = Regressor)
    REG <- coefficients[length(coefficients)]
    Regressor <- Regressor
  }
  step1res <- residuals(model)
  # Split the coefficient vector into its AR and MA parts.
  if ((p > 0) && (q > 0)){
    AR <- coefficients[1:p]
    MA <- coefficients[(p+1) : (p + q)]
  }else if ((p == 0) && (q > 0)){
    AR <- NULL
    MA <- coefficients[(p+1) : (q + p)]
  }else if ((p > 0) && (q == 0)){
    AR <- coefficients[1:p]
    MA <- NULL
  }else{
    AR <- NULL
    MA <- NULL
    print("No parameters to optimize for")
    exit()
  }
  # arima() reports no intercept when d > 0.
  if (d > 0){
    I <- 0
  } else{
    I <- as.numeric(coefficients[(p + q + 1)])
  }
  # Column vector of coefficients matching the design-matrix layout.
  delta <- as.matrix(c(interc = I, AR, MA, REG))
  ## 1-step prediction and getting treated time series and delta
  nstep <- 1
  output <- step1(ts, step1res)
  pred <- unlist(output[1])
  res <- unlist(output[2])
  X <- matrix(unlist(output[3]), ncol = (p + q + 1 + length(REG)), nrow = length(ts))
  if (d > 0){ pred_ <- unlist(output[4]) } ## pred_ is used for iteration when d > 0
  res_list<- list()
  res_list[[1]] <- res
  pred_list <- list()
  pred_list[[1]] <- pred
  # Steps 2..10: feed each step's predictions back into the next design.
  for (i in 2:10){
    nstep <- i
    if (d > 0){
      output <- step_n(priorPred = pred_, priorRes = res, nstep = nstep, X = X)
      pred_ <- unlist(output[4])
    } else{
      output <- step_n(priorPred = pred, priorRes = res, nstep = nstep, X = X)
    }
    pred <- unlist(output[1])
    res <- unlist(output[2])
    X <- matrix(unlist(output[3]), ncol = (p + q + 1 + length(REG)), nrow = length(observed))
    res_list[[i]] <- res
    pred_list[[i]] <- pred
  }
  return(list(residuals = res_list, predictions = pred_list))
}
# Objective for coefficient optimisation: weighted mean squared multi-step
# residual for a fixed ARIMA order and candidate coefficients `pars`.
# Step j's residuals get linearly decaying weight (k - j + 1), i.e. step 1
# weighs most; the weighted sums across steps are squared and averaged.
#
# pars:               candidate coefficient vector for multistepARIMA().
# order:              c(p, d, q) ARIMA order.
# external_regressor: forwarded to multistepARIMA(); defaults to FALSE
#                     (the original omitted it entirely, which always
#                     errored since multistepARIMA has no default).
compute_arime_mse=function(pars, order, external_regressor = FALSE){
  output <- multistepARIMA(order = order, coefficients = pars,
                           external_regressor = external_regressor)
  k <- length(output$residuals)        # number of prediction steps (10)
  n <- length(output$residuals[[1]])   # length of each residual series
  # One row per prediction step, one column per time point.  (The original
  # used ncol = length(unlist(...)), which recycled the data into a
  # scrambled matrix, and hard-coded the series length 19781.)
  x <- matrix(unlist(output$residuals), nrow = k, ncol = n, byrow = TRUE)
  # Linearly decaying weights: step 1 -> k, step k -> 1.
  w <- k:1
  # Weighted residual sum at each time point (NAs propagate, as before).
  e <- as.vector(w %*% x)
  mean(e^2, na.rm = TRUE)
}
# Script: fit ARIMA(1,1,1) models to the training series, with and without
# the external regressor, and compare multi-step MSEs.
# NOTE(review): paths are relative to the script's location — confirm the
# working directory before running.
TS <- read.csv("../Data/Training data/s1_training.txt", header = TRUE, sep = "\t")
Regressor <- read.csv("../Data/Training data/d_training.txt", header = TRUE, sep = "\t")
TS <- TS$Value
Regressor <- Regressor$Value
# `ts` and `observed` are globals consumed by multistepARIMA() above; note
# that `ts` shadows the base constructor stats::ts().
ts <- TS
observed <- TS
# s <- seq(from = 0, to = 4, by = 2)
# orders <- expand.grid(s, 0:2, s)
# order <- c(0,2,4)
# n <- order[1]+order[3]
# if (order[2] == 0){n <- n+1}
# pars <- vector(length = n)
# pars <- rnorm(n = n, mean = 0, sd = 1)
# coefficients = pars
# coefficients = pars
# output <- multistepARIMA(order, coefficients = pars, regressor??/)
#Problems with c(0,1,8)
# (0, 2, 8)
order <- c(1,1,1) ## add fourth dimention
# Fit with the external regressor and score the 1..10-step residuals.
model <- arima(ts, order = order, xreg = Regressor)
coefficients = model$coef
output <- multistepARIMA(order, coefficients = coefficients, external_regressor = TRUE)
mse_regressed <- mean(unlist(output$residuals)^2, na.rm = T)
# Refit without the regressor for comparison.
model <- arima(ts, order = order)
coefficients = model$coef
output <- multistepARIMA(order, coefficients = coefficients, external_regressor = FALSE)
mse <- mean(unlist(output$residuals)^2, na.rm = T)
print(paste("MSE:", mse))
print(paste("MSE Regressed", mse_regressed))
# system.time({
# result <- optim(par = pars, fn = compute_arime_mse, order=order, method = "L-BFGS-B", control = list(maxit = 500))
# })
|
/Code/Coefficient_optimization/neldermead_optimization/Multi_step_predictions/Test/test.R
|
no_license
|
arijoh/DataDrivenForecastModels
|
R
| false
| false
| 8,973
|
r
|
## Multi-step (1..10-step-ahead) ARIMA/ARIMAX prediction with fixed coefficients.
##
## order        : c(p, d, q) in arima() order; note the local mapping below is
##                p <- order[1], d <- order[2], q <- order[3].
## coefficients : fixed coefficient vector laid out like arima()$coef
##                (AR terms, MA terms, intercept, then regressor term if any).
## external_regressor : logical; TRUE uses the global `Regressor` as xreg.
##
## Reads the globals `ts`, `observed` and (optionally) `Regressor`.
## Returns list(residuals = <list of 10 residual vectors, one per horizon>,
##              predictions = <matching list of prediction vectors>).
multistepARIMA <- function(order, coefficients, external_regressor){
  ## Abort helper used when there is nothing to optimise (p == q == 0).
  ## NOTE(review): relies on .Internal(), which is fragile across R versions;
  ## stop() would be the conventional replacement.
  exit <- function() {
    .Internal(.invokeRestart(list(NULL, NULL), NULL))
  }
  ## One-step-ahead prediction: builds the design matrix X row by row from
  ## lagged values (AR part) and the arima() residuals (MA part), then
  ## multiplies by the coefficient column `delta`.
  step1 <- function(ts, step1res){
    ######## If mean is not zero
    if (mean(ts, na.rm = T) > tol){
      ts <- ts - mean(ts, na.rm = TRUE)
    }
    ## One column per term: intercept + p AR + q MA (+ regressor if used).
    X <- matrix(ncol = (p + q + 1 + length(REG)), ## Number of terms
                nrow = length(observed)
    )
    if (external_regressor == FALSE){
      if ((p > 0) & (q > 0)){
        for (i in ((r+nstep+d):length(observed))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]   # p most recent lags
          eps <- step1res[(i - 1):(i-q)]             # q most recent residuals
          X[i, ] <- c(1, y, eps) #X[(i-r-d), ]
        }
      } else if ((p > 0) & (q == 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          X[i, ] <- c(1, y)
        }
      } else if ((p == 0) & (q > 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          eps <- step1res[(i - 1 + d + d):(i-q + d + d)]
          X[i, ] <- c(1, eps)
        }
      } else{
        X[] <- 1   # intercept-only model
      }
    } else {
      #### With regressor: same structure, with the regressor value appended.
      if ((p > 0) & (q > 0)){
        for (i in ((r+nstep+d):length(observed))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          eps <- step1res[(i - 1):(i-q)]
          X[i, ] <- c(1, y, eps, Regressor[i])
        }
      } else if ((p > 0) & (q == 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          y <- ts[(i - 1 - d + d):(i - p - d + d)]
          X[i, ] <- c(1, y, Regressor[i])
        }
      } else if ((p == 0) & (q > 0)){
        for (i in ((r+nstep+d):(length(observed)))){ ## i is the n we are predicting, we need to add d
          eps <- step1res[(i - 1 + d + d):(i-q + d + d)]
          X[i, ] <- c(1, eps, Regressor[i])
        }
      } else{
        X[] <- 1
      }
    }
    pred <- X %*% delta
    if (d > 0){
      ## With differencing, X predicts the differenced series: keep it in
      ## pred_ for iteration and undifference by adding the lagged observed.
      ## NOTE(review): `lag()` here is presumably dplyr::lag(); stats::lag()
      ## on a plain vector would NOT shift values -- confirm what is attached.
      pred_ <- pred
      pred <- pred + lag(observed, 1)
      res <- observed - pred
    } else{
      res <- observed - pred
      pred_ <- NULL
    }
    return(list(pred,
                res,
                X,
                pred_))
  }
  ## n-step-ahead prediction (nstep >= 2): shifts the previous step's
  ## predictions into the AR columns and zeroes the MA columns (future
  ## residuals are unknown), then re-applies delta.
  step_n <- function(priorPred, priorRes, nstep, X){## might not need ts or step1res
    ######## If mean is not zero
    if (abs(mean(priorPred, na.rm = TRUE)) > tol){
      priorPred <- priorPred - mean(priorPred, na.rm = TRUE)
    }
    if (nstep == 2){
      ## Shift prior predictions one step; future residuals are taken as 0,
      ## with NA in the warm-up rows that cannot be predicted.
      priorPred <-append(NA, priorPred[(1):(length(priorPred)-1)])
      priorRes <- rep(0, length(priorPred))
      temp <- nstep + r - 1 + d
      priorRes[1:temp] <- NA
      if (p > 0){
        X[,2] <- priorPred
        X[temp, ] <- NA
      }
      if (q > 0){
        X[,2+p] <- priorRes
      }
      X[temp, ] <- NA
    }
    else if (nstep > 2){
      ## Split X into its AR block (P) and MA block (Q), each padded with a
      ## leading NA row so the columns can be shifted one step forward.
      if ((p > 0) && (q > 0)){
        P <- as.matrix(X[,(2:(p+1))])
        Q <- as.matrix(X[,((p+2):(ncol(X)-length(REG)))])
        P <- rbind(rep(NA, ncol(P)), P)
        Q <- rbind(rep(NA, ncol(Q)), Q)
      } else if ((p == 0) && (q > 0)){
        P <- NULL
        Q <- as.matrix(X[,((p+2):(ncol(X)-length(REG)))])
        Q <- rbind(rep(NA, ncol(Q)), Q)
      } else if ((p > 0) && (q == 0)){
        P <- as.matrix(X[,(2:(p+1))])
        P <- rbind(rep(NA, ncol(P)), P)
        Q <- NULL
      }
      if (p >= 1){
        priorPred <- priorPred[1:(length(priorPred)-1)]
        priorPred <- append(NA, priorPred)
        if (p == 1){
          P1 <- NULL
          P2 <- NULL
        } else if (nstep >= p){
          P1 <- as.matrix(P[(1:(nrow(P)-1)),(1:(p-1))])
          P2 <- NULL
        } else {
          P1 <- as.matrix(P[(1:(nrow(P)-1)),(1:(nstep-2))]) ## This will be shifted
          P2 <- as.matrix(P[2:nrow(P), (nstep-2+1):(ncol(P)-1)])
        }
      } else{
        P1 <- NULL
        P2 <- NULL
        priorPred <- NULL
      }
      if (q >= 1){
        ## Future residuals are unknown: use 0, NA for the warm-up rows.
        priorRes <- rep(0, nrow(X))#priorRes[1:(length(priorRes)-1)]
        priorRes[1:(r+nstep-1+d)] <- NA
        if (q == 1){
          Q1 <- NULL
          Q2 <- NULL
        } else if (nstep >= q){
          Q1 <- as.matrix(Q[(1:(nrow(Q)-1)),1:(q - 1)]) ## This will be shifted
          Q2 <- NULL
        } else{
          Q1 <- as.matrix(Q[(1:(nrow(Q)-1)),(1:(nstep-2))]) ## This will be shifted
          Q2 <- as.matrix(Q[2:nrow(Q), (nstep-2+1):(ncol(Q)-1)])
        }
      } else{
        Q1 <- NULL
        Q2 <- NULL
        priorRes <- NULL
      }
      ## Reassemble the design matrix; NULL parts drop out of cbind().
      X <- cbind(1,
                 priorPred,
                 P1,
                 P2,
                 priorRes,
                 Q1,
                 Q2,
                 Regressor)
      X[1:(nstep+r-1+d),] <- NA   # warm-up rows have no valid prediction
    }
    pred <- X %*% delta
    if (d > 0){
      ## See the note in step1() about lag() on a plain vector.
      pred_ <- pred
      pred <- pred + lag(observed,1)
      res <- observed-pred
    } else{
      res <- observed-pred
      pred_ <- NULL
    }
    return(list(pred,
                res,
                X,
                pred_))
  }
  tol <- 1e-4          # tolerance for treating a series mean as zero
  p <- order[1]        # AR order
  q <- order[3]        # MA order
  d <- order[2]        # differencing order
  r <- max(p, q)
  ## Fit with the supplied coefficients held fixed so that residuals(model)
  ## reflects exactly these parameters (not a re-estimated fit).
  if(external_regressor == FALSE){
    model <- arima(x = ts,order = order, fixed = coefficients)
    REG <- NULL
    Regressor <- NULL
  }else{
    model <- arima(x = ts,order = order, fixed = coefficients, xreg = Regressor)
    REG <- coefficients[length(coefficients)]   # regressor coefficient is last
    Regressor <- Regressor
  }
  step1res <- residuals(model)
  ## Split the coefficient vector into its AR and MA pieces.
  if ((p > 0) && (q > 0)){
    AR <- coefficients[1:p]
    MA <- coefficients[(p+1) : (p + q)]
  }else if ((p == 0) && (q > 0)){
    AR <- NULL
    MA <- coefficients[(p+1) : (q + p)]
  }else if ((p > 0) && (q == 0)){
    AR <- coefficients[1:p]
    MA <- NULL
  }else{
    AR <- NULL
    MA <- NULL
    print("No parameters to optimize for")
    exit()
  }
  ## arima() drops the intercept when d > 0.
  if (d > 0){
    I <- 0
  } else{
    I <- as.numeric(coefficients[(p + q + 1)])
  }
  delta <- as.matrix(c(interc = I, AR, MA, REG))  # full coefficient column
  ## 1-step prediction and getting treated time series and delta
  nstep <- 1
  output <- step1(ts, step1res)
  pred <- unlist(output[1])
  res <- unlist(output[2])
  X <- matrix(unlist(output[3]), ncol = (p + q + 1 + length(REG)), nrow = length(ts))
  if (d > 0){ pred_ <- unlist(output[4]) } ## pred_ is used for iteration when d > 0
  res_list<- list()
  res_list[[1]] <- res
  pred_list <- list()
  pred_list[[1]] <- pred
  ## Iterate horizons 2..10, feeding each step's predictions into the next.
  for (i in 2:10){
    nstep <- i
    if (d > 0){
      output <- step_n(priorPred = pred_, priorRes = res, nstep = nstep, X = X)
      pred_ <- unlist(output[4])
    } else{
      output <- step_n(priorPred = pred, priorRes = res, nstep = nstep, X = X)
    }
    pred <- unlist(output[1])
    res <- unlist(output[2])
    X <- matrix(unlist(output[3]), ncol = (p + q + 1 + length(REG)), nrow = length(observed))
    res_list[[i]] <- res
    pred_list[[i]] <- pred
  }
  return(list(residuals = res_list, predictions = pred_list))
}
compute_arime_mse <- function(pars, order){
  ## Objective function for coefficient optimisation (e.g. via optim()):
  ## weighted mean squared error over the 1..10-step-ahead residuals.
  ##
  ## pars  : candidate coefficient vector, laid out like arima()$coef.
  ## order : c(p, d, q), forwarded to multistepARIMA().
  ## Returns a single numeric score (lower is better).
  ##
  ## NOTE(fix): the original call omitted the required `external_regressor`
  ## argument, which errors as soon as multistepARIMA() evaluates it.
  output <- multistepARIMA(order = order, coefficients = pars,
                           external_regressor = FALSE)
  res <- unlist(output$residuals)
  k <- 10  # number of prediction horizons produced by multistepARIMA()
  ## `res` is the concatenation of k equal-length horizon blocks, so fill
  ## row-wise: row j holds the j-step-ahead residuals for every time point.
  ## NOTE(fix): the original used ncol = length(res) with nrow = 10, which
  ## silently recycled `res` into a 10 x length(res) matrix.
  x <- matrix(res, nrow = k, byrow = TRUE)
  ## Triangular weights: horizon j contributes weight (k - j + 1), so
  ## near-term errors count more than distant ones.  (The unused `under`
  ## normalising constant from the original was dropped.)
  w <- k - seq_len(k) + 1
  ## Weighted sum per time point.  NOTE(fix): the original hard-coded the
  ## series length as 1:19781; the matrix product uses the actual width.
  e <- as.vector(w %*% x)
  mean(e^2, na.rm = TRUE)
}
## --- Driver script -------------------------------------------------------
## Loads the training series and the external regressor, then compares the
## multi-step MSE of an ARIMA(1,1,1) fit with and without the regressor.
## NOTE: multistepARIMA() reads the globals `ts`, `observed` and `Regressor`
## set below.
TS <- read.csv("../Data/Training data/s1_training.txt", header = TRUE, sep = "\t")
Regressor <- read.csv("../Data/Training data/d_training.txt", header = TRUE, sep = "\t")
# Keep only the value columns as plain numeric vectors.
TS <- TS$Value
Regressor <- Regressor$Value
# NOTE(review): `ts` shadows the base constructor stats::ts() -- confusing,
# but multistepARIMA() expects a global with exactly this name.
ts <- TS
observed <- TS
# --- Disabled experiment: random starting coefficients over a grid of orders.
# s <- seq(from = 0, to = 4, by = 2)
# orders <- expand.grid(s, 0:2, s)
# order <- c(0,2,4)
# n <- order[1]+order[3]
# if (order[2] == 0){n <- n+1}
# pars <- vector(length = n)
# pars <- rnorm(n = n, mean = 0, sd = 1)
# coefficients = pars
# coefficients = pars
# output <- multistepARIMA(order, coefficients = pars, regressor??/)
#Problems with c(0,1,8)
# (0, 2, 8)
order <- c(1,1,1) ## add fourth dimention
# Fit WITH the external regressor and score the 1..10-step residuals.
model <- arima(ts, order = order, xreg = Regressor)
coefficients = model$coef
output <- multistepARIMA(order, coefficients = coefficients, external_regressor = TRUE)
mse_regressed <- mean(unlist(output$residuals)^2, na.rm = T)
# Re-fit WITHOUT the regressor for the baseline MSE.
model <- arima(ts, order = order)
coefficients = model$coef
output <- multistepARIMA(order, coefficients = coefficients, external_regressor = FALSE)
mse <- mean(unlist(output$residuals)^2, na.rm = T)
print(paste("MSE:", mse))
print(paste("MSE Regressed", mse_regressed))
# Disabled: numerical optimisation of coefficients against the weighted MSE.
# system.time({
# result <- optim(par = pars, fn = compute_arime_mse, order=order, method = "L-BFGS-B", control = list(maxit = 500))
# })
|
#!/usr/bin/env Rscript
## Plot timing and step-count results for each ODE test problem.
## For every problem in `names`, reads "<name>AllTests.dat" and writes
## "4_<name>Plots.png" with two panels: time-vs-degree and steps-vs-degree
## for the two adaptive methods (KR and Warne).
#setwd('~/Documents/REU /Github Repo/General Solver')
names <- c("VanDerPol", "Flame", "Pendulum")
## NOTE(fix): removed a stray top-level `length(names)` echo.
colKR    <- rgb(0.2, 0.4, 0.1, 0.7)  # method 1 (KR)
colWarne <- rgb(0.8, 0.4, 0.1, 0.7)  # method 2 (Warne)
for (i in seq_along(names)) {        # seq_along() is safe for empty input
  fileName   <- paste0(names[i], "AllTests.dat")
  outputName <- paste0("4_", names[i], "Plots.png")
  allTests <- read.csv(fileName, header = FALSE, sep = " ", dec = ".")
  ## Column 1 labels the method; columns 2-4 are degree, time, step count.
  adaptive1 <- allTests[allTests[[1]] == "Adaptive1", ]
  adaptive2 <- allTests[allTests[[1]] == "Adaptive2", ]
  ## NOTE(fix): the unused `adaptive3` subset was removed.
  png(outputName, width = 1254, height = 614)
  cexLab <- 1.2
  ## Shared y-range so both methods are comparable in the left panel.
  minY <- min(adaptive1[, 3], adaptive2[, 3])
  maxY <- max(adaptive1[, 3], adaptive2[, 3])
  par(mfrow = c(1, 2))
  plot(adaptive1[, 2], adaptive1[, 3], main = "Time vs Degree",
       ylab = "Time(microseconds)", xlab = "Degree", cex.lab = cexLab,
       col = colKR, ylim = c(minY, maxY))
  points(adaptive2[, 2], adaptive2[, 3], pch = 2, col = colWarne)
  legend("topleft",
         c("KR", "Warne"), cex = cexLab, pch = c(1, 2),
         col = c(colKR, colWarne))
  plot(adaptive1[, 2], adaptive1[, 4], main = "Number of Steps vs Degree",
       ylab = "Number of Steps", xlab = "Degree", cex.lab = cexLab,
       col = colKR)
  points(adaptive2[, 2], adaptive2[, 4], pch = 2, col = colWarne)
  legend("topright",
         c("KR", "Warne"), pch = c(1, 2), cex = cexLab,
         col = c(colKR, colWarne))
  dev.off()
}
print("Finished")
|
/General Solver/Plots.R
|
permissive
|
KevinRojas1499-zz/jmu-reu-ode
|
R
| false
| false
| 1,459
|
r
|
#!/usr/bin/env Rscript
## Plot timing and step-count results for each ODE test problem.
## For every problem in `names`, reads "<name>AllTests.dat" and writes
## "4_<name>Plots.png" with two panels: time-vs-degree and steps-vs-degree
## for the two adaptive methods (KR and Warne).
#setwd('~/Documents/REU /Github Repo/General Solver')
names <- c("VanDerPol", "Flame", "Pendulum")
## NOTE(fix): removed a stray top-level `length(names)` echo.
colKR    <- rgb(0.2, 0.4, 0.1, 0.7)  # method 1 (KR)
colWarne <- rgb(0.8, 0.4, 0.1, 0.7)  # method 2 (Warne)
for (i in seq_along(names)) {        # seq_along() is safe for empty input
  fileName   <- paste0(names[i], "AllTests.dat")
  outputName <- paste0("4_", names[i], "Plots.png")
  allTests <- read.csv(fileName, header = FALSE, sep = " ", dec = ".")
  ## Column 1 labels the method; columns 2-4 are degree, time, step count.
  adaptive1 <- allTests[allTests[[1]] == "Adaptive1", ]
  adaptive2 <- allTests[allTests[[1]] == "Adaptive2", ]
  ## NOTE(fix): the unused `adaptive3` subset was removed.
  png(outputName, width = 1254, height = 614)
  cexLab <- 1.2
  ## Shared y-range so both methods are comparable in the left panel.
  minY <- min(adaptive1[, 3], adaptive2[, 3])
  maxY <- max(adaptive1[, 3], adaptive2[, 3])
  par(mfrow = c(1, 2))
  plot(adaptive1[, 2], adaptive1[, 3], main = "Time vs Degree",
       ylab = "Time(microseconds)", xlab = "Degree", cex.lab = cexLab,
       col = colKR, ylim = c(minY, maxY))
  points(adaptive2[, 2], adaptive2[, 3], pch = 2, col = colWarne)
  legend("topleft",
         c("KR", "Warne"), cex = cexLab, pch = c(1, 2),
         col = c(colKR, colWarne))
  plot(adaptive1[, 2], adaptive1[, 4], main = "Number of Steps vs Degree",
       ylab = "Number of Steps", xlab = "Degree", cex.lab = cexLab,
       col = colKR)
  points(adaptive2[, 2], adaptive2[, 4], pch = 2, col = colWarne)
  legend("topright",
         c("KR", "Warne"), pch = c(1, 2), cex = cexLab,
         col = c(colKR, colWarne))
  dev.off()
}
print("Finished")
|
# Bar plot of word frequency by rank for the "textwords" dataset.
#data <- as.matrix(read.table("redirectsData",header=TRUE, sep=","))
data <- read.table("textwords",header=FALSE,sep=",")
# barplot() needs a matrix (one bar per cell with beside=TRUE), hence as.matrix().
barplot(as.matrix(data), main="Text Words", beside=TRUE,col="blue",ylab="Word Frequency",xlab="Word Rank")
|
/a3/documentation/text.R
|
permissive
|
jmcco018/cs851-s15
|
R
| false
| false
| 229
|
r
|
# Bar plot of word frequency by rank for the "textwords" dataset.
#data <- as.matrix(read.table("redirectsData",header=TRUE, sep=","))
data <- read.table("textwords",header=FALSE,sep=",")
# barplot() needs a matrix (one bar per cell with beside=TRUE), hence as.matrix().
barplot(as.matrix(data), main="Text Words", beside=TRUE,col="blue",ylab="Word Frequency",xlab="Word Rank")
|
\name{inventdummy}
\alias{invent.mxn}
\title{Functions to generate dummy datasets}
\description{
Functions to generate dummy (synthetic) data.
}
\usage{
invent.mxn(m,n=5,d=1,p,f2="random")
}
\arguments{
\item{m}{number of groups or samples}
\item{n}{number of observations in each sample}
\item{d}{digits for rounding the result}
\item{p}{mean and sd for random or min and max for step}
\item{f2}{select step or random}
}
\details{
Generates random or step
if(f2=="random") x[,i] <- rnorm(n,p[i,1],p[i,2])
if(f2=="step") x[,i] <- seq(p[i,1],p[i,2],(p[i,2]-p[i,1])/(n-1))
For one-way analysis, you can use function invent.mxn to generate different datasets and see how the results are less significant as you use means that are more similar and increase within sample standard deviation. For two-way analysis, you can use function invent.mxn to generate different datasets and see how the results are less significant as you cluster the groups and increase the "within" range.
}
\value{
\item{x}{generated data as a matrix}
}
\references{
Acevedo M.F. 2013. "Data Analysis and Statistics for Geography, Environmental Science, and Engineering", CRC Press.
}
\author{
Miguel F. Acevedo \email{Acevedo@unt.edu}
}
\note{
Input files are in 'datafiles.zip' in directory 'datafiles' and organized by chapters of Acevedo (2013).
}
\seealso{
\code{\link{rnorm}}, \code{\link{matrix}}, \code{\link{aov}}, \code{\link{factor}}, \code{\link{boxplot}}
}
\examples{
m<-4; n<-5
p <- matrix(c(30,1,32,1,34,1,38,1),byrow=TRUE,ncol=2)
Xr <- invent.mxn(m,n,d=1,p,f2="random")
y <- c(Xr)
f <- factor(rep(LETTERS[1:m], rep(n,m)))
f.y <- data.frame(f, y)
boxplot(y~f, data=f.y,ylab="y", xlab="f")
summary(aov(y~f, data=f.y))
}
\keyword{datagen}
\keyword{univar}
|
/man/invent.functions.Rd
|
no_license
|
cran/seeg
|
R
| false
| false
| 1,738
|
rd
|
\name{inventdummy}
\alias{invent.mxn}
\title{Functions to generate dummy datasets}
\description{
Functions to generate dummy (synthetic) data.
}
\usage{
invent.mxn(m,n=5,d=1,p,f2="random")
}
\arguments{
\item{m}{number of groups or samples}
\item{n}{number of observations in each sample}
\item{d}{digits for rounding the result}
\item{p}{mean and sd for random or min and max for step}
\item{f2}{select step or random}
}
\details{
Generates random or step
if(f2=="random") x[,i] <- rnorm(n,p[i,1],p[i,2])
if(f2=="step") x[,i] <- seq(p[i,1],p[i,2],(p[i,2]-p[i,1])/(n-1))
For one-way analysis, you can use function invent.mxn to generate different datasets and see how the results are less significant as you use means that are more similar and increase within sample standard deviation. For two-way analysis, you can use function invent.mxn to generate different datasets and see how the results are less significant as you cluster the groups and increase the "within" range.
}
\value{
\item{x}{generated data as a matrix}
}
\references{
Acevedo M.F. 2013. "Data Analysis and Statistics for Geography, Environmental Science, and Engineering", CRC Press.
}
\author{
Miguel F. Acevedo \email{Acevedo@unt.edu}
}
\note{
Input files are in 'datafiles.zip' in directory 'datafiles' and organized by chapters of Acevedo (2013).
}
\seealso{
\code{\link{rnorm}}, \code{\link{matrix}}, \code{\link{aov}}, \code{\link{factor}}, \code{\link{boxplot}}
}
\examples{
m<-4; n<-5
p <- matrix(c(30,1,32,1,34,1,38,1),byrow=TRUE,ncol=2)
Xr <- invent.mxn(m,n,d=1,p,f2="random")
y <- c(Xr)
f <- factor(rep(LETTERS[1:m], rep(n,m)))
f.y <- data.frame(f, y)
boxplot(y~f, data=f.y,ylab="y", xlab="f")
summary(aov(y~f, data=f.y))
}
\keyword{datagen}
\keyword{univar}
|
# Install and load packages
# ---------------------------------------------------------------------------
# MEPS summary table: weighted percent of persons with any expense
# (TOTEXP > 0), by age group and race/ethnicity, via the survey package.
# NOTE(review): this file is a code TEMPLATE -- tokens such as `.yy.`,
# `.year.` and `.FYC.` are placeholders substituted with a concrete survey
# year before the script is executed; it is not runnable as-is.
# ---------------------------------------------------------------------------
package_names <- c("survey","dplyr","foreign","devtools")
# Install anything missing, then attach everything.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata that contain a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonise design / weight variable names across survey years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative ages are missing-data codes; AGELAST = latest non-missing age.
# NOTE(review): mutate_at()/funs() are deprecated in current dplyr
# (use across()); kept unchanged here.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1   # constant indicator column (whole-population group)
# Race / ethnicity
# Starting in 2012, RACETHX replaced RACEX;
if(year >= 2012){
  FYC <- FYC %>%
    mutate(white_oth=F,
           hisp = (RACETHX == 1),
           white = (RACETHX == 2),
           black = (RACETHX == 3),
           native = (RACETHX > 3 & RACEV1X %in% c(3,6)),
           asian = (RACETHX > 3 & RACEV1X %in% c(4,5)))
}else if(year >= 2002){
  FYC <- FYC %>%
    mutate(white_oth=0,
           hisp = (RACETHNX == 1),
           white = (RACETHNX == 4 & RACEX == 1),
           black = (RACETHNX == 2),
           native = (RACETHNX >= 3 & RACEX %in% c(3,6)),
           asian = (RACETHNX >= 3 & RACEX %in% c(4,5)))
}else{
  # Pre-2002 files cannot distinguish white from 'other'.
  FYC <- FYC %>%
    mutate(
      hisp = (RACETHNX == 1),
      black = (RACETHNX == 2),
      white_oth = (RACETHNX == 3),
      white = 0,native=0,asian=0)
}
# Collapse the mutually exclusive indicators into one labelled factor code.
FYC <- FYC %>% mutate(
  race = 1*hisp + 2*white + 3*black + 4*native + 5*asian + 9*white_oth,
  race = recode_factor(race, .default = "Missing", .missing = "Missing",
                       "1" = "Hispanic",
                       "2" = "White",
                       "3" = "Black",
                       "4" = "Amer. Indian, AK Native, or mult. races",
                       "5" = "Asian, Hawaiian, or Pacific Islander",
                       "9" = "White and other"))
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
                       breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
                       labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
                           breaks = c(-1, 17.5 ,64.5, Inf),
                           labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
                           breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
                           labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
                                      "30-34", "35-44", "45-54", "55-64", "65+")))
# Complex survey design: PSU nested within stratum, person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Weighted percent of persons with any expense, by age group x race.
results <- svyby(~(TOTEXP.yy. > 0), FUN = svymean, by = ~agegrps + race, design = FYCdsgn)
print(results)
|
/mepstrends/hc_use/json/code/r/pctEXP__agegrps__race__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false
| false
| 2,893
|
r
|
# Install and load packages
# ---------------------------------------------------------------------------
# MEPS summary table: weighted percent of persons with any expense
# (TOTEXP > 0), by age group and race/ethnicity, via the survey package.
# NOTE(review): this file is a code TEMPLATE -- tokens such as `.yy.`,
# `.year.` and `.FYC.` are placeholders substituted with a concrete survey
# year before the script is executed; it is not runnable as-is.
# ---------------------------------------------------------------------------
package_names <- c("survey","dplyr","foreign","devtools")
# Install anything missing, then attach everything.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata that contain a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonise design / weight variable names across survey years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative ages are missing-data codes; AGELAST = latest non-missing age.
# NOTE(review): mutate_at()/funs() are deprecated in current dplyr
# (use across()); kept unchanged here.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1   # constant indicator column (whole-population group)
# Race / ethnicity
# Starting in 2012, RACETHX replaced RACEX;
if(year >= 2012){
  FYC <- FYC %>%
    mutate(white_oth=F,
           hisp = (RACETHX == 1),
           white = (RACETHX == 2),
           black = (RACETHX == 3),
           native = (RACETHX > 3 & RACEV1X %in% c(3,6)),
           asian = (RACETHX > 3 & RACEV1X %in% c(4,5)))
}else if(year >= 2002){
  FYC <- FYC %>%
    mutate(white_oth=0,
           hisp = (RACETHNX == 1),
           white = (RACETHNX == 4 & RACEX == 1),
           black = (RACETHNX == 2),
           native = (RACETHNX >= 3 & RACEX %in% c(3,6)),
           asian = (RACETHNX >= 3 & RACEX %in% c(4,5)))
}else{
  # Pre-2002 files cannot distinguish white from 'other'.
  FYC <- FYC %>%
    mutate(
      hisp = (RACETHNX == 1),
      black = (RACETHNX == 2),
      white_oth = (RACETHNX == 3),
      white = 0,native=0,asian=0)
}
# Collapse the mutually exclusive indicators into one labelled factor code.
FYC <- FYC %>% mutate(
  race = 1*hisp + 2*white + 3*black + 4*native + 5*asian + 9*white_oth,
  race = recode_factor(race, .default = "Missing", .missing = "Missing",
                       "1" = "Hispanic",
                       "2" = "White",
                       "3" = "Black",
                       "4" = "Amer. Indian, AK Native, or mult. races",
                       "5" = "Asian, Hawaiian, or Pacific Islander",
                       "9" = "White and other"))
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
                       breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
                       labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
                           breaks = c(-1, 17.5 ,64.5, Inf),
                           labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
                           breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
                           labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
                                      "30-34", "35-44", "45-54", "55-64", "65+")))
# Complex survey design: PSU nested within stratum, person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Weighted percent of persons with any expense, by age group x race.
results <- svyby(~(TOTEXP.yy. > 0), FUN = svymean, by = ~agegrps + race, design = FYCdsgn)
print(results)
|
### grab TROPOMI SIF downscaled by Alex Turner,
# written by DW, 10/24/2019
# based on Alex's latest SIF file, need to convert SIF from vectors to grids
# return a data.frame
# 02/04/2020, DW, calculate the mean SIF of multiple TSIF files if nhrs != NULL
#
# tsif.path : directory searched recursively for downscaled TROPOMI SIF *.nc
# timestr   : YYYYMMDD string selecting the file(s)
# minlon/maxlon/minlat/maxlat : cropping box in degrees
# nhrs      : if non-NULL, average all files in the nhrs-hour window from timestr
# form      : 'data.frame' or 'raster' (default) -- return type
# Depends on project helpers strsplit.to.df(), get.doy.from.timestr(),
# df2raster(), and on dplyr being attached (for %>% / filter / left_join).
grab.tsif <- function(tsif.path, timestr, minlon, maxlon, minlat, maxlat,
                      nhrs = NULL, form = c('data.frame', 'raster')[2]) {
  library(ncdf4)

  # NOTE(fix): anchor the pattern -- '.nc' as a regex matches any character
  # followed by 'nc' anywhere in the name (e.g. would match 'Xnc_old.txt').
  tsif.files <- list.files(path = tsif.path, pattern = '\\.nc$',
                           recursive = T, full.names = T)

  # The grid-definition file maps vector ids back onto a lon/lat grid.
  mask.file  <- tsif.files[basename(tsif.files) == 'SIFgrid.nc']
  tsif.files <- tsif.files[basename(tsif.files) != 'SIFgrid.nc']

  cat('grab.tsif(): Read mask file...\n')
  mask.dat <- nc_open(mask.file)
  mask.lat <- ncvar_get(mask.dat, 'lat')
  mask.lon <- ncvar_get(mask.dat, 'lon')
  mask.id  <- ncvar_get(mask.dat, 'id')

  # Expand to every lon/lat cell; row order matches the id numbering.
  mask.grd <- expand.grid(lon = as.numeric(mask.lon), lat = as.numeric(mask.lat))
  mask.grd$id <- 1:nrow(mask.grd)
  sel.grd <- mask.grd %>% filter(id >= min(mask.id), id <= max(mask.id))

  # The timestamp is the 3rd '_'-separated token of the file name
  # (NOTE(fix): anchored '\\.nc$' here too when stripping the extension).
  tsif.timestr <- paste0(strsplit.to.df(gsub('\\.nc$', '', basename(tsif.files)))$V3)

  if (!is.null(nhrs)) {  # average every file inside the nhrs window
    time.info <- get.doy.from.timestr(paste0(timestr, '00'), nday = 1, nhrs = nhrs)
    tsif.file <- tsif.files[tsif.timestr >= substr(time.info$min.timestr, 1, 8) &
                            tsif.timestr <= substr(time.info$max.timestr, 1, 8)]
  } else tsif.file <- tsif.files[tsif.timestr == timestr]

  if (length(tsif.file) == 0) {
    cat(paste('grab.tsif(): No downscaled TROPOMI SIF file found for', timestr,
              ', please check...\n'))
    return(NULL)
  }

  cat(paste('grab.tsif(): Need to process', length(tsif.file),
            'file(s)...it takes a while\n'))

  # 92 files == a full JJA season; cache its mean as an RDS next to the data.
  rds.file <- file.path(dirname(tsif.file[1]), 'TSIF_JJA_mean.rds')
  # NOTE(fix): use scalar && rather than elementwise & in this if() condition.
  if (length(tsif.file) == 92 && file.exists(rds.file)) {
    tsif.mean.df <- readRDS(rds.file)
  } else {
    # Sum the SIF fields over all selected files...
    tsif.sum <- list(ncvar_get(nc_open(tsif.file[1]), 'SIF'))
    if (length(tsif.file) > 1) {
      for (t in 2:length(tsif.file)) {
        print(t)
        tsif.tmp <- list(ncvar_get(nc_open(tsif.file[t]), 'SIF'))
        tsif.sum <- mapply('+', tsif.sum, tsif.tmp, SIMPLIFY = FALSE)
      } # end for t
    }
    # ...then divide by the file count for the mean.
    # NOTE(fix): the original skipped the unlist/divide step when only ONE
    # file was selected, leaving `tsif.mean` as a list instead of a vector;
    # the expression below is correct for any number of files.
    cat('grab.tsif: Cal-ing the mean SIF from multiple files...\n')
    tsif.mean <- as.numeric(unlist(tsif.sum)) / length(tsif.file)

    # Attach the vector ids so values can be joined back onto the grid.
    tsif.mean.df <- data.frame(SIF = tsif.mean, id = as.numeric(mask.id))
    if (length(tsif.file) == 92) saveRDS(tsif.mean.df, file = rds.file)
  } # end if cached

  # Crop the grid to the requested box and attach lon/lat to each SIF value.
  cat('grab.tsif: forming TROPOMI SIF from vectors to grids...\n')
  crop.grd <- sel.grd %>% filter(lon >= minlon, lon <= maxlon,
                                 lat >= minlat, lat <= maxlat)
  crop.tsif <- tsif.mean.df %>% filter(id >= min(crop.grd$id),
                                       id <= max(crop.grd$id)) %>%
               left_join(crop.grd, by = 'id') %>% na.omit()

  if (form == 'raster') {
    cat('grab.tsif: forming TROPOMI SIF from grids to rasterLayer...\n')
    crop.tsif <- suppressMessages(df2raster(crop.tsif[, c('SIF', 'lon', 'lat')]))
    #levelplot(tsif.rt, at = seq(0, 3, 0.1), main = timestr)
  } # end if

  return(crop.tsif)
} # end of subroutine
|
/r/src/extract_load_data/grab.tsif.r
|
no_license
|
sabrinamadsen/SMUrF-1
|
R
| false
| false
| 4,051
|
r
|
### grab TROPOMI SIF downscaled by Alex Turner,
# written by DW, 10/24/2019
# based on Alex's latest SIF file, need to convert SIF from vectors to grids
# return a data.frame
# 02/04/2020, DW, calculate the mean SIF of multiple TSIF files if nhrs != NULL
#
# tsif.path : directory searched recursively for downscaled TROPOMI SIF *.nc
# timestr   : YYYYMMDD string selecting the file(s)
# minlon/maxlon/minlat/maxlat : cropping box in degrees
# nhrs      : if non-NULL, average all files in the nhrs-hour window from timestr
# form      : 'data.frame' or 'raster' (default) -- return type
# Depends on project helpers strsplit.to.df(), get.doy.from.timestr(),
# df2raster(), and on dplyr being attached (for %>% / filter / left_join).
grab.tsif <- function(tsif.path, timestr, minlon, maxlon, minlat, maxlat,
                      nhrs = NULL, form = c('data.frame', 'raster')[2]) {
  library(ncdf4)

  # NOTE(fix): anchor the pattern -- '.nc' as a regex matches any character
  # followed by 'nc' anywhere in the name (e.g. would match 'Xnc_old.txt').
  tsif.files <- list.files(path = tsif.path, pattern = '\\.nc$',
                           recursive = T, full.names = T)

  # The grid-definition file maps vector ids back onto a lon/lat grid.
  mask.file  <- tsif.files[basename(tsif.files) == 'SIFgrid.nc']
  tsif.files <- tsif.files[basename(tsif.files) != 'SIFgrid.nc']

  cat('grab.tsif(): Read mask file...\n')
  mask.dat <- nc_open(mask.file)
  mask.lat <- ncvar_get(mask.dat, 'lat')
  mask.lon <- ncvar_get(mask.dat, 'lon')
  mask.id  <- ncvar_get(mask.dat, 'id')

  # Expand to every lon/lat cell; row order matches the id numbering.
  mask.grd <- expand.grid(lon = as.numeric(mask.lon), lat = as.numeric(mask.lat))
  mask.grd$id <- 1:nrow(mask.grd)
  sel.grd <- mask.grd %>% filter(id >= min(mask.id), id <= max(mask.id))

  # The timestamp is the 3rd '_'-separated token of the file name
  # (NOTE(fix): anchored '\\.nc$' here too when stripping the extension).
  tsif.timestr <- paste0(strsplit.to.df(gsub('\\.nc$', '', basename(tsif.files)))$V3)

  if (!is.null(nhrs)) {  # average every file inside the nhrs window
    time.info <- get.doy.from.timestr(paste0(timestr, '00'), nday = 1, nhrs = nhrs)
    tsif.file <- tsif.files[tsif.timestr >= substr(time.info$min.timestr, 1, 8) &
                            tsif.timestr <= substr(time.info$max.timestr, 1, 8)]
  } else tsif.file <- tsif.files[tsif.timestr == timestr]

  if (length(tsif.file) == 0) {
    cat(paste('grab.tsif(): No downscaled TROPOMI SIF file found for', timestr,
              ', please check...\n'))
    return(NULL)
  }

  cat(paste('grab.tsif(): Need to process', length(tsif.file),
            'file(s)...it takes a while\n'))

  # 92 files == a full JJA season; cache its mean as an RDS next to the data.
  rds.file <- file.path(dirname(tsif.file[1]), 'TSIF_JJA_mean.rds')
  # NOTE(fix): use scalar && rather than elementwise & in this if() condition.
  if (length(tsif.file) == 92 && file.exists(rds.file)) {
    tsif.mean.df <- readRDS(rds.file)
  } else {
    # Sum the SIF fields over all selected files...
    tsif.sum <- list(ncvar_get(nc_open(tsif.file[1]), 'SIF'))
    if (length(tsif.file) > 1) {
      for (t in 2:length(tsif.file)) {
        print(t)
        tsif.tmp <- list(ncvar_get(nc_open(tsif.file[t]), 'SIF'))
        tsif.sum <- mapply('+', tsif.sum, tsif.tmp, SIMPLIFY = FALSE)
      } # end for t
    }
    # ...then divide by the file count for the mean.
    # NOTE(fix): the original skipped the unlist/divide step when only ONE
    # file was selected, leaving `tsif.mean` as a list instead of a vector;
    # the expression below is correct for any number of files.
    cat('grab.tsif: Cal-ing the mean SIF from multiple files...\n')
    tsif.mean <- as.numeric(unlist(tsif.sum)) / length(tsif.file)

    # Attach the vector ids so values can be joined back onto the grid.
    tsif.mean.df <- data.frame(SIF = tsif.mean, id = as.numeric(mask.id))
    if (length(tsif.file) == 92) saveRDS(tsif.mean.df, file = rds.file)
  } # end if cached

  # Crop the grid to the requested box and attach lon/lat to each SIF value.
  cat('grab.tsif: forming TROPOMI SIF from vectors to grids...\n')
  crop.grd <- sel.grd %>% filter(lon >= minlon, lon <= maxlon,
                                 lat >= minlat, lat <= maxlat)
  crop.tsif <- tsif.mean.df %>% filter(id >= min(crop.grd$id),
                                       id <= max(crop.grd$id)) %>%
               left_join(crop.grd, by = 'id') %>% na.omit()

  if (form == 'raster') {
    cat('grab.tsif: forming TROPOMI SIF from grids to rasterLayer...\n')
    crop.tsif <- suppressMessages(df2raster(crop.tsif[, c('SIF', 'lon', 'lat')]))
    #levelplot(tsif.rt, at = seq(0, 3, 0.1), main = timestr)
  } # end if

  return(crop.tsif)
} # end of subroutine
|
## glmnet worked examples: regression families (part 1) and a
## ridge / lasso / elastic-net comparison on simulated data (part 2).
library(glmnet)
library(doParallel)
#=========================================================================================
# 1. https://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html#lin (Official)
#=========================================================================================
###1. Linear Regression
#1.1 Gaussian Family
#1.2 Multiresponse Gaussian Family
# NOTE(review): the hard-coded library paths below are machine-specific; the
# same example datasets ship with glmnet and could be loaded with data(...).
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/MultiGaussianExample.RData")
mfit = glmnet(x, y, family = "mgaussian")
plot(mfit, xvar = "lambda", label = TRUE, type.coef = "2norm")
predict(mfit, newx = x[1:5,], s = c(0.1, 0.01))
cvmfit = cv.glmnet(x, y, family = "mgaussian")
plot(cvmfit)
###2. Logistic Regression
#2.1 Binomial Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/BinomialExample.RData")
fit = glmnet(x, y, family = "binomial")
plot(fit, xvar = "dev", label = TRUE)
predict(fit, newx = x[1:5,], type = "class", s = c(0.05, 0.01))
cvfit = cv.glmnet(x, y, family = "binomial", type.measure = "class")
plot(cvfit)
#2.2 Multinomial Models
###3. Poisson Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/PoissonExample.RData")
fit = glmnet(x, y, family = "poisson")
plot(fit)
###4. Cox Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/CoxExample.RData")
y[1:5,]
fit = glmnet(x, y, family = "cox")
plot(fit)
#2.5 Sparse Matrices
#=========================================================================================
# 2. https://www4.stat.ncsu.edu/~post/josh/LASSO_Ridge_Elastic_Net_-_Examples.html
#=========================================================================================
library(MASS)   # mvrnorm() to generate correlated predictors
library(glmnet) # ridge / lasso / elastic net
###1. Generate data
set.seed(19873)
n <- 100 # Number of observations
p <- 50  # Number of predictors included in model
# AR(1)-style correlation: cor(x_i, x_j) = 0.7^|i - j|
CovMatrix <- outer(1:p, 1:p, function(x,y) {.7^abs(x-y)})
x <- mvrnorm(n, rep(0,p), CovMatrix)
# True signal lives in the first 14 predictors with decreasing weight.
y <- 10 * apply(x[, 1:2], 1, sum) +
  5 * apply(x[, 3:4], 1, sum) +
  apply(x[, 5:14], 1, sum) +
  rnorm(n)
# Split data into train and test sets
train_rows <- sample(1:n, .66*n)
x.train <- x[train_rows, ]
x.test <- x[-train_rows, ]
y.train <- y[train_rows]
y.test <- y[-train_rows]
###2. Fit models
# (For plots on left):
fit.lasso <- glmnet(x.train, y.train, family="gaussian", alpha=1)
fit.ridge <- glmnet(x.train, y.train, family="gaussian", alpha=0)
fit.elnet <- glmnet(x.train, y.train, family="gaussian", alpha=.5)
# 10-fold CV for each alpha = 0, 0.1, ..., 1.0; results land in fit0..fit10.
for (i in 0:10) {
  assign(paste("fit", i, sep=""),
         cv.glmnet(x.train, y.train, type.measure="mse", alpha=i/10,
                   family="gaussian"))
}
###3. Plot solution path and cross-validated MSE as function of lambda:
par(mfrow=c(3,2))
# For plotting options, type '?plot.glmnet' in R console
plot(fit.lasso, xvar="lambda")
plot(fit10, main="LASSO")
plot(fit.ridge, xvar="lambda")
plot(fit0, main="Ridge")
plot(fit.elnet, xvar="lambda")
plot(fit5, main="Elastic Net")
# Inspect ridge coefficients at the 1-SE lambda.
coef(fit0, s=fit0$lambda.1se)
###4. Held-out predictions and MSE for every alpha.
## NOTE(fix): replaced 22 copy-pasted predict()/mean() lines with a loop;
## assign() preserves the original global names yhat0..yhat10 / mse0..mse10
## so any downstream or interactive use is unaffected.
for (i in 0:10) {
  fit.i  <- get(paste0("fit", i))
  yhat.i <- predict(fit.i, s=fit.i$lambda.1se, newx=x.test)
  assign(paste0("yhat", i), yhat.i)
  assign(paste0("mse", i), mean((y.test - yhat.i)^2))
}
|
/R_machine_learning/glmnet_demo.R
|
permissive
|
HouyuZhang/Learn_R
|
R
| false
| false
| 4,054
|
r
|
# glmnet demo script: walks through the official glmnet vignette families
# (mgaussian/binomial/poisson/cox), then compares ridge / LASSO / elastic net
# on simulated correlated predictors via 10-fold CV over a grid of alphas.
# NOTE(review): the load() calls use absolute Windows paths to a local R
# library, so sections 1-4 are not portable.
library(glmnet)
library(doParallel)
#=========================================================================================
# 1. https://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html#lin (Official)
#=========================================================================================
###1. Linear Regression
#1.1 Gaussian Family
#1.2 Multiresponse Gaussian Family
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/MultiGaussianExample.RData")
mfit = glmnet(x, y, family = "mgaussian")
plot(mfit, xvar = "lambda", label = TRUE, type.coef = "2norm")
predict(mfit, newx = x[1:5,], s = c(0.1, 0.01))
cvmfit = cv.glmnet(x, y, family = "mgaussian")
plot(cvmfit)
###2. Logistic Regression
#2.1 Binomial Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/BinomialExample.RData")
fit = glmnet(x, y, family = "binomial")
plot(fit, xvar = "dev", label = TRUE)
predict(fit, newx = x[1:5,], type = "class", s = c(0.05, 0.01))
cvfit = cv.glmnet(x, y, family = "binomial", type.measure = "class")
plot(cvfit)
#2.2 Multinomial Models
###3. Poisson Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/PoissonExample.RData")
fit = glmnet(x, y, family = "poisson")
plot(fit)
###4. Cox Models
load("E:/Program Files/R/R-3.5.3/library/glmnet/data/CoxExample.RData")
y[1:5,]
fit = glmnet(x, y, family = "cox")
plot(fit)
#2.5 Sparse Matrices
#=========================================================================================
# 2. https://www4.stat.ncsu.edu/~post/josh/LASSO_Ridge_Elastic_Net_-_Examples.html
#=========================================================================================
library(MASS) # Package needed to generate correlated predictors
library(glmnet) # Package to fit ridge/lasso/elastic net models
###1. Generate data
set.seed(19873)
n <- 100 # Number of observations
p <- 50 # Number of predictors included in model
# AR(1)-style covariance: correlation decays as 0.7^|i-j|
CovMatrix <- outer(1:p, 1:p, function(x,y) {.7^abs(x-y)})
x <- mvrnorm(n, rep(0,p), CovMatrix)
# True signal uses only the first 14 predictors; the remaining 36 are noise
y <- 10 * apply(x[, 1:2], 1, sum) +
5 * apply(x[, 3:4], 1, sum) +
apply(x[, 5:14], 1, sum) +
rnorm(n)
# Split data into train and test sets (2/3 train, 1/3 test)
train_rows <- sample(1:n, .66*n)
x.train <- x[train_rows, ]
x.test <- x[-train_rows, ]
y.train <- y[train_rows]
y.test <- y[-train_rows]
###2. Fit models
# (For plots on left):
fit.lasso <- glmnet(x.train, y.train, family="gaussian", alpha=1)
fit.ridge <- glmnet(x.train, y.train, family="gaussian", alpha=0)
fit.elnet <- glmnet(x.train, y.train, family="gaussian", alpha=.5)
# 10-fold Cross validation for each alpha = 0, 0.1, ... , 0.9, 1.0
# (For plots on Right)
# assign() creates fit0..fit10 in the workspace, one cv.glmnet per alpha
for (i in 0:10) {
assign(paste("fit", i, sep=""), cv.glmnet(x.train, y.train, type.measure="mse",alpha=i/10,family="gaussian"))
}
###3. Plot solution path and cross-validated MSE as function of λ:
par(mfrow=c(3,2))
# For plotting options, type '?plot.glmnet' in R console
plot(fit.lasso, xvar="lambda")
plot(fit10, main="LASSO")       # fit10: alpha = 1.0
plot(fit.ridge, xvar="lambda")
plot(fit0, main="Ridge")        # fit0: alpha = 0.0
plot(fit.elnet, xvar="lambda")
plot(fit5, main="Elastic Net")  # fit5: alpha = 0.5
# Predict at lambda.1se and compute test-set MSE for every alpha
yhat0 <- predict(fit0, s=fit0$lambda.1se, newx=x.test)
coef(fit0,s=fit0$lambda.1se)
yhat1 <- predict(fit1, s=fit1$lambda.1se, newx=x.test)
yhat2 <- predict(fit2, s=fit2$lambda.1se, newx=x.test)
yhat3 <- predict(fit3, s=fit3$lambda.1se, newx=x.test)
yhat4 <- predict(fit4, s=fit4$lambda.1se, newx=x.test)
yhat5 <- predict(fit5, s=fit5$lambda.1se, newx=x.test)
yhat6 <- predict(fit6, s=fit6$lambda.1se, newx=x.test)
yhat7 <- predict(fit7, s=fit7$lambda.1se, newx=x.test)
yhat8 <- predict(fit8, s=fit8$lambda.1se, newx=x.test)
yhat9 <- predict(fit9, s=fit9$lambda.1se, newx=x.test)
yhat10 <- predict(fit10, s=fit10$lambda.1se, newx=x.test)
mse0 <- mean((y.test - yhat0)^2)
mse1 <- mean((y.test - yhat1)^2)
mse2 <- mean((y.test - yhat2)^2)
mse3 <- mean((y.test - yhat3)^2)
mse4 <- mean((y.test - yhat4)^2)
mse5 <- mean((y.test - yhat5)^2)
mse6 <- mean((y.test - yhat6)^2)
mse7 <- mean((y.test - yhat7)^2)
mse8 <- mean((y.test - yhat8)^2)
mse9 <- mean((y.test - yhat9)^2)
mse10 <- mean((y.test - yhat10)^2)
|
# Extracted Rd example for SpatialVx::upscale2d — builds a synthetic 50x50
# forecast/observation pair via kernel smoothing and runs neighborhood
# (upscaling) verification at several window sizes.
library(SpatialVx)
### Name: upscale2d
### Title: Upscaling Neighborhood Verification on a 2-d Verification Set
### Aliases: upscale2d plot.upscale2d print.upscale2d
### Keywords: math
### ** Examples
x <- matrix( 0, 50, 50)
# sprinkle exponential "rain" values at 100 random cells
x[ sample(1:50,10), sample(1:50,10)] <- rexp( 100, 0.25)
y <- kernel2dsmooth( x, kernel.type="disk", r=6.5)
x <- kernel2dsmooth( x, kernel.type="gauss", nx=50, ny=50, sigma=3.5)
hold <- make.SpatialVx( x, y, thresholds = seq(0.01,1,,5), field.type = "random")
look <- upscale2d( hold, levels=c(1, 3, 20) )
look
par( mfrow = c(4, 2 ) )
plot( look )
## Not run:
##D data( "geom001" )
##D data( "geom000" )
##D data( "ICPg240Locs" )
##D
##D hold <- make.SpatialVx( geom000, geom001, thresholds = c(0.01, 50.01),
##D loc = ICPg240Locs, projection = TRUE, map = TRUE, loc.byrow = TRUE,
##D field.type = "Precipitation", units = "mm/h",
##D data.name = "Geometric", obs.name = "geom000", model.name = "geom001" )
##D
##D look <- upscale2d(hold, levels=c(1, 3, 9, 17, 33, 65, 129, 257),
##D verbose=TRUE)
##D
##D par( mfrow = c(4, 2 ) )
##D
##D plot(look )
##D look <- upscale2d(hold, q.gt.zero=TRUE, verbose=TRUE)
##D plot(look)
##D look <- upscale2d(hold, verbose=TRUE)
##D plot(look)
##D
## End(Not run)
|
/data/genthat_extracted_code/SpatialVx/examples/upscale2d.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,258
|
r
|
# Extracted Rd example for SpatialVx::upscale2d — builds a synthetic 50x50
# forecast/observation pair via kernel smoothing and runs neighborhood
# (upscaling) verification at several window sizes.
library(SpatialVx)
### Name: upscale2d
### Title: Upscaling Neighborhood Verification on a 2-d Verification Set
### Aliases: upscale2d plot.upscale2d print.upscale2d
### Keywords: math
### ** Examples
x <- matrix( 0, 50, 50)
# sprinkle exponential "rain" values at 100 random cells
x[ sample(1:50,10), sample(1:50,10)] <- rexp( 100, 0.25)
y <- kernel2dsmooth( x, kernel.type="disk", r=6.5)
x <- kernel2dsmooth( x, kernel.type="gauss", nx=50, ny=50, sigma=3.5)
hold <- make.SpatialVx( x, y, thresholds = seq(0.01,1,,5), field.type = "random")
look <- upscale2d( hold, levels=c(1, 3, 20) )
look
par( mfrow = c(4, 2 ) )
plot( look )
## Not run:
##D data( "geom001" )
##D data( "geom000" )
##D data( "ICPg240Locs" )
##D
##D hold <- make.SpatialVx( geom000, geom001, thresholds = c(0.01, 50.01),
##D loc = ICPg240Locs, projection = TRUE, map = TRUE, loc.byrow = TRUE,
##D field.type = "Precipitation", units = "mm/h",
##D data.name = "Geometric", obs.name = "geom000", model.name = "geom001" )
##D
##D look <- upscale2d(hold, levels=c(1, 3, 9, 17, 33, 65, 129, 257),
##D verbose=TRUE)
##D
##D par( mfrow = c(4, 2 ) )
##D
##D plot(look )
##D look <- upscale2d(hold, q.gt.zero=TRUE, verbose=TRUE)
##D plot(look)
##D look <- upscale2d(hold, verbose=TRUE)
##D plot(look)
##D
## End(Not run)
|
# Prepare MEE data for the example_LG1 algo
#
# Loads MEE stock and MEEGD call-option price series, backs out implied
# volatility, computes call deltas, and saves the merged xts series for the
# algotrader LG1 example. Paths are absolute local Windows paths.
#
require(FinDataWeb)
require(RQuantLib)
require(xts)
load("H:/Temporary/Data/IB/MEE.RData")
load("H:/Temporary/Data/IB/MEEGD.RData")
hdata <- xts(cbind(MEE, MEEGD))
# get the implied vol, so I can calculate delta
# Tex = time to expiry in years (expiry hard-coded to 2009-07-19 16:00)
din <- data.frame(type="CALL", price=as.numeric(hdata$Close.MEEGD),
S=as.numeric(hdata$Close.MEE),
Tex=as.numeric((ISOdatetime(2009,07,19,16,0,0) - index(hdata))/365),
K=20, r=0.0016, index=index(hdata))
din <- na.omit(din)
IV <- impliedvol(din)
# NOTE(review): 'options' here shadows base::options() for the rest of the
# script; NULL$calculate <- ... promotes it to a plain list.
options <- NULL
options$calculate <- "DELTA"
res <- greeksEU(IV$S, IV$IVol, IV$Tex, IV$K, IV$r, IV$type, options)
res <- xts(data.frame(IVol=res$sigma, delta=res$DELTA), din$index)
hdata <- cbind(hdata, res)
# '$' anchors end of column name, so "Close.MEEGD" is untouched by the first gsub
colnames(hdata) <- gsub("MEE$", "stock", colnames(hdata))
colnames(hdata) <- gsub("MEEGD$", "call", colnames(hdata))
save(list=c("hdata"),
file="h:/user/R/Adrian/algotrader/data/hdata_LG1.RData")
##########################################################################
## EuropeanOptionImpliedVolatility("call", 5.70, 24.45,
## strike=20, dividendYield=0, riskFreeRate=0.0016, maturity=0.10719,
## volatility=1)
|
/R/finance/algotrader/tests/prepare_LG1_data.R
|
no_license
|
thumbert/rascal
|
R
| false
| false
| 1,204
|
r
|
# Prepare MEE data for the example_LG1 algo
#
# Loads MEE stock and MEEGD call-option price series, backs out implied
# volatility, computes call deltas, and saves the merged xts series for the
# algotrader LG1 example. Paths are absolute local Windows paths.
#
require(FinDataWeb)
require(RQuantLib)
require(xts)
load("H:/Temporary/Data/IB/MEE.RData")
load("H:/Temporary/Data/IB/MEEGD.RData")
hdata <- xts(cbind(MEE, MEEGD))
# get the implied vol, so I can calculate delta
# Tex = time to expiry in years (expiry hard-coded to 2009-07-19 16:00)
din <- data.frame(type="CALL", price=as.numeric(hdata$Close.MEEGD),
S=as.numeric(hdata$Close.MEE),
Tex=as.numeric((ISOdatetime(2009,07,19,16,0,0) - index(hdata))/365),
K=20, r=0.0016, index=index(hdata))
din <- na.omit(din)
IV <- impliedvol(din)
# NOTE(review): 'options' here shadows base::options() for the rest of the
# script; NULL$calculate <- ... promotes it to a plain list.
options <- NULL
options$calculate <- "DELTA"
res <- greeksEU(IV$S, IV$IVol, IV$Tex, IV$K, IV$r, IV$type, options)
res <- xts(data.frame(IVol=res$sigma, delta=res$DELTA), din$index)
hdata <- cbind(hdata, res)
# '$' anchors end of column name, so "Close.MEEGD" is untouched by the first gsub
colnames(hdata) <- gsub("MEE$", "stock", colnames(hdata))
colnames(hdata) <- gsub("MEEGD$", "call", colnames(hdata))
save(list=c("hdata"),
file="h:/user/R/Adrian/algotrader/data/hdata_LG1.RData")
##########################################################################
## EuropeanOptionImpliedVolatility("call", 5.70, 24.45,
## strike=20, dividendYield=0, riskFreeRate=0.0016, maturity=0.10719,
## volatility=1)
|
# Churn-rate analysis: find, per problem, how many users "dropped" (went
# >= CHURNRATE.PERIOD days without activity after a failed attempt on that
# problem). Pipeline: read attempts -> stable-sort -> external perl filter
# keeps per-day last-failure info -> detect activity gaps -> aggregate.
#NOTE remember to use stable ordering everywhere
CHURNRATE.PERIOD <- 28
FILTERING.SCRIPT <- "churnrate_filter.pl"
FILTERED.FILENAME <- "attempts_filtered.txt"
FILTERED.TEMP.FILENAME <- "attempts_filtered_temp.txt"
attempts <- read.csv("attempts.txt")
users <- unique(sort(attempts$user))
##users.total <- length(users)
# number of distinct users who attempted each problem (denominator for shares)
users.by.problem <- table(unique(attempts[, c("user", "problem")])$problem)
attempts$submission.timestamp <- NULL
attempts <- attempts[order(attempts$user, attempts$problem,
attempts$attempt.timestamp), ]
attempts$date <- as.Date(as.POSIXct(attempts$attempt.timestamp,
origin="1970-01-01"))
attempts$attempt.timestamp <- NULL
write.table(attempts, FILTERED.TEMP.FILENAME, sep="\t",
row.names=F, quote=F)
# delegate filtering to an external perl script (round-trip via temp file)
system(paste("perl", FILTERING.SCRIPT,
"<", FILTERED.TEMP.FILENAME,
">", FILTERED.FILENAME))
file.remove(FILTERED.TEMP.FILENAME)
last.fails.daily <- read.table(FILTERED.FILENAME, header=T)
last.fails.daily$result <- NULL
last.fails.daily <- last.fails.daily[order(last.fails.daily$date,
last.fails.daily$user), ]
# keep only the last record per (user, date) — fromLast relies on the sort above
last.fails.daily <- last.fails.daily[!duplicated(last.fails.daily[c("user",
"date")],
fromLast=TRUE), ]
last.fails.daily$date <- as.Date(last.fails.daily$date)
last.date.seen <- last.fails.daily$date[nrow(last.fails.daily)]
churnrate.fails <- data.frame()
#NOTE couldn't avoid using loop here
# For each user: append an NA sentinel row carrying last.date.seen so a
# trailing period of silence also counts as a gap, then keep the rows that
# are followed by a gap of >= CHURNRATE.PERIOD days.
for (user in users) {
curr.fails <- rbind(last.fails.daily[last.fails.daily$user == user, ], NA)
curr.fails$date[nrow(curr.fails)] <- last.date.seen
churnrate.fails <- rbind(churnrate.fails,
curr.fails[c(diff(curr.fails$date) >=
CHURNRATE.PERIOD,
FALSE), ])
}
# last.failed == -1 apparently marks "no failure that day" — TODO confirm
# against churnrate_filter.pl
churnrate.fails <- churnrate.fails[churnrate.fails$last.failed != -1, ]
##NOTE mostly useless
##sort(table(churnrate.fails$user))
#TODO(roman): rewrite R-style
tmp.drops <- table(churnrate.fails$last.failed)
drops.by.problem <- data.frame(problem=character(length(tmp.drops)),
users=numeric(length(tmp.drops)),
drops=numeric(length(tmp.drops)),
drops.share=numeric(length(tmp.drops)))
drops.by.problem$problem <- names(tmp.drops)
drops.by.problem$drops <- as.integer(tmp.drops)
# drops.share = fraction of a problem's users who dropped after failing it
for (row in 1:nrow(drops.by.problem)) {
problem <- drops.by.problem$problem[row]
drops.by.problem$drops.share[row] <-
drops.by.problem$drops[row] / users.by.problem[problem]
drops.by.problem$users[row] <- as.integer(users.by.problem[problem])
}
drops.by.problem <- drops.by.problem[order(drops.by.problem$drops.share), ]
row.names(drops.by.problem) <- NULL
|
/churnrate_test.R
|
no_license
|
bioinf/edm
|
R
| false
| false
| 2,911
|
r
|
# Churn-rate analysis: find, per problem, how many users "dropped" (went
# >= CHURNRATE.PERIOD days without activity after a failed attempt on that
# problem). Pipeline: read attempts -> stable-sort -> external perl filter
# keeps per-day last-failure info -> detect activity gaps -> aggregate.
#NOTE remember to use stable ordering everywhere
CHURNRATE.PERIOD <- 28
FILTERING.SCRIPT <- "churnrate_filter.pl"
FILTERED.FILENAME <- "attempts_filtered.txt"
FILTERED.TEMP.FILENAME <- "attempts_filtered_temp.txt"
attempts <- read.csv("attempts.txt")
users <- unique(sort(attempts$user))
##users.total <- length(users)
# number of distinct users who attempted each problem (denominator for shares)
users.by.problem <- table(unique(attempts[, c("user", "problem")])$problem)
attempts$submission.timestamp <- NULL
attempts <- attempts[order(attempts$user, attempts$problem,
attempts$attempt.timestamp), ]
attempts$date <- as.Date(as.POSIXct(attempts$attempt.timestamp,
origin="1970-01-01"))
attempts$attempt.timestamp <- NULL
write.table(attempts, FILTERED.TEMP.FILENAME, sep="\t",
row.names=F, quote=F)
# delegate filtering to an external perl script (round-trip via temp file)
system(paste("perl", FILTERING.SCRIPT,
"<", FILTERED.TEMP.FILENAME,
">", FILTERED.FILENAME))
file.remove(FILTERED.TEMP.FILENAME)
last.fails.daily <- read.table(FILTERED.FILENAME, header=T)
last.fails.daily$result <- NULL
last.fails.daily <- last.fails.daily[order(last.fails.daily$date,
last.fails.daily$user), ]
# keep only the last record per (user, date) — fromLast relies on the sort above
last.fails.daily <- last.fails.daily[!duplicated(last.fails.daily[c("user",
"date")],
fromLast=TRUE), ]
last.fails.daily$date <- as.Date(last.fails.daily$date)
last.date.seen <- last.fails.daily$date[nrow(last.fails.daily)]
churnrate.fails <- data.frame()
#NOTE couldn't avoid using loop here
# For each user: append an NA sentinel row carrying last.date.seen so a
# trailing period of silence also counts as a gap, then keep the rows that
# are followed by a gap of >= CHURNRATE.PERIOD days.
for (user in users) {
curr.fails <- rbind(last.fails.daily[last.fails.daily$user == user, ], NA)
curr.fails$date[nrow(curr.fails)] <- last.date.seen
churnrate.fails <- rbind(churnrate.fails,
curr.fails[c(diff(curr.fails$date) >=
CHURNRATE.PERIOD,
FALSE), ])
}
# last.failed == -1 apparently marks "no failure that day" — TODO confirm
# against churnrate_filter.pl
churnrate.fails <- churnrate.fails[churnrate.fails$last.failed != -1, ]
##NOTE mostly useless
##sort(table(churnrate.fails$user))
#TODO(roman): rewrite R-style
tmp.drops <- table(churnrate.fails$last.failed)
drops.by.problem <- data.frame(problem=character(length(tmp.drops)),
users=numeric(length(tmp.drops)),
drops=numeric(length(tmp.drops)),
drops.share=numeric(length(tmp.drops)))
drops.by.problem$problem <- names(tmp.drops)
drops.by.problem$drops <- as.integer(tmp.drops)
# drops.share = fraction of a problem's users who dropped after failing it
for (row in 1:nrow(drops.by.problem)) {
problem <- drops.by.problem$problem[row]
drops.by.problem$drops.share[row] <-
drops.by.problem$drops[row] / users.by.problem[problem]
drops.by.problem$users[row] <- as.integer(users.by.problem[problem])
}
drops.by.problem <- drops.by.problem[order(drops.by.problem$drops.share), ]
row.names(drops.by.problem) <- NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biummisc.R
\docType{package}
\name{BIUMmisc}
\alias{BIUMmisc}
\alias{BIUMmisc-package}
\title{BIUMmisc}
\description{
This is a collection of functions and scripts commonly used by the
BIUM-MZ Core Facility
}
\author{
Federico Marini
}
|
/man/BIUMmisc.Rd
|
permissive
|
imbeimainz/BIUMmisc
|
R
| false
| true
| 314
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biummisc.R
\docType{package}
\name{BIUMmisc}
\alias{BIUMmisc}
\alias{BIUMmisc-package}
\title{BIUMmisc}
\description{
This is a collection of functions and scripts commonly used by the
BIUM-MZ Core Facility
}
\author{
Federico Marini
}
|
# testthat tests for SwimmeR::results_score — verify total meet scores for a
# prelims/finals meet (Big Ten 2018 PDF fixture) and a timed-finals meet
# (TX/OH .rds fixture). Both depend on fixtures shipped in the package.
test_that("prelims_finals works", {
  file <-
    system.file("extdata", "BigTen_WSWIM_2018.pdf", package = "SwimmeR")
  BigTenRaw <- read_results(file)
  # typo/replacement strip "University of" variants so team names normalize;
  # commented-out patterns are kept for reference
  BigTen <- swim_parse(
    BigTenRaw,
    typo = c(
      # "^\\s{1,}\\*",
      # "^\\s{1,}(\\d{1,2})\\s{2,}",
      # not sure if needed
      ",\\s{1,}University\\s{1,}of",
      "University\\s{1,}of\\s{1,}",
      "\\s{1,}University"
      # "SR\\s{2,}",
      # "JR\\s{2,}",
      # "SO\\s{2,}",
      # "FR\\s{2,}"
    ),
    replacement = c(
      # " ",
      # " \\1 ",
      "", "", ""
      # "SR ",
      # "JR ",
      # "SO ",
      # "FR "
    ),
    avoid = c("B1G", "Pool")
  )
  # drop non-scoring swims before scoring
  BigTen <- BigTen %>%
    dplyr::filter(
      stringr::str_detect(Event, "Time Trial") == FALSE,
      stringr::str_detect(Event, "Swim-off") == FALSE
    ) %>%
    dplyr::mutate(Team = dplyr::case_when(Team == "Wisconsin, Madi" ~ "Wisconsin",
TRUE ~ Team))
  # begin results_score portion
  df <- BigTen %>%
    results_score(
      events = unique(BigTen$Event),
      meet_type = "prelims_finals",
      lanes = 8,
      scoring_heats = 3,
      point_values = c(
        32, 28, 27, 26, 25, 24, 23, 22, 20, 17, 16, 15, 14, 13, 12, 11, 9, 7, 6, 5, 4, 3, 2, 1
      )
    )
  Total <- df %>%
    dplyr::group_by(Team) %>%
    dplyr::summarise(Score = sum(Points, na.rm = TRUE)) %>%
    dplyr::arrange(dplyr::desc(Score)) %>%
    dplyr::ungroup() %>%
    dplyr::summarize(total = sum(Score)) # should total to 8596
  expect_equal(Total$total[1], 8596)
})
test_that("timed_finals works", {
  results <- readRDS(system.file("extdata", "TX_OH_Results.rds", package = "SwimmeR"))
  # NOTE(review): unlike the test above, this one calls rename/mutate/group_by
  # unqualified, so it relies on dplyr being attached by the test setup
  results <- results %>%
    rename("Team" = School) %>%
    mutate(DQ = 0,
Exhibition = 0)
  Results_Final <-
    results_score(
      results = results,
      events = unique(results$Event),
      meet_type = "timed_finals",
      lanes = 8,
      scoring_heats = 2,
      point_values = c(20, 17, 16, 15, 14, 13, 12, 11, 9, 7, 6, 5, 4, 3, 2, 1)
    )
  Scores <- Results_Final %>%
    group_by(State) %>%
    summarise(Score = sum(Points))
  expect_equal(Scores$Score[1], 2155.5)
})
# test_file("tests/testthat/test-results_score_works.R")
|
/tests/testthat/test-results_score.R
|
no_license
|
hareshsuppiah/SwimmeR-1
|
R
| false
| false
| 2,340
|
r
|
# testthat tests for SwimmeR::results_score — verify total meet scores for a
# prelims/finals meet (Big Ten 2018 PDF fixture) and a timed-finals meet
# (TX/OH .rds fixture). Both depend on fixtures shipped in the package.
test_that("prelims_finals works", {
  file <-
    system.file("extdata", "BigTen_WSWIM_2018.pdf", package = "SwimmeR")
  BigTenRaw <- read_results(file)
  # typo/replacement strip "University of" variants so team names normalize;
  # commented-out patterns are kept for reference
  BigTen <- swim_parse(
    BigTenRaw,
    typo = c(
      # "^\\s{1,}\\*",
      # "^\\s{1,}(\\d{1,2})\\s{2,}",
      # not sure if needed
      ",\\s{1,}University\\s{1,}of",
      "University\\s{1,}of\\s{1,}",
      "\\s{1,}University"
      # "SR\\s{2,}",
      # "JR\\s{2,}",
      # "SO\\s{2,}",
      # "FR\\s{2,}"
    ),
    replacement = c(
      # " ",
      # " \\1 ",
      "", "", ""
      # "SR ",
      # "JR ",
      # "SO ",
      # "FR "
    ),
    avoid = c("B1G", "Pool")
  )
  # drop non-scoring swims before scoring
  BigTen <- BigTen %>%
    dplyr::filter(
      stringr::str_detect(Event, "Time Trial") == FALSE,
      stringr::str_detect(Event, "Swim-off") == FALSE
    ) %>%
    dplyr::mutate(Team = dplyr::case_when(Team == "Wisconsin, Madi" ~ "Wisconsin",
TRUE ~ Team))
  # begin results_score portion
  df <- BigTen %>%
    results_score(
      events = unique(BigTen$Event),
      meet_type = "prelims_finals",
      lanes = 8,
      scoring_heats = 3,
      point_values = c(
        32, 28, 27, 26, 25, 24, 23, 22, 20, 17, 16, 15, 14, 13, 12, 11, 9, 7, 6, 5, 4, 3, 2, 1
      )
    )
  Total <- df %>%
    dplyr::group_by(Team) %>%
    dplyr::summarise(Score = sum(Points, na.rm = TRUE)) %>%
    dplyr::arrange(dplyr::desc(Score)) %>%
    dplyr::ungroup() %>%
    dplyr::summarize(total = sum(Score)) # should total to 8596
  expect_equal(Total$total[1], 8596)
})
test_that("timed_finals works", {
  results <- readRDS(system.file("extdata", "TX_OH_Results.rds", package = "SwimmeR"))
  # NOTE(review): unlike the test above, this one calls rename/mutate/group_by
  # unqualified, so it relies on dplyr being attached by the test setup
  results <- results %>%
    rename("Team" = School) %>%
    mutate(DQ = 0,
Exhibition = 0)
  Results_Final <-
    results_score(
      results = results,
      events = unique(results$Event),
      meet_type = "timed_finals",
      lanes = 8,
      scoring_heats = 2,
      point_values = c(20, 17, 16, 15, 14, 13, 12, 11, 9, 7, 6, 5, 4, 3, 2, 1)
    )
  Scores <- Results_Final %>%
    group_by(State) %>%
    summarise(Score = sum(Points))
  expect_equal(Scores$Score[1], 2155.5)
})
# test_file("tests/testthat/test-results_score_works.R")
|
#' Applies rrollup function
#'
#' This function applies the rrollup method to a pepData object for each unique protein and returns a proData object.
#'
#' @param pepData an omicsData object of class 'pepData'
#' @param combine_fn character string naming the function used to combine the scaled peptides into a protein value; one of "median" (default) or "mean"
#' @param parallel logical indicating whether or not to use "doParallel" loop in applying rrollup function. Defaults to TRUE.
#'
#' @return an omicsData object of class 'proData'
#'
#' @details In the rrollup method, peptides are scaled based on a reference peptide and protein abundance is set as the combined value (median or mean, per \code{combine_fn}) of these scaled peptides.
#'
#' @references Matzke, M. M., Brown, J. N., Gritsenko, M. A., Metz, T. O., Pounds, J. G., Rodland, K. D., ... Webb-Robertson, B.-J. (2013). \emph{A comparative analysis of computational approaches to relative protein quantification using peptide peak intensities in label-free LC-MS proteomics experiments}. Proteomics, 13(0), 493-503. Polpitiya, A. D., Qian, W.-J., Jaitly, N., Petyuk, V. A., Adkins, J. N., Camp, D. G., ... Smith, R. D. (2008). \emph{DAnTE: a statistical tool for quantitative analysis of -omics data}. Bioinformatics (Oxford, England), 24(13), 1556-1558.
#'
#' @examples
#' \dontrun{
#' library(pmartRdata)
#' data(pep_object)
#' result = rrollup(pepData = pep_object)
#' }
#'
#' @rdname rrollup
#'
rrollup <- function(pepData, combine_fn = "median", parallel = TRUE){
  check_names = getchecknames(pepData)

  # check that pepData is of appropriate class #
  if(!inherits(pepData, "pepData")) stop("pepData is not an object of the appropriate class")

  # check that a protein mapping is provided #
  if(is.null(pepData$e_meta)){
    stop("A mapping to proteins must be provided in order to use the protein_filter function.")
  }

  # check that combine_fn is one of 'mean', 'median'
  if(!(combine_fn %in% c('median', 'mean'))) stop("combine_fn has to be one of 'mean' or 'median'")

  pep_id = attr(pepData, "cnames")$edata_cname
  pro_id = attr(pepData, "cnames")$emeta_cname

  # attach the protein id to every peptide row, then drop the peptide id column
  pep = data.table::data.table(pepData$e_data)
  pro = data.table::data.table(pepData$e_meta[,c(pep_id, pro_id)])
  temp = merge(x = pro, y = pep, by = pep_id, all.x = F, all.y = T)
  temp = as.data.frame(temp, check.names=check_names)[,-which(names(temp)==pep_id)]

  unique_proteins <- unique(temp[[pro_id]])

  # assigning function to chosen_combine_fn
  if(combine_fn == "median"){
    chosen_combine_fn <- combine_fn_median
  } else {
    chosen_combine_fn <- combine_fn_mean
  }

  # Roll the peptides of a single protein up to one protein-level row
  # (the R_Rollup algorithm). Defined as a local closure so the same code
  # serves both the parallel and serial paths (previously duplicated), and so
  # foreach auto-exports it — together with 'temp' and 'chosen_combine_fn' —
  # to the worker processes.
  rollup_one <- function(protein){
    row_ind <- which(temp[ ,pro_id] == protein)
    current_subset <- temp[row_ind,]
    current_subset <- current_subset[,-which(names(temp) == pro_id)]

    ## store number of peptides ##
    num_peps = nrow(current_subset)
    res = matrix(NA, nrow = 1, ncol = ncol(current_subset))

    ## if only 1 peptide, set the protein value to the peptide ##
    if(num_peps==1){
      protein_val = unlist(current_subset)
    }else{
      ## Step 1: Select Reference Peptide -- peptide with least amount of missing data ##
      na.cnt = apply(is.na(current_subset),1,sum)
      least.na = which(na.cnt == min(na.cnt))

      ## If tied, select one with highest median abundance ##
      if(length(least.na)>1){
        mds = apply(current_subset,1,median,na.rm=T)[least.na]
        least.na = least.na[which(mds==max(mds))]
      }
      prot_val = unlist(current_subset[least.na,])

      ## Step 2: Ratio all peptides to the reference. Since the data is on the log scale, this is the difference ##
      scaling_factor = apply(matrix(prot_val, nrow = num_peps, ncol = ncol(current_subset), byrow=T) - current_subset,1,median,na.rm=T)

      ## Step 3: Use the median of the ratio as a scaling factor for each peptide ##
      x_scaled = current_subset + matrix(scaling_factor, nrow = num_peps, ncol = ncol(current_subset))

      ## Step 4: Combine the scaled peptides into the protein abundance ##
      protein_val = apply(x_scaled, 2, chosen_combine_fn)
    }
    res[1,] = protein_val
    res <- data.frame(res)
    names(res) <- names(current_subset)
    res
  }

  if(parallel == TRUE){
    cores <- parallel::detectCores()
    cl <- parallel::makeCluster(cores - 1)
    # guarantee the cluster is shut down even if an error occurs below
    on.exit(parallel::stopCluster(cl), add = TRUE)
    doParallel::registerDoParallel(cl)
    final_list <- foreach::foreach(i=1:length(unique_proteins))%dopar%{
      rollup_one(unique_proteins[i])
    }
  } else {
    final_list <- lapply(unique_proteins, rollup_one)
  }

  # assemble the protein-level e_data: one row per protein, protein id first
  final_result <- do.call(rbind, final_list)
  final_result <- cbind(unique_proteins, final_result)
  names(final_result)[1] <- pro_id

  samp_id = attr(pepData, "cnames")$fdata_cname
  data_scale = attr(pepData, "data_info")$data_scale
  is_normalized = attr(pepData, "data_info")$norm_info$is_normalized

  # subsetting pepData$e_meta by 'unique_proteins'
  emeta_indices <- match(unique_proteins, pepData$e_meta[[pro_id]])
  if(ncol(pepData$e_meta) == 2){
    # dropping the peptide id from a 2-column e_meta collapses it to a vector,
    # so rebuild the data frame and restore the protein id name
    e_meta = as.data.frame(pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)])
    names(e_meta) <- pro_id
  } else {
    e_meta = pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)]
  }

  prodata = as.proData(e_data = data.frame(final_result, check.names=check_names), f_data = pepData$f_data, e_meta = e_meta ,edata_cname = pro_id, fdata_cname = samp_id, emeta_cname = pro_id, data_scale = data_scale, is_normalized = is_normalized, check.names = check_names)

  # check for isobaricpepData class
  if(inherits(pepData, "isobaricpepData")){
    # update attributes in prodata
    attr(prodata, "isobaric_info") = attr(pepData, "isobaric_info")
    attr(prodata, "isobaric_info")$norm_info$is_normalized = attr(pepData, "isobaric_info")$norm_info$is_normalized
  }

  # updating prodata attributes
  attr(prodata, "data_info")$norm_info = attr(pepData, "data_info")$norm_info
  attr(prodata, "data_info")$data_types = attr(pepData, "data_info")$data_types
  attr(prodata, "data_info")$norm_method = attr(pepData, "data_info")$norm_method
  attr(prodata, "filters") <- attr(pepData, "filters")
  attr(prodata, "group_DF") <- attr(pepData, "group_DF")
  attr(prodata, "imdanova") <- attr(pepData, "imdanova")

  return(prodata)
}
|
/R/rrollup.R
|
permissive
|
rarichardson92/pmartR
|
R
| false
| false
| 10,112
|
r
|
#' Applies rrollup function
#'
#' This function applies the rrollup method to a pepData object for each unique protein and returns a proData object.
#'
#' @param pepData an omicsData object of class 'pepData'
#' @param combine_fn character string indicating what combine_fn to use, defaults to median, other option is mean
#' @param parallel logical indicating whether or not to use "doParallel" loop in applying rrollup function. Defaults to TRUE.
#'
#' @return an omicsData object of class 'proData'
#'
#' @details In the rrollup method, peptides are scaled based on a reference peptide and protein abundance is set as the mean of these scaled peptides.
#'
#' @references Matzke, M. M., Brown, J. N., Gritsenko, M. A., Metz, T. O., Pounds, J. G., Rodland, K. D., ... Webb-Robertson, B.-J. (2013). \emph{A comparative analysis of computational approaches to relative protein quantification using peptide peak intensities in label-free LC-MS proteomics experiments}. Proteomics, 13(0), 493-503. Polpitiya, A. D., Qian, W.-J., Jaitly, N., Petyuk, V. A., Adkins, J. N., Camp, D. G., ... Smith, R. D. (2008). \emph{DAnTE: a statistical tool for quantitative analysis of -omics data}. Bioinformatics (Oxford, England), 24(13), 1556-1558.
#'
#' @examples
#' \dontrun{
#' library(pmartRdata)
#' data(pep_object)
#' result = rrollup(pepData = pep_object)
#'}
#'
#' @rdname rrollup
#'
rrollup<- function(pepData, combine_fn = "median", parallel = TRUE){
check_names = getchecknames(pepData)
# check that pepData is of appropraite class #
if(!inherits(pepData, "pepData")) stop("pepData is not an object of the appropriate class")
# check that a protein mapping is provided #
if(is.null(pepData$e_meta)){
stop("A mapping to proteins must be provided in order to use the protein_filter function.")
}
#check that combine_fn is one of 'mean', 'median'
if(!(combine_fn %in% c('median', 'mean'))) stop("combine_fn has to be one of 'mean' or 'median'")
pep_id = attr(pepData, "cnames")$edata_cname
pro_id = attr(pepData, "cnames")$emeta_cname
pep = data.table::data.table(pepData$e_data)
pro = data.table::data.table(pepData$e_meta[,c(pep_id, pro_id)])
temp = merge(x = pro, y = pep, by = pep_id, all.x = F, all.y = T)
temp = as.data.frame(temp, check.names=check_names)[,-which(names(temp)==pep_id)]
#pull protein column from temp and apply unique function
unique_proteins<- unique(temp[[pro_id]])
#assigning function to chosen_combine_fn
if(combine_fn == "median"){
chosen_combine_fn<- combine_fn_median
}else{chosen_combine_fn = combine_fn_mean}
if(parallel == TRUE){
final_list<- vector("list", length(unique_proteins))
cores<- parallel::detectCores()
cl<- parallel::makeCluster(cores - 1)
doParallel::registerDoParallel(cl)
r<-foreach::foreach(i=1:length(unique_proteins))%dopar%{
row_ind<- which(temp[ ,pro_id] == unique_proteins[i])
current_subset<- temp[row_ind,]
current_subset<- current_subset[,-which(names(temp) == pro_id)]
#### Perform R_Rollup ####
## store number of peptides ##
num_peps = nrow(current_subset)
res = matrix(NA, nrow = 1, ncol = ncol(current_subset))
## if only 1 peptide, set the protein value to the peptide ##
if(num_peps==1){
protein_val = unlist(current_subset)
}else{
## Step 1: Select Reference Peptide -- peptide with least amount of missing data ##
na.cnt = apply(is.na(current_subset),1,sum)
least.na = which(na.cnt == min(na.cnt))
## If tied, select one with highest median abundance##
if(length(least.na)>1){
mds = apply(current_subset,1,median,na.rm=T)[least.na]
least.na = least.na[which(mds==max(mds))]
}
prot_val = unlist(current_subset[least.na,])
## Step 2: Ratio all peptides to the reference. Since the data is on the log scale, this is the difference ##
scaling_factor = apply(matrix(prot_val, nrow = num_peps, ncol = ncol(current_subset), byrow=T) - current_subset,1,median,na.rm=T)
## Step 3: Use the median of the ratio as a scaling factor for each peptide ##
x_scaled = current_subset + matrix(scaling_factor, nrow = num_peps, ncol = ncol(current_subset))
## Step 4: Set Abundance as Median Peptide Abundance ##
protein_val = apply(x_scaled, 2, chosen_combine_fn)
}
res[1,] = protein_val
res<- data.frame(res)
names(res)<- names(current_subset)
final_list[[i]]<- res
}
parallel::stopCluster(cl)
final_result<- do.call(rbind, r)
final_result<- cbind(unique_proteins, final_result)
names(final_result)[1]<-pro_id
samp_id = attr(pepData, "cnames")$fdata_cname
data_scale = attr(pepData, "data_info")$data_scale
is_normalized = attr(pepData, "data_info")$norm_info$is_normalized
#subsetting pepData$e_meta by 'unique_proteins'
emeta_indices<- match(unique_proteins, pepData$e_meta[[pro_id]])
if(ncol(pepData$e_meta) == 2){
e_meta = as.data.frame(pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)])
names(e_meta)<-pro_id
}else {e_meta = pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)]}
prodata = as.proData(e_data = data.frame(final_result, check.names=check_names), f_data = pepData$f_data, e_meta = e_meta ,edata_cname = pro_id, fdata_cname = samp_id, emeta_cname = pro_id, data_scale = data_scale, is_normalized = is_normalized, check.names = check_names)
#check for isobaricpepData class
if(inherits(pepData, "isobaricpepData")){
#update attributes in prodata
attr(prodata, "isobaric_info") = attr(pepData, "isobaric_info")
attr(prodata, "isobaric_info")$norm_info$is_normalized = attr(pepData, "isobaric_info")$norm_info$is_normalized
}
#updating prodata attributes
attr(prodata, "data_info")$norm_info = attr(pepData, "data_info")$norm_info
attr(prodata, "data_info")$data_types = attr(pepData, "data_info")$data_types
attr(prodata, "data_info")$norm_method = attr(pepData, "data_info")$norm_method
attr(prodata, "filters")<- attr(pepData, "filters")
attr(prodata, "group_DF")<- attr(pepData, "group_DF")
attr(prodata, "imdanova")<- attr(pepData, "imdanova")
}
#applying rrollup without doParallel
else{
final_list<- vector("list", length(unique_proteins))
for(i in 1:length(unique_proteins)){
row_ind<- which(temp[ ,pro_id] == unique_proteins[i])
current_subset<- temp[row_ind,]
current_subset<- current_subset[,-which(names(temp) == pro_id)]
#### Perform R_Rollup ####
## store number of peptides ##
num_peps = nrow(current_subset)
res = matrix(NA, nrow = 1, ncol = ncol(current_subset))
## if only 1 peptide, set the protein value to the peptide ##
if(num_peps==1){
protein_val = unlist(current_subset)
}else{
## Step 1: Select Reference Peptide -- peptide with least amount of missing data ##
na.cnt = apply(is.na(current_subset),1,sum)
least.na = which(na.cnt == min(na.cnt))
## If tied, select one with highest median abundance##
if(length(least.na)>1){
mds = apply(current_subset,1,median,na.rm=T)[least.na]
least.na = least.na[which(mds==max(mds))]
}
prot_val = unlist(current_subset[least.na,])
## Step 2: Ratio all peptides to the reference. Since the data is on the log scale, this is the difference ##
scaling_factor = apply(matrix(prot_val, nrow = num_peps, ncol = ncol(current_subset), byrow=T) - current_subset,1,median,na.rm=T)
## Step 3: Use the median of the ratio as a scaling factor for each peptide ##
x_scaled = current_subset + matrix(scaling_factor, nrow = num_peps, ncol = ncol(current_subset))
## Step 4: Set Abundance as Median Peptide Abundance ##
protein_val = apply(x_scaled, 2, chosen_combine_fn)
}
res[1,] = protein_val
res<- data.frame(res)
names(res)<- names(current_subset)
final_list[[i]]<- res
}
final_result<- do.call(rbind, final_list)
final_result<- cbind(unique_proteins, final_result)
names(final_result)[1]<-pro_id
samp_id = attr(pepData, "cnames")$fdata_cname
data_scale = attr(pepData, "data_info")$data_scale
is_normalized = attr(pepData, "data_info")$norm_info$is_normalized
#subsetting pepData$e_meta by 'unique_proteins'
emeta_indices<- match(unique_proteins, pepData$e_meta[[pro_id]])
if(ncol(pepData$e_meta) == 2){
e_meta = as.data.frame(pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)])
names(e_meta)<-pro_id
}else {e_meta = pepData$e_meta[emeta_indices, -which(names(pepData$e_meta)==pep_id)]}
prodata = as.proData(e_data = data.frame(final_result, check.names=check_names), f_data = pepData$f_data, e_meta = e_meta ,edata_cname = pro_id, fdata_cname = samp_id, emeta_cname = pro_id, data_scale = data_scale, is_normalized = is_normalized, check.names = check_names)
#check for isobaricpepData class
if(inherits(pepData, "isobaricpepData")){
#update attributes in prodata
attr(prodata, "isobaric_info") = attr(pepData, "isobaric_info")
attr(prodata, "isobaric_info")$norm_info$is_normalized = attr(pepData, "isobaric_info")$norm_info$is_normalized
}
#updating prodata attributes
attr(prodata, "data_info")$norm_info = attr(pepData, "data_info")$norm_info
attr(prodata, "data_info")$data_types = attr(pepData, "data_info")$data_types
attr(prodata, "data_info")$norm_method = attr(pepData, "data_info")$norm_method
attr(prodata, "filters")<- attr(pepData, "filters")
attr(prodata, "group_DF")<- attr(pepData, "group_DF")
attr(prodata, "imdanova")<- attr(pepData, "imdanova")
}
return(prodata)
}
|
## Fuzz/regression input for the internal DLMtool:::LBSPRopt optimiser
## (generated for a valgrind run). Most elements are zero-length or zero,
## so this exercises edge-case/crash behaviour, not meaningful fits.
testlist <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.97874210805989e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rLens = numeric(0))
# Call the (unexported) optimiser with the constructed argument list.
result <- do.call(DLMtool:::LBSPRopt,testlist)
# Only inspect the structure of the result; no value assertions are made.
str(result)
|
/DLMtool/inst/testfiles/LBSPRopt/AFL_LBSPRopt/LBSPRopt_valgrind_files/1615837966-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 406
|
r
|
## Fuzz/regression input for the internal DLMtool:::LBSPRopt optimiser
## (generated for a valgrind run). Most elements are zero-length or zero,
## so this exercises edge-case/crash behaviour, not meaningful fits.
testlist <- list(Beta = 0, CAL = numeric(0), CVLinf = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), nage = 0L, nlen = 0L, pars = c(3.97874210805989e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rLens = numeric(0))
# Call the (unexported) optimiser with the constructed argument list.
result <- do.call(DLMtool:::LBSPRopt,testlist)
# Only inspect the structure of the result; no value assertions are made.
str(result)
|
## Replace dissimilarities larger than `toolong` with estimates computed in
## compiled C code: shortest-path ("dykstra") distances or "extended"
## step-across dissimilarities (Bray-Curtis style extension).
##
## dis     : a `dist` object (anything else is coerced with as.dist()).
## path    : "shortest" or "extended" (partial matching via match.arg()).
## toolong : threshold above which dissimilarities are re-estimated.
## trace   : trace flag forwarded to the C routine.
## ...     : not used by the visible code.
## Returns : the dist object with its original attributes restored and the
##           chosen path method appended to its "method" attribute.
##
## NOTE(review): the first .C() call uses the bare symbol `dykstrapath` while
## the second uses the prefixed `C_stepacross` -- confirm both native symbols
## are registered under exactly these names in the package namespace.
`stepacross` <-
function (dis, path = "shortest", toolong = 1, trace = TRUE, ...)
{
path <- match.arg(path, c("shortest", "extended"))
if (!inherits(dis, "dist"))
dis <- as.dist(dis)
# Save attributes: the C routines return a bare double vector, so the
# dist attributes must be reattached afterwards.
oldatt <- attributes(dis)
n <- attr(dis, "Size")
if (path == "shortest")
dis <- .C(dykstrapath, dist = as.double(dis), n = as.integer(n),
as.double(toolong), as.integer(trace),
out = double(length(dis)), NAOK = TRUE)$out
else dis <- .C(C_stepacross, dis = as.double(dis), as.integer(n),
as.double(toolong), as.integer(trace), NAOK = TRUE)$dis
# Restore dist attributes and record which method altered the values.
attributes(dis) <- oldatt
attr(dis, "method") <- paste(attr(dis, "method"), path)
dis
}
|
/R/stepacross.R
|
no_license
|
psolymos/vegan
|
R
| false
| false
| 725
|
r
|
## Replace dissimilarities larger than `toolong` with estimates computed in
## compiled C code: shortest-path ("dykstra") distances or "extended"
## step-across dissimilarities (Bray-Curtis style extension).
##
## dis     : a `dist` object (anything else is coerced with as.dist()).
## path    : "shortest" or "extended" (partial matching via match.arg()).
## toolong : threshold above which dissimilarities are re-estimated.
## trace   : trace flag forwarded to the C routine.
## ...     : not used by the visible code.
## Returns : the dist object with its original attributes restored and the
##           chosen path method appended to its "method" attribute.
##
## NOTE(review): the first .C() call uses the bare symbol `dykstrapath` while
## the second uses the prefixed `C_stepacross` -- confirm both native symbols
## are registered under exactly these names in the package namespace.
`stepacross` <-
function (dis, path = "shortest", toolong = 1, trace = TRUE, ...)
{
path <- match.arg(path, c("shortest", "extended"))
if (!inherits(dis, "dist"))
dis <- as.dist(dis)
# Save attributes: the C routines return a bare double vector, so the
# dist attributes must be reattached afterwards.
oldatt <- attributes(dis)
n <- attr(dis, "Size")
if (path == "shortest")
dis <- .C(dykstrapath, dist = as.double(dis), n = as.integer(n),
as.double(toolong), as.integer(trace),
out = double(length(dis)), NAOK = TRUE)$out
else dis <- .C(C_stepacross, dis = as.double(dis), as.integer(n),
as.double(toolong), as.integer(trace), NAOK = TRUE)$dis
# Restore dist attributes and record which method altered the values.
attributes(dis) <- oldatt
attr(dis, "method") <- paste(attr(dis, "method"), path)
dis
}
|
# Group-mean computation for neural-network (RNA) output.
# Original (Spanish) comment claims: returns a vector with the group means
# over 100 iterations.
# NOTE(review): `target` is accepted but never used inside the function --
# confirm whether it was meant to drive a per-group aggregation.
# NOTE(review): `carga` is a free variable resolved from the calling/global
# environment at run time (presumably a loadings matrix) -- TODO confirm it
# is defined wherever this function is called.
prom_clases<-function(y,target)
{
# Center each column of y by its column mean (row-wise replication of colMeans).
yc<-y-matrix(rep(colMeans(y),nrow(y)),ncol=ncol(y),byrow=TRUE)
# Project the centered data onto `carga` via matrix multiplication.
salida<-as.matrix(yc)%*%carga
return(salida)
}
|
/src/AppBundle/R/calculo promedio grupos kmeans.r
|
permissive
|
armandojg12/sistemaPrediccion
|
R
| false
| false
| 265
|
r
|
# Group-mean computation for neural-network (RNA) output.
# Original (Spanish) comment claims: returns a vector with the group means
# over 100 iterations.
# NOTE(review): `target` is accepted but never used inside the function --
# confirm whether it was meant to drive a per-group aggregation.
# NOTE(review): `carga` is a free variable resolved from the calling/global
# environment at run time (presumably a loadings matrix) -- TODO confirm it
# is defined wherever this function is called.
prom_clases<-function(y,target)
{
# Center each column of y by its column mean (row-wise replication of colMeans).
yc<-y-matrix(rep(colMeans(y),nrow(y)),ncol=ncol(y),byrow=TRUE)
# Project the centered data onto `carga` via matrix multiplication.
salida<-as.matrix(yc)%*%carga
return(salida)
}
|
rm(list = ls())
# install.packages("readxl", dependencies = T)
library(readxl)

## Set working directory
# getwd() set to the project file
# NOTE(review): setwd() inside a script is fragile (breaks on re-run);
# kept unchanged for compatibility with the assumed project layout.
setwd(paste(getwd(), "/Inference/EmployeeRetention", sep = ""))

## Import data and create binary classes for target variables
## (sheet name spelling matches the workbook as-is)
data <- read_excel("../DataRepo.xlsx", "EmployeeRetenion")
alpha <- 0.05

## Null Hypothesis:
## Mean years at PLE of male employees equal to mean years at PLE of female employees

## Create vectors of data to compare.
## BUG FIX: read_excel() returns a tibble and `[ , 1]` on a tibble keeps it a
## one-column tibble, so length()/mean()/sd() below would not operate on the
## underlying numeric vector. `[[1]]` extracts the first column as a vector.
years.m <- subset(data, Gender == "M")[[1]]
years.f <- subset(data, Gender == "F")[[1]]

## Compute t-statistic parameters (one-sample form: the female mean is
## treated as the hypothesized mean, as stated above)
n    <- length(years.m) # Sample size
mu.m <- mean(years.m)   # Sample mean
mu.f <- mean(years.f)   # Hypothesized mean
s.m  <- sd(years.m)     # Sample StDev

## T-Stats
t.gender     <- (mu.m - mu.f) / (s.m / sqrt(n))
t.half.alpha <- qt(1 - alpha / 2, df = n - 1)

## Test Hypothesis with Critical Values and P-Value
criticalVals <- c(-t.half.alpha, t.half.alpha)
## BUG FIX: a two-tailed p-value is 2 * P(T <= -|t|). The previous
## `2 * pt(t.gender, ...)` exceeds 1 whenever t.gender > 0.
pVal.gender <- 2 * pt(-abs(t.gender), df = n - 1)

## Should we Reject?
t.gender < criticalVals[1] || t.gender > criticalVals[2] # TRUE
pVal.gender <= alpha # TRUE

## Conclusion:
## Both checks print TRUE, so the null hypothesis IS rejected at alpha = 0.05.
## (The earlier comment claiming it "cannot be rejected" contradicted the
## TRUE results recorded above.)
|
/Inference/EmployeeRetention/genderYearsPLE.R
|
no_license
|
yalotfi/Inference-and-Regression
|
R
| false
| false
| 1,197
|
r
|
rm(list = ls())
# install.packages("readxl", dependencies = T)
library(readxl)

## Set working directory
# getwd() set to the project file
# NOTE(review): setwd() inside a script is fragile (breaks on re-run);
# kept unchanged for compatibility with the assumed project layout.
setwd(paste(getwd(), "/Inference/EmployeeRetention", sep = ""))

## Import data and create binary classes for target variables
## (sheet name spelling matches the workbook as-is)
data <- read_excel("../DataRepo.xlsx", "EmployeeRetenion")
alpha <- 0.05

## Null Hypothesis:
## Mean years at PLE of male employees equal to mean years at PLE of female employees

## Create vectors of data to compare.
## BUG FIX: read_excel() returns a tibble and `[ , 1]` on a tibble keeps it a
## one-column tibble, so length()/mean()/sd() below would not operate on the
## underlying numeric vector. `[[1]]` extracts the first column as a vector.
years.m <- subset(data, Gender == "M")[[1]]
years.f <- subset(data, Gender == "F")[[1]]

## Compute t-statistic parameters (one-sample form: the female mean is
## treated as the hypothesized mean, as stated above)
n    <- length(years.m) # Sample size
mu.m <- mean(years.m)   # Sample mean
mu.f <- mean(years.f)   # Hypothesized mean
s.m  <- sd(years.m)     # Sample StDev

## T-Stats
t.gender     <- (mu.m - mu.f) / (s.m / sqrt(n))
t.half.alpha <- qt(1 - alpha / 2, df = n - 1)

## Test Hypothesis with Critical Values and P-Value
criticalVals <- c(-t.half.alpha, t.half.alpha)
## BUG FIX: a two-tailed p-value is 2 * P(T <= -|t|). The previous
## `2 * pt(t.gender, ...)` exceeds 1 whenever t.gender > 0.
pVal.gender <- 2 * pt(-abs(t.gender), df = n - 1)

## Should we Reject?
t.gender < criticalVals[1] || t.gender > criticalVals[2] # TRUE
pVal.gender <= alpha # TRUE

## Conclusion:
## Both checks print TRUE, so the null hypothesis IS rejected at alpha = 0.05.
## (The earlier comment claiming it "cannot be rejected" contradicted the
## TRUE results recorded above.)
|
library(shiny)
library(dplyr)
library(ggplot2)
library(ROCR)

# Server logic: a utility curve across temperature thresholds and an ROC
# curve with the selected threshold highlighted.
# NOTE(review): `dummy` (expected columns: `temp`, `stat`) is a free variable
# that must be defined elsewhere (e.g. global.R) -- confirm before deploying.
# (Fix: the duplicated `library(shiny)` call was removed.)
shinyServer(function(input, output) {

  # Reactive: utility score at every candidate threshold. For threshold x,
  # rows with temp > x are flagged positive and the four confusion-matrix
  # counts are weighted by the user-supplied payoffs.
  utility <- reactive({
    out.list <- vector("list", length(dummy$temp)) # preallocate
    for (i in seq_along(dummy$temp)) {
      x <- dummy$temp[i]
      dummy$flag <- ifelse(dummy$temp > x, 1, 0)
      y <- (sum(dummy$flag == 1 & dummy$stat == 1) * input$truePos) +
        (sum(dummy$flag == 1 & dummy$stat == 0) * input$falsePos) +
        (sum(dummy$flag == 0 & dummy$stat == 0) * input$trueNeg) +
        (sum(dummy$flag == 0 & dummy$stat == 1) * input$falseNeg)
      out.list[[i]] <- c(x, y)
    }
    utility <- data.frame(do.call(rbind, out.list))
    colnames(utility) <- c('temperature.thresh', 'utility.score')
    utility
  })

  # Utility curve with a vertical marker at the chosen threshold.
  output$utilityPlot <- renderPlot({
    ggplot(data = utility(), aes(x = temperature.thresh, y = utility.score)) +
      geom_line() +
      geom_vline(xintercept = input$thresh)
  })

  # ROC curve; the cutoff closest to the chosen threshold is marked in red.
  output$rocPlot <- renderPlot({
    pred <- prediction(dummy$temp, dummy$stat)
    perf <- performance(pred, "tpr", "fpr")
    index <- which.min(abs(perf@alpha.values[[1]] - input$thresh))
    x.index <- perf@x.values[[1]][index]
    y.index <- perf@y.values[[1]][index]
    # BUG FIX: base graphics are not composed with `+` (that is ggplot2
    # syntax); call the drawing functions as separate statements.
    plot(perf)
    abline(0, 1)
    points(x = x.index, y = y.index, col = 'red', pch = 19)
  })
})
|
/thresholdUtilityApp/server.R
|
no_license
|
zmwm37/dataVizPractice
|
R
| false
| false
| 1,475
|
r
|
library(shiny)
library(dplyr)
library(ggplot2)
library(ROCR)

# Server logic: a utility curve across temperature thresholds and an ROC
# curve with the selected threshold highlighted.
# NOTE(review): `dummy` (expected columns: `temp`, `stat`) is a free variable
# that must be defined elsewhere (e.g. global.R) -- confirm before deploying.
# (Fix: the duplicated `library(shiny)` call was removed.)
shinyServer(function(input, output) {

  # Reactive: utility score at every candidate threshold. For threshold x,
  # rows with temp > x are flagged positive and the four confusion-matrix
  # counts are weighted by the user-supplied payoffs.
  utility <- reactive({
    out.list <- vector("list", length(dummy$temp)) # preallocate
    for (i in seq_along(dummy$temp)) {
      x <- dummy$temp[i]
      dummy$flag <- ifelse(dummy$temp > x, 1, 0)
      y <- (sum(dummy$flag == 1 & dummy$stat == 1) * input$truePos) +
        (sum(dummy$flag == 1 & dummy$stat == 0) * input$falsePos) +
        (sum(dummy$flag == 0 & dummy$stat == 0) * input$trueNeg) +
        (sum(dummy$flag == 0 & dummy$stat == 1) * input$falseNeg)
      out.list[[i]] <- c(x, y)
    }
    utility <- data.frame(do.call(rbind, out.list))
    colnames(utility) <- c('temperature.thresh', 'utility.score')
    utility
  })

  # Utility curve with a vertical marker at the chosen threshold.
  output$utilityPlot <- renderPlot({
    ggplot(data = utility(), aes(x = temperature.thresh, y = utility.score)) +
      geom_line() +
      geom_vline(xintercept = input$thresh)
  })

  # ROC curve; the cutoff closest to the chosen threshold is marked in red.
  output$rocPlot <- renderPlot({
    pred <- prediction(dummy$temp, dummy$stat)
    perf <- performance(pred, "tpr", "fpr")
    index <- which.min(abs(perf@alpha.values[[1]] - input$thresh))
    x.index <- perf@x.values[[1]][index]
    y.index <- perf@y.values[[1]][index]
    # BUG FIX: base graphics are not composed with `+` (that is ggplot2
    # syntax); call the drawing functions as separate statements.
    plot(perf)
    abline(0, 1)
    points(x = x.index, y = y.index, col = 'red', pch = 19)
  })
})
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/SSMethod.Cond.TA1.8.R
\name{SSMethod.Cond.TA1.8}
\alias{SSMethod.Cond.TA1.8}
\title{Apply Francis composition weighting method TA1.8 for conditional age-at-length fits}
\usage{
SSMethod.Cond.TA1.8(fit, fleet, part = 0:2, seas = NULL, plotit = TRUE,
maxpanel = 1000, FullDiagOut = FALSE)
}
\arguments{
\item{fit}{Stock Synthesis output as read by r4SS function SS_output}
\item{fleet}{vector of one or more fleet numbers whose data are to
be analysed simultaneously (the output N multiplier applies
to all fleets combined)}
\item{part}{vector of one or more partition values; analysis is restricted
to composition data with one of these partition values.
Default is to include all partition values (0, 1, 2).}
\item{seas}{string indicating how to treat data from multiple seasons
'comb' - combine seasonal data for each year and plot against Yr
'sep' - treat seasons separately, plotting against Yr.S
If is.null(seas) it is assumed that there is only one season in
the selected data (a warning is output if this is not true) and
option 'comb' is used.}
\item{plotit}{if TRUE, make an illustrative plot like one or more
panels of Fig. 4 in Francis (2011).}
\item{maxpanel}{maximum number of panels within a plot}
\item{FullDiagOut}{Print full diagnostics?}
}
\description{
Uses an extension of method TA1.8 (described in Appendix A of Francis 2011)
to do stage-2 weighting of conditional age at length composition data from a
Stock Synthesis model. Outputs two versions (A and B) of a multiplier, \emph{w},
(with bootstrap 95\% confidence intervals) so that
\emph{N2i} = \emph{w} x \emph{N1i},
where \emph{N1i} and \emph{N2i} are the stage-1 and stage-2 multinomial
sample sizes for the \emph{i}th composition. Optionally makes a plot
(for version A) of observed and expected mean ages, with two alternative
sets of confidence limits - based on \emph{N1i} (thin lines) and \emph{N2i}
(thick lines) - for the observed values.\cr
\cr
The two versions of w differ according to whether the calculated mean ages are
indexed by year (version A) or by year and length bin (version B). Version A is
recommended; version B is included for historical reasons.\cr
\cr
CAUTIONARY/EXPLANATORY NOTE. The large number of options available in SS makes it
very difficult to be sure that what this function does is appropriate for all
combinations of options. The following notes (for version A) might help anyone
wanting to check or correct the code.
\enumerate{
\item The code first removes un-needed rows
from database condbase.
\item The remaining rows of the database are grouped
(indexed by vector indx) and relevant statistics (e.g., observed and expected
mean age), and ancillary data, are calculated for each group (these are stored
in pldat - one row per group).
\item If the data are to be plotted they are further
grouped by fleet, with one panel of the plot per fleet.
\item A single multiplier, \emph{w}, is calculated to apply to all the
selected data.
}
}
\author{
Chris Francis, Andre Punt, Ian Taylor
}
\references{
Francis, R.I.C.C. (2011). Data weighting in statistical
fisheries stock assessment models. Canadian Journal of
Fisheries and Aquatic Sciences 68: 1124-1138.
}
\seealso{
\code{\link{SSMethod.TA1.8}}
}
|
/man/SSMethod.Cond.TA1.8.Rd
|
no_license
|
tennma/r4ss
|
R
| false
| false
| 3,317
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/SSMethod.Cond.TA1.8.R
\name{SSMethod.Cond.TA1.8}
\alias{SSMethod.Cond.TA1.8}
\title{Apply Francis composition weighting method TA1.8 for conditional age-at-length fits}
\usage{
SSMethod.Cond.TA1.8(fit, fleet, part = 0:2, seas = NULL, plotit = TRUE,
maxpanel = 1000, FullDiagOut = FALSE)
}
\arguments{
\item{fit}{Stock Synthesis output as read by r4SS function SS_output}
\item{fleet}{vector of one or more fleet numbers whose data are to
be analysed simultaneously (the output N multiplier applies
to all fleets combined)}
\item{part}{vector of one or more partition values; analysis is restricted
to composition data with one of these partition values.
Default is to include all partition values (0, 1, 2).}
\item{seas}{string indicating how to treat data from multiple seasons
'comb' - combine seasonal data for each year and plot against Yr
'sep' - treat seasons separately, plotting against Yr.S
If is.null(seas) it is assumed that there is only one season in
the selected data (a warning is output if this is not true) and
option 'comb' is used.}
\item{plotit}{if TRUE, make an illustrative plot like one or more
panels of Fig. 4 in Francis (2011).}
\item{maxpanel}{maximum number of panels within a plot}
\item{FullDiagOut}{Print full diagnostics?}
}
\description{
Uses an extension of method TA1.8 (described in Appendix A of Francis 2011)
to do stage-2 weighting of conditional age at length composition data from a
Stock Synthesis model. Outputs two versions (A and B) of a multiplier, \emph{w},
(with bootstrap 95\% confidence intervals) so that
\emph{N2i} = \emph{w} x \emph{N1i},
where \emph{N1i} and \emph{N2i} are the stage-1 and stage-2 multinomial
sample sizes for the \emph{i}th composition. Optionally makes a plot
(for version A) of observed and expected mean ages, with two alternative
sets of confidence limits - based on \emph{N1i} (thin lines) and \emph{N2i}
(thick lines) - for the observed values.\cr
\cr
The two versions of w differ according to whether the calculated mean ages are
indexed by year (version A) or by year and length bin (version B). Version A is
recommended; version B is included for historical reasons.\cr
\cr
CAUTIONARY/EXPLANATORY NOTE. The large number of options available in SS makes it
very difficult to be sure that what this function does is appropriate for all
combinations of options. The following notes (for version A) might help anyone
wanting to check or correct the code.
\enumerate{
\item The code first removes un-needed rows
from database condbase.
\item The remaining rows of the database are grouped
(indexed by vector indx) and relevant statistics (e.g., observed and expected
mean age), and ancillary data, are calculated for each group (these are stored
in pldat - one row per group).
\item If the data are to be plotted they are further
grouped by fleet, with one panel of the plot per fleet.
\item A single multiplier, \emph{w}, is calculated to apply to all the
selected data.
}
}
\author{
Chris Francis, Andre Punt, Ian Taylor
}
\references{
Francis, R.I.C.C. (2011). Data weighting in statistical
fisheries stock assessment models. Canadian Journal of
Fisheries and Aquatic Sciences 68: 1124-1138.
}
\seealso{
\code{\link{SSMethod.TA1.8}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/file_managment.R
\name{activeteam2dcf}
\alias{activeteam2dcf}
\title{Create slackr dcf file}
\usage{
activeteam2dcf(file = "~/.slackr", verbose = TRUE)
}
\arguments{
\item{file}{character, path to write the dcf file to, Default: '~/.slackr'}
\item{verbose}{logical, Print messages to console, Default: TRUE}
}
\description{
Convert the active team to a slackr compatible dcf file on the local system
}
\details{
If the file is "" then the output will be printed to the console
}
\concept{files}
|
/man/activeteam2dcf.Rd
|
permissive
|
kabhatia7/slackteams
|
R
| false
| true
| 574
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/file_managment.R
\name{activeteam2dcf}
\alias{activeteam2dcf}
\title{Create slackr dcf file}
\usage{
activeteam2dcf(file = "~/.slackr", verbose = TRUE)
}
\arguments{
\item{file}{character, path to write the dcf file to, Default: '~/.slackr'}
\item{verbose}{logical, Print messages to console, Default: TRUE}
}
\description{
Convert the active team to a slackr compatible dcf file on the local system
}
\details{
If the file is "" then the output will be printed to the console
}
\concept{files}
|
## Load the eBay iPad train/test sets; keep descriptions as character.
trainData <- read.csv('../data/eBayiPadTrain.csv', stringsAsFactors=FALSE)
testData <- read.csv('../data/eBayiPadTest.csv', stringsAsFactors=FALSE)
# Target variable as a factor for classification.
trainData$sold <- as.factor(trainData$sold)
# BUG FIX: the goal is a single character vector of all descriptions
# (train first, then test) to feed VectorSource(); the previous
# rbind(c(...)) needlessly wrapped it in a 1-row matrix. c() keeps a vector.
data <- c(trainData$description, testData$description)
### Text pre-processing: build a sparse document-term matrix from `data`
### (the combined train+test item descriptions created above).
library(tm)
library(SnowballC)
# Create corpus
corpus = Corpus(VectorSource(data))
# Convert to lower-case
corpus = tm_map(corpus, tolower)
print(corpus[[1]])
# Re-wrap as PlainTextDocument (presumably to restore the document class
# after tolower -- TODO confirm this is needed with the tm version in use)
corpus = tm_map(corpus, PlainTextDocument)
print(corpus[[1]])
corpus = tm_map(corpus, removePunctuation)
print(corpus[[1]])
corpus = tm_map(corpus, removeNumbers)
print(corpus[[1]])
# Drop domain stop-words (already-stemmed forms like "appl") plus English
# stop-words; print(corpus[[1]]) after each step inspects the first document.
corpus = tm_map(corpus, removeWords, c("sold", "ipad", "appl",
stopwords("english")))
print(corpus[[1]])
corpus = tm_map(corpus, stemDocument)
print(corpus[[1]])
# Create matrix
frequencies = DocumentTermMatrix(corpus)
findFreqTerms(frequencies, lowfreq=10)
# Remove sparse terms (keep terms present in at least ~0.5% of documents)
sparse <- removeSparseTerms(frequencies, 0.995)
# Convert to a data frame
termSparse = as.data.frame(as.matrix(sparse))
# Make all variable names R-friendly
colnames(termSparse) = make.names(colnames(termSparse))
# Add dependent variable.
# ROBUSTNESS FIX: derive the train/test boundary from nrow(trainData)
# instead of the hard-coded indices 1862:2659, so the split survives any
# change in the number of rows of the input CSVs.
n_train <- nrow(trainData)
test <- termSparse[(n_train + 1):nrow(termSparse), ]
termSparse <- termSparse[seq_len(n_train), ]
termSparse$sold <- as.factor(trainData$sold)
# Split the labelled rows into train and cross-validation sets (80/20),
# stratified on the outcome via sample.split().
library(caTools)
set.seed(144)
split = sample.split(termSparse$sold, SplitRatio = 0.8)
trainSparse = subset(termSparse, split==TRUE)
cvSparse = subset(termSparse, split==FALSE)
### RANDOM FOREST MODEL ###
library(randomForest)
ntree <- 400
nodesize <- 2
# Optional mtry tuning (kept commented out; mtry = 12 below was the result)
# tuneRF(trainSparse[,!colnames(trainSparse) %in% c("sold") ],
#        trainSparse$sold,ntreeTry=ntree, stepFactor=1.5,
#        improve=0.05, trace=TRUE, plot=TRUE, doBest=FALSE,
#        nodesize=nodesize)
ebayRandomForestModel <- randomForest(sold ~ .,
ntree=ntree,
mtry = 12,
nodesize=nodesize,
data=trainSparse)
plot(ebayRandomForestModel)
# plot ROC on the held-out CV set
library(ROCR)
predROCR <- prediction(predict(ebayRandomForestModel, type="prob", newdata=cvSparse)[,2],
cvSparse$sold)
perfROCR <- performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7), main="ROC curve for random-forest model on CV set")
# Compute Accuracy
# NOTE(review): this averages accuracy over ALL ROC cutoffs rather than
# reporting accuracy at one threshold -- confirm this is intended.
print("RF cross-validation train data Accuracy: ")
print(mean(performance(predROCR, "acc")@y.values[[1]]))
# Compute Sensitivity (comment label fixed; was "Accuracy")
print("RF cross-validation train data Sensitivity (true posititive rate): ")
print(mean(performance(predROCR, "sens")@y.values[[1]]))
# Compute Specificity (comment label fixed; was "Accuracy")
print("RF cross-validation train data Specificity (true negative rate): ")
print(mean(performance(predROCR, "spec")@y.values[[1]]))
# Compute AUC
print("RF cross-validation train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
# AUC on the training data itself (optimistic; for comparison only).
predROCR <- prediction(predict(ebayRandomForestModel, type="prob", newdata=trainSparse)[,2],
trainSparse$sold)
print("RF train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
### CART MODEL ###
library(rpart)
library(rpart.plot)
# choose complexity parameter
library(caret)
library(e1071)
# Define cross-validation experiment (10-fold CV over a cp grid)
numFolds = trainControl(method = "cv", number = 10)
cpGrid = expand.grid(.cp = seq(0.0001,0.008,0.0001))
# Perform the cross validation (kept commented; cp below was chosen from it)
# ebayTreeCV <- train(sold ~ .,
#                     data = trainSparse,
#                     method = "rpart",
#                     trControl = numFolds,
#                     tuneGrid = cpGrid )
ebayTreeModel <- rpart(sold ~ . ,
method="class",
#cp = 0.0057001,
cp = 4e-04,
data=trainSparse)
prp(ebayTreeModel)
# plot ROC on the training set
library(ROCR)
predROCR <- prediction(predict(ebayTreeModel, type="prob", newdata=trainSparse)[,2],
trainSparse$sold)
perfROCR <- performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7), main="CART train data ROC")
# Compute Accuracy
# NOTE(review): this averages accuracy over ALL ROC cutoffs rather than
# reporting accuracy at one threshold -- confirm this is intended.
print("CART train data Accuracy: ")
print(mean(performance(predROCR, "acc")@y.values[[1]]))
# Compute Sensitivity (comment label fixed; was "Accuracy")
print("CART train data Sensitivity (true posititive rate): ")
print(mean(performance(predROCR, "sens")@y.values[[1]]))
# Compute Specificity (comment label fixed; was "Accuracy")
print("CART train data Specificity (true negative rate): ")
print(mean(performance(predROCR, "spec")@y.values[[1]]))
# Compute AUC
print("CART train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
# CV-set AUC and ROC curve for the tree model.
cvROCR <- prediction(predict(ebayTreeModel, type="prob", newdata = cvSparse)[,2],
cvSparse$sold)
print("CART cross-validation train data AUC: ")
print(performance(cvROCR, "auc")@y.values[[1]])
perfROCR <- performance(cvROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7),
main="ROC curve for CART model on CV set of descriptions")
|
/src/language-model.R
|
permissive
|
poletaev/kaggle-15-071x-the-analytics-edge-summer-2015
|
R
| false
| false
| 5,080
|
r
|
## Load the eBay iPad train/test sets; keep descriptions as character.
trainData <- read.csv('../data/eBayiPadTrain.csv', stringsAsFactors=FALSE)
testData <- read.csv('../data/eBayiPadTest.csv', stringsAsFactors=FALSE)
# Target variable as a factor for classification.
trainData$sold <- as.factor(trainData$sold)
# BUG FIX: the goal is a single character vector of all descriptions
# (train first, then test) to feed VectorSource(); the previous
# rbind(c(...)) needlessly wrapped it in a 1-row matrix. c() keeps a vector.
data <- c(trainData$description, testData$description)
### Text pre-processing: build a sparse document-term matrix from `data`
### (the combined train+test item descriptions created above).
library(tm)
library(SnowballC)
# Create corpus
corpus = Corpus(VectorSource(data))
# Convert to lower-case
corpus = tm_map(corpus, tolower)
print(corpus[[1]])
# Re-wrap as PlainTextDocument (presumably to restore the document class
# after tolower -- TODO confirm this is needed with the tm version in use)
corpus = tm_map(corpus, PlainTextDocument)
print(corpus[[1]])
corpus = tm_map(corpus, removePunctuation)
print(corpus[[1]])
corpus = tm_map(corpus, removeNumbers)
print(corpus[[1]])
# Drop domain stop-words (already-stemmed forms like "appl") plus English
# stop-words; print(corpus[[1]]) after each step inspects the first document.
corpus = tm_map(corpus, removeWords, c("sold", "ipad", "appl",
stopwords("english")))
print(corpus[[1]])
corpus = tm_map(corpus, stemDocument)
print(corpus[[1]])
# Create matrix
frequencies = DocumentTermMatrix(corpus)
findFreqTerms(frequencies, lowfreq=10)
# Remove sparse terms (keep terms present in at least ~0.5% of documents)
sparse <- removeSparseTerms(frequencies, 0.995)
# Convert to a data frame
termSparse = as.data.frame(as.matrix(sparse))
# Make all variable names R-friendly
colnames(termSparse) = make.names(colnames(termSparse))
# Add dependent variable.
# ROBUSTNESS FIX: derive the train/test boundary from nrow(trainData)
# instead of the hard-coded indices 1862:2659, so the split survives any
# change in the number of rows of the input CSVs.
n_train <- nrow(trainData)
test <- termSparse[(n_train + 1):nrow(termSparse), ]
termSparse <- termSparse[seq_len(n_train), ]
termSparse$sold <- as.factor(trainData$sold)
# Split the labelled rows into train and cross-validation sets (80/20),
# stratified on the outcome via sample.split().
library(caTools)
set.seed(144)
split = sample.split(termSparse$sold, SplitRatio = 0.8)
trainSparse = subset(termSparse, split==TRUE)
cvSparse = subset(termSparse, split==FALSE)
### RANDOM FOREST MODEL ###
library(randomForest)
ntree <- 400
nodesize <- 2
# Optional mtry tuning (kept commented out; mtry = 12 below was the result)
# tuneRF(trainSparse[,!colnames(trainSparse) %in% c("sold") ],
#        trainSparse$sold,ntreeTry=ntree, stepFactor=1.5,
#        improve=0.05, trace=TRUE, plot=TRUE, doBest=FALSE,
#        nodesize=nodesize)
ebayRandomForestModel <- randomForest(sold ~ .,
ntree=ntree,
mtry = 12,
nodesize=nodesize,
data=trainSparse)
plot(ebayRandomForestModel)
# plot ROC on the held-out CV set
library(ROCR)
predROCR <- prediction(predict(ebayRandomForestModel, type="prob", newdata=cvSparse)[,2],
cvSparse$sold)
perfROCR <- performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7), main="ROC curve for random-forest model on CV set")
# Compute Accuracy
# NOTE(review): this averages accuracy over ALL ROC cutoffs rather than
# reporting accuracy at one threshold -- confirm this is intended.
print("RF cross-validation train data Accuracy: ")
print(mean(performance(predROCR, "acc")@y.values[[1]]))
# Compute Sensitivity (comment label fixed; was "Accuracy")
print("RF cross-validation train data Sensitivity (true posititive rate): ")
print(mean(performance(predROCR, "sens")@y.values[[1]]))
# Compute Specificity (comment label fixed; was "Accuracy")
print("RF cross-validation train data Specificity (true negative rate): ")
print(mean(performance(predROCR, "spec")@y.values[[1]]))
# Compute AUC
print("RF cross-validation train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
# AUC on the training data itself (optimistic; for comparison only).
predROCR <- prediction(predict(ebayRandomForestModel, type="prob", newdata=trainSparse)[,2],
trainSparse$sold)
print("RF train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
### CART MODEL ###
library(rpart)
library(rpart.plot)
# choose complexity parameter
library(caret)
library(e1071)
# Define cross-validation experiment (10-fold CV over a cp grid)
numFolds = trainControl(method = "cv", number = 10)
cpGrid = expand.grid(.cp = seq(0.0001,0.008,0.0001))
# Perform the cross validation (kept commented; cp below was chosen from it)
# ebayTreeCV <- train(sold ~ .,
#                     data = trainSparse,
#                     method = "rpart",
#                     trControl = numFolds,
#                     tuneGrid = cpGrid )
ebayTreeModel <- rpart(sold ~ . ,
method="class",
#cp = 0.0057001,
cp = 4e-04,
data=trainSparse)
prp(ebayTreeModel)
# plot ROC on the training set
library(ROCR)
predROCR <- prediction(predict(ebayTreeModel, type="prob", newdata=trainSparse)[,2],
trainSparse$sold)
perfROCR <- performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7), main="CART train data ROC")
# Compute Accuracy
# NOTE(review): this averages accuracy over ALL ROC cutoffs rather than
# reporting accuracy at one threshold -- confirm this is intended.
print("CART train data Accuracy: ")
print(mean(performance(predROCR, "acc")@y.values[[1]]))
# Compute Sensitivity (comment label fixed; was "Accuracy")
print("CART train data Sensitivity (true posititive rate): ")
print(mean(performance(predROCR, "sens")@y.values[[1]]))
# Compute Specificity (comment label fixed; was "Accuracy")
print("CART train data Specificity (true negative rate): ")
print(mean(performance(predROCR, "spec")@y.values[[1]]))
# Compute AUC
print("CART train data AUC: ")
print(performance(predROCR, "auc")@y.values[[1]])
# CV-set AUC and ROC curve for the tree model.
cvROCR <- prediction(predict(ebayTreeModel, type="prob", newdata = cvSparse)[,2],
cvSparse$sold)
print("CART cross-validation train data AUC: ")
print(performance(cvROCR, "auc")@y.values[[1]])
perfROCR <- performance(cvROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1),
text.adj=c(-0.2, 1.7),
main="ROC curve for CART model on CV set of descriptions")
|
\name{wd.macat}
\alias{wd.macat}
\title{Internal anRpackage objects}
\description{Internal anRpackage objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
/man/wd.macat.rd
|
no_license
|
acdelre/MAd
|
R
| false
| false
| 180
|
rd
|
\name{wd.macat}
\alias{wd.macat}
\title{Internal anRpackage objects}
\description{Internal anRpackage objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
# Demonstration of S4 classes: definition, instantiation, slot access,
# and slot modification.

# Define an S4 class "student" with three typed slots.
setClass("student", slots = list(name = "character", age = "numeric", GPA = "numeric"))

# Create an instance with new(), supplying each slot by name.
s <- new("student", name = "John", age = 21, GPA = 3.5)
s

# Verify that the object belongs to the S4 system.
isS4(s)

# Slots are read with the @ operator.
s@name
s@GPA
s@age

# modify GPA: slots can be assigned through @ as well.
s@GPA <- 3.7
s

# slot() is the functional form of @ for reading...
slot(s, "name")

# ...and also supports assignment.
slot(s, "name") <- "Paul"
s
|
/S4-Class-Creation-Modification.R
|
no_license
|
ankit-mishra/R-Step-by-Step
|
R
| false
| false
| 273
|
r
|
# Define an S4 class with three typed slots.
setClass("student", slots=list(name="character", age="numeric", GPA="numeric"))
# Instantiate the class with new(), supplying a value for each slot.
s <- new("student",name="John", age=21, GPA=3.5)
s
# Confirm the object uses the S4 system.
isS4(s)
# Read slots with the @ operator.
s@name
s@GPA
s@age
# modify GPA
s@GPA <- 3.7
s
# slot() is the functional equivalent of @ for reading...
slot(s,"name")
# ...and for assignment.
slot(s,"name") <- "Paul"
s
|
# Coerce `.f` (a function or a purrr-style formula) to a function and
# verify it can receive both the group data (.x) and the group key (.y).
as_group_map_function <- function(.f) {
  fn <- rlang::as_function(.f)
  fn_formals <- formals(fn)
  too_few <- length(fn_formals) < 2
  has_dots <- "..." %in% names(fn_formals)
  if (too_few && !has_dots) {
    stop("The function must accept at least two arguments. You can use ... to absorb unused components")
  }
  fn
}
#' Apply a function to each group
#'
#' \Sexpr[results=rd, stage=render]{dplyr:::lifecycle("experimental")}
#'
#' @description
#'
#' `group_map()`, `group_modify()` and `group_walk()` are purrr-style functions that can
#' be used to iterate on grouped tibbles.
#'
#' @details
#'
#' Use `group_modify()` when `summarize()` is too limited, in terms of what you need
#' to do and return for each group. `group_modify()` is good for "data frame in, data frame out".
#' If that is too limited, you need to use a [nested][group_nest()] or [split][group_split()] workflow.
#' `group_modify()` is an evolution of [do()], if you have used that before.
#'
#' Each conceptual group of the data frame is exposed to the function `.f` with two pieces of information:
#'
#' - The subset of the data for the group, exposed as `.x`.
#' - The key, a tibble with exactly one row and columns for each grouping variable, exposed as `.y`.
#'
#' For completeness, `group_modify()`, `group_map` and `group_walk()` also work on
#' ungrouped data frames, in that case the function is applied to the
#' entire data frame (exposed as `.x`), and `.y` is a one row tibble with no
#' column, consistently with [group_keys()].
#'
#' @family grouping functions
#'
#' @param .tbl A grouped tibble
#' @param .f A function or formula to apply to each group. It must return a data frame.
#'
#' If a __function__, it is used as is. It should have at least 2 formal arguments.
#'
#' If a __formula__, e.g. `~ head(.x)`, it is converted to a function.
#'
#' In the formula, you can use
#'
#' - `.` or `.x` to refer to the subset of rows of `.tbl`
#' for the given group
#'
#' - `.y` to refer to the key, a one row tibble with one column per grouping variable
#' that identifies the group
#'
#' @param ... Additional arguments passed on to `.f`
#' @param keep are the grouping variables kept in `.x`
#'
#' @return
#' - `group_modify()` returns a grouped tibble. In that case `.f` must return a data frame.
#' - `group_map()` returns a list of results from calling `.f` on each group
#' - `group_walk()` calls `.f` for side effects and returns the input `.tbl`, invisibly
#'
#' @examples
#'
#' # return a list
#' mtcars %>%
#' group_by(cyl) %>%
#' group_map(~ head(.x, 2L))
#'
#' # return a tibble grouped by `cyl` with 2 rows per group
#' # the grouping data is recalculated
#' mtcars %>%
#' group_by(cyl) %>%
#' group_modify(~ head(.x, 2L))
#'
#' if (requireNamespace("broom", quietly = TRUE)) {
#' # a list of tibbles
#' iris %>%
#' group_by(Species) %>%
#' group_map(~ broom::tidy(lm(Petal.Length ~ Sepal.Length, data = .x)))
#'
#' # a restructured grouped tibble
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ broom::tidy(lm(Petal.Length ~ Sepal.Length, data = .x)))
#' }
#'
#' # a list of vectors
#' iris %>%
#' group_by(Species) %>%
#' group_map(~ quantile(.x$Petal.Length, probs = c(0.25, 0.5, 0.75)))
#'
#' # to use group_modify() the lambda must return a data frame
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ {
#' quantile(.x$Petal.Length, probs = c(0.25, 0.5, 0.75)) %>%
#' tibble::enframe(name = "prob", value = "quantile")
#' })
#'
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ {
#' .x %>%
#' purrr::map_dfc(fivenum) %>%
#' mutate(nms = c("min", "Q1", "median", "Q3", "max"))
#' })
#'
#' # group_walk() is for side effects
#' dir.create(temp <- tempfile())
#' iris %>%
#' group_by(Species) %>%
#' group_walk(~ write.csv(.x, file = file.path(temp, paste0(.y$Species, ".csv"))))
#' list.files(temp, pattern = "csv$")
#' unlink(temp, recursive = TRUE)
#'
#' # group_modify() and ungrouped data frames
#' mtcars %>%
#' group_modify(~ head(.x, 2L))
#'
#' @export
group_map <- function(.tbl, .f, ..., keep = FALSE) {
  .f <- as_group_map_function(.f)
  # Split the data into per-group chunks and build a one-row key tibble
  # for each group, then call `.f(chunk, key, ...)` pairwise.
  chunks <- group_split(.tbl, keep = keep)
  keys <- group_keys(.tbl)
  group_keys <- map(seq_len(nrow(keys)), function(i) keys[i, , drop = FALSE])
  map2(chunks, group_keys, .f, ...)
}
#' @rdname group_map
#' @export
group_modify <- function(.tbl, .f, ..., keep = FALSE) {
  # S3 generic; methods below exist for data.frame and grouped_df.
  UseMethod("group_modify")
}
#' @export
group_modify.data.frame <- function(.tbl, .f, ..., keep = FALSE) {
  .f <- as_group_map_function(.f)
  # Ungrouped fallback: apply `.f` to the whole frame, with the empty
  # one-row key from group_keys() as `.y`.
  .f(.tbl, group_keys(.tbl), ...)
}
#' @export
group_modify.grouped_df <- function(.tbl, .f, ..., keep = FALSE) {
  tbl_group_vars <- group_vars(.tbl)
  .f <- as_group_map_function(.f)
  # Wrapper applied to each group: validates the result of `.f` before
  # re-attaching the group key columns.
  fun <- function(.x, .y){
    res <- .f(.x, .y, ...)
    if (!inherits(res, "data.frame")) {
      abort("The result of .f should be a data frame")
    }
    if (any(bad <- names(res) %in% tbl_group_vars)) {
      # Fixed: the original sprintf() had no %s placeholder, so the
      # offending column names were silently dropped from the message.
      abort(sprintf(
        "The returned data frame cannot contain the original grouping variables: %s",
        paste(names(res)[bad], collapse = ", ")
      ))
    }
    # Replicate the one-row key so it aligns with every row of the result.
    bind_cols(.y[rep(1L, nrow(res)), , drop = FALSE], res)
  }
  res <- bind_rows(!!!group_map(.tbl, fun, ..., keep = keep))
  # Restore the original grouping structure on the combined result.
  group_by(res, !!!groups(.tbl), .drop = group_by_drop_default(.tbl))
}
#' @export
#' @rdname group_map
group_walk <- function(.tbl, .f, ...) {
  # Called purely for side effects; the list result of group_map()
  # is deliberately discarded.
  group_map(.tbl, .f, ...)
  # Return the input invisibly, as documented above ("returns the input
  # .tbl, invisibly"); the original returned it visibly, printing it at
  # the top level.
  invisible(.tbl)
}
|
/R/group_map.R
|
permissive
|
davan690/dplyr
|
R
| false
| false
| 5,437
|
r
|
# Coerce `.f` (a function or a purrr-style formula) to a function and
# verify it can receive both the group data (.x) and the group key (.y).
as_group_map_function <- function(.f) {
  .f <- rlang::as_function(.f)
  if (length(form <- formals(.f)) < 2 && ! "..." %in% names(form)){
    stop("The function must accept at least two arguments. You can use ... to absorb unused components")
  }
  .f
}
#' Apply a function to each group
#'
#' \Sexpr[results=rd, stage=render]{dplyr:::lifecycle("experimental")}
#'
#' @description
#'
#' `group_map()`, `group_modify()` and `group_walk()` are purrr-style functions that can
#' be used to iterate on grouped tibbles.
#'
#' @details
#'
#' Use `group_modify()` when `summarize()` is too limited, in terms of what you need
#' to do and return for each group. `group_modify()` is good for "data frame in, data frame out".
#' If that is too limited, you need to use a [nested][group_nest()] or [split][group_split()] workflow.
#' `group_modify()` is an evolution of [do()], if you have used that before.
#'
#' Each conceptual group of the data frame is exposed to the function `.f` with two pieces of information:
#'
#' - The subset of the data for the group, exposed as `.x`.
#' - The key, a tibble with exactly one row and columns for each grouping variable, exposed as `.y`.
#'
#' For completeness, `group_modify()`, `group_map` and `group_walk()` also work on
#' ungrouped data frames, in that case the function is applied to the
#' entire data frame (exposed as `.x`), and `.y` is a one row tibble with no
#' column, consistently with [group_keys()].
#'
#' @family grouping functions
#'
#' @param .tbl A grouped tibble
#' @param .f A function or formula to apply to each group. It must return a data frame.
#'
#' If a __function__, it is used as is. It should have at least 2 formal arguments.
#'
#' If a __formula__, e.g. `~ head(.x)`, it is converted to a function.
#'
#' In the formula, you can use
#'
#' - `.` or `.x` to refer to the subset of rows of `.tbl`
#' for the given group
#'
#' - `.y` to refer to the key, a one row tibble with one column per grouping variable
#' that identifies the group
#'
#' @param ... Additional arguments passed on to `.f`
#' @param keep are the grouping variables kept in `.x`
#'
#' @return
#' - `group_modify()` returns a grouped tibble. In that case `.f` must return a data frame.
#' - `group_map()` returns a list of results from calling `.f` on each group
#' - `group_walk()` calls `.f` for side effects and returns the input `.tbl`, invisibly
#'
#' @examples
#'
#' # return a list
#' mtcars %>%
#' group_by(cyl) %>%
#' group_map(~ head(.x, 2L))
#'
#' # return a tibble grouped by `cyl` with 2 rows per group
#' # the grouping data is recalculated
#' mtcars %>%
#' group_by(cyl) %>%
#' group_modify(~ head(.x, 2L))
#'
#' if (requireNamespace("broom", quietly = TRUE)) {
#' # a list of tibbles
#' iris %>%
#' group_by(Species) %>%
#' group_map(~ broom::tidy(lm(Petal.Length ~ Sepal.Length, data = .x)))
#'
#' # a restructured grouped tibble
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ broom::tidy(lm(Petal.Length ~ Sepal.Length, data = .x)))
#' }
#'
#' # a list of vectors
#' iris %>%
#' group_by(Species) %>%
#' group_map(~ quantile(.x$Petal.Length, probs = c(0.25, 0.5, 0.75)))
#'
#' # to use group_modify() the lambda must return a data frame
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ {
#' quantile(.x$Petal.Length, probs = c(0.25, 0.5, 0.75)) %>%
#' tibble::enframe(name = "prob", value = "quantile")
#' })
#'
#' iris %>%
#' group_by(Species) %>%
#' group_modify(~ {
#' .x %>%
#' purrr::map_dfc(fivenum) %>%
#' mutate(nms = c("min", "Q1", "median", "Q3", "max"))
#' })
#'
#' # group_walk() is for side effects
#' dir.create(temp <- tempfile())
#' iris %>%
#' group_by(Species) %>%
#' group_walk(~ write.csv(.x, file = file.path(temp, paste0(.y$Species, ".csv"))))
#' list.files(temp, pattern = "csv$")
#' unlink(temp, recursive = TRUE)
#'
#' # group_modify() and ungrouped data frames
#' mtcars %>%
#' group_modify(~ head(.x, 2L))
#'
#' @export
group_map <- function(.tbl, .f, ..., keep = FALSE) {
  .f <- as_group_map_function(.f)
  # Split the data into per-group chunks and build a one-row key tibble
  # for each group, then call `.f(chunk, key, ...)` pairwise.
  chunks <- group_split(.tbl, keep = keep)
  keys <- group_keys(.tbl)
  group_keys <- map(seq_len(nrow(keys)), function(i) keys[i, , drop = FALSE])
  map2(chunks, group_keys, .f, ...)
}
#' @rdname group_map
#' @export
group_modify <- function(.tbl, .f, ..., keep = FALSE) {
  # S3 generic; methods below exist for data.frame and grouped_df.
  UseMethod("group_modify")
}
#' @export
group_modify.data.frame <- function(.tbl, .f, ..., keep = FALSE) {
  .f <- as_group_map_function(.f)
  # Ungrouped fallback: apply `.f` to the whole frame, with the empty
  # one-row key from group_keys() as `.y`.
  .f(.tbl, group_keys(.tbl), ...)
}
#' @export
group_modify.grouped_df <- function(.tbl, .f, ..., keep = FALSE) {
  tbl_group_vars <- group_vars(.tbl)
  .f <- as_group_map_function(.f)
  # Wrapper applied to each group: validates the result of `.f` before
  # re-attaching the group key columns.
  fun <- function(.x, .y){
    res <- .f(.x, .y, ...)
    if (!inherits(res, "data.frame")) {
      abort("The result of .f should be a data frame")
    }
    if (any(bad <- names(res) %in% tbl_group_vars)) {
      # Fixed: the original sprintf() had no %s placeholder, so the
      # offending column names were silently dropped from the message.
      abort(sprintf(
        "The returned data frame cannot contain the original grouping variables: %s",
        paste(names(res)[bad], collapse = ", ")
      ))
    }
    # Replicate the one-row key so it aligns with every row of the result.
    bind_cols(.y[rep(1L, nrow(res)), , drop = FALSE], res)
  }
  res <- bind_rows(!!!group_map(.tbl, fun, ..., keep = keep))
  # Restore the original grouping structure on the combined result.
  group_by(res, !!!groups(.tbl), .drop = group_by_drop_default(.tbl))
}
#' @export
#' @rdname group_map
group_walk <- function(.tbl, .f, ...) {
  # Called purely for side effects; the list result of group_map()
  # is deliberately discarded.
  group_map(.tbl, .f, ...)
  # Return the input invisibly, as documented above ("returns the input
  # .tbl, invisibly"); the original returned it visibly, printing it at
  # the top level.
  invisible(.tbl)
}
|
# Linear model for row by col effect only using controls in row B
# arow: block factor with 7 levels, 6 observations each (42 rows total).
interaction_data$arow <- as.factor(c(rep(1,6),rep(2,6),rep(3,6),rep(4,6),rep(5,6),rep(6,6),rep(7,6)))
# With the row-by-column interaction term.
lin_mod1 <- lm(tinv ~ trt + arow + acol+arow:acol, data = interaction_data)
results1 <- anova(lin_mod1)
results1
# Without the interaction term, for comparison.
lin_mod2 <- lm(tinv ~ trt + arow + acol, data = interaction_data)
results2 <- anova(lin_mod2)
results2
|
/Code/Linear model row b controls.R
|
no_license
|
MichaelEBurton/ST542_Consulting
|
R
| false
| false
| 427
|
r
|
# Linear model for row by col effect only using controls in row B
# arow: block factor with 7 levels, 6 observations each (42 rows total).
interaction_data$arow <- as.factor(c(rep(1,6),rep(2,6),rep(3,6),rep(4,6),rep(5,6),rep(6,6),rep(7,6)))
# With the row-by-column interaction term.
lin_mod1 <- lm(tinv ~ trt + arow + acol+arow:acol, data = interaction_data)
results1 <- anova(lin_mod1)
results1
# Without the interaction term, for comparison.
lin_mod2 <- lm(tinv ~ trt + arow + acol, data = interaction_data)
results2 <- anova(lin_mod2)
results2
|
# language.R
# Execute REDCap calculations in R.
#
# Copyright (c) 2021, Michael Pascale.

# Token classes and single-character literals recognized by the lexer.
TOKENS <- c('NAME', 'FUNCTION', 'NUMBER', 'STRING', 'LTEQ', 'GTEQ', 'NEQ')
LITERALS <- c('=','+','-','*','/','^','%',',','<','>','(',')','[',']')
# Characters the lexer skips.
IGNORE <- '\t '

# Example record values available to evaluated expressions.
RECORDS <- list(
  'studyid'='POTS_123',
  'age'=32
)

source('lexer.R')
source('parser.R')

# Read-eval loop; type 'exit' (or send EOF) to quit.
# Open the stdin connection once -- the original called file("stdin")
# inside the loop, leaking one connection per iteration.
stdin_con <- file("stdin", open = "r")
while (TRUE) {
  cat('# ')
  s <- readLines(stdin_con, n = 1)
  # Stop on EOF (zero-length read) as well as an explicit 'exit'; the
  # original `s == 'exit'` raised "argument is of length zero" at EOF.
  if (length(s) == 0 || s == 'exit') break
  parser$parse(s, lexer)
}
close(stdin_con)
|
/language.R
|
permissive
|
MichaelPascale/redcap-equation-r
|
R
| false
| false
| 471
|
r
|
# language.R
# Execute REDCap calculations in R.
#
# Copyright (c) 2021, Michael Pascale.
# Token classes and single-character literals recognized by the lexer.
TOKENS <- c('NAME', 'FUNCTION', 'NUMBER', 'STRING', 'LTEQ', 'GTEQ', 'NEQ')
LITERALS <- c('=','+','-','*','/','^','%',',','<','>','(',')','[',']')
# Characters the lexer skips.
IGNORE <- '\t '
# Example record values available to evaluated expressions.
RECORDS <- list(
  'studyid'='POTS_123',
  'age'=32
)
source('lexer.R')
source('parser.R')
# Interactive read-eval loop; type 'exit' to quit.
# NOTE(review): file("stdin") opens a new connection on every iteration
# without closing it, and `s == 'exit'` errors on EOF (zero-length read).
while(TRUE) {
  cat('# ')
  s = readLines(file("stdin"), n=1)
  if(s == 'exit') break
  parser$parse(s, lexer)
}
|
# Auto-generated valgrind regression input: call the internal
# multivariance:::match_rows with a 5x1 matrix of extreme double values
# and a 1x1 zero matrix, then inspect the result's structure.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53817576101076e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613115207-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 251
|
r
|
# Auto-generated valgrind regression input: call the internal
# multivariance:::match_rows with a 5x1 matrix of extreme double values
# and a 1x1 zero matrix, then inspect the result's structure.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53817576101076e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
## Generate GUIDE .dsc for each partition
#
# For every (partition i, fold j) combination, read the partition CSV and
# write out (1) a GUIDE description (.dsc) file listing each column as a
# numeric predictor ("n") with the last column as the dependent variable
# ("d"), and (2) the data itself as a space-separated .txt file.
#
# Args:
#   num_partition: number of partitions per fold.
#   num_of_folds:  number of cross-validation folds.
#   rand_seed:     seed tag used in the input/output file names.
generate_guide_description_file <- function(num_partition, num_of_folds, rand_seed) {
  # All fold/partition combinations, processed in parallel.
  iandj <- expand.grid(i = seq_len(num_partition), j = seq_len(num_of_folds))
  mclapply(seq_len(nrow(iandj)), function(iter) {
    i <- iandj[iter, ]$i
    j <- iandj[iter, ]$j
    ## Generate GUIDE .dsc
    kfoldj_parti_path <-
      str_c("kfold_partitions/rand_seed_", rand_seed, "_kfold_", j, "_part_", i, ".csv")
    part_to_write <- read_csv(kfoldj_parti_path)
    # The data file name is embedded inside the .dsc, hence the extra quotes.
    data_file_name <-
      str_c('"guide_data/rand_seed_', rand_seed, '_kfold_', j, '_part_', i, '.txt"')
    desc_file_name <-
      str_c("guide_data/rand_seed_", rand_seed, "_kfold_", j, "_part_", i, ".dsc")
    # Header: quoted data file name, missing-value code, first data row.
    tmp <- c(data_file_name, "NA", 2)
    # Hoisted out of the loop (the original recomputed names() per iteration).
    preds_and_feature <- names(part_to_write)
    n_cols <- length(preds_and_feature)
    for (k in seq_len(n_cols)) {
      # "n" marks a numeric predictor; the final column is the dependent
      # variable and is marked "d".
      role <- if (k == n_cols) "d" else "n"
      tmp <- c(tmp, str_c(k, " ", preds_and_feature[[k]], " ", role))
    }
    write.table(
      tmp,
      file = desc_file_name,
      row.names = FALSE,
      col.names = FALSE,
      quote = FALSE
    )
    ## Generate GUIDE .txt for each partition
    # create the path and write out to txt
    data_file_path <-
      str_c('guide_data/rand_seed_', rand_seed, '_kfold_', j, '_part_', i, '.txt')
    # Temporarily disable scientific notation; restore the caller's
    # setting afterwards (the original hard-reset scipen to 0).
    old_opts <- options(scipen = 100)
    on.exit(options(old_opts), add = TRUE)
    write.table(
      part_to_write,
      file = data_file_path,
      sep = " ",
      row.names = FALSE,
      quote = FALSE
    )
  }, mc.silent = TRUE)
}
|
/20_GUIDE_generate_GUIDE_files.R
|
no_license
|
mythicalprogrammer/thesis_prostate_cancer_CERP_GUIDE_no_prune_CERP_ensemble_of_forests
|
R
| false
| false
| 1,621
|
r
|
## Generate GUIDE .dsc for each partition
# For every (partition i, fold j) pair, read the partition CSV and write
# a GUIDE .dsc description file plus the data as a space-separated .txt.
generate_guide_description_file <- function(num_partition, num_of_folds, rand_seed) {
  j <- 1:num_of_folds
  i <- 1:num_partition
  # All fold/partition combinations, processed in parallel.
  iandj <- expand.grid(i = i, j = j)
  mclapply(1:nrow(iandj), function(iter) {
    i <- iandj[iter, ]$i
    j <- iandj[iter, ]$j
    ## Generate GUIDE .dsc
    kfoldj_parti_path <-
      str_c("kfold_partitions/rand_seed_",rand_seed,"_kfold_", j, "_part_", i, ".csv")
    kfold_j_part_i <- read_csv(kfoldj_parti_path)
    part_to_write <- kfold_j_part_i
    # The data file name is embedded inside the .dsc, hence the extra quotes.
    data_file_name <-
      str_c('"guide_data/rand_seed_',rand_seed,'_kfold_', j, '_part_', i, '.txt"')
    desc_file_name <-
      str_c("guide_data/rand_seed_",rand_seed,"_kfold_", j, "_part_", i, ".dsc")
    # Header: quoted data file name, missing-value code, first data row.
    tmp <- c(data_file_name, "NA", 2)
    for (k in 1:length(names(part_to_write))) {
      preds_and_feature <- names(part_to_write)
      # "n" = numeric predictor; the last column is the dependent
      # variable and is marked "d" instead.
      tmp2 <- str_c(k, " ", preds_and_feature[[k]], " ", "n")
      if (k == length(names(part_to_write))) {
        tmp2 <- str_c(k, " ", preds_and_feature[[k]], " ", "d")
      }
      tmp <- c(tmp, tmp2)
    }
    write.table(
      tmp,
      file = desc_file_name,
      row.names = FALSE,
      col.names = FALSE,
      quote = FALSE
    )
    ## Generate GUIDE .txt for each partition
    # create the path and write out to txt
    data_file_path <-
      str_c('guide_data/rand_seed_',rand_seed,'_kfold_', j, '_part_', i, '.txt')
    # Suppress scientific notation while writing; NOTE(review): this
    # resets scipen to 0 afterwards rather than restoring the prior value.
    options(scipen = 100)
    write.table(
      part_to_write,
      file = data_file_path,
      sep = " ",
      row.names = FALSE,
      quote = FALSE
    )
    options(scipen = 0)
  }, mc.silent = TRUE)
}
|
#' Manage score-keeping
#'
#' Creates a storage object which can then be accessed by $update() and $report()
#' functions to hold permanently info on attempts and the score of the most
#' recent answer.
#'
#' @return A list with closures `update(problem, score, answer)`,
#'   `report()` and `save_scores(userid, assignment, ip, filename)`.
#'
#' @export
createScorekeeper <- function() {
  # Generate a random session ID (10 characters drawn from 0-9, A-F)
  session_id <- paste(sample(c(0:9, LETTERS[1:6]),size=10,replace=TRUE),collapse="")
  # One row per problem; the first row marks the start of the session.
  cache <- data.frame(problem = "start session",
                      answer = NA, score = NA, attempts=NA,
                      when=Sys.time(),
                      stringsAsFactors = FALSE)
  # Record (or re-record) an answer for `problem` in the closed-over cache.
  update <- function(problem, score, answer, session = session_id) {
    ind <- which(problem == cache$problem)
    if (length(ind) == 0) {
      # First attempt at this problem: append a new row.
      new_data <- data.frame(problem = problem,
                             answer = answer, score = score,
                             attempts = 1, when = Sys.time(),
                             stringsAsFactors = FALSE)
      cache <<- rbind(cache, new_data)
    } else {
      # Repeat attempt: bump the counter and overwrite the latest answer.
      cache$attempts[ind] <<- 1 + cache$attempts[ind]
      cache$score[ind] <<- score
      cache$problem[ind] <<- problem
      cache$answer[ind] <<- answer
      cache$when[ind] <<- Sys.time()
    }
  }
  # What are the scores, so far.
  report <- function(){ cache }
  # Append this session's scores to `filename` (created if absent).
  save_scores <- function(userid = "bogus", assignment = "assignment X",
                          ip = "ip address Y",
                          filename = "Scores.rda") {
    cat("Saving scores!\n")
    # mark the end of the session
    update(problem = "end session", score = NA, answer = NA)
    # The scores to save. Use the local report() closure directly: the
    # original went through a global `.the_scorekeeper` via eval() (and
    # left a "Got here 1!" debug warning in place), which broke whenever
    # that global was not set up by the caller.
    These_scores <- report()
    # Stamp identifying information onto every row.
    These_scores$user <- userid
    These_scores$assignment <- assignment
    These_scores$ip <- ip
    # Append to the data file storage if it already exists.
    if (0 == length(list.files(path = ".", pattern = filename))) {
      Scores <- These_scores
    } else {
      load(filename)
      Scores <- rbind(Scores, These_scores)
    }
    save(Scores, file = filename)
  }
  list(update = update, report = report, save_scores = save_scores)
}
|
/R/Scorekeeper.R
|
no_license
|
dtkaplan/MultipleChoice
|
R
| false
| false
| 2,265
|
r
|
#' Manage score-keeping
#'
#' Creates a storage object which can then be accessed by $update() and $report()
#' functions to hold permanently info on attempts and the score of the most
#' recent answer.
#'
#' @return A list with closures `update`, `report` and `save_scores`.
#'
#' @export
createScorekeeper <- function() {
  # Generate a random session ID (10 characters drawn from 0-9, A-F)
  session_id <- paste(sample(c(0:9, LETTERS[1:6]),size=10,replace=TRUE),collapse="")
  # One row per problem; the first row marks the start of the session.
  cache <- data.frame(problem = "start session",
                      answer = NA, score = NA, attempts=NA,
                      when=Sys.time(),
                      stringsAsFactors = FALSE)
  # Record (or re-record) an answer for `problem` in the closed-over cache.
  update <- function(problem, score, answer, session = session_id) {
    ind <- which(problem == cache$problem)
    if (length(ind) == 0) {
      # First attempt at this problem: append a new row.
      new_data <- data.frame(problem = problem,
                             answer = answer, score = score,
                             attempts = 1, when = Sys.time(),
                             stringsAsFactors = FALSE)
      cache <<- rbind(cache, new_data)
    } else {
      # Repeat attempt: bump the counter and overwrite the latest answer.
      cache$attempts[ind] <<- 1 + cache$attempts[ind]
      cache$score[ind] <<- score
      cache$problem[ind] <<- problem
      cache$answer[ind] <<- answer
      cache$when[ind] <<- Sys.time()
    }
  }
  # What are the scores, so far.
  report <- function(){ cache }
  # Append this session's scores to `filename` (created if absent).
  save_scores <- function(userid = "bogus", assignment = "assignment X",
                          ip = "ip address Y",
                          filename = "Scores.rda") {
    cat("Saving scores!\n")
    # mark the end of the session
    update(problem = "end session", score = NA, answer = NA)
    # the scores to save
    # NOTE(review): leftover debug statement; emits a warning on every save
    warning("Got here 1!")
    # NOTE(review): reads a global `.the_scorekeeper` instead of calling the
    # local report() closure -- fails if that global is not set up by a caller
    These_scores <- eval(.the_scorekeeper, envir = .GlobalEnv)$report()
    # add in the user ID
    These_scores$user <- userid
    These_scores$assignment <- assignment
    These_scores$ip <- ip
    # Append to the data file storage
    # Is the file there?
    if (0 == length(list.files(path = ".", pattern = filename))) {
      Scores <- These_scores
    } else {
      load(filename)
      Scores <- rbind(Scores, These_scores)
    }
    save(Scores, file = filename)
  }
  list(update = update, report = report, save_scores = save_scores)
}
|
# Fracture type as a function of trauma type.
# Prerequisite: import the Excel file (.xlsx) into R under the name test1.xlsx
# install.packages("data.table")
# install.packages("stringr")
# install.packages("ggplot2")
# install.packages("dplyr")
# install.packages("gridExtra")
# Load the required packages.
library(data.table)
library(stringr)
library(ggplot2)
library(dplyr)
# For multi-graph visualization.
library(gridExtra)
library(cowplot)
setDT(ReccueilR)
source("utils/fn_correlation_scores_autotest.R")
# Keep only rows where every score of interest is present.
mydata = ReccueilR[
  !is.na(Oxford)
  &!is.na(PMA)
  &!is.na(HarrisHS)
  &!is.na(Womac)
  &!is.na(Autotest)
]
# Correlations between AUTOTEST and the other scores.
plot_all_corr = generate_histogram_autotest_corr_with_other_scores(mydata)
plot_all_corr
plot_linreg_all_scores = generate_4_regression_lines_same_graph(mydata)
plot_linreg_all_scores
# Scatter plots of Autotest against each of the other scores.
# Autotest x Oxford
pOxford = generate_scattering_autotest_oxford(mydata)
# Autotest x Womac
pWomac = generate_scattering_autotest_womac(mydata)
# Autotest x Harris
pHarris = generate_scattering_autotest_harris(mydata)
# Autotest x PMA
pPMA = generate_scattering_autotest_pma(mydata)
plot_grid(pOxford, pWomac, pHarris, pPMA, labels=c(NA, NA, NA, NA),ncol = 2, nrow = 2)
|
/graphs/corr_autotes_compared_to_others.R
|
no_license
|
Zolano974/Rzob
|
R
| false
| false
| 1,317
|
r
|
# Fracture type as a function of trauma type.
# Prerequisite: import the Excel file (.xlsx) into R under the name test1.xlsx
# install.packages("data.table")
# install.packages("stringr")
# install.packages("ggplot2")
# install.packages("dplyr")
# install.packages("gridExtra")
# Load the required packages.
library(data.table)
library(stringr)
library(ggplot2)
library(dplyr)
# For multi-graph visualization.
library(gridExtra)
library(cowplot)
setDT(ReccueilR)
source("utils/fn_correlation_scores_autotest.R")
# Keep only rows where every score of interest is present.
mydata = ReccueilR[
  !is.na(Oxford)
  &!is.na(PMA)
  &!is.na(HarrisHS)
  &!is.na(Womac)
  &!is.na(Autotest)
]
# Correlations between AUTOTEST and the other scores.
plot_all_corr = generate_histogram_autotest_corr_with_other_scores(mydata)
plot_all_corr
plot_linreg_all_scores = generate_4_regression_lines_same_graph(mydata)
plot_linreg_all_scores
# Scatter plots of Autotest against each of the other scores.
# Autotest x Oxford
pOxford = generate_scattering_autotest_oxford(mydata)
# Autotest x Womac
pWomac = generate_scattering_autotest_womac(mydata)
# Autotest x Harris
pHarris = generate_scattering_autotest_harris(mydata)
# Autotest x PMA
pPMA = generate_scattering_autotest_pma(mydata)
plot_grid(pOxford, pWomac, pHarris, pPMA, labels=c(NA, NA, NA, NA),ncol = 2, nrow = 2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.