content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Read the Newick tree for alignment 9142_0, remove its root, and write the
# unrooted topology back out for downstream codeml processing.
library(ape)
phylo <- read.tree("9142_0.txt")
phylo_unrooted <- unroot(phylo)
write.tree(phylo_unrooted, file = "9142_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/9142_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Duplicate of the 9142_0 tree-unrooting snippet (dataset artifact): read the
# Newick tree, unroot it with ape, and write the result.
library(ape)
testtree <- read.tree("9142_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9142_0_unrooted.txt")
|
####################
## DEPRECATED ######
####################
#' Deprecated functions
#'
#' Functions deprecated due to changed naming or because functionality is discarded. Deprecated functions are made defunct
#' every 1 major or every 2 minor package updates. See the NEWS file for more information about since when or why functions
#' have been deprecated.
#'
#' @name sentometrics-deprecated
#' @keywords internal
NULL
# The NULL above only anchors the shared "sentometrics-deprecated" roxygen
# help page; it creates no object.
#' @rdname sentometrics-deprecated
#'
#' @param sentomeasures an appropriate \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param fill an element of \code{c("zero", "latest", NA)}; the first and last assume missing dates represent zero sentiment,
#' the second assumes missing dates represent constant sentiment.
#'
#' @seealso \code{\link{measures_fill}}
#'
#' @export
fill_measures <- function(sentomeasures, fill) {
  # Deprecated alias: warn about the rename, then delegate unchanged.
  .Deprecated(new = "measures_fill", package = "sentometrics")
  measures_fill(sentomeasures, fill = fill)
}
#' @rdname sentometrics-deprecated
#'
#' @param ... (other) allowed input arguments.
#'
#' @seealso \code{\link{measures_merge}}
#'
#' @export
merge_measures <- function(...) {
  .Deprecated("measures_merge", package = "sentometrics")
  # Every other deprecated wrapper in this file forwards to its replacement;
  # this one only warned and returned .Deprecated()'s invisible NULL, silently
  # discarding the caller's arguments. Forward them so the old entry point
  # keeps working during the deprecation window. If the interfaces diverged
  # incompatibly, measures_merge() will now raise an informative error instead
  # of a silent no-op (NOTE(review): confirm against the package NEWS).
  measures_merge(...)
}
#' @rdname sentometrics-deprecated
#'
#' @param lexicons a \code{numeric} vector of weights, of size \code{length(sentomeasures$lexicons)}, in the same order.
#' By default set to 1, which means equally weighted.
#' @param features a \code{numeric} vector of weights, of size \code{length(sentomeasures$features)}, in the same order.
#' By default set to 1, which means equally weighted.
#' @param time a \code{numeric} vector of weights, of size \code{length(sentomeasures$time)}, in the same order. By default
#' set to 1, which means equally weighted.
#'
#' @seealso \code{\link{measures_global}}
#'
#' @export
to_global <- function(sentomeasures, lexicons, features, time) {
  # Deprecated alias: warn, then delegate to measures_global() unchanged.
  .Deprecated(new = "measures_global", package = "sentometrics")
  measures_global(sentomeasures, lexicons = lexicons, features = features, time = time)
}
#' @rdname sentometrics-deprecated
#'
#' @param subset a logical expression indicating the rows to keep.
#'
#' @seealso \code{\link{measures_subset}}
#'
#' @export
subset_measures <- function(sentomeasures, subset) {
  # Deprecated alias: warn, then delegate to measures_subset() unchanged.
  .Deprecated(new = "measures_subset", package = "sentometrics")
  measures_subset(sentomeasures, subset = subset)
}
#' @rdname sentometrics-deprecated
#'
#' @param toSelect a \code{character} vector of the lexicon, feature and time weighting scheme names, to indicate which
#' measures need to be selected, or as a \code{list} of \code{character} vectors, possibly with separately specified
#' combinations (only consisting of one lexicon, one feature, and one time weighting scheme at maximum).
#'
#' @seealso \code{\link{measures_select}}
#'
#' @export
select_measures <- function(sentomeasures, toSelect) {
  # Deprecated alias: warn, then delegate to measures_select() unchanged.
  .Deprecated(new = "measures_select", package = "sentometrics")
  measures_select(sentomeasures, toSelect = toSelect)
}
#' @rdname sentometrics-deprecated
#'
#' @param lexiconsIn a named \code{list} of (raw) lexicons, each element as a \code{data.table} or a \code{data.frame} with
#' respectively a words column and a polarity score column. A subset of the already formatted built-in lexicons
#' accessible via \code{list_lexicons} should be passed here first.
#' @param valenceIn a single valence word list as a \code{data.table} or a \code{data.frame} with respectively a \code{"x"}
#' and a \code{"y"} or \code{"t"} column. The first column has the words, \code{"y"} has the values for bigram
#' shifting, and \code{"t"} has the types of the valence shifter for a clustered approach to sentiment calculation
#' (supported types: \code{1} = negators, \code{2} = amplifiers, \code{3} = deamplifiers). If three columns
#' are provided, the first two will be considered only. This argument can be one of the already formatted
#' built-in valence word lists accessible via \code{list_valence_shifters}. A word that appears in both a lexicon
#' and the valence word list is prioritized as a lexical entry during sentiment calculation. If \code{NULL}, no valence word
#' list is part of this function's output, and is thus not applied in the sentiment analysis.
#' @param do.split a \code{logical} that if \code{TRUE} splits every lexicon into a separate positive polarity and negative
#' polarity lexicon.
#'
#' @seealso \code{\link{sento_lexicons}}
#'
#' @export
setup_lexicons <- function(lexiconsIn, valenceIn, do.split) {
  # Deprecated alias: warn, then delegate to sento_lexicons() unchanged.
  .Deprecated(new = "sento_lexicons", package = "sentometrics")
  sento_lexicons(lexiconsIn = lexiconsIn, valenceIn = valenceIn, do.split = do.split)
}
#' @rdname sentometrics-deprecated
#'
#' @param model a \code{sentomodel} or \code{sentomodeliter} object created with \code{\link{sento_model}}.
#' @param do.normalize a \code{logical}, \code{TRUE} divides each element of every attribution vector at a given date by its
#' L2-norm at that date, normalizing the values between -1 and 1. The document attributions are not normalized. Or, for
#' \code{\link{almons}}, if \code{TRUE}, then polynomials should be normalized to unity.
#' @param refDates the dates (as \code{"yyyy-mm-dd"}) at which attribution is to be performed. These should be between the latest
#' date available in the input \code{sentomeasures} object and the first estimation sample date (that is, \code{model$dates[1]}
#' if \code{model} is a \code{sentomodel} object). All dates should also be in \code{get_dates(sentomeasures)}. If
#' \code{NULL} (default), attribution is calculated for all in-sample dates. Ignored if \code{model} is a \code{sentomodeliter}
#' object, for which attribution is calculated for all out-of-sample prediction dates.
#' @param factor the factor level as a single \code{character} vector for which attribution has to be calculated in
#' case of (a) multinomial model(s). Ignored for linear and binomial models.
#'
#' @seealso \code{\link{attributions}}
#'
#' @export
retrieve_attributions <- function(model, sentomeasures, do.normalize, refDates, factor) {
  # Deprecated alias: warn, then delegate to attributions() unchanged.
  .Deprecated(new = "attributions", package = "sentometrics")
  attributions(model, sentomeasures, do.normalize = do.normalize, refDates = refDates, factor = factor)
}
#' @rdname sentometrics-deprecated
#'
#' @param sentiment output from a \code{\link{compute_sentiment}} call, computed from a \code{sentocorpus} object.
#' @param ctr output from a \code{\link{ctr_agg}} call. The \code{howWithin} and \code{nCore} elements are ignored.
#'
#' @seealso \code{\link{aggregate}}
#'
#' @export
perform_agg <- function(sentiment, ctr) {
  # Deprecated alias: warn, then delegate to the aggregate() method unchanged.
  .Deprecated(new = "aggregate", package = "sentometrics")
  aggregate(sentiment = sentiment, ctr = ctr)
}
#' @rdname sentometrics-deprecated
#'
#' @param attributions an \code{attributions} object created with \code{\link{attributions}}.
#' @param group a value from \code{c("lags", "lexicons", "features", "time")}.
#'
#' @seealso \code{\link{plot.attributions}}
#'
#' @export
plot_attributions <- function(attributions, group, ...) {
  # Deprecated alias: warn, then invoke the S3 method directly (as the
  # original did), forwarding any extra plotting arguments.
  .Deprecated(new = "plot.attributions", package = "sentometrics")
  plot.attributions(x = attributions, group = group, ...)
}
#' @rdname sentometrics-deprecated
#'
#' @param orders a \code{numeric} vector as the sequence of the Almon orders (cf., \emph{b}). The maximum value
#' corresponds to \emph{B}.
#' @param do.inverse \code{TRUE} if the inverse Almon polynomials should be calculated as well.
#'
#' @seealso \code{\link{weights_almon}}
#'
#' @export
almons <- function(n, orders, do.inverse, do.normalize) {
  # Deprecated alias: warn, then delegate to weights_almon() unchanged.
  .Deprecated(new = "weights_almon", package = "sentometrics")
  weights_almon(n = n, orders = orders, do.inverse = do.inverse, do.normalize = do.normalize)
}
#' @rdname sentometrics-deprecated
#'
#' @param alphas a \code{numeric} vector of decay factors.
#'
#' @seealso \code{\link{weights_exponential}}
#'
#' @export
exponentials <- function(n, alphas) {
  # Deprecated alias: warn, then delegate to weights_exponential() unchanged.
  .Deprecated(new = "weights_exponential", package = "sentometrics")
  weights_exponential(n = n, alphas = alphas)
}
####################
## DEFUNCT #########
####################
# The NULL placeholders below only anchor roxygen help pages for removed
# datasets; they create no objects.
#' Datasets with defunct names
#'
#' These are datasets that have been renamed and the old names removed. Please change your code to use the new names.
#'
#' @docType data
#'
#' @name data-defunct
#' @keywords internal
NULL
#' @rdname data-defunct
#' @name lexicons
#' @details The dataset \code{lexicons} is defunct, use \code{list_lexicons} instead.
NULL
#' @rdname data-defunct
#' @name valence
#' @details The dataset \code{valence} is defunct, use \code{list_valence_shifters} instead.
NULL
# Shared help-page anchor for the defunct function stubs that follow.
#' Defunct functions
#'
#' Functions defunct due to changed naming or because functionality is discarded. See the NEWS file for more information
#' about since when or why functions have been defunct.
#'
#' @name sentometrics-defunct
#' @keywords internal
NULL
#' @rdname sentometrics-defunct
#'
#' @param ... arguments not documented since function declared defunct.
#'
#' @export
ctr_merge <- function(...) {
  # Defunct stub: any call errors immediately via .Defunct(), pointing users
  # to the package NEWS file; incoming arguments are deliberately ignored.
  .Defunct(package = "sentometrics")
}
#' @rdname sentometrics-defunct
#'
#' @export
perform_MCS <- function(...) {
  # Defunct stub: unconditionally signals a defunct error for any call.
  .Defunct(package = "sentometrics")
}
|
/R/deprecated.R
|
no_license
|
vanpeltj/sentometrics
|
R
| false
| false
| 9,110
|
r
|
# NOTE(review): from here this dump repeats the deprecated/defunct definitions
# verbatim (dataset serialization artifact); code kept byte-identical.
####################
## DEPRECATED ######
####################
#' Deprecated functions
#'
#' Functions deprecated due to changed naming or because functionality is discarded. Deprecated functions are made defunct
#' every 1 major or every 2 minor package updates. See the NEWS file for more information about since when or why functions
#' have been deprecated.
#'
#' @name sentometrics-deprecated
#' @keywords internal
NULL
#' @rdname sentometrics-deprecated
#'
#' @param sentomeasures an appropriate \code{sentomeasures} object created using \code{\link{sento_measures}}.
#' @param fill an element of \code{c("zero", "latest", NA)}; the first and last assume missing dates represent zero sentiment,
#' the second assumes missing dates represent constant sentiment.
#'
#' @seealso \code{\link{measures_fill}}
#'
#' @export
fill_measures <- function(sentomeasures, fill) {
.Deprecated("measures_fill", package = "sentometrics")
measures_fill(sentomeasures, fill = fill)
}
# NOTE(review): duplicate region (dataset artifact); definitions kept verbatim.
#' @rdname sentometrics-deprecated
#'
#' @param ... (other) allowed input arguments.
#'
#' @seealso \code{\link{measures_merge}}
#'
#' @export
merge_measures <- function(...) {
.Deprecated("measures_merge", package = "sentometrics")
}
#' @rdname sentometrics-deprecated
#'
#' @param lexicons a \code{numeric} vector of weights, of size \code{length(sentomeasures$lexicons)}, in the same order.
#' By default set to 1, which means equally weighted.
#' @param features a \code{numeric} vector of weights, of size \code{length(sentomeasures$features)}, in the same order.
#' By default set to 1, which means equally weighted.
#' @param time a \code{numeric} vector of weights, of size \code{length(sentomeasures$time)}, in the same order. By default
#' set to 1, which means equally weighted.
#'
#' @seealso \code{\link{measures_global}}
#'
#' @export
to_global <- function(sentomeasures, lexicons, features, time) {
.Deprecated("measures_global", package = "sentometrics")
measures_global(sentomeasures, lexicons = lexicons, features = features, time = time)
}
#' @rdname sentometrics-deprecated
#'
#' @param subset a logical expression indicating the rows to keep.
#'
#' @seealso \code{\link{measures_subset}}
#'
#' @export
subset_measures <- function(sentomeasures, subset) {
.Deprecated("measures_subset", package = "sentometrics")
measures_subset(sentomeasures, subset = subset)
}
#' @rdname sentometrics-deprecated
#'
#' @param toSelect a \code{character} vector of the lexicon, feature and time weighting scheme names, to indicate which
#' measures need to be selected, or as a \code{list} of \code{character} vectors, possibly with separately specified
#' combinations (only consisting of one lexicon, one feature, and one time weighting scheme at maximum).
#'
#' @seealso \code{\link{measures_select}}
#'
#' @export
select_measures <- function(sentomeasures, toSelect) {
.Deprecated("measures_select", package = "sentometrics")
measures_select(sentomeasures, toSelect = toSelect)
}
# NOTE(review): duplicate region (dataset artifact); definitions kept verbatim.
#' @rdname sentometrics-deprecated
#'
#' @param lexiconsIn a named \code{list} of (raw) lexicons, each element as a \code{data.table} or a \code{data.frame} with
#' respectively a words column and a polarity score column. A subset of the already formatted built-in lexicons
#' accessible via \code{list_lexicons} should be passed here first.
#' @param valenceIn a single valence word list as a \code{data.table} or a \code{data.frame} with respectively a \code{"x"}
#' and a \code{"y"} or \code{"t"} column. The first column has the words, \code{"y"} has the values for bigram
#' shifting, and \code{"t"} has the types of the valence shifter for a clustered approach to sentiment calculation
#' (supported types: \code{1} = negators, \code{2} = amplifiers, \code{3} = deamplifiers). If three columns
#' are provided, the first two will be considered only. This argument can be one of the already formatted
#' built-in valence word lists accessible via \code{list_valence_shifters}. A word that appears in both a lexicon
#' and the valence word list is prioritized as a lexical entry during sentiment calculation. If \code{NULL}, no valence word
#' list is part of this function's output, and is thus not applied in the sentiment analysis.
#' @param do.split a \code{logical} that if \code{TRUE} splits every lexicon into a separate positive polarity and negative
#' polarity lexicon.
#'
#' @seealso \code{\link{sento_lexicons}}
#'
#' @export
setup_lexicons <- function(lexiconsIn, valenceIn, do.split) {
.Deprecated("sento_lexicons", package = "sentometrics")
sento_lexicons(lexiconsIn = lexiconsIn, valenceIn = valenceIn, do.split = do.split)
}
#' @rdname sentometrics-deprecated
#'
#' @param model a \code{sentomodel} or \code{sentomodeliter} object created with \code{\link{sento_model}}.
#' @param do.normalize a \code{logical}, \code{TRUE} divides each element of every attribution vector at a given date by its
#' L2-norm at that date, normalizing the values between -1 and 1. The document attributions are not normalized. Or, for
#' \code{\link{almons}}, if \code{TRUE}, then polynomials should be normalized to unity.
#' @param refDates the dates (as \code{"yyyy-mm-dd"}) at which attribution is to be performed. These should be between the latest
#' date available in the input \code{sentomeasures} object and the first estimation sample date (that is, \code{model$dates[1]}
#' if \code{model} is a \code{sentomodel} object). All dates should also be in \code{get_dates(sentomeasures)}. If
#' \code{NULL} (default), attribution is calculated for all in-sample dates. Ignored if \code{model} is a \code{sentomodeliter}
#' object, for which attribution is calculated for all out-of-sample prediction dates.
#' @param factor the factor level as a single \code{character} vector for which attribution has to be calculated in
#' case of (a) multinomial model(s). Ignored for linear and binomial models.
#'
#' @seealso \code{\link{attributions}}
#'
#' @export
retrieve_attributions <- function(model, sentomeasures, do.normalize, refDates, factor) {
.Deprecated("attributions", package = "sentometrics")
attributions(model, sentomeasures, do.normalize = do.normalize, refDates = refDates, factor = factor)
}
# NOTE(review): duplicate region (dataset artifact); definitions kept verbatim.
#' @rdname sentometrics-deprecated
#'
#' @param sentiment output from a \code{\link{compute_sentiment}} call, computed from a \code{sentocorpus} object.
#' @param ctr output from a \code{\link{ctr_agg}} call. The \code{howWithin} and \code{nCore} elements are ignored.
#'
#' @seealso \code{\link{aggregate}}
#'
#' @export
perform_agg <- function(sentiment, ctr) {
.Deprecated("aggregate", package = "sentometrics")
aggregate(sentiment = sentiment, ctr = ctr)
}
#' @rdname sentometrics-deprecated
#'
#' @param attributions an \code{attributions} object created with \code{\link{attributions}}.
#' @param group a value from \code{c("lags", "lexicons", "features", "time")}.
#'
#' @seealso \code{\link{plot.attributions}}
#'
#' @export
plot_attributions <- function(attributions, group, ...) {
.Deprecated("plot.attributions", package = "sentometrics")
plot.attributions(x = attributions, group = group, ...)
}
#' @rdname sentometrics-deprecated
#'
#' @param orders a \code{numeric} vector as the sequence of the Almon orders (cf., \emph{b}). The maximum value
#' corresponds to \emph{B}.
#' @param do.inverse \code{TRUE} if the inverse Almon polynomials should be calculated as well.
#'
#' @seealso \code{\link{weights_almon}}
#'
#' @export
almons <- function(n, orders, do.inverse, do.normalize) {
.Deprecated("weights_almon", package = "sentometrics")
weights_almon(n = n, orders = orders, do.inverse = do.inverse, do.normalize = do.normalize)
}
#' @rdname sentometrics-deprecated
#'
#' @param alphas a \code{numeric} vector of decay factors.
#'
#' @seealso \code{\link{weights_exponential}}
#'
#' @export
exponentials <- function(n, alphas) {
.Deprecated("weights_exponential", package = "sentometrics")
weights_exponential(n = n, alphas = alphas)
}
# NOTE(review): duplicate region (dataset artifact); definitions kept verbatim.
####################
## DEFUNCT #########
####################
#' Datasets with defunct names
#'
#' These are datasets that have been renamed and the old names removed. Please change your code to use the new names.
#'
#' @docType data
#'
#' @name data-defunct
#' @keywords internal
NULL
#' @rdname data-defunct
#' @name lexicons
#' @details The dataset \code{lexicons} is defunct, use \code{list_lexicons} instead.
NULL
#' @rdname data-defunct
#' @name valence
#' @details The dataset \code{valence} is defunct, use \code{list_valence_shifters} instead.
NULL
#' Defunct functions
#'
#' Functions defunct due to changed naming or because functionality is discarded. See the NEWS file for more information
#' about since when or why functions have been defunct.
#'
#' @name sentometrics-defunct
#' @keywords internal
NULL
#' @rdname sentometrics-defunct
#'
#' @param ... arguments not documented since function declared defunct.
#'
#' @export
ctr_merge <- function(...) {
.Defunct(package = "sentometrics")
}
#' @rdname sentometrics-defunct
#'
#' @export
perform_MCS <- function(...) {
.Defunct(package = "sentometrics")
}
|
# Read the Newick tree for alignment 2006_0, remove its root, and write the
# unrooted topology back out for downstream codeml processing.
library(ape)
phylo <- read.tree("2006_0.txt")
phylo_unrooted <- unroot(phylo)
write.tree(phylo_unrooted, file = "2006_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/2006_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Duplicate of the 2006_0 tree-unrooting snippet (dataset artifact): read the
# Newick tree, unroot it with ape, and write the result.
library(ape)
testtree <- read.tree("2006_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2006_0_unrooted.txt")
|
####Gütemaß improvements
# Two-panel barplot comparing objective-function values of the irrigated
# ("Irri") vs. non-irrigated ("NoIrri") calibration runs for subcatchment Z2.
# This section draws panel 1: monthly RMSE per gauge.
# NOTE(review): rm(list = ls()) plus setwd() make the script runnable only
# from the author's directory layout; all paths are hard-coded.
rm(list = ls())
setwd("~/Workspace/RioSaoFrancisco/ResultsCalibration/Paper/NewExe/6000runs")
png(file = "ObjectiveFunctions_irriNoirri.png", bg = "white", width = 1500 , height = 2480, res = 300)
par(mfrow = c(2, 1), mai = c(1,1,0.2,0.2), oma = c(2, 2, .5, .5), mgp = c(2, .6, 0))
# for barplot RMSE_monthly
# Read each calibration set's objective-function table (3 header lines
# skipped), keep only the "rmse_monthly" rows, and tag the set in Subgroup.
Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2Irri) <- c("obj_fun","val")
Z2Irri <- Z2Irri[grep("rmse_monthly", Z2Irri[,1]),]
Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
Z2Irri$Subgroup <- as.factor("Irri")
Z2NoIrri <- read.table("Z2NoIrriNew/SetA/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2NoIrri) <- c("obj_fun","val")
Z2NoIrri <- Z2NoIrri[grep("rmse_monthly", Z2NoIrri[,1]),]
Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
Z2NoIrri$Subgroup <- as.factor("NoIrri")
# NOTE(review): Z2C and Z2D are read analogously but unused in this panel;
# AllZ2 below binds only Z2Irri and Z2NoIrri.
Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2C) <- c("obj_fun","val")
Z2C <- Z2C[grep("rmse_monthly", Z2C[,1]),]
Z2C$obj_fun <- as.factor(Z2C$obj_fun)
Z2C$Subgroup <- as.factor("C")
Z2D <- read.table("Z2NoIrriNew/SetD/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2D) <- c("obj_fun","val")
Z2D <- Z2D[grep("rmse_monthly", Z2D[,1]),]
Z2D$obj_fun <- as.factor(Z2D$obj_fun)
Z2D$Subgroup <- as.factor("D")
AllZ2 <- rbind(Z2Irri,Z2NoIrri)
# Wide format: one row per Subgroup, one column per objective function.
data_base <- reshape(AllZ2, # Modify data for Base R barplot
idvar = "Subgroup",
timevar = "obj_fun",
direction = "wide")
row.names(data_base) <- data_base$Subgroup
data_base <- data_base[ , 2:ncol(data_base)]
# Hard-coded column surgery: drop two gauges, reorder the rest, and relabel
# with subbasin numbers (order chosen to match the dry-season panel).
data_base <- data_base[,-c(5,8)]
data_base <- data_base[,c(7,6,5,3,1,2,4)]
colnames(data_base) <- c("96", "58", "90", "78", "13", "73", "15")
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
# Row order for plotting: NoIrri first, then Irri.
data_base <- data_base[c(2,1),]
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#4AB29D","#D40E92"), ylim = c(0,20000))
abline(h=0)
title(ylab=expression("RMSE"["m"]*" [m" ^3*"/s]"), line=2, cex.lab=1.2)
# Relative improvement (%) of Irri over NoIrri per gauge (not plotted).
percent <- ((data_base[1,] -data_base[2,])/data_base[1,]) * 100
############################
## RMSE dry season
# Panel 2: RMSE restricted to dry-season days. The "val" column read from the
# files is overwritten below with RMSEs recomputed over manually derived
# dry-season day windows.
Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2Irri) <- c("obj_fun","val")
Z2Irri <- Z2Irri[grep("dry", Z2Irri[,1]),]
Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
Z2Irri$Subgroup <- as.factor("Irri")
Z2NoIrri <- read.table("Z2NoIrriNew/SetA/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2NoIrri) <- c("obj_fun","val")
Z2NoIrri <- Z2NoIrri[grep("dry", Z2NoIrri[,1]),]
Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
Z2NoIrri$Subgroup <- as.factor("NoIrri")
Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2C) <- c("obj_fun","val")
Z2C <- Z2C[grep("dry", Z2C[,1]),]
Z2C$obj_fun <- as.factor(Z2C$obj_fun)
Z2C$Subgroup <- as.factor("C")
Z2D <- read.table("Z2NoIrriNew/SetD/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2D) <- c("obj_fun","val")
Z2D <- Z2D[grep("dry", Z2D[,1]),]
Z2D$obj_fun <- as.factor(Z2D$obj_fun)
Z2D$Subgroup <- as.factor("D")
#####Refill dataframe AllZ2 with newly calculated RMSE dry season values
### simulation days ofdry seasons were derived visually/manually:
# subXX = simulation-day indices of the dry seasons for subbasin XX.
sub13 <- c(92:306,397:682,742:1036,1217:1401,1582:1783,1947:2131,2312:2596,2677:2861,3044:3226,
3407:3575,3759:3941,4122:4304,4487:4686,4856:5077,5142:5416)
sub15 <- c(92:306,464:640,822:1036,1202:1401,1582:1804,1926:2131,2322:2471,2677:2880,3042:3243,
3417:3591,3757:3956,4137:4303,4472:4686,4867:5074,5232:5411)
sub58 <- c(106:344,457:671,836:1036,1217:1431,1596:1796,1947:2131,2318:2486,2677:2889,3044:3237,
3417:3585,3727:3956,4137:4345,4487:4690,4867:5081,5232:5385)
sub73 <- c(122:306,457:671,836:1074,1217:1416,1613:1793,1947:2131,2312:2496,2677:2881,3042:3234,
3407:3584,3772:3956,4137:4335,4516:4686,4867:4990,5232:5416)
sub78 <- c(101:306,447:671,822:1036,1187:1431,1582:1796,1927:2131,2282:2496,2616:2891,3042:3235,
3407:3577,3772:3956,4121:4342,4502:4686,4867:5074,5222:5445)
sub90 <- c(106:306,457:771,836:1036,1217:1431,1596:1796,1931:2131,2312:2484,2661:2891,3042:3239,
3421:3580,3772:3956,4137:4342,4486:4686,4867:5081,5232:5410)
sub96 <- c(122:306,457:671,852:1036,1217:1431,1596:1796,1947:2137,2312:2496,2661:2891,3042:3241,
3421:3584,3761:3956,4137:4351,4502:4691,4867:5081,5246:5416)
# Observed discharge; restricted to dates before 2015.
obs <- read.delim("Z2Irri/init_config/Input/Time_series/discharge_obs_24.txt", skip = 4, header = T, check.names = F)
obs$Date <- paste(obs$YYYY,obs$MM,obs$DD, sep = "-")
obs$Date <- as.POSIXct(obs$Date, format = c("%Y-%m-%d"))
obs <- obs[obs$Date < "2015-01-01",]
# Simulated flows for the four calibration sets.
irri <- read.table("Z2Irri/irri_best/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setC <- read.table("Z2Irri/noirri/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
noirri <- read.table("Z2NoIrriNew/SetA/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setD<- read.table("Z2NoIrriNew/SetD/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
subs <- c('13','73','78','15','90','58','96')
# Per gauge: overwrite the file-based value with the RMSE between observed
# and simulated flow over that gauge's dry-season day windows.
# NOTE(review): grepl(subs[i], ...) is a substring match, so e.g. "13" would
# also hit an obj_fun name containing "130"; assumed unambiguous here —
# verify against the objective-function naming scheme.
for (i in 1:length(subs)){
Z2Irri[grepl(subs[i],Z2Irri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-irri[get(paste0("sub",subs[i])),match(subs[i],colnames(irri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
Z2NoIrri[grepl(subs[i],Z2NoIrri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-noirri[get(paste0("sub",subs[i])),match(subs[i],colnames(noirri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
Z2C[grepl(subs[i],Z2C[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setC[get(paste0("sub",subs[i])),match(subs[i],colnames(setC))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
Z2D[grepl(subs[i],Z2D[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setD[get(paste0("sub",subs[i])),match(subs[i],colnames(setD))])^2, na.rm=TRUE))
}
AllZ2 <- rbind(Z2Irri,Z2NoIrri)
data_base <- reshape(AllZ2, # Modify data for Base R barplot
idvar = "Subgroup",
timevar = "obj_fun",
direction = "wide")
row.names(data_base) <- data_base$Subgroup
data_base <- data_base[ , 2:ncol(data_base)]
data_base <- data_base[,-c(5,8)]
data_base <- data_base[,c(7,6,5,3,1,2,4)]
colnames(data_base) <- c("96", "58", "90", "78", "13", "73", "15")
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
data_base <- data_base[c(2,1),]
percent <- ((data_base[1,] -data_base[2,])/data_base[1,]) * 100
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#4AB29D","#D40E92") , ylim = c(0,500))
abline(h=0)
title(ylab=expression("RMSE"["L"]*" [m" ^3*"/s]"), line=2, cex.lab=1.2)
title(xlab="Subbasin", line=1.8, cex.lab=1.2)
legend("topright", legend= c("Set A", "Set B"),col = c("#4AB29D","#D40E92"), pch = 15)
dev.off()
# ###NSE
#
# Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2Irri) <- c("obj_fun","val")
# Z2Irri <- Z2Irri[grep("ns_co", Z2Irri[,1]),]
# Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
# Z2Irri$Subgroup <- as.factor("Irri")
#
# Z2NoIrri <- read.table("Z2NoIrriNew/thread1_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2NoIrri) <- c("obj_fun","val")
# Z2NoIrri <- Z2NoIrri[grep("ns_co", Z2NoIrri[,1]),]
# Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
# Z2NoIrri$Subgroup <- as.factor("NoIrri")
#
# Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2C) <- c("obj_fun","val")
# Z2C <- Z2C[grep("ns_co", Z2C[,1]),]
# Z2C$obj_fun <- as.factor(Z2C$obj_fun)
# Z2C$Subgroup <- as.factor("C")
#
# #
# AllZ2 <- rbind(Z2Irri,Z2NoIrri, Z2C)
#
# # # for barplot NSE
# data_base <- reshape(AllZ2, # Modify data for Base R barplot
# idvar = "Subgroup",
# timevar = "obj_fun",
# direction = "wide")
# row.names(data_base) <- data_base$Subgroup
# data_base <- data_base[ , 2:ncol(data_base)]
# data_base <- data_base[,c(9,7,6,3,1,2,4,5,8)]
# colnames(data_base) <- c("1", "2", "3", "4", "11", "12", "13", "14", "15")
# data_base <- as.matrix(data_base)
#
#
# # ##take out the two negative subs (Carinhana and )
# data_base <- data_base[,1:7]
# data_base <- data_base[c(2,1,3),]
# #
# ## take out 2 last columns, change colors
# barplot(height = data_base, beside = TRUE, ylab= "NSE", col = c("#D40E92", "#4AB29D",'#000099'), ylim = c(0,0.8), cex.lab = 1.2)
# abline(h=0)
# legend("topright", legend= c("Set A", "Set B", "Set C"),col = c("#D40E92", "#4AB29D",'#000099'), pch = 15)
# title(xlab="Gauge number", line=1.4, cex.lab=1.2)
# #dev.off()
#
# ##compare low flows Set B and Set C
# lf_b <- AllZ2[AllZ2[3] == "Irri",]
# lf_c <- AllZ2[AllZ2[3] == "C",]
#
# d <- lf_c$val - lf_b$val
# d_per <- d/lf_c$val * 100
# d_per
### For Urucuia Subbasin
## RMSE dry season
# Same dry-season RMSE barplot as for Z2, but for the Urucuia subbasin
# (gauge 15 only), comparing three sets; Set D is read and recomputed but
# dropped from the plot below.
# NOTE(review): rm(list = ls()) / setwd() tie this to the author's layout.
rm(list = ls())
setwd("~/Workspace/RioSaoFrancisco/ResultsCalibration/Paper/NewExe/6000runs/10000runs")
# Read each set's objective-function table and keep the low-flow rows.
UruIrri <- read.table("UruIrri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(UruIrri) <- c("obj_fun","val")
UruIrri <- UruIrri[grep("loflo", UruIrri[,1]),]
UruIrri$obj_fun <- as.factor(UruIrri$obj_fun)
UruIrri$Subgroup <- as.factor("Irri")
UruNoIrri <- read.table("test/SetA/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(UruNoIrri) <- c("obj_fun","val")
UruNoIrri <- UruNoIrri[grep("loflo", UruNoIrri[,1]),]
UruNoIrri$obj_fun <- as.factor(UruNoIrri$obj_fun)
UruNoIrri$Subgroup <- as.factor("NoIrri")
UruC <- read.table("UruIrri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(UruC) <- c("obj_fun","val")
UruC <- UruC[grep("loflo", UruC[,1]),]
UruC$obj_fun <- as.factor(UruC$obj_fun)
UruC$Subgroup <- as.factor("C")
UruD <- read.table("test/SetD/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(UruD) <- c("obj_fun","val")
UruD <- UruD[grep("loflo", UruD[,1]),]
UruD$obj_fun <- as.factor(UruD$obj_fun)
UruD$Subgroup <- as.factor("D")
#####Refill dataframe AllUru with newly calculated RMSE dry season values
### simulation days ofdry seasons were derived visually/manually:
# subXX = simulation-day indices of the dry seasons for subbasin XX.
sub13 <- c(92:306,397:682,742:1036,1217:1401,1582:1783,1947:2131,2312:2596,2677:2861,3044:3226,
3407:3575,3759:3941,4122:4304,4487:4686,4856:5077,5142:5416)
sub15 <- c(92:306,464:640,822:1036,1202:1401,1582:1804,1926:2131,2322:2471,2677:2880,3042:3243,
3417:3591,3757:3956,4137:4303,4472:4686,4867:5074,5232:5411)
sub58 <- c(106:344,457:671,836:1036,1217:1431,1596:1796,1947:2131,2318:2486,2677:2889,3044:3237,
3417:3585,3727:3956,4137:4345,4487:4690,4867:5081,5232:5385)
sub73 <- c(122:306,457:671,836:1074,1217:1416,1613:1793,1947:2131,2312:2496,2677:2881,3042:3234,
3407:3584,3772:3956,4137:4335,4516:4686,4867:4990,5232:5416)
sub78 <- c(101:306,447:671,822:1036,1187:1431,1582:1796,1927:2131,2282:2496,2616:2891,3042:3235,
3407:3577,3772:3956,4121:4342,4502:4686,4867:5074,5222:5445)
sub90 <- c(106:306,457:771,836:1036,1217:1431,1596:1796,1931:2131,2312:2484,2661:2891,3042:3239,
3421:3580,3772:3956,4137:4342,4486:4686,4867:5081,5232:5410)
sub96 <- c(122:306,457:671,852:1036,1217:1431,1596:1796,1947:2137,2312:2496,2661:2891,3042:3241,
3421:3584,3761:3956,4137:4351,4502:4691,4867:5081,5246:5416)
# Observed discharge restricted to dates before 2015, plus simulated flows.
obs <- read.delim("UruIrri/init_config/Input/Time_series/discharge_obs_24.txt", skip = 4, header = T, check.names = F)
obs$Date <- paste(obs$YYYY,obs$MM,obs$DD, sep = "-")
obs$Date <- as.POSIXct(obs$Date, format = c("%Y-%m-%d"))
obs <- obs[obs$Date < "2015-01-01",]
irri <- read.table("UruIrri/irri_best/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setC <- read.table("UruIrri/noirri/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
noirri <- read.table("test/SetA/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setD <- read.table("test/SetD/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
subs <- c('15')
# Overwrite the file-based values with dry-season RMSE for gauge 15.
for (i in 1:length(subs)){
UruIrri[grepl(subs[i],UruIrri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-irri[get(paste0("sub",subs[i])),match(subs[i],colnames(irri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
UruNoIrri[grepl(subs[i],UruNoIrri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-noirri[get(paste0("sub",subs[i])),match(subs[i],colnames(noirri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
UruC[grepl(subs[i],UruC[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setC[get(paste0("sub",subs[i])),match(subs[i],colnames(setC))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
UruD[grepl(subs[i],UruD[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setD[get(paste0("sub",subs[i])),match(subs[i],colnames(setD))])^2, na.rm=TRUE))
}
AllUru <- rbind(UruIrri,UruNoIrri, UruC, UruD)
data_base <- reshape(AllUru, # Modify data for Base R barplot
idvar = "Subgroup",
timevar = "obj_fun",
direction = "wide")
row.names(data_base) <- data_base$Subgroup
# BUG FIX: drop the character "Subgroup" id column before as.matrix(). The
# Z2 sections do this; without it as.matrix() coerces the whole matrix to
# character and barplot() fails on a non-numeric 'height'.
data_base <- data_base[ , 2:ncol(data_base)]
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
# Row order for plotting: NoIrri, Irri, C — Set D is intentionally dropped.
data_base <- data_base[c(2,1,3),]
percent <- ((data_base[1,] -data_base[2,])/data_base[1,]) * 100
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#D40E92", "#4AB29D",'#000099') , ylim = c(0,500))
abline(h=0)
title(ylab=expression("RMSE"["L"]*" [m" ^3*"/s]"), line=2, cex.lab=1.2)
title(xlab="Subbasin", line=1.8, cex.lab=1.2)
legend("topright", legend= c("Set A", "Set B", "Set C"),col = c("#D40E92", "#4AB29D",'#000099'), pch = 15)
dev.off()
|
/Paper/Barplot_ObjectiveFunction.R
|
no_license
|
plvoit/RSF
|
R
| false
| false
| 14,986
|
r
|
#### Gütemaß (goodness-of-fit) improvements
# Two-panel figure comparing objective-function values of the irrigation run
# (Set B) against the no-irrigation run (Set A) for the Z2 subgroup:
# top panel = monthly RMSE, bottom panel = dry-season (low-flow) RMSE.
rm(list = ls())
setwd("~/Workspace/RioSaoFrancisco/ResultsCalibration/Paper/NewExe/6000runs")
png(file = "ObjectiveFunctions_irriNoirri.png", bg = "white", width = 1500 , height = 2480, res = 300)
par(mfrow = c(2, 1), mai = c(1,1,0.2,0.2), oma = c(2, 2, .5, .5), mgp = c(2, .6, 0))
# for barplot RMSE_monthly
# Read the "rmse_monthly" objective-function rows of each parameter set.
Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2Irri) <- c("obj_fun","val")
Z2Irri <- Z2Irri[grep("rmse_monthly", Z2Irri[,1]),]
Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
Z2Irri$Subgroup <- as.factor("Irri")
Z2NoIrri <- read.table("Z2NoIrriNew/SetA/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2NoIrri) <- c("obj_fun","val")
Z2NoIrri <- Z2NoIrri[grep("rmse_monthly", Z2NoIrri[,1]),]
Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
Z2NoIrri$Subgroup <- as.factor("NoIrri")
# Sets C and D are read as well but not included in this panel's rbind below.
Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2C) <- c("obj_fun","val")
Z2C <- Z2C[grep("rmse_monthly", Z2C[,1]),]
Z2C$obj_fun <- as.factor(Z2C$obj_fun)
Z2C$Subgroup <- as.factor("C")
Z2D <- read.table("Z2NoIrriNew/SetD/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2D) <- c("obj_fun","val")
Z2D <- Z2D[grep("rmse_monthly", Z2D[,1]),]
Z2D$obj_fun <- as.factor(Z2D$obj_fun)
Z2D$Subgroup <- as.factor("D")
AllZ2 <- rbind(Z2Irri,Z2NoIrri)
data_base <- reshape(AllZ2, # Modify data for Base R barplot
                     idvar = "Subgroup",
                     timevar = "obj_fun",
                     direction = "wide")
row.names(data_base) <- data_base$Subgroup
data_base <- data_base[ , 2:ncol(data_base)]  # drop the non-numeric Subgroup column
data_base <- data_base[,-c(5,8)]              # drop objective-function columns not shown
data_base <- data_base[,c(7,6,5,3,1,2,4)]     # reorder columns to the desired gauge order
colnames(data_base) <- c("96", "58", "90", "78", "13", "73", "15")  # gauge/subbasin numbers
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
data_base <- data_base[c(2,1),]  # row order: Set A (NoIrri) first, then Set B (Irri)
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#4AB29D","#D40E92"), ylim = c(0,20000))
abline(h=0)
title(ylab=expression("RMSE"["m"]*" [m" ^3*"/s]"), line=2, cex.lab=1.2)
percent <- ((data_base[1,] -data_base[2,])/data_base[1,]) * 100  # relative improvement of Set B over Set A in %
############################
## RMSE dry season
# Re-read the objective-function files, this time keeping the "dry" rows,
# then overwrite them with freshly computed dry-season RMSEs.
Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2Irri) <- c("obj_fun","val")
Z2Irri <- Z2Irri[grep("dry", Z2Irri[,1]),]
Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
Z2Irri$Subgroup <- as.factor("Irri")
Z2NoIrri <- read.table("Z2NoIrriNew/SetA/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2NoIrri) <- c("obj_fun","val")
Z2NoIrri <- Z2NoIrri[grep("dry", Z2NoIrri[,1]),]
Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
Z2NoIrri$Subgroup <- as.factor("NoIrri")
Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2C) <- c("obj_fun","val")
Z2C <- Z2C[grep("dry", Z2C[,1]),]
Z2C$obj_fun <- as.factor(Z2C$obj_fun)
Z2C$Subgroup <- as.factor("C")
Z2D <- read.table("Z2NoIrriNew/SetD/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
names(Z2D) <- c("obj_fun","val")
Z2D <- Z2D[grep("dry", Z2D[,1]),]
Z2D$obj_fun <- as.factor(Z2D$obj_fun)
Z2D$Subgroup <- as.factor("D")
#####Refill dataframe AllZ2 with newly calculated RMSE dry season values
### simulation days of dry seasons were derived visually/manually:
sub13 <- c(92:306,397:682,742:1036,1217:1401,1582:1783,1947:2131,2312:2596,2677:2861,3044:3226,
           3407:3575,3759:3941,4122:4304,4487:4686,4856:5077,5142:5416)
sub15 <- c(92:306,464:640,822:1036,1202:1401,1582:1804,1926:2131,2322:2471,2677:2880,3042:3243,
           3417:3591,3757:3956,4137:4303,4472:4686,4867:5074,5232:5411)
sub58 <- c(106:344,457:671,836:1036,1217:1431,1596:1796,1947:2131,2318:2486,2677:2889,3044:3237,
           3417:3585,3727:3956,4137:4345,4487:4690,4867:5081,5232:5385)
sub73 <- c(122:306,457:671,836:1074,1217:1416,1613:1793,1947:2131,2312:2496,2677:2881,3042:3234,
           3407:3584,3772:3956,4137:4335,4516:4686,4867:4990,5232:5416)
sub78 <- c(101:306,447:671,822:1036,1187:1431,1582:1796,1927:2131,2282:2496,2616:2891,3042:3235,
           3407:3577,3772:3956,4121:4342,4502:4686,4867:5074,5222:5445)
sub90 <- c(106:306,457:771,836:1036,1217:1431,1596:1796,1931:2131,2312:2484,2661:2891,3042:3239,
           3421:3580,3772:3956,4137:4342,4486:4686,4867:5081,5232:5410)
sub96 <- c(122:306,457:671,852:1036,1217:1431,1596:1796,1947:2137,2312:2496,2661:2891,3042:3241,
           3421:3584,3761:3956,4137:4351,4502:4691,4867:5081,5246:5416)
# Observed discharge, restricted to the calibration period.
obs <- read.delim("Z2Irri/init_config/Input/Time_series/discharge_obs_24.txt", skip = 4, header = T, check.names = F)
obs$Date <- paste(obs$YYYY,obs$MM,obs$DD, sep = "-")
obs$Date <- as.POSIXct(obs$Date, format = c("%Y-%m-%d"))
obs <- obs[obs$Date < "2015-01-01",]
# Simulated river flow of each parameter set.
irri <- read.table("Z2Irri/irri_best/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setC <- read.table("Z2Irri/noirri/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
noirri <- read.table("Z2NoIrriNew/SetA/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
setD<- read.table("Z2NoIrriNew/SetD/Output/River_Flow.out", quote="\"", comment.char="",skip = 1, header = T, check.names = F)
subs <- c('13','73','78','15','90','58','96')
# For every gauge: dry-season RMSE between observation and simulation,
# written into the "dry" row of each set's table whose objective-function
# name contains the gauge number (matched via grepl()).
for (i in 1:length(subs)){
  Z2Irri[grepl(subs[i],Z2Irri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-irri[get(paste0("sub",subs[i])),match(subs[i],colnames(irri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
  Z2NoIrri[grepl(subs[i],Z2NoIrri[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-noirri[get(paste0("sub",subs[i])),match(subs[i],colnames(noirri))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
  Z2C[grepl(subs[i],Z2C[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setC[get(paste0("sub",subs[i])),match(subs[i],colnames(setC))])^2, na.rm=TRUE))
}
for (i in 1:length(subs)){
  Z2D[grepl(subs[i],Z2D[,1]),2] <- sqrt(mean((obs[get(paste0("sub",subs[i])),match(subs[i],colnames(obs))]-setD[get(paste0("sub",subs[i])),match(subs[i],colnames(setD))])^2, na.rm=TRUE))
}
AllZ2 <- rbind(Z2Irri,Z2NoIrri)
data_base <- reshape(AllZ2, # Modify data for Base R barplot
                     idvar = "Subgroup",
                     timevar = "obj_fun",
                     direction = "wide")
row.names(data_base) <- data_base$Subgroup
data_base <- data_base[ , 2:ncol(data_base)]  # drop the non-numeric Subgroup column
data_base <- data_base[,-c(5,8)]
data_base <- data_base[,c(7,6,5,3,1,2,4)]
colnames(data_base) <- c("96", "58", "90", "78", "13", "73", "15")
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
data_base <- data_base[c(2,1),]
percent <- ((data_base[1,] -data_base[2,])/data_base[1,]) * 100  # relative improvement of Set B over Set A in %
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#4AB29D","#D40E92") , ylim = c(0,500))
abline(h=0)
title(ylab=expression("RMSE"["L"]*" [m" ^3*"/s]"), line=2, cex.lab=1.2)
title(xlab="Subbasin", line=1.8, cex.lab=1.2)
legend("topright", legend= c("Set A", "Set B"),col = c("#4AB29D","#D40E92"), pch = 15)
dev.off()
# ###NSE
#
# Z2Irri <- read.table("Z2Irri/irri_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2Irri) <- c("obj_fun","val")
# Z2Irri <- Z2Irri[grep("ns_co", Z2Irri[,1]),]
# Z2Irri$obj_fun <- as.factor(Z2Irri$obj_fun)
# Z2Irri$Subgroup <- as.factor("Irri")
#
# Z2NoIrri <- read.table("Z2NoIrriNew/thread1_best/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2NoIrri) <- c("obj_fun","val")
# Z2NoIrri <- Z2NoIrri[grep("ns_co", Z2NoIrri[,1]),]
# Z2NoIrri$obj_fun <- as.factor(Z2NoIrri$obj_fun)
# Z2NoIrri$Subgroup <- as.factor("NoIrri")
#
# Z2C <- read.table("Z2Irri/noirri/curr_obj_fun_val_day.txt", quote="\"", comment.char="", skip = 3, stringsAsFactors = F)
# names(Z2C) <- c("obj_fun","val")
# Z2C <- Z2C[grep("ns_co", Z2C[,1]),]
# Z2C$obj_fun <- as.factor(Z2C$obj_fun)
# Z2C$Subgroup <- as.factor("C")
#
# #
# AllZ2 <- rbind(Z2Irri,Z2NoIrri, Z2C)
#
# # # for barplot NSE
# data_base <- reshape(AllZ2, # Modify data for Base R barplot
# idvar = "Subgroup",
# timevar = "obj_fun",
# direction = "wide")
# row.names(data_base) <- data_base$Subgroup
# data_base <- data_base[ , 2:ncol(data_base)]
# data_base <- data_base[,c(9,7,6,3,1,2,4,5,8)]
# colnames(data_base) <- c("1", "2", "3", "4", "11", "12", "13", "14", "15")
# data_base <- as.matrix(data_base)
#
#
# # ##take out the two negative subs (Carinhana and )
# data_base <- data_base[,1:7]
# data_base <- data_base[c(2,1,3),]
# #
# ## take out 2 last columns, change colors
# barplot(height = data_base, beside = TRUE, ylab= "NSE", col = c("#D40E92", "#4AB29D",'#000099'), ylim = c(0,0.8), cex.lab = 1.2)
# abline(h=0)
# legend("topright", legend= c("Set A", "Set B", "Set C"),col = c("#D40E92", "#4AB29D",'#000099'), pch = 15)
# title(xlab="Gauge number", line=1.4, cex.lab=1.2)
# #dev.off()
#
# ##compare low flows Set B and Set C
# lf_b <- AllZ2[AllZ2[3] == "Irri",]
# lf_c <- AllZ2[AllZ2[3] == "C",]
#
# d <- lf_c$val - lf_b$val
# d_per <- d/lf_c$val * 100
# d_per
### For Urucuia Subbasin
## RMSE dry season
# Same analysis as the Z2 sections above, but for the Urucuia subbasin
# (gauge 15) and four parameter sets (Irri, NoIrri, C, D).
rm(list = ls())  # fresh workspace for this section
setwd("~/Workspace/RioSaoFrancisco/ResultsCalibration/Paper/NewExe/6000runs/10000runs")
# Read the "loflo" (low-flow) objective-function rows of each parameter set.
UruIrri <- read.table("UruIrri/irri_best/curr_obj_fun_val_day.txt", quote = "\"", comment.char = "", skip = 3, stringsAsFactors = FALSE)
names(UruIrri) <- c("obj_fun", "val")
UruIrri <- UruIrri[grep("loflo", UruIrri[, 1]), ]
UruIrri$obj_fun <- as.factor(UruIrri$obj_fun)
UruIrri$Subgroup <- as.factor("Irri")
UruNoIrri <- read.table("test/SetA/curr_obj_fun_val_day.txt", quote = "\"", comment.char = "", skip = 3, stringsAsFactors = FALSE)
names(UruNoIrri) <- c("obj_fun", "val")
UruNoIrri <- UruNoIrri[grep("loflo", UruNoIrri[, 1]), ]
UruNoIrri$obj_fun <- as.factor(UruNoIrri$obj_fun)
UruNoIrri$Subgroup <- as.factor("NoIrri")
UruC <- read.table("UruIrri/noirri/curr_obj_fun_val_day.txt", quote = "\"", comment.char = "", skip = 3, stringsAsFactors = FALSE)
names(UruC) <- c("obj_fun", "val")
UruC <- UruC[grep("loflo", UruC[, 1]), ]
UruC$obj_fun <- as.factor(UruC$obj_fun)
UruC$Subgroup <- as.factor("C")
UruD <- read.table("test/SetD/curr_obj_fun_val_day.txt", quote = "\"", comment.char = "", skip = 3, stringsAsFactors = FALSE)
names(UruD) <- c("obj_fun", "val")
UruD <- UruD[grep("loflo", UruD[, 1]), ]
UruD$obj_fun <- as.factor(UruD$obj_fun)
UruD$Subgroup <- as.factor("D")
##### Refill dataframe AllUru with newly calculated RMSE dry season values
### simulation days of dry seasons were derived visually/manually:
sub13 <- c(92:306,397:682,742:1036,1217:1401,1582:1783,1947:2131,2312:2596,2677:2861,3044:3226,
           3407:3575,3759:3941,4122:4304,4487:4686,4856:5077,5142:5416)
sub15 <- c(92:306,464:640,822:1036,1202:1401,1582:1804,1926:2131,2322:2471,2677:2880,3042:3243,
           3417:3591,3757:3956,4137:4303,4472:4686,4867:5074,5232:5411)
sub58 <- c(106:344,457:671,836:1036,1217:1431,1596:1796,1947:2131,2318:2486,2677:2889,3044:3237,
           3417:3585,3727:3956,4137:4345,4487:4690,4867:5081,5232:5385)
sub73 <- c(122:306,457:671,836:1074,1217:1416,1613:1793,1947:2131,2312:2496,2677:2881,3042:3234,
           3407:3584,3772:3956,4137:4335,4516:4686,4867:4990,5232:5416)
sub78 <- c(101:306,447:671,822:1036,1187:1431,1582:1796,1927:2131,2282:2496,2616:2891,3042:3235,
           3407:3577,3772:3956,4121:4342,4502:4686,4867:5074,5222:5445)
sub90 <- c(106:306,457:771,836:1036,1217:1431,1596:1796,1931:2131,2312:2484,2661:2891,3042:3239,
           3421:3580,3772:3956,4137:4342,4486:4686,4867:5081,5232:5410)
sub96 <- c(122:306,457:671,852:1036,1217:1431,1596:1796,1947:2137,2312:2496,2661:2891,3042:3241,
           3421:3584,3761:3956,4137:4351,4502:4691,4867:5081,5246:5416)
# Observed discharge, restricted to the calibration period.
obs <- read.delim("UruIrri/init_config/Input/Time_series/discharge_obs_24.txt", skip = 4, header = TRUE, check.names = FALSE)
obs$Date <- paste(obs$YYYY, obs$MM, obs$DD, sep = "-")
obs$Date <- as.POSIXct(obs$Date, format = "%Y-%m-%d")
obs <- obs[obs$Date < "2015-01-01", ]
# Simulated river flow of every parameter set.
irri   <- read.table("UruIrri/irri_best/Output/River_Flow.out", quote = "\"", comment.char = "", skip = 1, header = TRUE, check.names = FALSE)
setC   <- read.table("UruIrri/noirri/Output/River_Flow.out", quote = "\"", comment.char = "", skip = 1, header = TRUE, check.names = FALSE)
noirri <- read.table("test/SetA/Output/River_Flow.out", quote = "\"", comment.char = "", skip = 1, header = TRUE, check.names = FALSE)
setD   <- read.table("test/SetD/Output/River_Flow.out", quote = "\"", comment.char = "", skip = 1, header = TRUE, check.names = FALSE)
subs <- c('15')  # only gauge 15 for the Urucuia
# Overwrite each set's "loflo" value with the dry-season RMSE computed from
# the manually derived dry-season day indices.
for (i in seq_along(subs)) {
  dry_days <- get(paste0("sub", subs[i]))
  obs_dry  <- obs[dry_days, match(subs[i], colnames(obs))]
  UruIrri[grepl(subs[i], UruIrri[, 1]), 2]     <- sqrt(mean((obs_dry - irri[dry_days, match(subs[i], colnames(irri))])^2, na.rm = TRUE))
  UruNoIrri[grepl(subs[i], UruNoIrri[, 1]), 2] <- sqrt(mean((obs_dry - noirri[dry_days, match(subs[i], colnames(noirri))])^2, na.rm = TRUE))
  UruC[grepl(subs[i], UruC[, 1]), 2]           <- sqrt(mean((obs_dry - setC[dry_days, match(subs[i], colnames(setC))])^2, na.rm = TRUE))
  UruD[grepl(subs[i], UruD[, 1]), 2]           <- sqrt(mean((obs_dry - setD[dry_days, match(subs[i], colnames(setD))])^2, na.rm = TRUE))
}
AllUru <- rbind(UruIrri, UruNoIrri, UruC, UruD)
data_base <- reshape(AllUru,  # long -> wide for the base-R barplot
                     idvar = "Subgroup",
                     timevar = "obj_fun",
                     direction = "wide")
row.names(data_base) <- data_base$Subgroup
# BUGFIX: drop the non-numeric 'Subgroup' column before converting to a
# matrix; the Z2 sections above do this, but it was missing here, so
# as.matrix() coerced everything to character and barplot() failed.
data_base <- data_base[, 2:ncol(data_base), drop = FALSE]
data_base <- as.matrix(data_base)
##take out the two negative subs (Carinhana and )
#data_base <- data_base[,1:7]
data_base <- data_base[c(2, 1, 3), ]  # plot order: Set A (NoIrri), Set B (Irri), Set C; Set D omitted
percent <- ((data_base[1, ] - data_base[2, ]) / data_base[1, ]) * 100  # relative improvement of Set B over Set A
## take out 2 last columns, change colors
barplot(height = data_base, beside = TRUE, col = c("#D40E92", "#4AB29D", '#000099'), ylim = c(0, 500))
abline(h = 0)
title(ylab = expression("RMSE"["L"]*" [m" ^3*"/s]"), line = 2, cex.lab = 1.2)
title(xlab = "Subbasin", line = 1.8, cex.lab = 1.2)
legend("topright", legend = c("Set A", "Set B", "Set C"), col = c("#D40E92", "#4AB29D", '#000099'), pch = 15)
dev.off()  # NOTE(review): assumes a graphics device is still open here — confirm
|
#' Build 2010-standard municipality codes for the 1970 Census
#'
#' Adds a \code{municipality2010standard} column to 1970 Census microdata by
#' joining the \code{crosswalk_munic_1970_to_2010} crosswalk on
#' \code{municipalityCurrent} (built on the fly if absent, and dropped again
#' afterwards in that case).
#'
#' @param CensusData A data.frame of 1970 Census microdata (coerced to a
#'   data.table internally).
#' @param state_var_name Single character string naming the state (UF)
#'   variable, used when \code{municipalityCurrent} must be built first.
#' @return The input as a data.table with \code{municipality2010standard}
#'   added; rows without a crosswalk match get \code{NA}.
#' @export
build_geography_municipality2010standard_1970 <- function(CensusData,
                                                          state_var_name = "uf") {
  if (!is.data.frame(CensusData)) {
    stop("'CensusData' is not a data.frame")
  }
  if (!(is.character(state_var_name) && (length(state_var_name) == 1))) {
    stop("'state_var_name' must be a single-valued character vector")
  }
  if (!is.data.table(CensusData)) {
    CensusData = as.data.table(CensusData)
  }
  # Build 'municipalityCurrent' when missing; remember to drop it at the end.
  municipalityCurrent_just_created = FALSE
  check_vars <- check_var_existence(CensusData, c("municipalityCurrent"))
  if (length(check_vars) > 0) {
    CensusData <- build_geography_municipalityCurrent_1970(CensusData,
                                                           state_var_name = state_var_name)
    municipalityCurrent_just_created = TRUE
    gc()
  }
  data(crosswalk_munic_1970_to_2010)
  crosswalk_munic_1970_to_2010 = data.table(crosswalk_munic_1970_to_2010 %>%
                                              select(municipality1970standard,
                                                     municipality2010standard)) %>%
    rename(municipalityCurrent = municipality1970standard)
  # Left join: keep every Census record, unmatched ones get NA.
  # (merge() dispatches to the registered data.table method; avoids ':::'.)
  CensusData = merge(x = CensusData,
                     y = crosswalk_munic_1970_to_2010,
                     by = "municipalityCurrent",
                     all.x = TRUE,
                     all.y = FALSE,
                     sort = FALSE)
  gc(); Sys.sleep(1); gc()
  # 7-digit codes carry a trailing check digit; truncate to the 6-digit
  # standard. na.rm = TRUE so NA codes from unmatched rows do not make
  # min() return NA (nchar(NA) == 2 would silently skip the truncation).
  n_digit <- nchar(min(CensusData[, municipality2010standard], na.rm = TRUE))
  if (n_digit == 7) {
    CensusData[, municipality2010standard := trunc(municipality2010standard / 10)]
  }
  if (municipalityCurrent_just_created == TRUE) {
    CensusData[, municipalityCurrent := NULL]
  }
  gc()
  CensusData
}
|
/R/build_geography_municipality2010standard_1970.R
|
no_license
|
claudiacerqn/harmonizeIBGE
|
R
| false
| false
| 2,315
|
r
|
#' Build 2010-standard municipality codes for the 1970 Census
#'
#' Joins the 1970-to-2010 municipality crosswalk onto the Census data,
#' creating \code{municipality2010standard}; \code{municipalityCurrent} is
#' built temporarily if it does not exist yet and dropped again afterwards.
#'
#' @param CensusData A data.frame of 1970 Census microdata.
#' @param state_var_name Name of the state (UF) variable (single string).
#' @return A data.table with the \code{municipality2010standard} column added.
#' @export
build_geography_municipality2010standard_1970 <- function(CensusData,
                                                          state_var_name = "uf") {
  if (!is.data.frame(CensusData)) {
    stop("'CensusData' is not a data.frame")
  }
  if (!(is.character(state_var_name) & (length(state_var_name) == 1))) {
    stop("'state_var_name' must be a single-valued character vector")
  }
  if (!is.data.table(CensusData)) {
    CensusData <- as.data.table(CensusData)
  }
  # Build 'municipalityCurrent' on the fly when absent; drop it again below.
  created_munic_current <- FALSE
  missing_vars <- check_var_existence(CensusData, c("municipalityCurrent"))
  if (length(missing_vars) > 0) {
    CensusData <- build_geography_municipalityCurrent_1970(CensusData,
                                                           state_var_name = state_var_name)
    created_munic_current <- TRUE
    gc()
  }
  data(crosswalk_munic_1970_to_2010)
  crosswalk_munic_1970_to_2010 <- data.table(crosswalk_munic_1970_to_2010 %>%
                                               select(municipality1970standard,
                                                      municipality2010standard)) %>%
    rename(municipalityCurrent = municipality1970standard)
  # Left join: every Census record is kept; unmatched rows get NA.
  CensusData <- data.table:::merge.data.table(x = CensusData,
                                              y = crosswalk_munic_1970_to_2010,
                                              by = "municipalityCurrent",
                                              all.x = TRUE,
                                              all.y = FALSE,
                                              sort = FALSE)
  gc(); Sys.sleep(1); gc()
  # Codes with 7 digits carry a trailing digit; cut back to the 6-digit form.
  if (nchar(min(CensusData[, municipality2010standard])) == 7) {
    CensusData[, municipality2010standard := trunc(municipality2010standard / 10)]
  }
  if (created_munic_current) {
    CensusData[, municipalityCurrent := NULL]
  }
  gc()
  CensusData
}
|
### 11.0.0.4 Exercise ###
# Plot Chlorella maximum growth rate (rmax) against light intensity on the
# raw scale and the log-log scale, each with a fitted linear regression,
# first stacked vertically and then side by side.
X <- read.csv('ChlorellaGrowth.csv',comment.char='#')
# using '#' as a comment character allows the CSV file to be self-documenting
par(cex=1.5,cex.main=0.9,mfcol=c(2,1))  # two panels, stacked vertically
plot(rmax~light,data=X,xlab="light intensity (uE/m2/s)",ylab="maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
# BUGFIX: lm() must be told where to find the variables — 'rmax' and 'light'
# are columns of X, not workspace objects, so the original lm(rmax~light)
# failed with "object 'rmax' not found".
fit <- lm(rmax~light, data=X)
summary(fit); abline(fit)
# xlab and ylab are x and y axis labels, pch is "plotting character"
# cex is 'character expansion' - cex=1.5 increases symbol & label sizes by 50%
# cex.main sets the character expansion for the main title of the plot
plot(log(rmax)~log(light),data=X,xlab="log light intensity (uE/m2/s)",ylab="log maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(log(rmax)~log(light), data=X)  # BUGFIX: data=X added
summary(fit); abline(fit)
par(cex=1.5,cex.main=0.9,mfcol=c(1,2))  # same plots, side by side
plot(rmax~light,data=X,xlab="light intensity (uE/m2/s)",ylab="maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(rmax~light, data=X)  # BUGFIX: data=X added
summary(fit); abline(fit)
# xlab and ylab are x and y axis labels, pch is "plotting character"
# cex is 'character expansion' - cex=1.5 increases symbol & label sizes by 50%
# cex.main sets the character expansion for the main title of the plot
plot(log(rmax)~log(light),data=X,xlab="log light intensity (uE/m2/s)",ylab="log maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(log(rmax)~log(light), data=X)  # BUGFIX: data=X added
summary(fit); abline(fit)
# Next we get the regression equation to 'display itself' on the graph
c1 <- round(fit$coef[1],digits=3) # intercept
c2 <- round(fit$coef[2],digits=3) # slope
text(50,3,paste("rmax=",c1,"+",c2,"light"))
# You can use ?round, ?text and ?paste to read about these commands
# for working with plots
|
/11.0.0.4.R
|
no_license
|
ediamant/BootcampAssignments
|
R
| false
| false
| 1,846
|
r
|
### 11.0.0.4 Exercise ###
# Scatterplots of Chlorella maximum growth rate vs. light intensity (raw
# and log-log), each with a linear fit: first stacked (mfcol = c(2,1)),
# then side by side (mfcol = c(1,2)).
X <- read.csv('ChlorellaGrowth.csv',comment.char='#')
# using '#' as a comment character allows the CSV file to be self-documenting
par(cex=1.5,cex.main=0.9,mfcol=c(2,1))
plot(rmax~light,data=X,xlab="light intensity (uE/m2/s)",ylab="maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
# NOTE(review): lm() lacks data = X — 'rmax'/'light' are columns of X, not
# workspace objects, so this errors unless X is attached; the same applies
# to all four lm() calls in this script.
fit <- lm(rmax~light)
summary(fit); abline(fit)
# xlab and ylab are x and y axis labels, pch is "plotting character"
# cex is 'character expansion' - cex=1.5 increases symbol & label sizes by 50%
# cex.main sets the character expansion for the main title of the plot
plot(log(rmax)~log(light),data=X,xlab="log light intensity (uE/m2/s)",ylab="log maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(log(rmax)~log(light))
summary(fit); abline(fit)
par(cex=1.5,cex.main=0.9,mfcol=c(1,2))
plot(rmax~light,data=X,xlab="light intensity (uE/m2/s)",ylab="maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(rmax~light)
summary(fit); abline(fit)
# xlab and ylab are x and y axis labels, pch is "plotting character"
# cex is 'character expansion' - cex=1.5 increases symbol & label sizes by 50%
# cex.main sets the character expansion for the main title of the plot
plot(log(rmax)~log(light),data=X,xlab="log light intensity (uE/m2/s)",ylab="log maximum growth rate (1/d)",pch=16)
title(main="Data from Fussmann et al. (2000) system")
fit <- lm(log(rmax)~log(light))
summary(fit); abline(fit)
# Next we get the regression equation to 'display itself' on the graph
c1 <- round(fit$coef[1],digits=3) # intercept
c2 <- round(fit$coef[2],digits=3) # slope
text(50,3,paste("rmax=",c1,"+",c2,"light"))
# You can use ?round, ?text and ?paste to read about these commands
# for working with plots
|
# install.packages(c("ggvis","dplyr"))
library(dplyr)
library(ggvis)
mtcars %>%
ggvis(~mpg, ~wt,
fill = input_radiobuttons(label = "Choose fill variable:",
choices = c("vs","carb"), map = as.name)) %>%
layer_points() %>%
layer_smooths()
mtcars %>%
ggvis(~mpg) %>%
layer_histograms(width = input_numeric(2))
mtcars %>%
ggvis(~mpg) %>%
layer_histograms(width = input_slider(1,20, label="Choose a binwidth:"))
# Map the binwidth to a slider bar ("Choose a binwidth:") with the correct specifications
mtcars %>%
ggvis(~mpg, ~vs, strokeOpacity := 0.5, strokeWidth := 5, stroke := "skyblue") %>%
layer_model_predictions(model="glm") %>%
layer_points(fill = ~carb,
shape := "triangle-up", size := 100)
pressure %>%
ggvis(~temperature, ~pressure, stroke := "darkred") %>%
layer_lines(stroke := "orange", strokeDash := 5, strokeWidth := 5) %>%
layer_points(shape := "circle", size := 100, fill := "lightgreen") %>%
layer_smooths()
faithful %>%
ggvis(~waiting, ~eruptions, opacity := 0.6,
fill = ~factor(round(eruptions))) %>%
add_legend("fill", title = "~ duration (m)", orient = "left") %>%
layer_points()
faithful %>%
ggvis(~waiting, ~eruptions, opacity := 0.6,
fill = ~factor(round(eruptions)), shape = ~factor(round(eruptions)),
size = ~round(eruptions)) %>%
add_legend(c("fill", "shape", "size"), title = "~duration (m)", value = c(2,3,4,5)) %>%
layer_points()
iris %>%
group_by(Species) %>%
ggvis(~Sepal.Width, ~Sepal.Length, fill = ~Species, stroke = ~Species) %>%
layer_points() %>%
layer_smooths()
mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~factor(cyl), opacity = ~hp) %>%
layer_points() %>%
scale_numeric("opacity", range = c(0.2,2))
|
/Presentation/ggvis.R
|
no_license
|
cimentadaj/Rseminars
|
R
| false
| false
| 1,823
|
r
|
# install.packages(c("ggvis","dplyr"))
library(dplyr)
library(ggvis)
mtcars %>%
ggvis(~mpg, ~wt,
fill = input_radiobuttons(label = "Choose fill variable:",
choices = c("vs","carb"), map = as.name)) %>%
layer_points() %>%
layer_smooths()
mtcars %>%
ggvis(~mpg) %>%
layer_histograms(width = input_numeric(2))
mtcars %>%
ggvis(~mpg) %>%
layer_histograms(width = input_slider(1,20, label="Choose a binwidth:"))
# Map the binwidth to a slider bar ("Choose a binwidth:") with the correct specifications
mtcars %>%
ggvis(~mpg, ~vs, strokeOpacity := 0.5, strokeWidth := 5, stroke := "skyblue") %>%
layer_model_predictions(model="glm") %>%
layer_points(fill = ~carb,
shape := "triangle-up", size := 100)
pressure %>%
ggvis(~temperature, ~pressure, stroke := "darkred") %>%
layer_lines(stroke := "orange", strokeDash := 5, strokeWidth := 5) %>%
layer_points(shape := "circle", size := 100, fill := "lightgreen") %>%
layer_smooths()
faithful %>%
ggvis(~waiting, ~eruptions, opacity := 0.6,
fill = ~factor(round(eruptions))) %>%
add_legend("fill", title = "~ duration (m)", orient = "left") %>%
layer_points()
faithful %>%
ggvis(~waiting, ~eruptions, opacity := 0.6,
fill = ~factor(round(eruptions)), shape = ~factor(round(eruptions)),
size = ~round(eruptions)) %>%
add_legend(c("fill", "shape", "size"), title = "~duration (m)", value = c(2,3,4,5)) %>%
layer_points()
iris %>%
group_by(Species) %>%
ggvis(~Sepal.Width, ~Sepal.Length, fill = ~Species, stroke = ~Species) %>%
layer_points() %>%
layer_smooths()
mtcars %>% ggvis(x = ~wt, y = ~mpg, fill = ~factor(cyl), opacity = ~hp) %>%
layer_points() %>%
scale_numeric("opacity", range = c(0.2,2))
|
#Run everything in the main .Rmd to set up the dataframes for these models.
#Note: modelling takes a while.
# Fits the WARN weaning-age model to Kellis bone-collagen d15N data:
# non-adults (age <= 10) provide the signal, adult females the reference
# mean/sd; ends with a 95% confidence region for the weaning ages.
library(WARN)
# NOTE(review): '&' binds tighter than '|', so sampleType == "Bone" only
# constrains the "Kellis 2" records; if both sites should be bone-only,
# parenthesise the siteName disjunction.
kellis <- subset(egyptCN, siteName == "Kellis 1" | siteName == "Kellis 2" & sampleType == "Bone") %>%
  filter(!is.na(d15N))
kellis2 <- subset(kellis, select = c(refIndividualInPubli, humanSex, humanAgeEstimAvg, d13C, d15N, humanAgeClass))
nonadult <- subset(kellis2, humanAgeEstimAvg <= 10)
#nonadult$humanAgeEstimAvg <- as.numeric(as.character(nonadult$humanAgeEstimAvg))
adult <- subset(kellis2, humanAgeClass == "Adult")
female <- subset(adult, adult$humanSex == "Female")
female.mean = mean(female$d15N)  # computed here but recomputed inline below
# Fit the weaning model: non-adult ages + d15N, adult females as reference.
warn.Kellis <- warn(
  age = nonadult$humanAgeEstimAvg,
  d15N = nonadult$d15N,
  female.mean = mean(female$d15N),
  female.sd = sd(female$d15N))
## Indicate summary.
summary(warn.Kellis)
#simple plot
plot(warn.Kellis,
# hline.adult = F,
# adult.mean = mean(adult$N),
# adult.sd = sd(adult$N),
  is.female = T,
  ylim = c(15, 24),
  xlab = "Age (years)",
  ylab = expression(paste(delta^15, "N"[collagen], " (\u2030, AIR)")),
  main = "Kellis")
# 95% confidence region for the estimated weaning ages (t1, t2).
Kellis.CI <- warnCI(warn.Kellis, 0.95)
plot(Kellis.CI, "age",
  xlab = expression("t"[1]~"(years)"),
  ylab = expression("t"[2]~"(years)"),
  main = "Tell Barri")
# NOTE(review): main = "Tell Barri" looks like a copy-paste leftover from
# another site's script — this analysis is for Kellis; confirm and rename.
Kellis.CI
|
/warn.R
|
no_license
|
stantis/IsoArchEAA
|
R
| false
| false
| 1,309
|
r
|
#Run everything in the main .Rmd to set up the dataframes for these models.
#Note: modelling takes a while.
# WARN weaning-age model for Kellis bone-collagen d15N (duplicate of the
# block above): non-adults drive the fit, adult females give reference values.
library(WARN)
# NOTE(review): '&' binds tighter than '|' — sampleType == "Bone" only
# restricts "Kellis 2"; parenthesise the siteName disjunction if both
# sites should be bone-only.
kellis <- subset(egyptCN, siteName == "Kellis 1" | siteName == "Kellis 2" & sampleType == "Bone") %>%
  filter(!is.na(d15N))
kellis2 <- subset(kellis, select = c(refIndividualInPubli, humanSex, humanAgeEstimAvg, d13C, d15N, humanAgeClass))
nonadult <- subset(kellis2, humanAgeEstimAvg <= 10)
#nonadult$humanAgeEstimAvg <- as.numeric(as.character(nonadult$humanAgeEstimAvg))
adult <- subset(kellis2, humanAgeClass == "Adult")
female <- subset(adult, adult$humanSex == "Female")
female.mean = mean(female$d15N)
warn.Kellis <- warn(
  age = nonadult$humanAgeEstimAvg,
  d15N = nonadult$d15N,
  female.mean = mean(female$d15N),
  female.sd = sd(female$d15N))
## Indicate summary.
summary(warn.Kellis)
#simple plot
plot(warn.Kellis,
# hline.adult = F,
# adult.mean = mean(adult$N),
# adult.sd = sd(adult$N),
  is.female = T,
  ylim = c(15, 24),
  xlab = "Age (years)",
  ylab = expression(paste(delta^15, "N"[collagen], " (\u2030, AIR)")),
  main = "Kellis")
# 95% confidence region for the estimated weaning ages (t1, t2).
Kellis.CI <- warnCI(warn.Kellis, 0.95)
plot(Kellis.CI, "age",
  xlab = expression("t"[1]~"(years)"),
  ylab = expression("t"[2]~"(years)"),
  main = "Tell Barri")
# NOTE(review): main = "Tell Barri" appears to be a copy-paste leftover;
# this script analyses Kellis — confirm and rename.
Kellis.CI
|
# article: Gromov–Wasserstein Distances and the
# Metric Approach to Object Matching
# author : Facundo Mémoli
# implementation of FLB (lk 467)
# constraints to mu (lk 466)
# Build the equality-constraint matrix for the coupling mu (FLB, p. 467).
#
# mu_X, mu_Y : marginal probability vectors of length n_X and n_Y.
#
# Returns a 0/1 matrix with n_X + n_Y rows (one per marginal constraint)
# and n_X * n_Y columns (one per coupling entry mu_ij, stored row-major:
# mu_11, mu_12, ..., mu_21, ...). Row i (i <= n_X) marks all mu_i. entries,
# row n_X + j marks all mu_.j entries; the constraint right-hand sides are
# mu_X(1), ..., mu_X(n_X), mu_Y(1), ..., mu_Y(n_Y).
mu_constraints <- function(mu_X, mu_Y)
{
  n_X <- length(mu_X)
  n_Y <- length(mu_Y)
  # Row-major positions of mu_ij in the flattened coupling vector.
  # (FIX: the original allocated a full NA matrix only to take its length;
  # that allocation is unnecessary.)
  mu_pos <- matrix(seq_len(n_X * n_Y), byrow = TRUE,
                   nrow = n_X, ncol = n_Y)
  c_mat <- matrix(0, nrow = n_X + n_Y, ncol = n_X * n_Y)
  for (i in seq_len(n_X)) {
    c_mat[i, mu_pos[i, ]] <- 1          # row-sum constraint for mu_X(i)
  }
  for (j in seq_len(n_Y)) {
    c_mat[n_X + j, mu_pos[, j]] <- 1    # column-sum constraint for mu_Y(j)
  }
  return(c_mat)
}
|
/R/mu_constraints.R
|
no_license
|
rendrikson/gwDist
|
R
| false
| false
| 1,172
|
r
|
# article: Gromov–Wasserstein Distances and the
# Metric Approach to Object Matching
# author : Facundo Mémoli
# implementation of FLB (lk 467)
# constraints to mu (lk 466)
# Build the equality-constraint matrix for the coupling mu (FLB, p. 467).
#
# mu_X, mu_Y : marginal probability vectors of length n_X and n_Y.
#
# Returns a 0/1 matrix with n_X + n_Y rows (one per marginal constraint)
# and n_X * n_Y columns (one per coupling entry mu_ij, stored row-major:
# mu_11, mu_12, ..., mu_21, ...). Row i (i <= n_X) marks all mu_i. entries,
# row n_X + j marks all mu_.j entries; the constraint right-hand sides are
# mu_X(1), ..., mu_X(n_X), mu_Y(1), ..., mu_Y(n_Y).
mu_constraints <- function(mu_X, mu_Y)
{
  n_X <- length(mu_X)
  n_Y <- length(mu_Y)
  # Row-major positions of mu_ij in the flattened coupling vector.
  # (FIX: the original allocated a full NA matrix only to take its length;
  # that allocation is unnecessary.)
  mu_pos <- matrix(seq_len(n_X * n_Y), byrow = TRUE,
                   nrow = n_X, ncol = n_Y)
  c_mat <- matrix(0, nrow = n_X + n_Y, ncol = n_X * n_Y)
  for (i in seq_len(n_X)) {
    c_mat[i, mu_pos[i, ]] <- 1          # row-sum constraint for mu_X(i)
  }
  for (j in seq_len(n_Y)) {
    c_mat[n_X + j, mu_pos[, j]] <- 1    # column-sum constraint for mu_Y(j)
  }
  return(c_mat)
}
|
# rekreirati SPSS-ov FREQUENCIES
# S3 generic that recreates the output of SPSS's FREQUENCIES command;
# dispatches on the class of `x` (labelled / factor methods below).
frre <- function(x, ...) UseMethod("frre")
# labelled ----------------------------------------------------------------
# Frequency table for a `labelled` vector (haven/labelled class).
#
# x           : labelled vector (must carry a variable label or a custom `ime`).
# lab.duljina : maximum display width for variable / value labels.
# ime         : optional custom variable label; overrides the stored one.
# N           : print valid / missing counts to the console?
# levels, sort_levels, drop : forwarded to labelled::to_factor().
# kablica     : if TRUE return knitr::kable() output; otherwise a data.frame
#               with Percents rounded to whole numbers.
# digits      : digits forwarded to knitr::kable().
frre.labelled <- function (x, lab.duljina = 40, ime="",
                           N = TRUE,
                           levels = "prefixed", sort_levels = "auto", drop = TRUE,
                           kablica = TRUE, digits = 2, ...) {
  varlab <- attributes(x)[["label"]]
  if (nchar(ime) > 0 ) {
    varlab <- ime
    # a custom label should not be truncated at the default width
    if (lab.duljina == 40) lab.duljina <- 200
  }
  if (identical(varlab, attributes(x)[["labels"]])) {
    stop("vaR lab i vaL lab su isti - vjerojatno nepostojeći")
  }
  lejbld <- labelled::to_factor(x, levels = levels,
                                sort_levels = sort_levels,
                                drop_unused_labels = drop)
  levels(lejbld) <- strtrim(levels(lejbld), lab.duljina - 3)
  # FIX: to_factor() was called unqualified below, which fails unless the
  # labelled package is attached; all calls are now namespace-qualified
  # for consistency with the labelled::to_factor() call above.
  if (drop && !identical(
    levels(labelled::to_factor(x)),
    levels(labelled::to_factor(x, drop_unused_labels = TRUE))
  )) {
    no_of_dropped <-
      length(levels(labelled::to_factor(x))) -
      length(levels(labelled::to_factor(x, drop_unused_labels = TRUE)))
    warning(paste(no_of_dropped, "level(s) dropped"), call. = FALSE)
  }
  # counts and proportions merged side by side, preserving level order
  gnjec.df <- merge.data.frame(as.data.frame(table(lejbld)),
                               as.data.frame(prop.table(table(lejbld))),
                               by = "lejbld", sort = FALSE)
  if (!is.null(varlab)) {
    names(gnjec.df)[1] <- strtrim(varlab, lab.duljina)
  } else names(gnjec.df)[1] <- deparse(substitute(x))
  names(gnjec.df)[2] <- "Counts"
  names(gnjec.df)[3] <- "Percents"
  if (N) {
    cat("valid =", sum(!is.na(x)),
        " missing =", sum(is.na(x)))
  }
  if (kablica) {
    knitr::kable(gnjec.df, digits = digits, ...)
  } else {
    gnjec.df$Percents <- round(gnjec.df$Percents * 100)
    gnjec.df
  }
}
# LABELLED primjeri -------------------------------------------------------
# s1 <- labelled(c("M", "M", "F"), c(Male = "M", Female = "F"))
# s2 <- labelled(c(1, 1, 2), c(Male = 1, Female = 2))
# var_label(s2) <- "A vector to label. Must be either numeric or character."
#
# frre(s2)
# frre(s2, lab.duljina = 60) # duže ime
# frre(s1, # custom ime ostaje dugo
# ime = "when coercing a labelled character vector to a factor")
# frre(s1, lab.duljina = 10, # ...ostaje dugo ako se ne specifira non-default
# ime = "when coercing a labelled character vector to a factor")
# frre(s2, N = FALSE)
# frre(s1, N = FALSE, kablica = FALSE) # ružno, ali ima svojih čari
# frre(s1, digits = 3) # digits : proslijeđen u kable, def = 2
# frre(s2, # argument proslijeđen u kable /// ne printa se u konzoli
# caption = "naslov koji se ne vidi, ali je valjda tu")
# factor ------------------------------------------------------------------
# Frequency table for a plain factor, in the spirit of SPSS FREQUENCIES.
# Returns a knitr::kable() (kablica = TRUE) or a data.frame with whole-number
# Percents. `ime` overrides the stored variable label; `lab.duljina` caps the
# displayed label width; `drop` removes (and warns about) unused levels.
frre.factor <- function (x, lab.duljina = 40, ime="",
                         N = TRUE, drop = TRUE, kablica = TRUE, digits = 2, ...) {
  varlab <- attributes(x)[["label"]]
  if (nchar(ime) > 0) {
    varlab <- ime
    if (lab.duljina == 40) {
      lab.duljina <- 200  # custom labels are shown untruncated
    }
  }
  n_dropped <- length(levels(x)) - length(levels(droplevels(x)))
  if (drop && n_dropped > 0) {
    warning(paste(n_dropped, "level(s) dropped"), call. = FALSE)
  }
  var_name <- deparse(substitute(x))  # capture before x is reassigned below
  if (drop) {
    x <- droplevels(x)
  }
  levels(x) <- strtrim(levels(x), lab.duljina - 3)
  # counts and proportions merged side by side, preserving level order
  freq_tab <- merge.data.frame(as.data.frame(table(x)),
                               as.data.frame(prop.table(table(x))),
                               by = "x", sort = FALSE)
  names(freq_tab)[1] <- if (is.null(varlab)) var_name else strtrim(varlab, lab.duljina)
  names(freq_tab)[2] <- "Counts"
  names(freq_tab)[3] <- "Percents"
  if (N) {
    cat("valid =", sum(!is.na(x)), " missing =", sum(is.na(x)))
  }
  if (kablica) {
    knitr::kable(freq_tab, digits = digits, ...)
  } else {
    freq_tab$Percents <- round(freq_tab$Percents * 100)
    freq_tab
  }
}
# FACTOR primjeri ---------------------------------------------------------
# s1 <- labelled(c("M", "M", "F"), c(Male = "M", Female = "F"))
# s1f <- to_factor(s1)
# var_label(s1f) <- "neko izrazito dugačko i nespretno ime velike dužine"
# frre(s1f)
#
# sx <- to_factor(scopes.2015$p38)
# frre(sx, drop = TRUE)
#
# frre(diamonds$cut, ime = "Cut of the diamonds", N = FALSE)
# ====
# numeric DEPRECATED ------------------------------------------------------
# frre.numeric <- function (x, varLabDuljina = 40, ime="",
# prosjekN = FALSE, kablica = TRUE, ...) {
# varlab <- attributes(x)[["label"]]
# if (nchar(ime) > 0) {
# varlab <- ime
# if (varLabDuljina == 40) varLabDuljina <- 200
# }
# nejm <- deparse(substitute(x))
# gnjec.df <- merge.data.frame(as.data.frame(table(x)),
# as.data.frame(prop.table(table(x))),
# by = "x", sort = FALSE)
# if (!is.null(varlab)) {
# names(gnjec.df)[1] <- strtrim(varlab, varLabDuljina)
# } else names(gnjec.df)[1] <- nejm
# names(gnjec.df)[2] <- "Counts"
# names(gnjec.df)[3] <- "Percents"
#
# if (prosjekN) {
# cat("valid =", sum(!is.na(x)),
# " missing =", sum(is.na(x)),
# "mean =", round(mean(x, na.rm = TRUE), digits = 2))
# }
# if (kablica) {
# knitr::kable(gnjec.df, digits = 2, ...)
# } else {
# gnjec.df$Percents <- round(gnjec.df$Percents * 100)
# gnjec.df
# }
# }
# PRIMJERI # numeric ----
# frre(mtcars$cyl, prosjek = TRUE)
# frre(diamonds$carat, prosjek = TRUE) # bzvz
# hist(diamonds$carat); summary(diamonds$carat) # klasika je bolja
# ====
|
/FUN_frre.R
|
no_license
|
ipuzek/R_funkcije
|
R
| false
| false
| 5,743
|
r
|
# rekreirati SPSS-ov FREQUENCIES
# S3 generic that recreates the output of SPSS's FREQUENCIES command;
# dispatches on the class of `x` (labelled / factor methods below).
frre <- function(x, ...) UseMethod("frre")
# labelled ----------------------------------------------------------------
# Frequency table for a `labelled` vector (haven/labelled class).
#
# x           : labelled vector (must carry a variable label or a custom `ime`).
# lab.duljina : maximum display width for variable / value labels.
# ime         : optional custom variable label; overrides the stored one.
# N           : print valid / missing counts to the console?
# levels, sort_levels, drop : forwarded to labelled::to_factor().
# kablica     : if TRUE return knitr::kable() output; otherwise a data.frame
#               with Percents rounded to whole numbers.
# digits      : digits forwarded to knitr::kable().
frre.labelled <- function (x, lab.duljina = 40, ime="",
                           N = TRUE,
                           levels = "prefixed", sort_levels = "auto", drop = TRUE,
                           kablica = TRUE, digits = 2, ...) {
  varlab <- attributes(x)[["label"]]
  if (nchar(ime) > 0 ) {
    varlab <- ime
    # a custom label should not be truncated at the default width
    if (lab.duljina == 40) lab.duljina <- 200
  }
  if (identical(varlab, attributes(x)[["labels"]])) {
    stop("vaR lab i vaL lab su isti - vjerojatno nepostojeći")
  }
  lejbld <- labelled::to_factor(x, levels = levels,
                                sort_levels = sort_levels,
                                drop_unused_labels = drop)
  levels(lejbld) <- strtrim(levels(lejbld), lab.duljina - 3)
  # FIX: to_factor() was called unqualified below, which fails unless the
  # labelled package is attached; all calls are now namespace-qualified
  # for consistency with the labelled::to_factor() call above.
  if (drop && !identical(
    levels(labelled::to_factor(x)),
    levels(labelled::to_factor(x, drop_unused_labels = TRUE))
  )) {
    no_of_dropped <-
      length(levels(labelled::to_factor(x))) -
      length(levels(labelled::to_factor(x, drop_unused_labels = TRUE)))
    warning(paste(no_of_dropped, "level(s) dropped"), call. = FALSE)
  }
  # counts and proportions merged side by side, preserving level order
  gnjec.df <- merge.data.frame(as.data.frame(table(lejbld)),
                               as.data.frame(prop.table(table(lejbld))),
                               by = "lejbld", sort = FALSE)
  if (!is.null(varlab)) {
    names(gnjec.df)[1] <- strtrim(varlab, lab.duljina)
  } else names(gnjec.df)[1] <- deparse(substitute(x))
  names(gnjec.df)[2] <- "Counts"
  names(gnjec.df)[3] <- "Percents"
  if (N) {
    cat("valid =", sum(!is.na(x)),
        " missing =", sum(is.na(x)))
  }
  if (kablica) {
    knitr::kable(gnjec.df, digits = digits, ...)
  } else {
    gnjec.df$Percents <- round(gnjec.df$Percents * 100)
    gnjec.df
  }
}
# LABELLED primjeri -------------------------------------------------------
# s1 <- labelled(c("M", "M", "F"), c(Male = "M", Female = "F"))
# s2 <- labelled(c(1, 1, 2), c(Male = 1, Female = 2))
# var_label(s2) <- "A vector to label. Must be either numeric or character."
#
# frre(s2)
# frre(s2, lab.duljina = 60) # duže ime
# frre(s1, # custom ime ostaje dugo
# ime = "when coercing a labelled character vector to a factor")
# frre(s1, lab.duljina = 10, # ...ostaje dugo ako se ne specifira non-default
# ime = "when coercing a labelled character vector to a factor")
# frre(s2, N = FALSE)
# frre(s1, N = FALSE, kablica = FALSE) # ružno, ali ima svojih čari
# frre(s1, digits = 3) # digits : proslijeđen u kable, def = 2
# frre(s2, # argument proslijeđen u kable /// ne printa se u konzoli
# caption = "naslov koji se ne vidi, ali je valjda tu")
# factor ------------------------------------------------------------------
# Frequency table for a plain factor, in the spirit of SPSS FREQUENCIES.
# Returns a knitr::kable() (kablica = TRUE) or a data.frame with whole-number
# Percents. `ime` overrides the stored variable label; `lab.duljina` caps the
# displayed label width; `drop` removes (and warns about) unused levels.
frre.factor <- function (x, lab.duljina = 40, ime="",
                         N = TRUE, drop = TRUE, kablica = TRUE, digits = 2, ...) {
  varlab <- attributes(x)[["label"]]
  if (nchar(ime) > 0) {
    varlab <- ime
    if (lab.duljina == 40) {
      lab.duljina <- 200  # custom labels are shown untruncated
    }
  }
  n_dropped <- length(levels(x)) - length(levels(droplevels(x)))
  if (drop && n_dropped > 0) {
    warning(paste(n_dropped, "level(s) dropped"), call. = FALSE)
  }
  var_name <- deparse(substitute(x))  # capture before x is reassigned below
  if (drop) {
    x <- droplevels(x)
  }
  levels(x) <- strtrim(levels(x), lab.duljina - 3)
  # counts and proportions merged side by side, preserving level order
  freq_tab <- merge.data.frame(as.data.frame(table(x)),
                               as.data.frame(prop.table(table(x))),
                               by = "x", sort = FALSE)
  names(freq_tab)[1] <- if (is.null(varlab)) var_name else strtrim(varlab, lab.duljina)
  names(freq_tab)[2] <- "Counts"
  names(freq_tab)[3] <- "Percents"
  if (N) {
    cat("valid =", sum(!is.na(x)), " missing =", sum(is.na(x)))
  }
  if (kablica) {
    knitr::kable(freq_tab, digits = digits, ...)
  } else {
    freq_tab$Percents <- round(freq_tab$Percents * 100)
    freq_tab
  }
}
# FACTOR primjeri ---------------------------------------------------------
# s1 <- labelled(c("M", "M", "F"), c(Male = "M", Female = "F"))
# s1f <- to_factor(s1)
# var_label(s1f) <- "neko izrazito dugačko i nespretno ime velike dužine"
# frre(s1f)
#
# sx <- to_factor(scopes.2015$p38)
# frre(sx, drop = TRUE)
#
# frre(diamonds$cut, ime = "Cut of the diamonds", N = FALSE)
# ====
# numeric DEPRECATED ------------------------------------------------------
# frre.numeric <- function (x, varLabDuljina = 40, ime="",
# prosjekN = FALSE, kablica = TRUE, ...) {
# varlab <- attributes(x)[["label"]]
# if (nchar(ime) > 0) {
# varlab <- ime
# if (varLabDuljina == 40) varLabDuljina <- 200
# }
# nejm <- deparse(substitute(x))
# gnjec.df <- merge.data.frame(as.data.frame(table(x)),
# as.data.frame(prop.table(table(x))),
# by = "x", sort = FALSE)
# if (!is.null(varlab)) {
# names(gnjec.df)[1] <- strtrim(varlab, varLabDuljina)
# } else names(gnjec.df)[1] <- nejm
# names(gnjec.df)[2] <- "Counts"
# names(gnjec.df)[3] <- "Percents"
#
# if (prosjekN) {
# cat("valid =", sum(!is.na(x)),
# " missing =", sum(is.na(x)),
# "mean =", round(mean(x, na.rm = TRUE), digits = 2))
# }
# if (kablica) {
# knitr::kable(gnjec.df, digits = 2, ...)
# } else {
# gnjec.df$Percents <- round(gnjec.df$Percents * 100)
# gnjec.df
# }
# }
# PRIMJERI # numeric ----
# frre(mtcars$cyl, prosjek = TRUE)
# frre(diamonds$carat, prosjek = TRUE) # bzvz
# hist(diamonds$carat); summary(diamonds$carat) # klasika je bolja
# ====
|
#' Visualize data using ggplot2 \url{http://docs.ggplot2.org/current/}
#'
#' @details See \url{http://vnijs.github.io/radiant/base/visualize.html} for an example in Radiant
#'
#' @param dataset Dataset name (string). This can be a dataframe in the global environment or an element in an r_data list from Radiant
#' @param viz_xvar One or more variables to display along the X-axis of the plot
#' @param viz_yvar Variable to display along the Y-axis of the plot (default = "none")
#' @param data_filter Expression used to filter the dataset. This should be a string (e.g., "price > 10000")
#' @param viz_type Type of plot to create. One of Histogram ('hist'), Density ('density'), Scatter ('scatter'), Line ('line'), Bar ('bar'), or Box-plot ('box')
#' @param viz_facet_row Create vertically arranged subplots for each level of the selected factor variable
#' @param viz_facet_col Create horizontally arranged subplots for each level of the selected factor variable
#' @param viz_color Adds color to a scatter plot to generate a heat map. For a line plot one line is created for each group and each is assigned a different colour
#' @param viz_bins Number of bins used for a histogram (not accessible in Radiant)
#' @param viz_smooth Adjust the flexibility of the loess line for scatter plots (not accessible in Radiant)
#' @param viz_check Add a regression line ("line"), a loess line ("loess"), or jitter ("jitter") to a scatter plot
#' @param viz_axes Flip the axes in a plot ("flip") or apply a log transformation (base e) to the y-axis ("log_y") or the x-axis ("log_x")
#' @param shiny Did the function call originate inside a shiny app
#'
#' @return Generated plots
#'
#' @examples
#' visualize("diamonds", "carat", "price", viz_type = "scatter", viz_check = "loess")
#' visualize("diamonds", "price:x", viz_type = "hist")
#' visualize("diamonds", "carat:x", viz_yvar = "price", viz_type = "scatter")
#' diamonds %>% visualize(c("price","carat","depth"), viz_type = "density")
#' @export
visualize <- function(dataset, viz_xvar,
                      viz_yvar = "none",
                      data_filter = "",
                      viz_type = "hist",
                      viz_facet_row = ".",
                      viz_facet_col = ".",
                      viz_color = "none",
                      viz_bins = 10,
                      viz_smooth = 1,
                      viz_check = "",
                      viz_axes = "",
                      shiny = FALSE) {

  # inspired by Joe Cheng's ggplot2 browser app http://www.youtube.com/watch?feature=player_embedded&v=o2B5yJeEl1A#!

  # collect every variable the plot needs so getdata() retrieves them all;
  # color only applies to scatter and line plots
  vars <- viz_xvar
  if (!viz_type %in% c("scatter","line"))
    viz_color = "none"

  if (viz_yvar != "none") vars %<>% c(., viz_yvar)
  if (viz_color != "none") vars %<>% c(., viz_color)
  if (viz_facet_row != ".") vars %<>% c(., viz_facet_row)
  if (viz_facet_col != ".") vars %<>% c(., viz_facet_col)

  # so you can also pass-in a data.frame
  dat <- getdata(dataset, vars, filt = data_filter)
  if (!is_string(dataset)) dataset <- "-----"

  # if : is used to specify a range of variables, expand it to all
  # columns between the two endpoints
  if (length(vars) < ncol(dat)) {
    fl <- strsplit(viz_xvar,":") %>% unlist
    cn <- colnames(dat)
    viz_xvar <- cn[which(fl[1] == cn):which(fl[2] == cn)]
  }

  # character columns cannot be plotted directly: coerce them to numeric
  # for density plots and to factors for everything else
  isChar <- sapply(dat, class) == "character"
  if (sum(isChar) > 0) {
    if (viz_type == "density")
      dat[,isChar] %<>% data.frame %>% mutate_each(funs(as.numeric))
    else
      dat[,isChar] %<>% data.frame %>% mutate_each(funs(as.factor))
  }

  # one plot per x-variable (and per y-variable for bivariate types)
  plots <- list()
  if (viz_type == "hist") {
    for (i in viz_xvar) {
      plots[[i]] <- ggplot(dat, aes_string(x=i))
      if ("factor" %in% class(dat[,i])) {
        plots[[i]] <- plots[[i]] + geom_histogram()
      } else {
        # bin width derived from the variable's range and the requested bin count
        bw <- select_(dat,i) %>% range %>% diff(.)/viz_bins
        plots[[i]] <- plots[[i]] + geom_histogram(binwidth = bw)
        # + stat_ecdf()
      }
    }
  } else if (viz_type == "density") {
    for (i in viz_xvar) {
      plots[[i]] <- ggplot(dat, aes_string(x=i)) +
                    geom_density(adjust=viz_smooth, fill = "green", alpha=.3)
    }
  } else if (viz_type == "scatter") {
    itt <- 1
    # when jitter is requested, points are added later via geom_jitter
    gs <- if ("jitter" %in% viz_check) geom_blank() else geom_point(alpha = .5)
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + gs
        itt <- itt + 1
      }
    }
  } else if (viz_type == "line") {
    itt <- 1
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        if (viz_color == 'none') {
          if (is.factor(dat[,i]))
            plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_line(aes(group = 1))
          else
            plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_line()
        } else {
          plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, color = viz_color)) + geom_line()
          # plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, color = viz_color)) + geom_line(aes_string(group = viz_color))
        }
        itt <- itt + 1
      }
    }
  } else if (viz_type == "bar") {
    itt <- 1
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_bar(stat="identity")
        itt <- itt + 1
      }
    }
  } else if (viz_type == "box") {
    itt <- 1
    for (i in viz_xvar) {
      dat[,i] %<>% as.factor
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, fill=i)) +
                        geom_boxplot(alpha = .7) +
                        theme(legend.position = "none")
        itt <- itt + 1
      }
    }
  }

  # facetting, color, reference lines and axis transformations are applied
  # uniformly to every generated plot
  facets <- paste(viz_facet_row, '~', viz_facet_col)
  if (facets != '. ~ .')
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + facet_grid(facets)

  if (viz_color != 'none')
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + aes_string(color=viz_color) + scale_fill_brewer()

  # FIX: the right-hand side previously used plots[[1]], overwriting every
  # plot with a jittered copy of the FIRST plot
  if ("jitter" %in% viz_check)
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + geom_jitter(alpha = .5)

  if ("line" %in% viz_check)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + geom_smooth(method = "lm", fill = 'blue',
                                             alpha = .1, size = .75,
                                             linetype = "dashed",
                                             colour = 'black')

  if ("loess" %in% viz_check)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + geom_smooth(span = viz_smooth, size = .75,
                                             linetype = "dotdash", aes(group=1))

  if ("flip" %in% viz_axes)
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + coord_flip()
  if ("log_y" %in% viz_axes)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + scale_y_continuous(trans = "log")
  if ("log_x" %in% viz_axes)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + scale_x_continuous(trans = "log")

  # arrange all plots in a grid (max 2 columns); print unless called from shiny
  sshhr( do.call(arrangeGrob, c(plots, list(ncol = min(length(plots), 2)))) ) %>%
    { if (shiny) . else print(.) }
}
|
/certus/R/visualize.R
|
no_license
|
gvanzin/shiny
|
R
| false
| false
| 7,077
|
r
|
#' Visualize data using ggplot2 \url{http://docs.ggplot2.org/current/}
#'
#' @details See \url{http://vnijs.github.io/radiant/base/visualize.html} for an example in Radiant
#'
#' @param dataset Dataset name (string). This can be a dataframe in the global environment or an element in an r_data list from Radiant
#' @param viz_xvar One or more variables to display along the X-axis of the plot
#' @param viz_yvar Variable to display along the Y-axis of the plot (default = "none")
#' @param data_filter Expression used to filter the dataset. This should be a string (e.g., "price > 10000")
#' @param viz_type Type of plot to create. One of Histogram ('hist'), Density ('density'), Scatter ('scatter'), Line ('line'), Bar ('bar'), or Box-plot ('box')
#' @param viz_facet_row Create vertically arranged subplots for each level of the selected factor variable
#' @param viz_facet_col Create horizontally arranged subplots for each level of the selected factor variable
#' @param viz_color Adds color to a scatter plot to generate a heat map. For a line plot one line is created for each group and each is assigned a different colour
#' @param viz_bins Number of bins used for a histogram (not accessible in Radiant)
#' @param viz_smooth Adjust the flexibility of the loess line for scatter plots (not accessible in Radiant)
#' @param viz_check Add a regression line ("line"), a loess line ("loess"), or jitter ("jitter") to a scatter plot
#' @param viz_axes Flip the axes in a plot ("flip") or apply a log transformation (base e) to the y-axis ("log_y") or the x-axis ("log_x")
#' @param shiny Did the function call originate inside a shiny app
#'
#' @return Generated plots
#'
#' @examples
#' visualize("diamonds", "carat", "price", viz_type = "scatter", viz_check = "loess")
#' visualize("diamonds", "price:x", viz_type = "hist")
#' visualize("diamonds", "carat:x", viz_yvar = "price", viz_type = "scatter")
#' diamonds %>% visualize(c("price","carat","depth"), viz_type = "density")
#' @export
visualize <- function(dataset, viz_xvar,
                      viz_yvar = "none",
                      data_filter = "",
                      viz_type = "hist",
                      viz_facet_row = ".",
                      viz_facet_col = ".",
                      viz_color = "none",
                      viz_bins = 10,
                      viz_smooth = 1,
                      viz_check = "",
                      viz_axes = "",
                      shiny = FALSE) {

  # inspired by Joe Cheng's ggplot2 browser app http://www.youtube.com/watch?feature=player_embedded&v=o2B5yJeEl1A#!

  # collect every variable the plot needs so getdata() retrieves them all;
  # color only applies to scatter and line plots
  vars <- viz_xvar
  if (!viz_type %in% c("scatter","line"))
    viz_color = "none"

  if (viz_yvar != "none") vars %<>% c(., viz_yvar)
  if (viz_color != "none") vars %<>% c(., viz_color)
  if (viz_facet_row != ".") vars %<>% c(., viz_facet_row)
  if (viz_facet_col != ".") vars %<>% c(., viz_facet_col)

  # so you can also pass-in a data.frame
  dat <- getdata(dataset, vars, filt = data_filter)
  if (!is_string(dataset)) dataset <- "-----"

  # if : is used to specify a range of variables, expand it to all
  # columns between the two endpoints
  if (length(vars) < ncol(dat)) {
    fl <- strsplit(viz_xvar,":") %>% unlist
    cn <- colnames(dat)
    viz_xvar <- cn[which(fl[1] == cn):which(fl[2] == cn)]
  }

  # character columns cannot be plotted directly: coerce them to numeric
  # for density plots and to factors for everything else
  isChar <- sapply(dat, class) == "character"
  if (sum(isChar) > 0) {
    if (viz_type == "density")
      dat[,isChar] %<>% data.frame %>% mutate_each(funs(as.numeric))
    else
      dat[,isChar] %<>% data.frame %>% mutate_each(funs(as.factor))
  }

  # one plot per x-variable (and per y-variable for bivariate types)
  plots <- list()
  if (viz_type == "hist") {
    for (i in viz_xvar) {
      plots[[i]] <- ggplot(dat, aes_string(x=i))
      if ("factor" %in% class(dat[,i])) {
        plots[[i]] <- plots[[i]] + geom_histogram()
      } else {
        # bin width derived from the variable's range and the requested bin count
        bw <- select_(dat,i) %>% range %>% diff(.)/viz_bins
        plots[[i]] <- plots[[i]] + geom_histogram(binwidth = bw)
        # + stat_ecdf()
      }
    }
  } else if (viz_type == "density") {
    for (i in viz_xvar) {
      plots[[i]] <- ggplot(dat, aes_string(x=i)) +
                    geom_density(adjust=viz_smooth, fill = "green", alpha=.3)
    }
  } else if (viz_type == "scatter") {
    itt <- 1
    # when jitter is requested, points are added later via geom_jitter
    gs <- if ("jitter" %in% viz_check) geom_blank() else geom_point(alpha = .5)
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + gs
        itt <- itt + 1
      }
    }
  } else if (viz_type == "line") {
    itt <- 1
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        if (viz_color == 'none') {
          if (is.factor(dat[,i]))
            plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_line(aes(group = 1))
          else
            plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_line()
        } else {
          plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, color = viz_color)) + geom_line()
          # plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, color = viz_color)) + geom_line(aes_string(group = viz_color))
        }
        itt <- itt + 1
      }
    }
  } else if (viz_type == "bar") {
    itt <- 1
    for (i in viz_xvar) {
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j)) + geom_bar(stat="identity")
        itt <- itt + 1
      }
    }
  } else if (viz_type == "box") {
    itt <- 1
    for (i in viz_xvar) {
      dat[,i] %<>% as.factor
      for (j in viz_yvar) {
        plots[[itt]] <- ggplot(dat, aes_string(x=i, y=j, fill=i)) +
                        geom_boxplot(alpha = .7) +
                        theme(legend.position = "none")
        itt <- itt + 1
      }
    }
  }

  # facetting, color, reference lines and axis transformations are applied
  # uniformly to every generated plot
  facets <- paste(viz_facet_row, '~', viz_facet_col)
  if (facets != '. ~ .')
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + facet_grid(facets)

  if (viz_color != 'none')
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + aes_string(color=viz_color) + scale_fill_brewer()

  # FIX: the right-hand side previously used plots[[1]], overwriting every
  # plot with a jittered copy of the FIRST plot
  if ("jitter" %in% viz_check)
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + geom_jitter(alpha = .5)

  if ("line" %in% viz_check)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + geom_smooth(method = "lm", fill = 'blue',
                                             alpha = .1, size = .75,
                                             linetype = "dashed",
                                             colour = 'black')

  if ("loess" %in% viz_check)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + geom_smooth(span = viz_smooth, size = .75,
                                             linetype = "dotdash", aes(group=1))

  if ("flip" %in% viz_axes)
    for (i in 1:length(plots)) plots[[i]] <- plots[[i]] + coord_flip()
  if ("log_y" %in% viz_axes)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + scale_y_continuous(trans = "log")
  if ("log_x" %in% viz_axes)
    for (i in 1:length(plots))
      plots[[i]] <- plots[[i]] + scale_x_continuous(trans = "log")

  # arrange all plots in a grid (max 2 columns); print unless called from shiny
  sshhr( do.call(arrangeGrob, c(plots, list(ncol = min(length(plots), 2)))) ) %>%
    { if (shiny) . else print(.) }
}
|
# Example usage of yardstick::rsq (R squared), extracted from the package's
# rsq.Rd examples; requires the yardstick and dplyr packages and the
# solubility_test data set shipped with yardstick.
library(yardstick)
### Name: rsq
### Title: R squared
### Aliases: rsq rsq.data.frame rsq_vec
### ** Examples
# Supply truth and predictions as bare column names
rsq(solubility_test, solubility, prediction)
library(dplyr)
set.seed(1234)
size <- 100
times <- 10
# create 10 bootstrap resamples (rows sampled with replacement),
# tagged by a "resample" id column
solubility_resampled <- bind_rows(
  replicate(
    n = times,
    expr = sample_n(solubility_test, size, replace = TRUE),
    simplify = FALSE
  ),
  .id = "resample"
)
# Compute the metric by group
metric_results <- solubility_resampled %>%
  group_by(resample) %>%
  rsq(solubility, prediction)
metric_results
# Resampled mean estimate
metric_results %>%
  summarise(avg_estimate = mean(.estimate))
# With uninformative data, the traditional version of R^2 can return
# negative values.
set.seed(2291)
solubility_test$randomized <- sample(solubility_test$prediction)
rsq(solubility_test, solubility, randomized)
rsq_trad(solubility_test, solubility, randomized)
|
/data/genthat_extracted_code/yardstick/examples/rsq.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 962
|
r
|
# Example usage of yardstick::rsq (R squared), extracted from the package's
# rsq.Rd examples; requires the yardstick and dplyr packages and the
# solubility_test data set shipped with yardstick.
library(yardstick)
### Name: rsq
### Title: R squared
### Aliases: rsq rsq.data.frame rsq_vec
### ** Examples
# Supply truth and predictions as bare column names
rsq(solubility_test, solubility, prediction)
library(dplyr)
set.seed(1234)
size <- 100
times <- 10
# create 10 bootstrap resamples (rows sampled with replacement),
# tagged by a "resample" id column
solubility_resampled <- bind_rows(
  replicate(
    n = times,
    expr = sample_n(solubility_test, size, replace = TRUE),
    simplify = FALSE
  ),
  .id = "resample"
)
# Compute the metric by group
metric_results <- solubility_resampled %>%
  group_by(resample) %>%
  rsq(solubility, prediction)
metric_results
# Resampled mean estimate
metric_results %>%
  summarise(avg_estimate = mean(.estimate))
# With uninformative data, the traditional version of R^2 can return
# negative values.
set.seed(2291)
solubility_test$randomized <- sample(solubility_test$prediction)
rsq(solubility_test, solubility, randomized)
rsq_trad(solubility_test, solubility, randomized)
|
# Version info: R 3.2.3, Biobase 2.30.0, GEOquery 2.40.0, limma 3.26.8
# R scripts generated Mon Sep 24 22:18:03 EDT 2018
# GEO2R-generated script: limma differential expression between two sample
# groups of GEO series GSE71034 (platform GPL1261).
################################################################
#   Differential expression analysis with limma
library(Biobase)
library(GEOquery)
library(limma)
# load series and platform data from GEO
gset <- getGEO("GSE71034", GSEMatrix =TRUE, AnnotGPL=TRUE)
if (length(gset) > 1) idx <- grep("GPL1261", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
# make proper column names to match toptable
fvarLabels(gset) <- make.names(fvarLabels(gset))
# group membership mask: one character per GSM sample in series order;
# "0"/"1" assign a sample to a comparison group, "X" excludes it
gsms <- "XXXXXXXXXX1111100000XXXXXXXXXXXXXXXXXXXXXX"
sml <- c()
for (i in 1:nchar(gsms)) { sml[i] <- substr(gsms,i,i) }
# eliminate samples marked as "X"
sel <- which(sml != "X")
sml <- sml[sel]
gset <- gset[ ,sel]
# log2 transform, applied only when the values look un-logged
# (GEO2R's standard quantile heuristic)
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
          (qx[6]-qx[1] > 50 && qx[2] > 0) ||
          (qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
  exprs(gset) <- log2(ex) }
# set up the data and proceed with analysis
sml <- paste("G", sml, sep="")    # set group names
fl <- as.factor(sml)
gset$description <- fl
design <- model.matrix(~ description + 0, gset)
colnames(design) <- levels(fl)
fit <- lmFit(gset, design)
# contrast of interest: group G1 versus group G0
cont.matrix <- makeContrasts(G1-G0, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
# top 10000 probes ranked by log-odds (B), with FDR-adjusted p-values
tT <- topTable(fit2, adjust="fdr", sort.by="B", number=10000)
tT <- subset(tT, select=c("ID","adj.P.Val","P.Value","t","B","logFC","Gene.symbol","Gene.title"))
# write.table(tT, file=stdout(), row.names=F, sep="\t")
# stdout()
write.table(tT,file="C:/gse71034_cotex_primary_neuron_fragile_x.txt" ,row.names=F, sep="\t")
# save as txt
################################################################
#   Boxplot for selected GEO samples (kept commented out for reference)
#library(Biobase)
#library(GEOquery)
# load series and platform data from GEO
#gset <- getGEO("GSE71034", GSEMatrix =TRUE, getGPL=FALSE)
#if (length(gset) > 1) idx <- grep("GPL1261", attr(gset, "names")) else idx <- 1
#gset <- gset[[idx]]
# group names for all samples in a series
#gsms <- "XXXXXXXXXX1111100000XXXXXXXXXXXXXXXXXXXXXX"
#sml <- c()
#for (i in 1:nchar(gsms)) { sml[i] <- substr(gsms,i,i) }
#sml <- paste("G", sml, sep="")  set group names
# eliminate samples marked as "X"
#sel <- which(sml != "X")
#sml <- sml[sel]
#gset <- gset[ ,sel]
# order samples by group
#ex <- exprs(gset)[ , order(sml)]
#sml <- sml[order(sml)]
#fl <- as.factor(sml)
#labels <- c("B","a")
# set parameters and draw the plot
#palette(c("#dfeaf4","#f4dfdf", "#AABBCC"))
#dev.new(width=4+dim(gset)[[2]]/5, height=6)
#par(mar=c(2+round(max(nchar(sampleNames(gset)))/2),4,2,1))
#title <- paste ("GSE71034", '/', annotation(gset), " selected samples", sep ='')
#boxplot(ex, boxwex=0.6, notch=T, main=title, outline=FALSE, las=2, col=fl)
#legend("topleft", labels, fill=palette(), bty="n")
|
/gse71034_cotex_primary_neuron_fragile_x.r
|
no_license
|
cgh2797/GEO2R
|
R
| false
| false
| 3,023
|
r
|
# Version info: R 3.2.3, Biobase 2.30.0, GEOquery 2.40.0, limma 3.26.8
# R scripts generated Mon Sep 24 22:18:03 EDT 2018
# GEO2R-generated script: limma differential expression between two sample
# groups of GEO series GSE71034 (platform GPL1261).
################################################################
#   Differential expression analysis with limma
library(Biobase)
library(GEOquery)
library(limma)
# load series and platform data from GEO
gset <- getGEO("GSE71034", GSEMatrix =TRUE, AnnotGPL=TRUE)
if (length(gset) > 1) idx <- grep("GPL1261", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
# make proper column names to match toptable
fvarLabels(gset) <- make.names(fvarLabels(gset))
# group membership mask: one character per GSM sample in series order;
# "0"/"1" assign a sample to a comparison group, "X" excludes it
gsms <- "XXXXXXXXXX1111100000XXXXXXXXXXXXXXXXXXXXXX"
sml <- c()
for (i in 1:nchar(gsms)) { sml[i] <- substr(gsms,i,i) }
# eliminate samples marked as "X"
sel <- which(sml != "X")
sml <- sml[sel]
gset <- gset[ ,sel]
# log2 transform, applied only when the values look un-logged
# (GEO2R's standard quantile heuristic)
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
          (qx[6]-qx[1] > 50 && qx[2] > 0) ||
          (qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
  exprs(gset) <- log2(ex) }
# set up the data and proceed with analysis
sml <- paste("G", sml, sep="")    # set group names
fl <- as.factor(sml)
gset$description <- fl
design <- model.matrix(~ description + 0, gset)
colnames(design) <- levels(fl)
fit <- lmFit(gset, design)
# contrast of interest: group G1 versus group G0
cont.matrix <- makeContrasts(G1-G0, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
# top 10000 probes ranked by log-odds (B), with FDR-adjusted p-values
tT <- topTable(fit2, adjust="fdr", sort.by="B", number=10000)
tT <- subset(tT, select=c("ID","adj.P.Val","P.Value","t","B","logFC","Gene.symbol","Gene.title"))
# write.table(tT, file=stdout(), row.names=F, sep="\t")
# stdout()
write.table(tT,file="C:/gse71034_cotex_primary_neuron_fragile_x.txt" ,row.names=F, sep="\t")
# save as txt
################################################################
#   Boxplot for selected GEO samples (kept commented out for reference)
#library(Biobase)
#library(GEOquery)
# load series and platform data from GEO
#gset <- getGEO("GSE71034", GSEMatrix =TRUE, getGPL=FALSE)
#if (length(gset) > 1) idx <- grep("GPL1261", attr(gset, "names")) else idx <- 1
#gset <- gset[[idx]]
# group names for all samples in a series
#gsms <- "XXXXXXXXXX1111100000XXXXXXXXXXXXXXXXXXXXXX"
#sml <- c()
#for (i in 1:nchar(gsms)) { sml[i] <- substr(gsms,i,i) }
#sml <- paste("G", sml, sep="")  set group names
# eliminate samples marked as "X"
#sel <- which(sml != "X")
#sml <- sml[sel]
#gset <- gset[ ,sel]
# order samples by group
#ex <- exprs(gset)[ , order(sml)]
#sml <- sml[order(sml)]
#fl <- as.factor(sml)
#labels <- c("B","a")
# set parameters and draw the plot
#palette(c("#dfeaf4","#f4dfdf", "#AABBCC"))
#dev.new(width=4+dim(gset)[[2]]/5, height=6)
#par(mar=c(2+round(max(nchar(sampleNames(gset)))/2),4,2,1))
#title <- paste ("GSE71034", '/', annotation(gset), " selected samples", sep ='')
#boxplot(ex, boxwex=0.6, notch=T, main=title, outline=FALSE, las=2, col=fl)
#legend("topleft", labels, fill=palette(), bty="n")
|
#' Construct lower tolerance limit, upper tolerance limit and tolerance interval for a
#' proportion of future measurements from the gamma distribution.
#'
#' @param x observations of a gamma distribution.
#' @param alpha value of alpha, such that a (gamma, 1-alpha) lower tolerance limit, a (gamma, 1-alpha)
#'   upper tolerance limit or a (gamma, 1-alpha) tolerance interval is returned. If alpha is not
#'   specified, the default value is 0.05.
#' @param gamma value of gamma (the proportion of future measurements), such that a (gamma, 1-alpha)
#'   lower tolerance limit, a (gamma, 1-alpha) upper tolerance limit or a (gamma, 1-alpha) tolerance
#'   interval is returned. If gamma is not specified, the default value is 0.99.
#' @param B number of realizations of the GPQs. If B is not specified, the default value is 5000.
#'
#' @details Assume that X_1, ..., X_n from a gamma distribution with cumulative distribution function F
#'   are available. Consider two statistics TL = TL(X_1, ..., X_n) and TU = TU(X_1, ..., X_n).
#'   Let gamma and alpha be two constants and 0 < gamma, alpha < 1. If TL and TU
#'   are determined such that
#'   P(F(TU) - F(TL) >= gamma) = 1-alpha,
#'   then (TL,TU) is called a (gamma, 1-alpha) two-sided tolerance interval. When TU equals infinity,
#'   TL is called a (gamma, 1-alpha) lower tolerance limit.
#'   Similarly, when TL equals -infinity, TU is called a (gamma, 1-alpha) upper tolerance limit.
#'
#' @return tollimits returns a data frame containing one vector where the first two elements make
#'   for the (gamma, 1-alpha) tolerance interval, the third element is the
#'   (gamma, 1-alpha) lower tolerance limit and the last is the (gamma, 1-alpha)
#'   upper tolerance limit.
#'
#' @export
#'
#' @references Chen, P. and Ye, Z., 2017. "Approximate Statistical Limits For A Gamma Distribution."
#' Journal of Quality Technology, 49(1), 64-77.
#'
#' Wang, B. X. and Wu, F. (2018), "Inference on the Gamma Distribution,"
#' Technometrics, 60(2), 235-244.
#'
#' Krishnamoorthy, K. Mathew, T. Mukherjee, S. 2008. "Normal-Based Methods for a Gamma Distribution:
#' Prediction and Tolerance Intervals and Stress-Strength Reliability".
#' Technometrics 50(1), 69-78.
#'
#' @examples x <- rgamma(100, shape = 3, rate = 1)
#' tollimits(x, alpha = 0.05, gamma = 0.99, B = 5000)
tollimits <- function(x, alpha = 0.05, gamma = 0.99, B = 5000) {
  n <- length(x)
  # Wilson-Hilferty cube-root transform: w is approximately normal for gamma data.
  w <- x^(1/3)
  wbar <- mean(w)
  sw <- sd(w)
  # Normal-based two-sided tolerance factor.
  # (Renamed from `c` to avoid shadowing base::c.)
  k <- sqrt((n - 1) * qchisq(gamma, df = 1, ncp = 1/n) / qchisq(alpha, n - 1))
  # Generalized pivotal quantities for (shape, scale);
  # pargpq() is defined elsewhere in this package.
  gpq.all <- pargpq(x, B)
  tu.all <- mapply(function(shape, scale) qgamma(gamma, shape = shape, scale = scale),
                   shape = gpq.all$shape, scale = gpq.all$scale)
  tl.all <- mapply(function(shape, scale) qgamma(1 - gamma, shape = shape, scale = scale),
                   shape = gpq.all$shape, scale = gpq.all$scale)
  # First two entries: normal-approximation two-sided interval (back-transformed);
  # last two: GPQ-based one-sided lower/upper limits.
  tol <- c((wbar - k * sw)^3, (wbar + k * sw)^3,
           quantile(tl.all, alpha), quantile(tu.all, 1 - alpha))
  data.frame(tol, row.names = c('low-int', 'up-int', 'low-lim', 'up-lim'))
}
|
/R/tollimits.R
|
no_license
|
statcp/gammadist
|
R
| false
| false
| 2,988
|
r
|
#' Construct lower tolerance limit, upper tolerance limit and tolerance interval for a
#' proportion of future measurements from the gamma distribution.
#'
#' @param x observations of a gamma distribution.
#' @param alpha value of alpha, such that a (beta, 1-alpha) lower tolerance limit, a (beta, 1-alpha)
#' upper tolerance limit or a (beta, 1-alpha) tolerance interval is returned. If alpha is not specified, the default
#' value is 0.05.
#' @param gamma value of gamma (the proportion of future measurements), such that a (gamma, 1-alpha) lower
#' tolerance limit, a (gamma, 1-alpha) upper tolerance limit or a (gamma, 1-alpha) tolerance interval
#' is returned. If gamma is not specified, the default value is 0.99.
#' @param B number of realizations of the GPQs. If B is not specified, the default value is 5000.
#'
#' @details Assume that X_1, ..., X_n from a gamma distribution with cumulative distribution function F are available.
#' Consider two statistics TL = TL(X_1, ..., X_n) and TU = PU(X_1, ..., X_n).
#' Let gamma and alpha be two constants and 0 < gamma, alpha < 1. If TL and TU
#' are determined such that
#' P(F(TU) - F(TL) >= gamma) = 1-alpha,
#' then (TL,TU) is called a (gamma, 1-alpha) two-sided tolerance interval. When TU equals infinity, TL is called
#' a (gamma, 1-alpha) lower tolerance limit.
#' Similarly, when TL equals -infinity, TU is called a (gamma, 1-alpha) upper tolerance limit.
#'
#' @return tollimits returns a dataframe containing one vector where the first two elements make for the (gamma, 1-alpha) tolerance
#' interval, the third element is the
#' (gamma, 1-alpha) lower tolerance limit and the last is the (gamma, 1-alpha)
#' upper tolerance limit.
#'
#' @export
#'
#' @references Chen, P. and Ye, Z., 2017. "Approximate Statistical Limits For A Gamma Distribution."
#' Journal of Quality Technology, 49(1), 64-77.
#'
#'Wang, B. X. and Wu, F. (2018), “Inference on the Gamma Distribution,”
#'Technometrics, 60(2), 235–244.
#'
#' Krishnamoorthy, K. Mathew, T. Mukherjee, S. 2008. "Normal-Based Methods for a Gamma Distribution:
#' Prediction and Tolerance Intervals and Stress-Strength Reliability".
#' Technometrics 50(1), 69-78.
#'
#' @examples x <- rgamma(100, shape = 3, rate = 1)
#' tollimits(x, alpha = 0.05, gamma = 0.99, B=5000)
# Tolerance limits for gamma-distributed observations: a normal-approximation
# two-sided interval (via the Wilson-Hilferty cube-root transform) plus
# GPQ-based one-sided lower/upper limits. Returns a one-column data frame
# named `tol` with rows low-int, up-int, low-lim, up-lim.
tollimits <- function(x, alpha = 0.05, gamma = 0.99, B=5000){
  n.obs <- length(x)
  cube <- x^(1/3)              # cube-root transform makes gamma data near-normal
  m.cube <- mean(cube)
  s.cube <- sd(cube)
  # Two-sided normal tolerance factor on the transformed scale.
  k.fac <- sqrt((n.obs - 1) * qchisq(gamma, df = 1, ncp = 1/n.obs) / qchisq(alpha, n.obs - 1))
  # Generalized pivotal quantities for the gamma shape and scale.
  gpq <- pargpq(x, B)
  # qgamma is vectorized over shape/scale, so no explicit mapply is needed.
  upper.gpq <- qgamma(gamma, shape = gpq$shape, scale = gpq$scale)
  lower.gpq <- qgamma(1 - gamma, shape = gpq$shape, scale = gpq$scale)
  tol <- c((m.cube - k.fac * s.cube)^3,
           (m.cube + k.fac * s.cube)^3,
           quantile(lower.gpq, alpha),
           quantile(upper.gpq, 1 - alpha))
  return(data.frame(tol, row.names = c('low-int', 'up-int', 'low-lim', 'up-lim')))
}
|
#' @title summary method for funreg object
#' @description Returns summary information on a \code{funreg} object.
#' @param object An object of class \code{funreg}
#' @param digits The number of digits past the decimal place to use when printing numbers
#' @param silent If \code{TRUE}, indicates that the summary should be returned
#' as a list object but not printed to the screen.
#' @param ... Any other optional arguments that may be passed
#' from other methods (but currently ignored by this one).
#' @return Returns a list with four components. First, \code{call.info} summarizes the
#' inputs that were sent into the \code{funreg} function. Second,
#' \code{intercept.estimate.uncentered} gives the estimated functional
#' coefficient for the intercept in the functional regression model. Third,
#' \code{functional.covariates.table} provides estimated values for the
#' functional coefficients at each of a grid of time points. Fourth,
#' \code{subject.level.covariates.table} provides estimated values for
#' subject-level covariates if any are in the model.
#'@export
#'@S3method summary funreg
#'@method summary funreg
summary.funreg <- function(object,
                           digits=4,
                           silent=FALSE,
                           ...) {
    # Summarize a fitted functional regression model ("funreg" object):
    # returns call info, the uncentered intercept estimate, a table of
    # functional coefficient estimates (and SEs) over the fit grid, and a
    # Wald table for subject-level covariates (or NULL if there are none).
    # NOTE: `silent` is accepted for interface compatibility; this
    # implementation never prints, so it currently has no effect.
    # inherits() is safer than class(x)=="funreg": it also accepts objects
    # that carry additional classes.
    stopifnot(inherits(object, "funreg"));
    beta <- as.matrix(object$betafn.estimate.by.grid);
    se.beta <- as.matrix(object$betafn.se.by.grid);
    ###### Functional Covariates ########
    nx <- ncol(beta);
    # Fall back to generic covariate names when none were recorded.
    if (is.null(object$xnames)) {xnames <- paste("X",1:nx,sep="");
    } else {xnames <- object$xnames;}
    # Rename a covariate literally called "t" so it cannot clash with the
    # time column of the output table.
    xnames[which(xnames=="t")] <- "Covariate named t";
    functional.covariates.table <- data.frame(t=object$times.for.fit.grid);
    column.names <- "t";
    for (col.index in 1:nx) {
        functional.covariates.table <- cbind(functional.covariates.table,
                                             beta[,col.index]);
        column.names <- c(column.names,paste("Beta.for.",xnames[col.index],sep=""));
        # Append the SE column and its name together so the two always stay
        # in sync (the original appended the name unconditionally, outside
        # this if, relying on the condition always being TRUE).
        if (!is.null(se.beta[,col.index])) {
            functional.covariates.table <- cbind(functional.covariates.table,
                                                 se.beta[,col.index]);
            column.names <- c(column.names,
                              paste("SE(Beta).for.",xnames[col.index],sep=""));
        }
    }
    stopifnot(length(column.names)==ncol(functional.covariates.table));
    colnames(functional.covariates.table) <- column.names;
    rownames(functional.covariates.table) <- NULL;
    functional.covariates.table <- round(functional.covariates.table,
                                         digits=digits);
    ######### Other Covariates ##########
    # Wald z tests for scalar (subject-level) covariates, if any were fit.
    if (!is.null(object$other.covariates.estimate)) {
        z <- object$other.covariates.estimate/
            object$other.covariates.se;
        subject.level.covariates.table <- cbind(
            estimate=object$other.covariates.estimate,
            SE=object$other.covariates.se,
            z=z,
            p=2*(1-pnorm(abs(z))));
        subject.level.covariates.table <- round(subject.level.covariates.table,
                                                digits=digits);
    } else {subject.level.covariates.table <- NULL;}
    ######### Return the Answer #########
    return(list(call.info=object$call.info,
                intercept.estimate.uncentered=object$intercept.estimate.uncentered,
                functional.covariates.table=functional.covariates.table,
                subject.level.covariates.table=subject.level.covariates.table));
}
|
/funreg/R/SummaryFunReg.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,652
|
r
|
#' @title summary method for funreg object
#' @description Returns summary information on a \code{funreg} object.
#' @param object An object of class \code{funreg}
#' @param digits The number of digits past the decimal place to use when printing numbers
#' @param silent If \code{TRUE}, indicates that the summary should be returned
#' as a list object but not printed to the screen.
#' @param ... Any other optional arguments that may be passed
#' from other methods (but currently ignored by this one).
#' @return Returns a list with four components. First, \code{call.info} summarizes the
#' inputs that were sent into the \code{funreg} function. Second,
#' \code{intercept.estimate.uncentered} gives the estimated functional
#' coefficient for the intercept in the functional regression model. Third,
#' \code{functional.covariates.table} provides estimated values for the
#' functional coefficients at each of a grid of time points. Fourth,
#' \code{subject.level.covariates.table} provides estimated values for
#' subject-level covariates if any are in the model.
#'@export
#'@S3method summary funreg
#'@method summary funreg
summary.funreg <- function(object,
                           digits=4,
                           silent=FALSE,
                           ...) {
    # Summarize a fitted functional regression model ("funreg" object):
    # returns call info, the uncentered intercept estimate, a table of
    # functional coefficient estimates (and SEs) over the fit grid, and a
    # Wald table for subject-level covariates (or NULL if there are none).
    # NOTE: `silent` is accepted for interface compatibility; this
    # implementation never prints, so it currently has no effect.
    # inherits() is safer than class(x)=="funreg": it also accepts objects
    # that carry additional classes.
    stopifnot(inherits(object, "funreg"));
    beta <- as.matrix(object$betafn.estimate.by.grid);
    se.beta <- as.matrix(object$betafn.se.by.grid);
    ###### Functional Covariates ########
    nx <- ncol(beta);
    # Fall back to generic covariate names when none were recorded.
    if (is.null(object$xnames)) {xnames <- paste("X",1:nx,sep="");
    } else {xnames <- object$xnames;}
    # Rename a covariate literally called "t" so it cannot clash with the
    # time column of the output table.
    xnames[which(xnames=="t")] <- "Covariate named t";
    functional.covariates.table <- data.frame(t=object$times.for.fit.grid);
    column.names <- "t";
    for (col.index in 1:nx) {
        functional.covariates.table <- cbind(functional.covariates.table,
                                             beta[,col.index]);
        column.names <- c(column.names,paste("Beta.for.",xnames[col.index],sep=""));
        # Append the SE column and its name together so the two always stay
        # in sync (the original appended the name unconditionally, outside
        # this if, relying on the condition always being TRUE).
        if (!is.null(se.beta[,col.index])) {
            functional.covariates.table <- cbind(functional.covariates.table,
                                                 se.beta[,col.index]);
            column.names <- c(column.names,
                              paste("SE(Beta).for.",xnames[col.index],sep=""));
        }
    }
    stopifnot(length(column.names)==ncol(functional.covariates.table));
    colnames(functional.covariates.table) <- column.names;
    rownames(functional.covariates.table) <- NULL;
    functional.covariates.table <- round(functional.covariates.table,
                                         digits=digits);
    ######### Other Covariates ##########
    # Wald z tests for scalar (subject-level) covariates, if any were fit.
    if (!is.null(object$other.covariates.estimate)) {
        z <- object$other.covariates.estimate/
            object$other.covariates.se;
        subject.level.covariates.table <- cbind(
            estimate=object$other.covariates.estimate,
            SE=object$other.covariates.se,
            z=z,
            p=2*(1-pnorm(abs(z))));
        subject.level.covariates.table <- round(subject.level.covariates.table,
                                                digits=digits);
    } else {subject.level.covariates.table <- NULL;}
    ######### Return the Answer #########
    return(list(call.info=object$call.info,
                intercept.estimate.uncentered=object$intercept.estimate.uncentered,
                functional.covariates.table=functional.covariates.table,
                subject.level.covariates.table=subject.level.covariates.table));
}
|
# AFL-generated fuzz input for CNull: m is NULL and the count matrix is filled
# with extreme doubles to probe edge-case handling in the C++ routine.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12400894659582e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Invoke the internal (non-exported) function with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# Dump the structure of whatever comes back (run under valgrind upstream).
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615782950-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 329
|
r
|
# AFL-generated fuzz input for CNull: m is NULL and the count matrix is filled
# with extreme doubles to probe edge-case handling in the C++ routine.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12400894659582e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Invoke the internal (non-exported) function with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# Dump the structure of whatever comes back (run under valgrind upstream).
str(result)
|
\name{pedIBD}
\Rdversion{1.1}
\alias{pedIBD}
\title{Calculates the Pedigree-based Kinship Matrix
}
\description{
Calculates the \bold{ped}igree based probability of alleles to be \bold{IBD}. This pedigree based kinship matrix is also called coancestry matrix and is half the additive relationship matrix.
}
\usage{
pedIBD(Pedig, keep.only=NULL, keep=keep.only, kinFounder=NULL)}
\arguments{
\item{Pedig}{Data frame containing the pedigree with \code{Indiv} (individual ID), \code{Sire}, and \code{Dam} in the first 3 columns.
Missing parents are coded as NA. Both parents must either be missing or present. If this is not the case use function \link{prePed} to prepare the pedigree.}
\item{keep}{If \code{keep} is provided then kinships are computed only for these animals and their ancestors.}
\item{keep.only}{If \code{keep.only} is provided then kinships are computed only for these animals.}
\item{kinFounder}{Kinship matrix for the founders. The row names are the ids of the founders. By default, founders are assumed to be unrelated. Founders not included in this matrix are also assumed to be unrelated.}
}
\details{
Computation of pedigree based kinship matrix f which is half the additive relationship matrix. For individuals i and j it is defined as
\tabular{l}{
fij = Probability that two alleles chosen from individuals i and j are IBD. \cr
}
}
\value{
Kinship matrix.
}
\examples{
data(PedigWithErrors)
data(Phen)
keep <- Phen$Indiv
Pedig <- prePed(PedigWithErrors, keep=keep, thisBreed="Hinterwaelder", lastNative=1970)
pedA <- pedIBD(Pedig, keep.only=keep)
}
\author{Robin Wellmann}
|
/fuzzedpackages/optiSel/man/pedIBD.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 1,701
|
rd
|
\name{pedIBD}
\Rdversion{1.1}
\alias{pedIBD}
\title{Calculates the Pedigree-based Kinship Matrix
}
\description{
Calculates the \bold{ped}igree based probability of alleles to be \bold{IBD}. This pedigree based kinship matrix is also called coancestry matrix and is half the additive relationship matrix.
}
\usage{
pedIBD(Pedig, keep.only=NULL, keep=keep.only, kinFounder=NULL)}
\arguments{
\item{Pedig}{Data frame containing the pedigree with \code{Indiv} (individual ID), \code{Sire}, and \code{Dam} in the first 3 columns.
Missing parents are coded as NA. Both parents must either be missing or present. If this is not the case use function \link{prePed} to prepare the pedigree.}
\item{keep}{If \code{keep} is provided then kinships are computed only for these animals and their ancestors.}
\item{keep.only}{If \code{keep.only} is provided then kinships are computed only for these animals.}
\item{kinFounder}{Kinship matrix for the founders. The row names are the ids of the founders. By default, founders are assumed to be unrelated. Founders not included in this matrix are also assumed to be unrelated.}
}
\details{
Computation of pedigree based kinship matrix f which is half the additive relationship matrix. For individuals i and j it is defined as
\tabular{l}{
fij = Probability that two alleles chosen from individuals i and j are IBD. \cr
}
}
\value{
Kinship matrix.
}
\examples{
data(PedigWithErrors)
data(Phen)
keep <- Phen$Indiv
Pedig <- prePed(PedigWithErrors, keep=keep, thisBreed="Hinterwaelder", lastNative=1970)
pedA <- pedIBD(Pedig, keep.only=keep)
}
\author{Robin Wellmann}
|
library(psych)
# Matrix Transformations
# cbind(as.matrix(d1[1:30]),as.matrix(d2[1:30]), as.matrix(d3[1:30]),as.matrix(d4[1:30]),as.matrix(d5[1:30]))
#-- change dtm term weighting with log.
nV <- nV[,order(nV[1,],decreasing = TRUE)] # order columns by first doc's counts; may not be representative
#write.table(d,file="doc.txt",sep="\t", quote=FALSE)
#d2 <- dr[!dr %in% intersect]
#-----------
# Reduce the score matrix S to the selected feature columns.
f <- as.integer(rownames(feature.1.red))
colnames(s) <- c(1:numOfComps)
S.red <- S[,f]
#-----------
Vsize <- dim(nV)
numOfDocs <- Vsize[1]
numOfTerms <- Vsize[2]
# Laplace-smoothed relative frequency per document:
#   (count + 1) / (token count + type count).
# Vectorized replacement for the original per-cell double loop: identical
# arithmetic, and it no longer relies on row names being unique.
denom <- rowSums(nV) + rowSums(nV != 0)  # tokens + types for each document
n.relFreq <- (nV + 1) / denom            # one denominator per row, recycled down the rows
nV <- n.relFreq
#-----------------------------------------
# (The bare dashed separator above was previously not commented, which is a
# parse error in R.)
# Collect per-run results from `diff` and persist them to an .rda file.
hist.res <- diff$hist.res
clust.eval <- diff$cv
sim <- diff$sim
feat <- diff$feat
D.feat <- diff$D.feat
O.feat <- diff$O.feat
D.feat.O <- diff$D.feat.O
O.feat.O <- diff$O.feat.O
D.consist <- diff$D.consist
O.consist <- diff$O.consist
hist <- diff$hist
hist2 <- diff$hist2
# BUG FIX: the original saved `cv`, which is never defined here (the value is
# assigned to `clust.eval` above), so save() failed with "object not found".
save(hist.res,clust.eval,sim,feat,D.feat,O.feat,
     D.feat.O,O.feat.O,D.consist,O.consist,hist,hist2,
     file='DCTabata4999.rda')
#-----------------------------------------------------
#-------------Evaluation
# Split document names into Dickens texts ("D" prefix) and all other authors.
dickens.list <- names(diff$Dickens)[substr((names(diff$Dickens)),1,1) == "D"]
other.list <- names(diff$Dickens)[substr((names(diff$Dickens)),1,1) != "D"]
dsize <- length(dickens.list)
osize <- length(other.list)
# Distances of each Dickens text to the Dickens and Collins profiles,
# plus mean/sum summary rows.
results <- matrix(0, nrow = (dsize+2),ncol=3)
rownames(results) <- c(dickens.list,"mean","sum")
colnames(results) <- c("Dist.D.","Dist.C.","(Collins-Dickens)")
d <- diff[[1]]
c <- diff[[2]]   # NOTE: shadows base::c as a value; c(...) calls still resolve to the function
for (n in dickens.list){
  results[n,1] <- d[[n]]
  results[n,2] <- c[[n]]
  results[n,3] <- c[[n]]-d[[n]]
}
results[dsize+1,] <- c(mean(results[1:dsize,1]),mean(results[1:dsize,2]),mean(results[1:dsize,3]))
results[dsize+2,] <- c(sum(results[1:dsize,1]),sum(results[1:dsize,2]),sum(results[1:dsize,3]))
# Same table from the perspective of the non-Dickens texts.
results.2 <- matrix(0, nrow = (osize+2),ncol=3)
rownames(results.2) <- c(other.list,"mean","sum")
colnames(results.2) <- c("Dist.D.","Dist.C.","(Dickens-Collins)")
d <- diff[[1]]
c <- diff[[2]]
for (n in other.list){
  results.2[n,1] <- d[[n]]
  results.2[n,2] <- c[[n]]
  results.2[n,3] <- d[[n]]-c[[n]]
}
results.2[osize+1,] <- c(mean(results.2[1:osize,1]),mean(results.2[1:osize,2]),mean(results.2[1:osize,3]))
results.2[osize+2,] <- c(sum(results.2[1:osize,1]),sum(results.2[1:osize,2]),sum(results.2[1:osize,3]))
# Average pairwise distance matrix over all runs.
# BUG FIX: rep() ignores nrow/ncol arguments, so the original initialised a
# length-1 zero instead of a num.Docs x num.Docs matrix; matrix() is intended.
matrix.RD <- matrix(0, nrow = num.Docs, ncol = num.Docs)
for (i in 1:length(diff$dis.Matrix)){
  m <- data.frame(diff$dis.Matrix[i])
  matrix.RD <- matrix.RD +m
}
matrix.RD <- matrix.RD/length(diff$dis.Matrix)
#---plotting
# PCA of the document-term matrix; inspect the top loadings of PC1.
nV.pca <- prcomp(nV)
summary(nV.pca)
nV.latent <- nV.pca$rotation
signif(sort(nV.latent[,1],decreasing=TRUE)[1:30],2) # 1 st component
pdf("file.pdf")
# BUG FIX: the title/axis-label arguments were stranded on their own line
# after the plot() call was closed, which is a syntax error; fold them back
# into the call they were evidently split from.
plot(nV.pca$x[,1:2], type = "n",
     main = "Dickens vs. Collins",
     xlab = "PC1:+and +but +that +upon +very -the -her -she -you",
     ylab = "PC2:+you +her +she +said +what -the -and -their -they ")
#points(nV.pca$x[1:29,1:2],pch="C",col="blue")
#points(nV.pca$x[30:74,1:2],pch="D",col="green")
dev.off()
biplot(nV.pca, main = "Dickens vs. Collins", col= c("black","red"), cex= 0.7)
# One-sided paired t-test: is c1 greater than d1 on average?
t.test(c1,d1,paired= TRUE, alternative="greater")
# Unpack alternative result objects (scratch bookkeeping; later assignments
# overwrite earlier ones).
D.diff <- diff$D.diff
O.diff <- diff$O.diff
clust.eval <- diff$clust.eval
hist <- diff$hist
hist2 <- diff$hist2
############
D.diff <- diff$D.diff
O.diff <- diff$O.diff
clust.eval <- diff$cv
hist <- diff$hist
hist2 <- diff$hist2
D.feat.O <- diff$D.feat.O
O.feat.O <- diff$O.feat.O
D.diff <- D$D.diff
O.diff <- D$O.diff
clust.eval <- D$cv
hist <- D$hist
hist2 <- D$hist2
|
/code/R/Transformations.R
|
no_license
|
cklaussner/thesis-stylometry
|
R
| false
| false
| 4,031
|
r
|
library(psych)
# Matrix Transformations
# cbind(as.matrix(d1[1:30]),as.matrix(d2[1:30]), as.matrix(d3[1:30]),as.matrix(d4[1:30]),as.matrix(d5[1:30]))
#-- change dtm term weighting with log.
nV <- nV[,order(nV[1,],decreasing = TRUE)] # order columns by first doc's counts; may not be representative
#write.table(d,file="doc.txt",sep="\t", quote=FALSE)
#d2 <- dr[!dr %in% intersect]
#-----------
# Reduce the score matrix S to the selected feature columns.
f <- as.integer(rownames(feature.1.red))
colnames(s) <- c(1:numOfComps)
S.red <- S[,f]
#-----------
Vsize <- dim(nV)
numOfDocs <- Vsize[1]
numOfTerms <- Vsize[2]
# Laplace-smoothed relative frequency per document:
#   (count + 1) / (token count + type count).
# Vectorized replacement for the original per-cell double loop: identical
# arithmetic, and it no longer relies on row names being unique.
denom <- rowSums(nV) + rowSums(nV != 0)  # tokens + types for each document
n.relFreq <- (nV + 1) / denom            # one denominator per row, recycled down the rows
nV <- n.relFreq
#-----------------------------------------
# (The bare dashed separator above was previously not commented, which is a
# parse error in R.)
# Collect per-run results from `diff` and persist them to an .rda file.
hist.res <- diff$hist.res
clust.eval <- diff$cv
sim <- diff$sim
feat <- diff$feat
D.feat <- diff$D.feat
O.feat <- diff$O.feat
D.feat.O <- diff$D.feat.O
O.feat.O <- diff$O.feat.O
D.consist <- diff$D.consist
O.consist <- diff$O.consist
hist <- diff$hist
hist2 <- diff$hist2
# BUG FIX: the original saved `cv`, which is never defined here (the value is
# assigned to `clust.eval` above), so save() failed with "object not found".
save(hist.res,clust.eval,sim,feat,D.feat,O.feat,
     D.feat.O,O.feat.O,D.consist,O.consist,hist,hist2,
     file='DCTabata4999.rda')
#-----------------------------------------------------
#-------------Evaluation
# Split document names into Dickens texts ("D" prefix) and all other authors.
dickens.list <- names(diff$Dickens)[substr((names(diff$Dickens)),1,1) == "D"]
other.list <- names(diff$Dickens)[substr((names(diff$Dickens)),1,1) != "D"]
dsize <- length(dickens.list)
osize <- length(other.list)
# Distances of each Dickens text to the Dickens and Collins profiles,
# plus mean/sum summary rows.
results <- matrix(0, nrow = (dsize+2),ncol=3)
rownames(results) <- c(dickens.list,"mean","sum")
colnames(results) <- c("Dist.D.","Dist.C.","(Collins-Dickens)")
d <- diff[[1]]
c <- diff[[2]]   # NOTE: shadows base::c as a value; c(...) calls still resolve to the function
for (n in dickens.list){
  results[n,1] <- d[[n]]
  results[n,2] <- c[[n]]
  results[n,3] <- c[[n]]-d[[n]]
}
results[dsize+1,] <- c(mean(results[1:dsize,1]),mean(results[1:dsize,2]),mean(results[1:dsize,3]))
results[dsize+2,] <- c(sum(results[1:dsize,1]),sum(results[1:dsize,2]),sum(results[1:dsize,3]))
# Same table from the perspective of the non-Dickens texts.
results.2 <- matrix(0, nrow = (osize+2),ncol=3)
rownames(results.2) <- c(other.list,"mean","sum")
colnames(results.2) <- c("Dist.D.","Dist.C.","(Dickens-Collins)")
d <- diff[[1]]
c <- diff[[2]]
for (n in other.list){
  results.2[n,1] <- d[[n]]
  results.2[n,2] <- c[[n]]
  results.2[n,3] <- d[[n]]-c[[n]]
}
results.2[osize+1,] <- c(mean(results.2[1:osize,1]),mean(results.2[1:osize,2]),mean(results.2[1:osize,3]))
results.2[osize+2,] <- c(sum(results.2[1:osize,1]),sum(results.2[1:osize,2]),sum(results.2[1:osize,3]))
# Average pairwise distance matrix over all runs.
# BUG FIX: rep() ignores nrow/ncol arguments, so the original initialised a
# length-1 zero instead of a num.Docs x num.Docs matrix; matrix() is intended.
matrix.RD <- matrix(0, nrow = num.Docs, ncol = num.Docs)
for (i in 1:length(diff$dis.Matrix)){
  m <- data.frame(diff$dis.Matrix[i])
  matrix.RD <- matrix.RD +m
}
matrix.RD <- matrix.RD/length(diff$dis.Matrix)
#---plotting
# PCA of the document-term matrix; inspect the top loadings of PC1.
nV.pca <- prcomp(nV)
summary(nV.pca)
nV.latent <- nV.pca$rotation
signif(sort(nV.latent[,1],decreasing=TRUE)[1:30],2) # 1 st component
pdf("file.pdf")
# BUG FIX: the title/axis-label arguments were stranded on their own line
# after the plot() call was closed, which is a syntax error; fold them back
# into the call they were evidently split from.
plot(nV.pca$x[,1:2], type = "n",
     main = "Dickens vs. Collins",
     xlab = "PC1:+and +but +that +upon +very -the -her -she -you",
     ylab = "PC2:+you +her +she +said +what -the -and -their -they ")
#points(nV.pca$x[1:29,1:2],pch="C",col="blue")
#points(nV.pca$x[30:74,1:2],pch="D",col="green")
dev.off()
biplot(nV.pca, main = "Dickens vs. Collins", col= c("black","red"), cex= 0.7)
# One-sided paired t-test: is c1 greater than d1 on average?
t.test(c1,d1,paired= TRUE, alternative="greater")
# Unpack alternative result objects (scratch bookkeeping; later assignments
# overwrite earlier ones).
D.diff <- diff$D.diff
O.diff <- diff$O.diff
clust.eval <- diff$clust.eval
hist <- diff$hist
hist2 <- diff$hist2
############
D.diff <- diff$D.diff
O.diff <- diff$O.diff
clust.eval <- diff$cv
hist <- diff$hist
hist2 <- diff$hist2
D.feat.O <- diff$D.feat.O
O.feat.O <- diff$O.feat.O
D.diff <- D$D.diff
O.diff <- D$O.diff
clust.eval <- D$cv
hist <- D$hist
hist2 <- D$hist2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easyVP.R
\name{easyVP}
\alias{easyVP}
\title{easyVP}
\usage{
easyVP(DEGlist, main, FCcol1, FCcol2, FCcol3, FClimit, PVALlimit, LegendPlace)
}
\arguments{
\item{DEGlist}{limma output. List of differentially expressed features with mandatory columns: logFC, P.Value, adj.P.Val, GeneID}
\item{main}{a title for your plot.}
\item{FCcol1}{a first logFC threshold for key color.}
\item{FCcol2}{a second logFC threshold for key color.}
\item{FCcol3}{a third logFC threshold for key color.}
\item{FClimit}{a logFC threshold beyond which dots are annotated with GeneID.}
\item{PVALlimit}{a -log10(P.Value) threshold beyond which dots are annotated with GeneID. Must be -log10(P.Value) and not P.Value or adj.P.Val.}
\item{LegendPlace}{a place to choose for legend between "topleft", "topright", "bottomright", "bottomleft".}
}
\description{
Create annotated volcano plots with colored dots
}
\examples{
MyDEGs <- read.csv2(file="~/MyStudy/MyDEGs.csv", header=T, sep=";",dec=".", row.names=1)
colnames(MyDEGs)[1] <-"logFC"
colnames(MyDEGs)[4] <-"P.Value"
colnames(MyDEGs)[5] <-"adj.P.Val"
colnames(MyDEGs)[8] <-"GeneID"
easyVP(DEGlist=MyDEGs,main="My easy VP", FCcol1=0.5, FCcol2=1, FCcol3=2, FClimit=2, PVALlimit=5, LegendPlace="topright")
}
|
/man/easyVP.Rd
|
no_license
|
nahtan/easyVP
|
R
| false
| true
| 1,317
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easyVP.R
\name{easyVP}
\alias{easyVP}
\title{easyVP}
\usage{
easyVP(DEGlist, main, FCcol1, FCcol2, FCcol3, FClimit, PVALlimit, LegendPlace)
}
\arguments{
\item{DEGlist}{limma output. List of differentially expressed features with mandatory columns: logFC, P.Value, adj.P.Val, GeneID}
\item{main}{a title for your plot.}
\item{FCcol1}{a first logFC threshold for key color.}
\item{FCcol2}{a second logFC threshold for key color.}
\item{FCcol3}{a third logFC threshold for key color.}
\item{FClimit}{a logFC threshold beyond which dots are annotated with GeneID.}
\item{PVALlimit}{a -log10(P.Value) threshold beyond which dots are annotated with GeneID. Must be -log10(P.Value) and not P.Value or adj.P.Val.}
\item{LegendPlace}{a place to choose for legend between "topleft", "topright", "bottomright", "bottomleft".}
}
\description{
Create annotated volcano plots with colored dots
}
\examples{
MyDEGs <- read.csv2(file="~/MyStudy/MyDEGs.csv", header=T, sep=";",dec=".", row.names=1)
colnames(MyDEGs)[1] <-"logFC"
colnames(MyDEGs)[4] <-"P.Value"
colnames(MyDEGs)[5] <-"adj.P.Val"
colnames(MyDEGs)[8] <-"GeneID"
easyVP(DEGlist=MyDEGs,main="My easy VP", FCcol1=0.5, FCcol2=1, FCcol3=2, FClimit=2, PVALlimit=5, LegendPlace="topright")
}
|
library(shiny)
# Shiny server for a word-cloud app.
# NOTE(review): this file also relies on wordcloud(), brewer.pal() (RColorBrewer)
# and a project-level getTDM() helper; those must be loaded/defined elsewhere
# (e.g. in global.R) or the app will fail at runtime -- confirm.
shinyServer(function(input,output,session){
  # Recompute the term-frequency vector only when the Update button is pressed;
  # isolate() prevents other input changes from retriggering the computation.
  thedata <- reactive({
    input$update # when update is pressed
    isolate({
      withProgress({
        setProgress(message = "Processing corpus...")
        getTDM(input$input_file)
      })
    })#isolate ends
  })# reactive code
  # repeatable() pins the RNG seed so the cloud layout is stable across redraws.
  wordcloud_repeatable <- repeatable(wordcloud)
  output$plot <- renderPlot({
    v <- thedata()
    wordcloud_repeatable(names(v),v,scale=c(4,0.5),
                         min.freq = input$freq, rot.per=0.35,max.words = input$word_max, random.color = TRUE, colors = brewer.pal(8,"Dark2") )
  })
  # Show the first five terms of the (presumably frequency-sorted) vector.
  output$Frequentwords <- renderText({
    d <- thedata()
    names(d[1:5])
  })
})#shiny server ends
|
/server.R
|
no_license
|
saitej09/basic-text-mining
|
R
| false
| false
| 724
|
r
|
library(shiny)
# Shiny server for a word-cloud app.
# NOTE(review): this file also relies on wordcloud(), brewer.pal() (RColorBrewer)
# and a project-level getTDM() helper; those must be loaded/defined elsewhere
# (e.g. in global.R) or the app will fail at runtime -- confirm.
shinyServer(function(input,output,session){
  # Recompute the term-frequency vector only when the Update button is pressed;
  # isolate() prevents other input changes from retriggering the computation.
  thedata <- reactive({
    input$update # when update is pressed
    isolate({
      withProgress({
        setProgress(message = "Processing corpus...")
        getTDM(input$input_file)
      })
    })#isolate ends
  })# reactive code
  # repeatable() pins the RNG seed so the cloud layout is stable across redraws.
  wordcloud_repeatable <- repeatable(wordcloud)
  output$plot <- renderPlot({
    v <- thedata()
    wordcloud_repeatable(names(v),v,scale=c(4,0.5),
                         min.freq = input$freq, rot.per=0.35,max.words = input$word_max, random.color = TRUE, colors = brewer.pal(8,"Dark2") )
  })
  # Show the first five terms of the (presumably frequency-sorted) vector.
  output$Frequentwords <- renderText({
    d <- thedata()
    names(d[1:5])
  })
})#shiny server ends
|
library(dplyr)
library(lubridate)
library(magrittr)
library(readr)
library(stringr)
# Read the raw household power data; everything as character so that the
# "?" missing-value markers do not break column type guessing.
power <- read_delim("~/Data Science/household_power_consumption.txt",
                    ";", col_types = cols(.default = "c"))
# Build a proper POSIXct timestamp from the separate Date and Time columns.
power$datetime <- str_c(power$Date, power$Time, sep = " ")
power$datetime <- dmy_hms(power$datetime)
power$Global_active_power <- as.numeric(power$Global_active_power)
power$Date <- date(power$datetime)
# Keep only the two target days.
power %<>% filter(Date=="2007-02-01" | Date== "2007-02-02")
png("plot2.png")
# BUG FIX: the original plotted `power$date`, a column that does not exist and
# only resolved through $ partial matching against `datetime`; reference the
# timestamp column explicitly.
plot(power$datetime, power$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
edavidaja/ExData_Plotting1
|
R
| false
| false
| 650
|
r
|
library(dplyr)
library(lubridate)
library(magrittr)
library(readr)
library(stringr)
# Read the raw household power data; everything as character so that the
# "?" missing-value markers do not break column type guessing.
power <- read_delim("~/Data Science/household_power_consumption.txt",
                    ";", col_types = cols(.default = "c"))
# Build a proper POSIXct timestamp from the separate Date and Time columns.
power$datetime <- str_c(power$Date, power$Time, sep = " ")
power$datetime <- dmy_hms(power$datetime)
power$Global_active_power <- as.numeric(power$Global_active_power)
power$Date <- date(power$datetime)
# Keep only the two target days.
power %<>% filter(Date=="2007-02-01" | Date== "2007-02-02")
png("plot2.png")
# BUG FIX: the original plotted `power$date`, a column that does not exist and
# only resolved through $ partial matching against `datetime`; reference the
# timestamp column explicitly.
plot(power$datetime, power$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_scientific_names.R
\name{list_scientific_names}
\alias{list_scientific_names}
\title{List all available scientific names}
\usage{
list_scientific_names(connection = con)
}
\arguments{
\item{connection}{A connection to the ETN database. Defaults to \code{con}.}
}
\value{
A vector of all unique \code{scientific_name} present in
\code{common.animal_release}.
}
\description{
List all available scientific names
}
|
/man/list_scientific_names.Rd
|
permissive
|
inbo/etn
|
R
| false
| true
| 495
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_scientific_names.R
\name{list_scientific_names}
\alias{list_scientific_names}
\title{List all available scientific names}
\usage{
list_scientific_names(connection = con)
}
\arguments{
\item{connection}{A connection to the ETN database. Defaults to \code{con}.}
}
\value{
A vector of all unique \code{scientific_name} present in
\code{common.animal_release}.
}
\description{
List all available scientific names
}
|
library(Matrix)
library(dplyr)
library(irlba)
library(Rtsne)
library(ggplot2)
library(scatterpie)
library(gplots)
library(SummarizedExperiment)
# Simulate alternate-allele read counts for pure and mixed strain samples.
#
# Args:
#   n_snps / n_strain / n_samples / n_mixture: problem dimensions.
#   min_depth, max_depth: per-site sequencing depth range.
#   n_max_mixture: maximum number of strains contributing to a mixture sample.
#   n_pc: accepted but unused here -- presumably consumed by downstream PCA;
#         kept for interface compatibility (TODO confirm).
#   beta: magnitude of the uniform assignment noise.
#   min_alt, max_alt: range of per-strain alternate-allele probabilities.
#   seed: RNG seed for reproducibility.
# Returns a SummarizedExperiment with `count` (alt reads) and `depth` assays;
# rowData()$mixture holds the true sample-by-strain mixture matrix.
simulateAltReadCount <- function(n_snps, n_strain, n_samples, n_mixture, min_depth, max_depth, n_max_mixture, n_pc, beta, min_alt, max_alt, seed){
  set.seed(seed)
  # Per-strain probability that a SNP carries the alternate allele.
  alt_ratio <- runif(n_strain, min = min_alt, max = max_alt)
  # Draw binary ancestor genotypes; guarantee every SNP is ALT in >= 1 strain.
  SimulateAncestorStrains <- function(n_snps, n_strain, alt_ratio) {
    anc <- matrix(0, nrow = n_strain, ncol = n_snps)
    for (s in seq_len(n_strain)) {
      anc[s, ] <- sample(0:1, size = n_snps, replace = TRUE,
                         prob = c(1 - alt_ratio[s], alt_ratio[s]))
    }
    # FIX: the original tested length(which(...) > 0) -- the length of a logical
    # vector -- which only worked by accident; any() states the intent directly.
    if (any(colSums(anc) == 0)) {
      fill.sites <- which(colSums(anc) == 0)
      fill.rows <- sample(1:n_strain, sample(1:(n_strain - 1), 1))
      anc[fill.rows, fill.sites] <- 1
    }
    return(anc)
  }
  anc <- SimulateAncestorStrains(n_snps, n_strain, alt_ratio)
  ### One-hot sample ~ strain assignment: each input sample gets one strain.
  A <- sparseMatrix(
    i = 1:n_samples,
    j = sample(1:n_strain, n_samples, replace = TRUE),
    dims = c(n_samples, n_strain)
  )
  # Add uniform noise in [0, beta] and renormalise rows to sum to 1.
  A_noise <- A + runif(prod(dim(A)), min = 0, max = beta)
  A_noise <- Diagonal(x = 1 / Matrix::rowSums(A_noise)) %*% A_noise
  ### Random mixture proportions over at most n_max_mixture strains per sample.
  A_mix <- do.call('rbind', lapply(1:n_mixture, function(h){
    p <- runif(n_strain)
    off <- sample(n_strain, n_strain - n_max_mixture)
    p[off] <- 0
    p <- p / sum(p)
    p
  }))
  A_merge_noise <- rbind(A_noise, A_mix)
  A_merge <- rbind(A, A_mix)
  ### Per-site sequencing depth for all input and mixture samples.
  D <- sample(min_depth:max_depth, (n_samples + n_mixture) * n_snps, replace = TRUE) %>%
    matrix(nrow = n_samples + n_mixture, ncol = n_snps)
  ### Alt read counts = expected allele frequency x depth, rounded.
  GT <- A_merge_noise %*% anc
  Alt_count <- round(GT * D)
  results <- SummarizedExperiment(assays = list(count = Alt_count, depth = D))
  rowData(results)$mixture <- A_merge
  return(results)
}
|
/simulateAltReadCount.r
|
no_license
|
yyw-informatics/MixtureDetection_KNN
|
R
| false
| false
| 2,433
|
r
|
library(Matrix)
library(dplyr)
library(irlba)
library(Rtsne)
library(ggplot2)
library(scatterpie)
library(gplots)
library(SummarizedExperiment)

## Simulate alternate-allele read counts for pure-strain samples plus
## synthetic strain mixtures.
##
## Arguments:
##   n_snps        number of SNP sites to simulate
##   n_strain      number of ancestral strains (assumed >= 2)
##   n_samples     number of pure (single-strain) samples
##   n_mixture     number of mixture samples to generate
##   min_depth, max_depth  per-site depth is drawn uniformly from this range
##   n_max_mixture maximum number of strains contributing to one mixture
##   n_pc          unused in this function; kept so existing call sites work
##   beta          upper bound of uniform noise added to strain assignments
##   min_alt, max_alt  per-strain alternate-allele ratio range
##   seed          RNG seed (set once up front for full reproducibility)
##
## Value: a SummarizedExperiment with assays "count" (alt read counts) and
## "depth" (total depth); rowData(.)$mixture carries the true
## (sample x strain) mixture matrix, one row per assay row.
simulateAltReadCount <- function(n_snps, n_strain, n_samples, n_mixture,
                                 min_depth, max_depth, n_max_mixture, n_pc,
                                 beta, min_alt, max_alt, seed) {
  set.seed(seed)
  ## per-strain probability that a site carries the alternate allele
  alt_ratio <- runif(n_strain, min = min_alt, max = max_alt)
  ## Simulate ancestral strain genotypes: one 0/1 row per strain.
  SimulateAncestorStrains <- function(n_snps, n_strain, alt_ratio) {
    anc <- matrix(0, nrow = n_strain, ncol = n_snps)
    for (s in seq_len(n_strain)) {
      anc[s, ] <- sample(0:1, size = n_snps, replace = TRUE,
                         prob = c(1 - alt_ratio[s], alt_ratio[s]))
    }
    ## Ensure every site is alternate in at least one strain. The original
    ## test `length(which(colSums(anc) == 0) > 0)` had a misplaced
    ## parenthesis and yielded the right truthiness only by accident;
    ## `any()` states the intent directly and is equivalent.
    if (any(colSums(anc) == 0)) {
      fill.sites <- which(colSums(anc) == 0)
      fill.rows <- sample(1:n_strain, sample(1:(n_strain - 1), 1))
      anc[fill.rows, fill.sites] <- 1
    }
    anc
  }
  anc <- SimulateAncestorStrains(n_snps, n_strain, alt_ratio)
  ### Generate a sample ~ strain assignment matrix (one strain per sample)
  A <- sparseMatrix(
    i = 1:n_samples,
    j = sample(1:n_strain, n_samples, replace = TRUE),
    dims = c(n_samples, n_strain)
  )
  ## add uniform noise, then renormalize each row to sum to 1
  A_noise <- A + runif(prod(dim(A)), min = 0, max = beta)
  A_noise <- Diagonal(x = 1 / Matrix::rowSums(A_noise)) %*% A_noise
  ### Randomly choose strains for mixing and randomly set mixing ratios.
  ## A_mix is a (mixture sample x strain) matrix of mixture proportions
  ## with at most n_max_mixture nonzero entries per row, summing to 1.
  A_mix <- do.call('rbind', lapply(seq_len(n_mixture), function(h) {
    p <- runif(n_strain)
    off <- sample(n_strain, n_strain - n_max_mixture)
    p[off] <- 0
    p / sum(p)
  }))
  A_merge_noise <- rbind(A_noise, A_mix)
  A_merge <- rbind(A, A_mix)
  ### Simulate sequencing depth for input samples and mixture samples
  D <- sample(min_depth:max_depth, (n_samples + n_mixture) * n_snps, replace = TRUE) %>%
    matrix(nrow = n_samples + n_mixture, ncol = n_snps)
  ### Generate alt read counts from allele frequency and coverage
  GT <- A_merge_noise %*% anc   # (sample x SNP) expected alt-allele frequency
  Alt_count <- round(GT * D)    # element-wise product with depth
  results <- SummarizedExperiment(assays = list(count = Alt_count, depth = D))
  rowData(results)$mixture <- A_merge
  return(results)
}
|
##' @title archive_timing
##'
##' @description Get or set the number of seconds to wait after trying to
##' retrieve a file from the CRAN Archive.
##'
##' This is intended to stop intermittent install failures
##' due to failing to retrieve files that *are* in the
##' archive but are not downloading properly when a larger
##' number of packages is being retrieved.
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of seconds to wait,
##' when setting, a new, updated SwitchrParam object.
##' @rdname archive_timing
##' @docType methods
##' @export
setGeneric("archive_timing", function(x) standardGeneric("archive_timing"))
##' @rdname archive_timing
##' @aliases archive_timing,SwitchrParam
setMethod("archive_timing", "SwitchrParam", function(x) x@archive_timing)
##' @rdname archive_timing
##' @aliases archive_timing<-
##' @export
setGeneric("archive_timing<-", function(x, value) standardGeneric("archive_timing<-"))
##' @rdname archive_timing
##' @param value The new number of seconds to wait
##' @aliases archive_timing<-,SwitchrParam
setMethod("archive_timing<-", "SwitchrParam", function(x, value) {
    x@archive_timing <- value
    x
})
##' @title Get or set the number of seconds to wait between successive shell commands
##'
##' @description This is intended to stop intermittent install failures
##' due to network drive latency interacting with git commands
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of seconds to wait,
##' when setting, a new, updated SwitchrParam object.
##' @rdname shell_timing
##' @docType methods
##' @export
setGeneric("shell_timing", function(x) standardGeneric("shell_timing"))
##' @rdname shell_timing
##' @aliases shell_timing,SwitchrParam
setMethod("shell_timing", "SwitchrParam", function(x) x@shell_timing)
##' @rdname shell_timing
##' @aliases shell_timing<-
##' @export
setGeneric("shell_timing<-", function(x, value) standardGeneric("shell_timing<-"))
##' @rdname shell_timing
##' @param value The new number of seconds to wait
##' @aliases shell_timing<-,SwitchrParam
setMethod("shell_timing<-", "SwitchrParam", function(x, value) {
    x@shell_timing <- value
    x
})
##' @title dl_method
##' @description Get or set the download method for retrieving files.
##' @param x A SwitchrParam object
##' @rdname dl_method
##' @docType methods
##' @return for the getter, the download method specified in the
##' \code{SwitchrParam} object, for the setter, the object
##' updated with the new download method.
##' @export
setGeneric("dl_method", function(x) standardGeneric("dl_method"))
##' @rdname dl_method
##' @aliases dl_method,SwitchrParam
setMethod("dl_method", "SwitchrParam", function(x) x@dl_method)
##' @rdname dl_method
##' @aliases dl_method<-
##' @export
setGeneric("dl_method<-", function(x, value) standardGeneric("dl_method<-"))
##' @rdname dl_method
##' @param value The new download method
##' @aliases dl_method<-,SwitchrParam
setMethod("dl_method<-", "SwitchrParam", function(x, value) {
    x@dl_method <- value
    x
})
##' @title archive_retries
##'
##' @description Get or set the number of times to retry downloading a file from
##' the CRAN archive
##'
##' This is intended to stop intermittent install failures
##' due to failing to retrieve files that *are* in the
##' archive but are not downloading properly when a larger
##' number of packages is being retrieved.
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of retries,
##' when setting, a new, updated SwitchrParam object.
##' @rdname archive_retries
##' @docType methods
##' @export
setGeneric("archive_retries", function(x) standardGeneric("archive_retries"))
##' @rdname archive_retries
##' @aliases archive_retries,SwitchrParam
setMethod("archive_retries", "SwitchrParam", function(x) x@archive_retries)
##' @rdname archive_retries
##' @aliases archive_retries<-
##' @export
setGeneric("archive_retries<-",
           function(x, value) standardGeneric("archive_retries<-"))
##' @rdname archive_retries
##' @param value The new number of retries
##' @aliases archive_retries<-,SwitchrParam
setMethod("archive_retries<-", "SwitchrParam", function(x, value) {
    x@archive_retries <- value
    x
})
##' @title dep_repos
##'
##' @description Get or set repositories to be used to fulfill dependencies beyond packages
##' within the manifest
##' @return Character vector with existing repository urls
##' @param x A package or session manifest
##' @rdname dep_repos
##' @docType methods
##' @export
setGeneric("dep_repos", function(x) standardGeneric("dep_repos"))
##' @rdname dep_repos
##' @aliases dep_repos,PkgManifest
setMethod("dep_repos", "PkgManifest", function(x) x@dependency_repos)
##' @rdname dep_repos
##' @aliases dep_repos,SessionManifest
setMethod("dep_repos", "SessionManifest", function(x) manifest(x)@dependency_repos)
##'
##' @rdname dep_repos
##' @param value A character vector with the new dependency repos
##' @export
setGeneric("dep_repos<-", function(x, value) standardGeneric("dep_repos<-"))
##' @rdname dep_repos
##' @aliases dep_repos<-,PkgManifest
setMethod("dep_repos<-", "PkgManifest", function(x, value) {
    x@dependency_repos <- value
    x
})
##' @rdname dep_repos
##' @aliases dep_repos<-,SessionManifest
setMethod("dep_repos<-", "SessionManifest", function(x, value) {
    ## a SessionManifest has no dependency_repos slot of its own; delegate
    ## to the inner PkgManifest and store the updated manifest back
    man <- manifest(x)
    dep_repos(man) <- value
    manifest(x) <- man
    x
})
##' @title Get or set the manifest associated with an object
##' @description Get or set manifest associated with an object
##'
##' @rdname manifest_methods
##' @param x An object which contains a manifest
##' @docType methods
##' @return A PkgManifest or SessionManifest object
##' @export
setGeneric("manifest", function(x) standardGeneric("manifest"))
##setMethod("manifest", "PkgManifest", function(x) x@manifest)
##' @export
##' @rdname manifest_methods
##' @param value A PkgManifest
setGeneric("manifest<-", function(x, value) standardGeneric("manifest<-"))
##' @rdname manifest_methods
##' @aliases manifest,SessionManifest
setMethod("manifest", "SessionManifest",
          function(x) x@pkg_manifest)
##' @rdname manifest_methods
##' @aliases manifest<-,SessionManifest
setMethod("manifest<-", "SessionManifest",
          function(x, value) {
    x@pkg_manifest <- value
    x
})
##' @title manifest_df
##'
##' @description Get or set the package location manifest (data.frame) associated with an
##' object
##'
##' @rdname manifest_df
##' @param x The object
##' @param ... unused.
##' @docType methods
##' @export
##' @return for the getter, the manifest data.frame corresponding to the manifest,
##' for the setter, a manifest updated with the new manifest data.frame.
setGeneric("manifest_df", function(x, ...) standardGeneric("manifest_df"))
## only get manifest rows for pkgs in the 'session' by default
## override with session_only=FALSE if desired
##' @aliases manifest_df,SessionManifest
##' @param session_only Only return manifest rows associated with the
##' versioned cohort defined in \code{x} (\code{SessionManifest}s only).
##' @rdname manifest_df
setMethod("manifest_df", "SessionManifest",
          function(x, session_only = TRUE, ...) {
    ## all pkgs in the manifest
    mdf <- manifest_df(manifest(x))
    ## restrict to pkgs in the 'session' if desired
    if(session_only)
        mdf <- mdf[mdf$name %in% versions_df(x)$name,]
    mdf
})
##' @aliases manifest_df,PkgManifest
##' @rdname manifest_df
setMethod("manifest_df", "PkgManifest", function(x) x@manifest)
##' @rdname manifest_df
##' @param value A data.frame of package manifest information.
##' See \code{\link{ManifestRow}}
##' @export
setGeneric("manifest_df<-", function(x, value) standardGeneric("manifest_df<-"))
##' @aliases manifest_df<-,SessionManifest
##' @rdname manifest_df
setMethod("manifest_df<-", "SessionManifest", function(x, value) {
    ## delegate to the inner PkgManifest's replacement method
    manifest_df(manifest(x)) <- value
    x
})
##' @aliases manifest_df<-,PkgManifest
##' @rdname manifest_df
setMethod("manifest_df<-", "PkgManifest", function(x, value) {
    x@manifest <- value
    x
})
##' @title versions_df
##'
##' @description Get or set the the versions information in a SessionManifest
##'
##' @param x An object containing package version information
##' @rdname versions
##' @docType methods
##' @export
##' @return For the getter, a data.frame indicating the versions-specific cohort
##' of packages defined by \code{x}, for the setter, the \code{SessionManifest} object
##' updated with the new versions data.frame.
setGeneric("versions_df", function(x) standardGeneric("versions_df"))
##' @aliases versions_df,SessionManifest
##' @rdname versions
setMethod("versions_df", "SessionManifest",
          function(x) x@pkg_versions)
##' @rdname versions
##' @param value A data.frame of package version information.
##' @export
setGeneric("versions_df<-", function(x, value) standardGeneric("versions_df<-"))
##' @aliases versions_df<-,SessionManifest
##' @rdname versions
setMethod("versions_df<-", "SessionManifest", function(x, value) {
    x@pkg_versions <- value
    x
})
##' @title branch
##' @description Get or set the branch associated with a Package Source
##' @export
##' @param x A source
##' @rdname branch
##' @docType methods
##' @return for the getter, the branch associated with the source
##' object, for the setter, the object updated to use the specified
##' branch.
setGeneric("branch", function(x) standardGeneric("branch"))
##' @aliases branch,PkgSource
##' @rdname branch
setMethod("branch", "PkgSource", function(x) x@branch)
##' @rdname branch
##' @param value The new branch
## NOTE(review): this setter generic carries no @export tag, unlike
## subdir<- below -- confirm whether keeping it internal is intentional.
setGeneric("branch<-", function(x, value) standardGeneric("branch<-"))
##' @aliases branch<-,PkgSource
##' @rdname branch
setMethod("branch<-", "PkgSource", function(x, value) {
    x@branch <- value
    x
})
##' @title pkgname
##' @description Get or set the package name associated with a Package Source
##' @export
##' @param x A source
##' @rdname pkgname
##' @docType methods
##' @return for the getter, the package name as a string, for the setter,
##' an updated \code{PkgSource} (or subclass) object with the new
##' package name.
setGeneric("pkgname", function(x) standardGeneric("pkgname"))
##' @aliases pkgname,PkgSource
##' @rdname pkgname
setMethod("pkgname", "PkgSource", function(x) x@name)
##' @rdname pkgname
##' @param value The new pkgname
## NOTE(review): no @export here either; see the note on branch<- above.
setGeneric("pkgname<-", function(x, value) standardGeneric("pkgname<-"))
##' @aliases pkgname<-,PkgSource
##' @rdname pkgname
setMethod("pkgname<-", "PkgSource", function(x, value) {
    x@name <- value
    x
})
##' @title subdir
##' @description accessor for subdirectory.
##' @rdname subdir
##' @param x An object associated with a subdirectory, typically a PkgSource
##' @docType methods
##' @export
##' @return For the getter, the subdirectory within the overall source to
##' find the actual package source code, for the setter, an updated
##' \code{PkgSource} object with the new subdirectory set.
setGeneric("subdir", function(x) standardGeneric("subdir"))
##' @aliases subdir,PkgSource
##' @rdname subdir
setMethod("subdir", "PkgSource", function(x) x@subdir)
##' @export
##' @param value The new subdirectory to associate with the object
##' @rdname subdir
setGeneric("subdir<-", function(x, value) standardGeneric("subdir<-"))
##' @rdname subdir
##' @aliases subdir<-,PkgSource
setMethod("subdir<-", "PkgSource", function(x, value) {
    x@subdir <- value
    x
})
##' @title location
##' @description Retrieve the directory associated with an object
##' @rdname location-methods
##' @return a character containing the associated path
##' @author Gabriel Becker
##' @param repo An object associated with a path
##' @docType methods
##' @export
setGeneric("location", function(repo) standardGeneric("location"))
##' @rdname location-methods
##' @aliases location,PkgSource-method
##' @export
setMethod("location", "PkgSource", function(repo) repo@location)
##' @title shell init
##' @description Set or Retrieve the shell initialization script for an object
##' @export
##' @param x An object associated with a SwitchrParam object
##' @rdname sh_init
##' @return For the getter, the shell initialization script/commands,
##' for the setter, an updated \code{SwitchrParam} object with the
##' new shell initialization set.
setGeneric("sh_init_script", function(x) standardGeneric("sh_init_script"))
##' @aliases sh_init_script,SwitchrParam
##' @rdname sh_init
setMethod("sh_init_script", "SwitchrParam", function(x) x@shell_init)
##' @export
##' @rdname sh_init
##' @docType methods
##' @param value The new value.
setGeneric("sh_init_script<-", function(x, value) standardGeneric("sh_init_script<-"))
##' @aliases sh_init_script<-,SwitchrParam,ANY
##' @rdname sh_init
setMethod("sh_init_script<-", "SwitchrParam", function(x, value) {
    x@shell_init <- value
    x
})
##' @title logfun
##' @description Get or set the logging function in an object associated with a SwitchrParam
##' @rdname logfun
##' @param x An object with a SwitchrParam
##' @return For the getter, the logging function stored on the
##' \code{SwitchrParam}; for the setter, the updated object.
##' @docType methods
##' @export
setGeneric("logfun", function(x) standardGeneric("logfun"))
##' @aliases logfun,SwitchrParam
##' @rdname logfun
setMethod("logfun", "SwitchrParam", function(x) x@logfun)
##' @export
##' @rdname logfun
##' @param value The new logging function
setGeneric("logfun<-", function(x, value) standardGeneric("logfun<-"))
##' @aliases logfun<-,SwitchrParam
##' @rdname logfun
setMethod("logfun<-", "SwitchrParam", function(x, value) {
    x@logfun <- value
    x
})
## @title Add/replace rows in a data.frame
##
## Combine two dataframes together with rows in one optionally replacing
## those in the other when they match on a specified index column
##
## @param df data.frame. The "first" or "old" data.frame.
## @param newdf data.frame. The "new" data frame of rows to add to \code{df}
## @param replace logical. Should replacement happen when rows of \code{df} and
## \code{newdf} match based on \code{indexcol}. Defaults to \code{TRUE}. If
## \code{FALSE}, an error is thrown in the matching case.
## @param indexcol character. The name of the column to use for matching.
## Defaults to \code{"name"} for convenience of internal usage.
##
## @return A combined data.frame with only columns found in both data.frames
## and one row per unique value of the specified index column across both
## datasets.
addReplaceDF = function(df, newdf, replace = TRUE, indexcol = "name") {
    ## restrict both data.frames to their shared columns. drop = FALSE keeps
    ## them data.frames even when only a single column is shared (without it,
    ## single-column subsetting collapses to a vector and [[indexcol]] fails)
    shared <- intersect(names(df), names(newdf))
    df <- df[, shared, drop = FALSE]
    newdf <- newdf[, shared, drop = FALSE]
    oldvec <- df[[indexcol]]
    newvec <- newdf[[indexcol]]
    dups <- oldvec[oldvec %in% newvec]
    if(length(dups)) {
        if(!replace)
            stop("Values in new rows already appear in existing rows, set replace=TRUE to replace them inplace. [",
                 paste(dups, collapse=", "),
                 "]")
        ## overwrite matching rows of df in place, then append only the
        ## genuinely new rows below
        dupdf <- newdf[newvec %in% dups,]
        newdf <- newdf[!newvec %in% dups,]
        df[match(dupdf[[indexcol]], oldvec),] <- dupdf
    }
    rbind(df, newdf)
}
##' @title addPkg
##' @description Add a package to an object associated with a manifest
##' @export
##' @rdname addPkg
##' @param x A manifest or manifest-associated object to add the package to
##' @param \dots The information regarding the package to place in the manifest
##' @param rows An already-created data.frame to add to the manifest
##' @param versions A data.frame of package names and versions, if adding to
##' a SessionManifest, ignored otherwise
##' @param replace logical. If true, the specified package info will replace
##' any already in the manifest in the case of duplicates. Otherwise, an error
##' is thrown.
##' @return \code{x}, with the relevant package(s) added to it (in the case
##' of a manifest) or its associated manifest.
##' @docType methods
setGeneric("addPkg", function(x, ..., rows = makeManifest(...),
                              versions = data.frame(name = manifest_df(rows)$name,
                                                    version = NA_character_,
                                                    stringsAsFactors=FALSE),
                              replace = FALSE)
           standardGeneric("addPkg")
           )
##' @rdname addPkg
##' @aliases addPkg,PkgManifest
setMethod("addPkg", "PkgManifest",
          function(x, ..., rows= makeManifest(...), versions, replace) {
    oldman <- manifest_df(x)
    newman <- manifest_df(rows)
    manifest_df(x) <- addReplaceDF(oldman, newman, replace = replace)
    ## BUG FIX: the original line assigned the merged repos to a local
    ## variable named `dep_repos`, silently discarding the result. Use the
    ## replacement method so the union is actually stored on x.
    dep_repos(x) <- unique(c(dep_repos(x), dep_repos(rows)))
    x
})
##' @rdname addPkg
##' @aliases addPkg,SessionManifest
setMethod("addPkg", "SessionManifest",
          function(x, ..., rows, versions, replace) {
    manifest(x) <- addPkg(manifest(x), ..., rows = rows, versions = NULL,
                          replace = replace)
    if(!missing(versions) && length(versions) > 0) {
        ## accept a named character vector of versions as shorthand for
        ## the (name, version) data.frame form
        if(is(versions, "character"))
            versions <- data.frame(name = names(versions),
                                   version = versions, stringsAsFactors = FALSE)
        oldv <- versions_df(x)
        versions_df(x) <- addReplaceDF(oldv, versions, replace = replace)
    }
    x
})
##' @title library_paths
##'
##' @description Accessor for which directories an SwitchrCtx is associated with.
##' @param seed An SwitchrCtx
##' @export
##' @docType methods
##' @rdname library_paths
##' @return for the getter, the set of library paths associated with
##' the \code{SwitchrCtx} object, for the setter, said context updated
##' with the new full set of library paths.
setGeneric("library_paths", function(seed) standardGeneric("library_paths"))
##' @rdname library_paths
##' @aliases library_paths,SwitchrCtx
setMethod("library_paths", "SwitchrCtx", function(seed) {
    seed@libpaths
})
##' @title full_libpaths
##'
##' @description Accessor for the full library path associate with a SwitchrCtx, including
##' the R library and (if not excluded) the site library
##'
##' @param seed a SwitchrCtx
##' @export
##' @docType methods
##' @rdname full_libpaths
##' @return For the getter, the full set of library paths associated with the
##' \code{SwitchrCtx} object, for the setter, the object, updated with the
##' new set of full lib paths.
setGeneric("full_libpaths", function(seed) standardGeneric("full_libpaths"))
##' @rdname full_libpaths
##' @aliases full_libpaths,SwitchrCtx
setMethod("full_libpaths", "SwitchrCtx", function(seed) {
    ## context-specific paths first, then (unless excluded) the site
    ## library, then the base R library
    unique(c(library_paths(seed), if(seed@exclude.site) character() else .Library.site, .Library))
})
##' @title packages
##' @description List the packages installed in a switchr context (library)
##' @docType methods
##' @rdname packages
##' @param seed A switchr context
##' @export
##' @return a vector of package names installed in the
##' specified switchr context.
setGeneric("packages", function(seed) standardGeneric("packages"))
##' @rdname packages
##' @aliases packages,SwitchrCtx
setMethod("packages", "SwitchrCtx", function(seed) seed@packages)
## Internal: refresh the cached installed-package table on a SwitchrCtx.
## One row per package; when a package appears on several library paths the
## first hit wins, matching R's own library-path resolution order.
setGeneric("update_pkgs_list", function(seed) standardGeneric("update_pkgs_list"))
setMethod("update_pkgs_list", "SwitchrCtx", function(seed){
    pathsToLook <- full_libpaths(seed)
    pkgs <- installed.packages(pathsToLook,
                               noCache = TRUE)[, c("Package", "Version", "LibPath")]
    ## drop = FALSE keeps the matrix shape even when only one package remains
    pkgs <- pkgs[!duplicated(pkgs[, "Package"]), , drop = FALSE]
    pkgs <- as.data.frame(pkgs, stringsAsFactors = FALSE)
    seed@packages <- pkgs
    seed
})
##' @title Notrack directory
##'
##' @description This function is not intended to be called directly by the user.
##'
##' @param repo The object.
##' @return the path where retrieved package versions should be. If \code{repo}
##' is NULL, a notrack directory is constructed within a temp directory.
##' @export
##' @docType methods
##' @rdname notrack
setGeneric("notrack", function(repo) standardGeneric("notrack"))
##' @rdname notrack
##' @aliases notrack,NULL
## NULL dispatch: with no repo, park downloads under the session temp dir
setMethod("notrack", "NULL", function(repo) file.path(tempdir(), "notrack"))
##' @description Number of rows
##'
##' @title Number of rows
##'
##' @param x A tabular data structure.
##' @return The number of rows in the structure
##' @docType methods
##' @export
##' @rdname nrow
## Promote base::nrow to an S4 generic so the manifest classes below can
## supply methods; base behavior is preserved as the default.
setGeneric("nrow", nrow)
##' @rdname nrow
##' @aliases nrow,PkgManifest
setMethod("nrow", "PkgManifest",
function(x) base::nrow(manifest_df(x)))
##' @rdname nrow
##' @aliases nrow,SessionManifest
setMethod("nrow", "SessionManifest",
function(x) base::nrow(manifest_df(x)))
|
/R/accessors.R
|
no_license
|
cran/switchr
|
R
| false
| false
| 21,895
|
r
|
##' @title archive_timing
##'
##' @description Get or set the number of seconds to wait after trying to
##' retrieve a file from the CRAN Archive.
##'
##' This is intended to stop intermittent install failures
##' due to failing to retrieve files that *are* in the
##' archive but are not downloading properly when a larger
##' number of packages is being retrieved.
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of seconds to wait,
##' when setting, a new, updated SwitchrParam object.
##' @rdname archive_timing
##' @docType methods
##' @export
setGeneric("archive_timing", function(x) standardGeneric("archive_timing"))
##' @rdname archive_timing
##' @aliases archive_timing,SwitchrParam
setMethod("archive_timing", "SwitchrParam", function(x) x@archive_timing)
##' @rdname archive_timing
##' @aliases archive_timing<-
##' @export
setGeneric("archive_timing<-", function(x, value) standardGeneric("archive_timing<-"))
##' @rdname archive_timing
##' @param value The new number of seconds to wait
##' @aliases archive_timing<-,SwitchrParam
setMethod("archive_timing<-", "SwitchrParam", function(x, value) {
x@archive_timing = value
x
})
##' @title Get or set the number of seconds to wait between successive shell commands
##'
##' @description This is intended to stop intermittent install failures
##' due to network drive latency interacting with git commands
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of seconds to wait,
##' when setting, a new, updated SwitchrParam object.
##' @rdname shell_timing
##' @docType methods
##' @export
setGeneric("shell_timing", function(x) standardGeneric("shell_timing"))
##' @rdname shell_timing
##' @aliases shell_timing,SwitchrParam
setMethod("shell_timing", "SwitchrParam", function(x) x@shell_timing)
##' @rdname shell_timing
##' @aliases shell_timing<-
##' @export
setGeneric("shell_timing<-", function(x, value) standardGeneric("shell_timing<-"))
##' @rdname shell_timing
##' @param value The new number of seconds to wait
##' @aliases shell_timing<-,SwitchrParam
setMethod("shell_timing<-", "SwitchrParam", function(x, value) {
x@shell_timing = value
x
})
##' @title dl_method
##' @description Get or set the download method for retrieving files.
##' @param x A SwitchrParam object
##' @rdname dl_method
##' @docType methods
##' @return for the getter, the download method specified in the
##' \code{SwitchrParam} object, for the setter, the object
##' updated with the new download method.
##' @export
setGeneric("dl_method", function(x) standardGeneric("dl_method"))
##' @rdname dl_method
##' @aliases dl_method,SwitchrParam
setMethod("dl_method", "SwitchrParam", function(x) x@dl_method)
##' @rdname dl_method
##' @aliases dl_method<-
##' @export
setGeneric("dl_method<-", function(x, value) standardGeneric("dl_method<-"))
##' @rdname dl_method
##' @param value The new download method
##' @aliases dl_method<-,SwitchrParam
setMethod("dl_method<-", "SwitchrParam", function(x, value) {
x@dl_method = value
x
})
##' @title archive_retries
##'
##' @description Get or set the number of times to retry downloading a file from
##' the CRAN archive
##'
##' This is intended to stop intermittent install failures
##' due to failing to retrieve files that *are* in the
##' archive but are not downloading properly when a larger
##' number of packages is being retrieved.
##'
##' @param x A SwitchrParam object
##' @return When getting, the number of retries,
##' when setting, a new, updated SwitchrParam object.
##' @rdname archive_retries
##' @docType methods
##' @export
setGeneric("archive_retries", function(x) standardGeneric("archive_retries"))
##' @rdname archive_retries
##' @aliases archive_retries,SwitchrParam
setMethod("archive_retries", "SwitchrParam", function(x) x@archive_retries)
##' @rdname archive_retries
##' @aliases archive_retries<-
##' @export
setGeneric("archive_retries<-",
function(x, value) standardGeneric("archive_retries<-"))
##' @rdname archive_retries
##' @param value The new number of retries
##' @aliases archive_retries<-,SwitchrParam
setMethod("archive_retries<-", "SwitchrParam", function(x, value) {
x@archive_retries = value
x
})
##' @title dep_repos
##'
##' @description Get or set repositories to be used to fullfill dependencies beyond packages
##' within the manifest
##' @return Character vector with existing repository urls
##' @param x A package or session manifest
##' @rdname dep_repos
##' @docType methods
##' @export
setGeneric("dep_repos", function(x) standardGeneric("dep_repos"))
##' @rdname dep_repos
##' @aliases dep_repos,PkgManifest
setMethod("dep_repos", "PkgManifest", function(x) x@dependency_repos)
##' @rdname dep_repos
##' @aliases dep_repos,SessionManifest
setMethod("dep_repos", "SessionManifest", function(x) manifest(x)@dependency_repos)
##'
##' @rdname dep_repos
##' @param value A character vector with the new dependency repos
##' @export
setGeneric("dep_repos<-", function(x, value) standardGeneric("dep_repos<-"))
##'@rdname dep_repos
##' @aliases dep_repos<-,PkgManifest
setMethod("dep_repos<-", "PkgManifest", function(x, value) {
x@dependency_repos = value
x
})
##' @rdname dep_repos
##' @aliases dep_repos<-,SessionManifest
setMethod("dep_repos<-", "SessionManifest", function(x, value) {
man = manifest(x)
dep_repos(man) = value
manifest(x) = man
x
})
##' @title Get or set the manifest associated with an object
##' @description Get or set manifest associated with an object
##'
##' @rdname manifest_methods
##' @param x An object which contains a manifest
##' @docType methods
##' @return A PkgManifest or SessionManifest object
##' @export
setGeneric("manifest", function(x) standardGeneric("manifest"))
##setMethod("manifest", "PkgManifest", function(x) x@manifest)
##' @export
##' @rdname manifest_methods
##' @param value A PkgManifest
setGeneric("manifest<-", function(x, value) standardGeneric("manifest<-"))
##' @rdname manifest_methods
##' @aliases manifest,SessionManifest
setMethod("manifest", "SessionManifest",
function(x) x@pkg_manifest)
##' @rdname manifest_methods
##' @aliases manifest<-,SessionManifest
setMethod("manifest<-", "SessionManifest",
function(x, value ) {
x@pkg_manifest = value
x
})
##' @title manifest_df
##'
##' @description Get or set the package location manifest (data.frame) associated with an
##' object
##'
##' @rdname manifest_df
##' @param x The object
##' @param ... unused.
##' @docType methods
##' @export
##' @return for the getter, the manifest data.frame corresponding to the manifest,
##' for the setter, a manifest updated with the new manifest data.frame.
setGeneric("manifest_df", function(x, ...) standardGeneric("manifest_df"))
## only get manifest rows for pkgs in the 'session' by default
## override with session_only=FALSE if desired
##' @aliases manifest_df,SessionManifest
##' @param session_only Only return manifest rows associated with the
##' versioned cohort defined in \code{x} (\code{SessionManifest}s only).
##' @rdname manifest_df
setMethod("manifest_df", "SessionManifest",
function(x, session_only = TRUE, ...) {
## all pkgs in the manifest
mdf = manifest_df(manifest(x))
## restrict to pkgs in the 'session' if desired
if(session_only)
mdf = mdf[mdf$name %in% versions_df(x)$name,]
mdf
})
##' @aliases manifest_df,PkgManifest
##' @rdname manifest_df
setMethod("manifest_df", "PkgManifest", function(x) x@manifest)
##' @rdname manifest_df
##' @param value A data.frame of package manifest information.
##' See \code{\link{ManifestRow}}
##' @export
setGeneric("manifest_df<-", function(x, value) standardGeneric("manifest_df<-"))
##' @aliases manifest_df<-,SessionManifest
##' @rdname manifest_df
setMethod("manifest_df<-", "SessionManifest", function(x, value) {
manifest_df(manifest(x)) = value
x
})
##' @aliases manifest_df<-,PkgManifest
##' @rdname manifest_df
setMethod("manifest_df<-", "PkgManifest", function(x, value) {
x@manifest = value
x
})
##' @title versions_df
##'
##' @description Get or set the the versions information in a SessionManifest
##'
##' @param x An object containing package version information
##' @rdname versions
##' @docType methods
##' @export
##' @return For the getter, a data.frame indicating the versions-specific cohort
##' of packages defined by \code{x}, for the setter, the \code{SessionManifest} object
##' updated with the new versions data.frame.
setGeneric("versions_df", function(x) standardGeneric("versions_df"))
##' @aliases versions_df,SessionManifest
##' @rdname versions
setMethod("versions_df", "SessionManifest",
function(x) x@pkg_versions)
##' @rdname versions
##' @param value A data.frame of package version information.
##' @export
setGeneric("versions_df<-", function(x, value) standardGeneric("versions_df<-"))
##' @aliases versions_df<-,SessionManifest
##' @rdname versions
setMethod("versions_df<-", "SessionManifest", function(x, value) {
x@pkg_versions = value
x
})
## Simple S4 slot accessors (getter + setter pairs) for PkgSource objects.
##' @title branch
##' @description Get or set the branch associated with a Package Source
##' @export
##' @param x A source
##' @rdname branch
##' @docType methods
##' @return for the getter, the branch associated with the source
##' object, for the setter, the object updated to use the specified
##' branch.
setGeneric("branch", function(x) standardGeneric("branch"))
##' @aliases branch,PkgSource
##' @rdname branch
setMethod("branch", "PkgSource", function(x) x@branch)
##' @rdname branch
##' @param value The new branch
setGeneric("branch<-", function(x, value) standardGeneric("branch<-"))
##' @aliases branch<-,PkgSource
##' @rdname branch
setMethod("branch<-", "PkgSource", function(x, value) {
    x@branch = value
    x
})
##' @title pkgname
##' @description Get or set the package name associated with a Package Source
##' @export
##' @param x A source
##' @rdname pkgname
##' @docType methods
##' @return for the getter, the package name as a string, for the setter,
##' an updated \code{PkgSource} (or subclass) object with the new
##' package name.
setGeneric("pkgname", function(x) standardGeneric("pkgname"))
##' @aliases pkgname,PkgSource
##' @rdname pkgname
setMethod("pkgname", "PkgSource", function(x) x@name)
##' @rdname pkgname
##' @param value The new pkgname
setGeneric("pkgname<-", function(x, value) standardGeneric("pkgname<-"))
##' @aliases pkgname<-,PkgSource
##' @rdname pkgname
setMethod("pkgname<-", "PkgSource", function(x, value) {
    x@name = value
    x
})
##' @title subdir
##' @description Accessor for the subdirectory within a source where the
##' package code resides.
##' @rdname subdir
##' @param x An object associated with a subdirectory, typically a PkgSource
##' @docType methods
##' @export
##' @return For the getter, the subdirectory within the overall source to
##' find the actual package source code, for the setter, an updated
##' \code{PkgSource} object with the new subdirectory set.
setGeneric("subdir", function(x) standardGeneric("subdir"))
##' @aliases subdir,PkgSource
##' @rdname subdir
setMethod("subdir", "PkgSource", function(x) x@subdir)
##' @export
##' @param value The new subdirectory to associate with the object
##' @rdname subdir
setGeneric("subdir<-", function(x, value) standardGeneric("subdir<-"))
##' @rdname subdir
##' @aliases subdir<-,PkgSource
setMethod("subdir<-", "PkgSource", function(x, value) {
    x@subdir = value
    x
})
##' @title location
##' @description Retrieve the directory associated with an object
##' @rdname location-methods
##' @return a character containing the associated path
##' @author Gabriel Becker
##' @param repo An object associated with a path
##' @docType methods
##' @export
setGeneric("location", function(repo) standardGeneric("location"))
##' @rdname location-methods
##' @aliases location,PkgSource-method
##' @export
## location is read-only: no setter generic is defined for it.
setMethod("location", "PkgSource", function(repo) repo@location)
##' @title shell init
##' @description Set or retrieve the shell initialization script for an object
##' @export
##' @param x An object associated with a SwitchrParam object
##' @rdname sh_init
##' @return For the getter, the shell initialization script/commands,
##' for the setter, an updated \code{SwitchrParam} object with the
##' new shell initialization set.
setGeneric("sh_init_script", function(x) standardGeneric("sh_init_script"))
##' @aliases sh_init_script,SwitchrParam
##' @rdname sh_init
setMethod("sh_init_script", "SwitchrParam", function(x) x@shell_init)
##' @export
##' @rdname sh_init
##' @docType methods
##' @param value The new value.
setGeneric("sh_init_script<-", function(x, value) standardGeneric("sh_init_script<-"))
##' @aliases sh_init_script<-,SwitchrParam,ANY
##' @rdname sh_init
setMethod("sh_init_script<-", "SwitchrParam", function(x, value) {
    x@shell_init = value
    x
})
##' @title logfun
##' @description Get or set the logging function in an object associated with a SwitchrParam
##' @rdname logfun
##' @param x An object with a SwitchrParam
##' @docType methods
##' @export
setGeneric("logfun", function(x) standardGeneric("logfun"))
##' @aliases logfun,SwitchrParam
##' @rdname logfun
setMethod("logfun", "SwitchrParam", function(x) x@logfun)
##' @export
##' @rdname logfun
##' @param value The new logging function
setGeneric("logfun<-", function(x, value) standardGeneric("logfun<-"))
##' @aliases logfun<-,SwitchrParam
##' @rdname logfun
setMethod("logfun<-", "SwitchrParam", function(x, value) {
    x@logfun = value
    x
})
## @title Add/replace rows in a data.frame
##
## Combine two dataframes together with rows in one optionally replacing
## those in the other when they match on a specified index column
##
## @param df data.frame. The "first" or "old" data.frame.
## @param newdf data.frame. The "new" data frame of rows to add to \code{df}
## @param replace logical. Should replacement happen when rows of \code{df} and
## \code{newdf} match based on \code{indexcol}. Defaults to \code{TRUE}. If
## \code{FALSE}, an error is thrown in the matching case.
## @param indexcol character. The name of the column to use for matching.
## Defaults to \code{"name"} for convenience of internal usage.
##
## @return A combined data.frame with only columns found in both data.frames
## and one row per unique value of the specified index column across both
## datasets.
addReplaceDF = function(df, newdf, replace = TRUE, indexcol = "name") {
    ## Restrict both data.frames to their shared columns. drop = FALSE keeps
    ## them data.frames even when only one column is shared; without it, `[`
    ## would collapse to a vector and the [[indexcol]] lookups below fail.
    shared = intersect(names(df), names(newdf))
    df = df[, shared, drop = FALSE]
    newdf = newdf[, shared, drop = FALSE]
    oldvec = df[[indexcol]]
    newvec = newdf[[indexcol]]
    dups = oldvec[oldvec %in% newvec]
    if(length(dups) > 0) {
        if(!replace)
            stop("Values in new rows already appear in existing rows, set replace=TRUE to replace them inplace. [",
                 paste(dups, collapse=", "),
                 "]")
        ## Colliding rows of newdf overwrite the matching rows of df in
        ## place; the remaining, genuinely-new rows are appended below.
        dupdf = newdf[newvec %in% dups, , drop = FALSE]
        newdf = newdf[!newvec %in% dups, , drop = FALSE]
        df[match(dupdf[[indexcol]], oldvec),] = dupdf
    }
    rbind(df, newdf)
}
##' @title addPkg
##' @description Add a package to an object associated with a manifest
##' @export
##' @rdname addPkg
##' @param x A manifest or manifest-associated object to add the pkg to
##' @param \dots The information regarding the package to place in the manifest
##' @param rows An already-created data.frame to add to the manifest
##' @param versions A data.frame of package names and versions, if adding to
##' a SessionManifest, ignored otherwise
##' @param replace logical. If true, the specified package info will replace
##' any already in the manifest in the case of duplicates. Otherwise, an error
##' is thrown.
##' @return \code{x}, with the relevant package(s) added to it (in the case
##' of a manifest) or its associated manifest.
##' @docType methods
## By default each added package gets an NA version entry; callers that know
## exact versions pass a versions data.frame explicitly.
setGeneric("addPkg", function(x, ..., rows = makeManifest(...),
                              versions = data.frame(name = manifest_df(rows)$name,
                                                    version = NA_character_,
                                                    stringsAsFactors=FALSE),
                              replace = FALSE)
    standardGeneric("addPkg")
)
##' @rdname addPkg
##' @aliases addPkg,PkgManifest
setMethod("addPkg", "PkgManifest",
          function(x, ..., rows= makeManifest(...), versions, replace) {
    ## Merge the incoming manifest rows into the existing manifest; rows for
    ## packages already present replace the old ones when replace = TRUE
    ## (addReplaceDF errors on duplicates when replace = FALSE).
    manifest_df(x) = addReplaceDF(manifest_df(x), manifest_df(rows),
                                  replace = replace)
    ## Carry over any dependency repos declared by the incoming rows.
    ## Previously the unique() result was assigned to a local variable and
    ## silently discarded, so dep repos from `rows` were lost.
    dep_repos(x) = unique(c(dep_repos(x), dep_repos(rows)))
    x
})
##' @rdname addPkg
##' @aliases addPkg,SessionManifest
setMethod("addPkg", "SessionManifest",
          function(x, ..., rows, versions, replace) {
    ## Update the wrapped PkgManifest first. Versions are handled at this
    ## level, so pass versions = NULL down to the PkgManifest method.
    manifest(x) = addPkg(manifest(x), ..., rows = rows, versions = NULL,
                         replace = replace)
    if(!missing(versions) && length(versions) > 0) {
        ## A named character vector is also accepted: names are package
        ## names, values are version strings.
        if(is(versions, "character"))
            versions = data.frame(name = names(versions),
                                  version = versions, stringsAsFactors = FALSE)
        oldv = versions_df(x)
        ## versions = versions[,names(oldv)]
        ## versions_df(x) = rbind(oldv, versions)
        versions_df(x) = addReplaceDF(oldv, versions, replace = replace)
    }
    x
})
##' @title library_paths
##'
##' @description Accessor for which directories an SwitchrCtx is associated with.
##' @param seed An SwitchrCtx
##' @export
##' @docType methods
##' @rdname library_paths
##' @return for the getter, the set of library paths associated with
##' the \code{SwitchrCtx} object, for the setter, said context updated
##' with the new full set of library paths.
setGeneric("library_paths", function(seed) standardGeneric("library_paths"))
##' @rdname library_paths
##' @aliases library_paths,SwitchrCtx
setMethod("library_paths", "SwitchrCtx", function(seed) {
    ## Only the context-specific paths; see full_libpaths() for the complete
    ## search path including the site and base R libraries.
    seed@libpaths
})
##' @title full_libpaths
##'
##' @description Accessor for the full library path associate with a SwitchrCtx, including
##' the R library and (if not excluded) the site library
##'
##' @param seed a SwitchrCtx
##' @export
##' @docType methods
##' @rdname full_libpaths
##' @return For the getter, the full set of library paths associated with the
##' \code{SwitchrCtx} object, for the setter, the object, updated with the
##' new set of full lib paths.
setGeneric("full_libpaths", function(seed) standardGeneric("full_libpaths"))
##' @rdname full_libpaths
##' @aliases full_libpaths,SwitchrCtx
setMethod("full_libpaths", "SwitchrCtx", function(seed) {
    ## Context-specific paths first, then the site library (unless the
    ## context excludes it), and always the base R library last.
    paths <- library_paths(seed)
    if (!seed@exclude.site)
        paths <- c(paths, .Library.site)
    unique(c(paths, .Library))
})
##' @title packages
##' @description List the packages installed in a switchr context (library)
##' @docType methods
##' @rdname packages
##' @param seed A switchr context
##' @export
##' @return a vector of package names installed in the
##' specified switchr context.
## NOTE(review): update_pkgs_list below stores a data.frame (Package,
## Version, LibPath) in seed@packages, so the getter appears to return a
## data.frame rather than a bare name vector — confirm against callers.
setGeneric("packages", function(seed) standardGeneric("packages"))
##' @rdname packages
##' @aliases packages,SwitchrCtx
setMethod("packages", "SwitchrCtx", function(seed) seed@packages)
## Internal generic: refresh the cached table of installed packages for a
## switchr context by rescanning its full library path.
setGeneric("update_pkgs_list", function(seed) standardGeneric("update_pkgs_list"))
setMethod("update_pkgs_list", "SwitchrCtx", function(seed){
    pathsToLook = full_libpaths(seed)
    ## noCache forces a fresh scan of the library directories
    pkgs = installed.packages(pathsToLook,
                              noCache=TRUE)[,c("Package", "Version", "LibPath")]
    ## keep only the first occurrence of each package name — presumably the
    ## one earliest in the library search order; TODO confirm ordering
    pkgs = pkgs[!duplicated(pkgs[,"Package"]),]
    pkgs = as.data.frame(pkgs, stringsAsFactors = FALSE)
    seed@packages = pkgs
    seed
})
##' @title Notrack directory
##'
##' @description This function is not intended to be called directly by the user.
##'
##' @param repo The object.
##' @return the path where retrieved package versions should be. If \code{repo}
##' is NULL, a notrack directory is constructed within a temp directory.
##' @export
##' @docType methods
##' @rdname notrack
setGeneric("notrack", function(repo) standardGeneric("notrack"))
##' @rdname notrack
##' @aliases notrack,NULL
## With no repository, fall back to a session-scoped temporary location.
setMethod("notrack", "NULL", function(repo) file.path(tempdir(), "notrack"))
##' @description Number of rows
##'
##' @title Number of rows
##'
##' @param x A tabular data structure.
##' @return The number of rows in the structure
##' @docType methods
##' @export
##' @rdname nrow
## Promote base::nrow to an S4 generic so manifest classes can provide methods.
setGeneric("nrow", nrow)
##' @rdname nrow
##' @aliases nrow,PkgManifest
setMethod("nrow", "PkgManifest",
          function(x) base::nrow(manifest_df(x)))
##' @rdname nrow
##' @aliases nrow,SessionManifest
## A SessionManifest counts the rows of its underlying package manifest.
setMethod("nrow", "SessionManifest",
          function(x) base::nrow(manifest_df(x)))
|
#### Read in NRS Stillbirths ####
## Pull NRS stillbirth registrations from the SMRA database from
## cohort_start_date onwards, then derive analysis fields. Relies on objects
## created elsewhere in the pipeline: SMRAConnection, cohort_start_date,
## simd_hb_lookup, hb_lookup, feasible_gestation_sb,
## assumed_gestation_stillbirth, folder_temp_data, dataset_dates(),
## chi_pad()/chi_check() (phsmethods) and UUIDgenerate() (uuid).
data_nrs_stillbirths_raw <- as_tibble(
  dbGetQuery(
    SMRAConnection, paste0(
      "
SELECT MOTHER_DERIVED_CHI, MOTHER_UPI_NUMBER, POSTCODE, DATE_OF_BIRTH,
DURATION_OF_PREGNANCY, TOTAL_BIRTHS_LIVE_AND_STILL, SEX, WEIGHT_OF_FOETUS,
PRIMARY_CAUSE_OF_DEATH, SECONDARY_CAUSE_OF_DEATH_0, SECONDARY_CAUSE_OF_DEATH_1,
SECONDARY_CAUSE_OF_DEATH_2, SECONDARY_CAUSE_OF_DEATH_3
FROM A.GRO_STILLBIRTHS_C
WHERE DATE_OF_BIRTH >= TO_DATE('", cohort_start_date, "', 'yyyy-mm-dd')")
  )) %>%
  clean_names()
#### Add SIMD and HB ####
## Postcode -> health board / deprivation (SIMD) quintile lookup.
simd_healthboard_lookup <-
  readRDS(
    simd_hb_lookup
  ) %>%
  select(pc7, hb2019, simd2020v2_sc_quintile)
data_nrs_stillbirths <- data_nrs_stillbirths_raw %>%
  left_join(simd_healthboard_lookup, by=c("postcode" = "pc7")) %>%
  left_join(hb_lookup, by = c("hb2019" = "healthboard_code")) %>%
  ## Replace the health board code with its name.
  mutate(hb2019 = healthboard) %>%
  select(-healthboard) %>%
  ## Validate the mother's CHI/UPI number; invalid numbers are blanked and
  ## replaced with a synthetic "46"-prefixed id so every record keeps a
  ## unique mother identifier.
  mutate(mother_upi_number = chi_pad(mother_upi_number)) %>%
  mutate(validity = chi_check(mother_upi_number)) %>%
  mutate(mother_upi_number = case_when(validity == "Valid CHI" ~ mother_upi_number,
                                       T ~ NA_character_)) %>%
  select(-validity) %>%
  mutate(mother_upi_number = case_when(
    is.na(mother_upi_number) ~ paste0("46", str_pad(
      string = row_number(),
      width = 8,
      side = "left",
      pad = "0"
    )),
    T ~ mother_upi_number
  )) %>%
  ## ICD-10 code P96.4 in any cause-of-death position flags a termination;
  ## everything else is treated as a stillbirth.
  mutate(
    termination = case_when(
      primary_cause_of_death == "P964" ~ T,
      secondary_cause_of_death_0 == "P964" ~ T,
      secondary_cause_of_death_1 == "P964" ~ T,
      secondary_cause_of_death_2 == "P964" ~ T,
      secondary_cause_of_death_3 == "P964" ~ T,
      T ~ F
    )
  ) %>%
  mutate(stillbirth = case_when(termination == T ~ F,
                                T ~ T)) %>%
  ## Gestation cleaning: 99 is a "not known" sentinel, values outside the
  ## feasible range are dropped, and missing values are imputed with the
  ## project-wide assumed gestation for stillbirths (flagged for audit).
  mutate(
    duration_of_pregnancy = case_when(duration_of_pregnancy == 99 ~ NA_real_,
                                      T ~ duration_of_pregnancy)
  ) %>%
  mutate(duration_of_pregnancy = if_else(duration_of_pregnancy %in% feasible_gestation_sb, duration_of_pregnancy, NA_real_)) %>%
  mutate(assumed_gestation = if_else(is.na(duration_of_pregnancy), 1, 0)) %>%
  mutate(duration_of_pregnancy = case_when(is.na(duration_of_pregnancy) ~ assumed_gestation_stillbirth,
                                           T ~ duration_of_pregnancy)) %>%
  ## Conception estimated as birth date minus (gestation - 2 weeks).
  mutate(estimated_date_of_conception = date_of_birth - (weeks(duration_of_pregnancy) - weeks(2))) %>%
  mutate(outcome_type = case_when(
    termination == T ~ "Termination",
    stillbirth == T ~ "Stillbirth"
  )) %>%
  ## Missing/zero multiple-birth counts default to a singleton birth.
  mutate(total_births_live_and_still = case_when(is.na(total_births_live_and_still) ~ 1,
                                                 total_births_live_and_still == 0 ~ 1,
                                                 T ~ total_births_live_and_still)) %>%
  select(-c(mother_derived_chi)) %>%
  select(
    mother_upi_number,
    date_of_birth,
    duration_of_pregnancy,
    estimated_date_of_conception,
    termination,
    stillbirth,
    everything()
  ) %>%
  filter(date_of_birth < Sys.Date()) %>% # We have a tiny number of stillbirths in the future. We need accurate dates, so remove any stillbirths which happen in the future.
  mutate(sex = case_when(sex == "1" ~ "M", sex == "2" ~ "F", T ~ NA_character_)) %>%
  ## Prefix every column so the source is identifiable after later merges.
  rename_with( ~ paste0("nrssb_", .)) %>%
  ## NOTE(review): sex has already been recoded to M/F/NA above, so this
  ## "9" replacement looks like dead code — confirm before removing.
  replace_with_na_at(.vars = c("nrssb_sex"),
                     condition = ~.x == "9") %>%
  rowwise() %>% mutate(event_id = UUIDgenerate()) %>% ungroup()
write_rds(data_nrs_stillbirths, paste0(folder_temp_data, "nrs_stillbirths.rds"), compress = "gz")
#dates
dataset_dates("NRS stillbirths", data_nrs_stillbirths$nrssb_date_of_birth)
rm(data_nrs_stillbirths_raw, data_nrs_stillbirths)
|
/COPS code/1f. read in nrs stillbirths.r
|
no_license
|
Public-Health-Scotland/COPS-public
|
R
| false
| false
| 3,785
|
r
|
#### Read in NRS Stillbirths ####
data_nrs_stillbirths_raw <- as_tibble(
dbGetQuery(
SMRAConnection, paste0(
"
SELECT MOTHER_DERIVED_CHI, MOTHER_UPI_NUMBER, POSTCODE, DATE_OF_BIRTH,
DURATION_OF_PREGNANCY, TOTAL_BIRTHS_LIVE_AND_STILL, SEX, WEIGHT_OF_FOETUS,
PRIMARY_CAUSE_OF_DEATH, SECONDARY_CAUSE_OF_DEATH_0, SECONDARY_CAUSE_OF_DEATH_1,
SECONDARY_CAUSE_OF_DEATH_2, SECONDARY_CAUSE_OF_DEATH_3
FROM A.GRO_STILLBIRTHS_C
WHERE DATE_OF_BIRTH >= TO_DATE('", cohort_start_date, "', 'yyyy-mm-dd')")
)) %>%
clean_names()
#### Add SIMD and HB ####
simd_healthboard_lookup <-
readRDS(
simd_hb_lookup
) %>%
select(pc7, hb2019, simd2020v2_sc_quintile)
data_nrs_stillbirths <- data_nrs_stillbirths_raw %>%
left_join(simd_healthboard_lookup, by=c("postcode" = "pc7")) %>%
left_join(hb_lookup, by = c("hb2019" = "healthboard_code")) %>%
mutate(hb2019 = healthboard) %>%
select(-healthboard) %>%
mutate(mother_upi_number = chi_pad(mother_upi_number)) %>%
mutate(validity = chi_check(mother_upi_number)) %>%
mutate(mother_upi_number = case_when(validity == "Valid CHI" ~ mother_upi_number,
T ~ NA_character_)) %>%
select(-validity) %>%
mutate(mother_upi_number = case_when(
is.na(mother_upi_number) ~ paste0("46", str_pad(
string = row_number(),
width = 8,
side = "left",
pad = "0"
)),
T ~ mother_upi_number
)) %>%
mutate(
termination = case_when(
primary_cause_of_death == "P964" ~ T,
secondary_cause_of_death_0 == "P964" ~ T,
secondary_cause_of_death_1 == "P964" ~ T,
secondary_cause_of_death_2 == "P964" ~ T,
secondary_cause_of_death_3 == "P964" ~ T,
T ~ F
)
) %>%
mutate(stillbirth = case_when(termination == T ~ F,
T ~ T)) %>%
mutate(
duration_of_pregnancy = case_when(duration_of_pregnancy == 99 ~ NA_real_,
T ~ duration_of_pregnancy)
) %>%
mutate(duration_of_pregnancy = if_else(duration_of_pregnancy %in% feasible_gestation_sb, duration_of_pregnancy, NA_real_)) %>%
mutate(assumed_gestation = if_else(is.na(duration_of_pregnancy), 1, 0)) %>%
mutate(duration_of_pregnancy = case_when(is.na(duration_of_pregnancy) ~ assumed_gestation_stillbirth,
T ~ duration_of_pregnancy)) %>%
mutate(estimated_date_of_conception = date_of_birth - (weeks(duration_of_pregnancy) - weeks(2))) %>%
mutate(outcome_type = case_when(
termination == T ~ "Termination",
stillbirth == T ~ "Stillbirth"
)) %>%
mutate(total_births_live_and_still = case_when(is.na(total_births_live_and_still) ~ 1,
total_births_live_and_still == 0 ~ 1,
T ~ total_births_live_and_still)) %>%
select(-c(mother_derived_chi)) %>%
select(
mother_upi_number,
date_of_birth,
duration_of_pregnancy,
estimated_date_of_conception,
termination,
stillbirth,
everything()
) %>%
filter(date_of_birth < Sys.Date()) %>% # We have a tiny number of stillbirths in the future. We need accurate dates, so remove any stillbirths which happen in the future.
mutate(sex = case_when(sex == "1" ~ "M", sex == "2" ~ "F", T ~ NA_character_)) %>%
rename_with( ~ paste0("nrssb_", .)) %>%
replace_with_na_at(.vars = c("nrssb_sex"),
condition = ~.x == "9") %>%
rowwise() %>% mutate(event_id = UUIDgenerate()) %>% ungroup()
write_rds(data_nrs_stillbirths, paste0(folder_temp_data, "nrs_stillbirths.rds"), compress = "gz")
#dates
dataset_dates("NRS stillbirths", data_nrs_stillbirths$nrssb_date_of_birth)
rm(data_nrs_stillbirths_raw, data_nrs_stillbirths)
|
## Scratch/training script: basic R syntax exercises.
a <- 1
## Simple function: square its argument.
f <- function(x) {
  return (x^2)
}
f(1)
## R rounds half to even ("banker's rounding"), so these two differ.
round(1.5)
round(2.500001)
## Lists hold heterogeneous, named elements.
l <- list(name="Jane", age=3, grades = c(4,56,6,3))
str(l)
## Data frames: columns of equal length, possibly different types.
df <- data.frame(a=c(1,2,3), b = c("A", "B", "C")) ; df
str(df)
str(mtcars)
View(iris)
## Convert miles-per-gallon to km-per-litre and append as a new column.
kml <- mtcars$mpg * 0.425144
mtcars$kml <- kml
mtcars
|
/workScript.R
|
no_license
|
NLAHAM3/r-training
|
R
| false
| false
| 295
|
r
|
a <- 1
f <- function(x) {
return (x^2)
}
f(1)
round(1.5)
round(2.500001)
l <- list(name="Jane", age=3, grades = c(4,56,6,3))
str(l)
df <- data.frame(a=c(1,2,3), b = c("A", "B", "C")) ; df
str(df)
str(mtcars)
View(iris)
kml <- mtcars$mpg * 0.425144
mtcars$kml <- kml
mtcars
|
## Creates a matrix wrapper that caches its inverse once it has been
## computed via cacheSolve().
## Returns a list of accessor functions closing over the matrix and its
## cached inverse: set/get the matrix, setinverse/getinverse the inverse.
makeCacheMatrix <- function(x = matrix()) {
    cached_inv <- NULL
    list(
        ## Replacing the matrix invalidates any previously cached inverse.
        set = function(y) {
            x <<- y
            cached_inv <<- NULL
        },
        get = function() x,
        setinverse = function(solve) cached_inv <<- solve,
        getinverse = function() cached_inv
    )
}
## Computes the inverse of a makeCacheMatrix object, returning the cached
## inverse when one is available and computing (then caching) it otherwise.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getinverse()
    if (is.null(cached)) {
        ## Cache miss: compute via solve() and store for next time.
        cached <- solve(x$get(), ...)
        x$setinverse(cached)
    } else {
        message("getting cached data")
    }
    cached
}
|
/cachematrix.R
|
no_license
|
karlfsia/ProgrammingAssignment2
|
R
| false
| false
| 968
|
r
|
## Creates a matrix that caches it's inverse when it is calculated using the
## cacheSolve() function
## Creates a matrix that can cache it's own inverse.
## Returns a list of functions that operate on the matrix.
makeCacheMatrix <- function(x = matrix()) {
m_inverse <- NULL
set <- function(y) {
x <<- y
m_inverse <<- NULL
}
get <- function() x
setinverse <- function(solve) m_inverse <<- solve
getinverse <- function() m_inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Computes the inverse of a matrix using cached matrix if available, using a
## makeCacheMatrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
# Neural Network in R ----------------------------------------------------
#########################
# Implementing          #
# Neural Networks in R  #
#########################
# NOTE(review): install.packages() at the top of a script reinstalls on
# every run; consider guarding with requireNamespace() instead.
install.packages("NeuralNetTools")
install.packages("neuralnet")
library(neuralnet)
library(NeuralNetTools)
# Input qualities and target actions (Little Red Riding Hood toy example):
# rows are characters, columns are binary features/responses.
qualities <- matrix (c(1, 1, 1, 0, 0, 0,
                       0, 1, 0, 1, 1, 0,
                       1, 0, 0, 1, 0, 1), byrow = TRUE, nrow = 3)
colnames(qualities) <- c("big_ears", "big_eyes", "big_teeth", "kindly", "wrinkled", "handsome")
rownames(qualities) <- c("wolf", "grannie", "woodcutter")
qualities
actions <- matrix (c(1, 1, 1, 0, 0, 0, 0,
                     0, 0, 0, 1, 1, 1, 0,
                     0, 0, 0, 1, 0, 1, 1), byrow = TRUE, nrow = 3)
colnames(actions) <- c("run_away", "scream", "look_for_woodcutter", "kiss_on_cheek", "approach", "offer_food", "flirt_with")
rownames(actions) <- rownames(qualities)
actions
data <- cbind(qualities, actions)
# Train the neural network (one hidden layer of 3 units, sigmoid output).
set.seed(123) # for reproducibility
neuralnetwork <- neuralnet(run_away + scream + look_for_woodcutter + kiss_on_cheek + approach +
                             offer_food + flirt_with ~
                             big_ears + big_eyes + big_teeth + kindly + wrinkled + handsome,
                           data = data, hidden = 3, linear.output = FALSE)
# Plot the network structure.
par_bkp <- par(mar = c(0, 0, 0, 0)) # use smaller margins to minimize text cutoff
plotnet(neuralnetwork, bias = FALSE)
par(par_bkp)
# Which actions did the network learn? (fitted outputs rounded to 0/1)
round(neuralnetwork$net.result[[1]])
|
/caperucita_example.R
|
no_license
|
GenoKiller777/PythonMyFirstNeuronalNetwork
|
R
| false
| false
| 1,704
|
r
|
# Red Neuranal R ----------------------------------------------------------
#########################
# Implementando #
# Redes Neuronales en R #
#########################
install.packages("NeuralNetTools")
install.packages("neuralnet")
library(neuralnet)
library(NeuralNetTools)
# cualidades y acciones del código
qualities <- matrix (c(1, 1, 1, 0, 0, 0,
0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1), byrow = TRUE, nrow = 3)
colnames(qualities) <- c("big_ears", "big_eyes", "big_teeth", "kindly", "wrinkled", "handsome")
rownames(qualities) <- c("wolf", "grannie", "woodcutter")
qualities
actions <- matrix (c(1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0,
0, 0, 0, 1, 0, 1, 1), byrow = TRUE, nrow = 3)
colnames(actions) <- c("run_away", "scream", "look_for_woodcutter", "kiss_on_cheek", "approach", "offer_food", "flirt_with")
rownames(actions) <- rownames(qualities)
actions
data <- cbind(qualities, actions)
# Entrenar la neural network (NN)
set.seed(123) # para reproductabilidad
neuralnetwork <- neuralnet(run_away + scream + look_for_woodcutter + kiss_on_cheek + approach +
offer_food + flirt_with ~
big_ears + big_eyes + big_teeth + kindly + wrinkled + handsome,
data = data, hidden = 3, linear.output = FALSE)
# Graficar la NN
par_bkp <- par(mar = c(0, 0, 0, 0)) # establecer un margen diferente para minimizar el texto de cutoff
plotnet(neuralnetwork, bias = FALSE)
par(par_bkp)
# ¿Qué acciones aprendió la red?
round(neuralnetwork$net.result[[1]])
|
# Reading, naming and subsetting power consumption data
# NOTE(review): skip=1 drops the file's header row, so column names are
# assigned manually below and values arrive as character/factor, coerced
# with as.numeric(as.character(...)) at plot time.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target days: 1-2 February 2007.
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
# NOTE(review): strptime() without a date component defaults to today; the
# two format() assignments pin the first/second 1440 minutes to the correct
# calendar days, which assumes exactly 2880 complete, ordered rows.
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# initiating a composite plot with many graphs (2x2 grid)
par(mfrow=c(2,2))
# calling the basic plot function that calls different plot functions to build the 4 plots that form the graph
with(subpower,{
  plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l", xlab="",ylab="Global Active Power")
  plot(subpower$Time,as.numeric(as.character(subpower$Voltage)), type="l",xlab="datetime",ylab="Voltage")
  ## Third panel: empty frame, then one line per sub-meter plus a legend.
  plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
  with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
  legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
  plot(subpower$Time,as.numeric(as.character(subpower$Global_reactive_power)),type="l",xlab="datetime",ylab="Global_reactive_power")
})
# Copy the on-screen 2x2 figure to a PNG file.
dev.copy(png,file="Plot4.png")
dev.off()
|
/Plot4.R
|
no_license
|
GeorgePergeris/ExData_Plotting1
|
R
| false
| false
| 1,834
|
r
|
# Reading, naming and subsetting power consumption data
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# initiating a composite plot with many graphs
par(mfrow=c(2,2))
# calling the basic plot function that calls different plot functions to build the 4 plots that form the graph
with(subpower,{
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l", xlab="",ylab="Global Active Power")
plot(subpower$Time,as.numeric(as.character(subpower$Voltage)), type="l",xlab="datetime",ylab="Voltage")
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
plot(subpower$Time,as.numeric(as.character(subpower$Global_reactive_power)),type="l",xlab="datetime",ylab="Global_reactive_power")
})
dev.copy(png,file="Plot4.png")
dev.off()
|
library(tidyverse)
library(caret)
library(doParallel)
library(ROCR)
# Helper: compute the AUC (area under the ROC curve) for predicted scores
# against true labels using ROCR, reporting the value as a message.
calAUC <- function(predictions, labels){
  perf <- performance(prediction(predictions, labels), "auc")
  auc_value <- as.numeric(perf@y.values)
  message(paste0("> AUC:", auc_value))
  auc_value
}
# Train an XGBoost model on the selectively-labelled ("selective labels")
# data set. bank_train_sl carries the black-box decision column `bb`;
# alloc_pred holds predicted selection probabilities (column `TRUE`);
# Threshold is the cutoff above which the pseudo-label is trusted.
# NOTE(review): uses the globally defined `ctrl` trainControl object, and
# caret's tuning argument is spelled tuneLength — "tunelength" below is
# passed through ... and is presumably ignored; confirm.
trainXGB_withSSL <- function(bank_train_sl, alloc_pred, Threshold = 0.9){
  bank_train_with_semiLabels <- bank_train_sl %>%
    mutate(alloc_pred = alloc_pred$`TRUE`,
           R = alloc_pred > Threshold,
           argment = alloc_pred < 0.05,
           y = y %>% as.integer(.)-1) %>%
    filter(bb == TRUE | argment == TRUE) %>%
    mutate(y_tmp = if_else(bb == FALSE, -1, y)) %>%
    mutate(label_Ds = if_else(R, as.integer(bb) - 1, 0),
           label_Ys = if_else(R, as.character(y_tmp), "0"))
  #print(dim(bank_train_with_semiLabels))
  # Final model fit on the pseudo-labelled data (first 16 feature columns).
  model <- train(label_Ys ~ .,
                 data = bank_train_with_semiLabels %>% select(1:16, label_Ys),
                 method = "xgbTree",
                 metric = "Kappa",
                 tunelength = 4,
                 preProcess = c("center", "scale"),
                 trControl = ctrl)
  return(model)
}
# Parallel processing with 4 workers (run detectCores() to see how many
# cores are available on this machine).
cl <- makePSOCKcluster(4)
registerDoParallel(cl)
ctrl <- trainControl(method = "cv",
                     number = 4,
                     selectionFunction = "best")
# Load the data.
bank_df_org <- read.csv("data/bank-full.csv")
bank_df <- bank_df_org %>%
  mutate(y = as.factor(y))
# Split row indices into 10 folds.
N <- 10
holdout <- split(sample(1:nrow(bank_df)), 1:N)
all_auc <- c()
all_samplen <- c()
# NOTE(review): only the first fold is run (1:1) even though 10 folds are
# created above — change to 1:N for the full cross-validation.
for(i in 1:1){
  message(paste0("#### N=",i," ####"))
  auc <- c()
  samplen <- c()
  bank_test <- bank_df %>%
    slice(holdout[[i]])
  # NOTE(review): anti_join's argument is `by`, not `key`; as written it
  # joins on all shared columns, which may also drop duplicate rows.
  bank_train <- bank_df %>%
    anti_join(bank_test, key = "id")
  bank_pre_train <- bank_train %>%
    sample_frac(0.4)
  bank_train <- bank_train %>%
    anti_join(bank_pre_train, key = "id")
  # Build the screening (black-box) model on the pre-training sample.
  model_make_blackbox <- train(y ~ .,
                               data = bank_pre_train,
                               method = "xgbTree",
                               trControl = ctrl,
                               verbose = TRUE)
  # Generate labels representing the black-box decisions (selected when
  # predicted probability of "yes" exceeds 0.15).
  pred_lavels <- predict(model_make_blackbox, bank_train, type = "prob")
  print(table(pred_lavels$yes > 0.15, bank_train$y))
  select_labels <- pred_lavels$yes > 0.15
  bank_train_sl <- bank_train %>%
    mutate(bb = as.factor(select_labels))
  # Check predictive accuracy on the test data.
  # (1) Accuracy when training only on the selectively-labelled subset.
  bank_sl.train <- bank_train_sl %>%
    filter(bb == TRUE) %>%
    select(-bb) %>%
    mutate(y = as.factor(y))
  model_xgb_tune <- train(y ~ .,
                          data = bank_sl.train,
                          method = "xgbTree",
                          metric = "Kappa",
                          tunelength = 4,
                          preProcess = c("center", "scale"),
                          trControl = ctrl,
                          verbose = TRUE)
  pred <- predict(model_xgb_tune, newdata = bank_test, type = "prob")
  auc[1] <- calAUC(pred$yes,bank_test$y)
  samplen[1] <- nrow(bank_sl.train)
  # (2) Baseline without selective labels: train on everything.
  bank_sl.train_all <- bank_train_sl %>%
    select(-bb) %>%
    mutate(y = as.factor(y))
  model_xgb_tune2 <- train(y ~ .,
                           data = bank_sl.train_all,
                           method = "xgbTree",
                           metric = "Kappa",
                           tunelength = 4,
                           preProcess = c("center", "scale"),
                           trControl = ctrl)
  pred2 <- predict(model_xgb_tune2, newdata = bank_test, type = "prob")
  auc[2] <- calAUC(pred2$yes, bank_test$y)
  samplen[2] <- nrow(bank_sl.train)
  # (3) Fit the allocation model (probability of being selected) with
  # random forest, then train semi-supervised XGB at three thresholds.
  allocation_model <- train(bb ~ .,
                            data = bank_train_sl[,-c(17)],
                            method = "rf",
                            trControl = ctrl,
                            verbose = TRUE
  )
  alloc_pred <- predict(allocation_model, newdata = bank_train_sl[,-c(17)], type = "prob")
  hist(alloc_pred$`TRUE`)
  # Final evaluation at Threshold = 0.9 / 0.8 / 0.7.
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.9)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[3] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[3] <- nrow(model_test$trainingData)
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.8)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[4] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[4] <- nrow(model_test$trainingData)
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.7)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[5] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[5] <- nrow(model_test$trainingData)
  all_auc <- rbind(all_auc, auc)
  all_samplen <- rbind(all_samplen, samplen)
}
# Release the parallel workers and persist the results.
stopCluster(cl)
write.csv(all_auc, "all_auc_bank.csv")
write.csv(all_samplen, "all_samplen_bank.csv")
|
/selective_labels_program.R
|
no_license
|
saltcooky/Learning_under_selective_labels
|
R
| false
| false
| 5,484
|
r
|
library(tidyverse)
library(caret)
library(doParallel)
library(ROCR)
#AUCを算出するための関数
calAUC <- function(predictions, labels){
pred <- prediction(predictions, labels)
auc.tmp <- performance(pred,"auc")
auc <- as.numeric(auc.tmp@y.values)
message(paste0("> AUC:",auc))
return(auc)
}
# selective lavelsのデータセットのXBG学習を行う関数
trainXGB_withSSL <- function(bank_train_sl, alloc_pred, Threshold = 0.9){
bank_train_with_semiLabels <- bank_train_sl %>%
mutate(alloc_pred = alloc_pred$`TRUE`,
R = alloc_pred > Threshold,
argment = alloc_pred < 0.05,
y = y %>% as.integer(.)-1) %>%
filter(bb == TRUE | argment == TRUE) %>%
mutate(y_tmp = if_else(bb == FALSE, -1, y)) %>%
mutate(label_Ds = if_else(R, as.integer(bb) - 1, 0),
label_Ys = if_else(R, as.character(y_tmp), "0"))
#print(dim(bank_train_with_semiLabels))
# 最終的なテスト
model <- train(label_Ys ~ .,
data = bank_train_with_semiLabels %>% select(1:16, label_Ys),
method = "xgbTree",
metric = "Kappa",
tunelength = 4,
preProcess = c("center", "scale"),
trControl = ctrl)
return(model)
}
# Parallel backend: 4 workers (run detectCores() to see what is available).
cl <- makePSOCKcluster(4)
registerDoParallel(cl)
# Shared caret resampling scheme: 4-fold CV, best-model selection.
ctrl <- trainControl(method = "cv",
                     number = 4,
                     selectionFunction = "best")
# Load the bank-marketing data; the outcome y becomes a factor.
bank_df_org <- read.csv("data/bank-full.csv")
bank_df <- bank_df_org %>%
  mutate(y = as.factor(y))
# Randomly partition row indices into N = 10 cross-validation folds.
# (seq_len() instead of 1:nrow() guards against the zero-row edge case.)
N <- 10
holdout <- split(sample(seq_len(nrow(bank_df))), 1:N)
all_auc <- c()       # one row of AUCs per processed fold
all_samplen <- c()   # one row of training-set sizes per processed fold
# One experiment per CV fold: build a screening ("black-box") model, derive
# selective labels from it, then compare five training regimes on the same
# held-out fold:
#   [1] accepted (selectively labelled) subset only   [2] full training data
#   [3-5] semi-supervised pseudo-labelling at thresholds 0.9 / 0.8 / 0.7
# NOTE(review): `holdout` has N = 10 folds but only fold 1 runs here
# (`1:1`) -- presumably shortened for runtime; use seq_len(N) for the full
# 10-fold experiment.
for(i in 1:1){
  message(paste0("#### N=",i," ####"))
  auc <- c()        # AUC on bank_test for each of the 5 variants
  samplen <- c()    # number of training rows used by each variant
  bank_test <- bank_df %>%
    slice(holdout[[i]])
  # anti_join matches on all common columns (the data has no id key; the
  # former `key = "id"` argument was not an anti_join parameter and was
  # silently ignored, so it has been removed).
  bank_train <- bank_df %>%
    anti_join(bank_test)
  # 40% of the pool trains the screening model; the rest is the study set.
  bank_pre_train <- bank_train %>%
    sample_frac(0.4)
  bank_train <- bank_train %>%
    anti_join(bank_pre_train)
  # Screening (black-box decision-maker) model.
  model_make_blackbox <- train(y ~ .,
                               data = bank_pre_train,
                               method = "xgbTree",
                               trControl = ctrl,
                               verbose = TRUE)
  # Acceptance flag bb: screening model predicts P(y == "yes") above 0.15.
  pred_labels <- predict(model_make_blackbox, bank_train, type = "prob")
  print(table(pred_labels$yes > 0.15, bank_train$y))
  select_labels <- pred_labels$yes > 0.15
  bank_train_sl <- bank_train %>%
    mutate(bb = as.factor(select_labels))
  # Variant 1: train only on the accepted (selectively labelled) rows.
  bank_sl.train <- bank_train_sl %>%
    filter(bb == TRUE) %>%
    select(-bb) %>%
    mutate(y = as.factor(y))
  model_xgb_tune <- train(y ~ .,
                          data = bank_sl.train,
                          method = "xgbTree",
                          metric = "Kappa",
                          tunelength = 4,
                          preProcess = c("center", "scale"),
                          trControl = ctrl,
                          verbose = TRUE)
  pred <- predict(model_xgb_tune, newdata = bank_test, type = "prob")
  auc[1] <- calAUC(pred$yes,bank_test$y)
  samplen[1] <- nrow(bank_sl.train)
  # Variant 2: train on all rows (no selective labelling).
  bank_sl.train_all <- bank_train_sl %>%
    select(-bb) %>%
    mutate(y = as.factor(y))
  model_xgb_tune2 <- train(y ~ .,
                           data = bank_sl.train_all,
                           method = "xgbTree",
                           metric = "Kappa",
                           tunelength = 4,
                           preProcess = c("center", "scale"),
                           trControl = ctrl)
  pred2 <- predict(model_xgb_tune2, newdata = bank_test, type = "prob")
  auc[2] <- calAUC(pred2$yes, bank_test$y)
  # Fixed: previously recorded nrow(bank_sl.train) (the Variant-1 subset)
  # instead of the full training set actually used here.
  samplen[2] <- nrow(bank_sl.train_all)
  # Allocation model (random forest) predicting the acceptance flag bb from
  # the features (column 17 -- the outcome y -- is dropped).
  allocation_model <- train(bb ~ .,
                            data = bank_train_sl[,-c(17)],
                            method = "rf",
                            trControl = ctrl,
                            verbose = TRUE
  )
  alloc_pred <- predict(allocation_model, newdata = bank_train_sl[,-c(17)], type = "prob")
  hist(alloc_pred$`TRUE`)
  # Variants 3-5: semi-supervised pseudo-labelling at decreasing thresholds.
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.9)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[3] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[3] <- nrow(model_test$trainingData)
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.8)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[4] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[4] <- nrow(model_test$trainingData)
  model_test <- trainXGB_withSSL(bank_train_sl, alloc_pred, Threshold = 0.7)
  pred_lst <- predict(model_test, newdata = bank_test, type = "prob")
  auc[5] <- calAUC(pred_lst$`1`,bank_test$y)
  samplen[5] <- nrow(model_test$trainingData)
  all_auc <- rbind(all_auc, auc)
  all_samplen <- rbind(all_samplen, samplen)
}
# Shut down the parallel workers and persist the per-fold results.
stopCluster(cl)
write.csv(all_auc, "all_auc_bank.csv")
write.csv(all_samplen, "all_samplen_bank.csv")
|
#' Comparing two Kaplan Meier curves in one plot
#'
#' @description The function compares two Kaplan Meier curves in one plot
#'
#' @param x1 Nx2 data matrix, first column represents survival time of the i-th subject, second column represents censored flag (0 if not censored, 1 if censored)
#' @param x2 Nx2 data matrix, first column represents survival time of the i-th subject, second column represents censored flag (0 if not censored, 1 if censored)
#' @param pos The position of the legend. Can be 0 or 1. The legend will be
#' on the topright if set to 0. The legend will be on the bottomleft if set to 1. Default is 0.
#'
#' @importFrom graphics plot
#' @importFrom graphics points
#' @importFrom graphics lines
#' @importFrom graphics title
#' @importFrom graphics legend
#'
#' @return
#' A combined Kaplan Meier curve
#'
#' @export
#'
#' @examples
#' t1 <- c(2,3,4,5.5,7,10,12,15)
#' c1 <- c(0,0,1,0,0,1,0,0)
#' t2 <- c(1,3,5,4,8,10,9,11)
#' c2 <- c(0,0,0,0,1,0,0,0)
#' x1<-cbind(t1,c1)
#' x2<-cbind(t2,c2)
#' km_combine(x1,x2)
#' km_combine(x1,x2,pos=1)
km_combine <- function(x1, x2, pos = 0){
  # Validate pos up front: previously any value other than 0/1 left
  # `position` undefined and produced an obscure error inside legend().
  if (!pos %in% c(0, 1)) {
    stop("`pos` must be 0 (topright) or 1 (bottomleft)", call. = FALSE)
  }
  # Kaplan-Meier estimates for both arms (kmvalue() is the package helper
  # returning step coordinates t1/T1 and censoring marks xcg/ycg; its
  # table1/table12 components are not needed for plotting and are no
  # longer extracted).
  kmout1 <- kmvalue(x1)
  kmout2 <- kmvalue(x2)
  t1 <- kmout1$t1
  T1 <- kmout1$T1
  xcg1 <- kmout1$xcg
  ycg1 <- kmout1$ycg
  t2 <- kmout2$t1
  T2 <- kmout2$T1
  xcg2 <- kmout2$xcg
  ycg2 <- kmout2$ycg
  # Base plot: arm 1 as a dashed blue step curve with censoring crosses,
  # then overlay arm 2 in red. (xlab/ylab/ylim on the former lines() call
  # were ignored by lines() and have been dropped.)
  plot(t1, T1, type = "s", xlab = "Time", ylab = "Estimated survival functions",
       lty = 2, col = "blue", ylim = c(0, 1))
  points(xcg1, ycg1, cex = .8, pch = 3, col = "black")
  lines(t2, T2, type = "s", lty = 2, col = "red")
  points(xcg2, ycg2, cex = .8, pch = 3, col = "black")
  title('Kaplan-Meier estimate of survival functions')
  position <- if (pos == 0) "topright" else "bottomleft"
  legend(position, legend = c("Treatment 1", "Treatment 2", "Censored"),
         col = c("blue", "red", "black"), lty = c(2, 2, NA),
         pch = c(NA, NA, 3), cex = 0.8)
}
|
/R/km_combine.R
|
no_license
|
baihongguo/RPEXE.RPEXT-1
|
R
| false
| false
| 2,025
|
r
|
#' Overlay two Kaplan-Meier curves on a single plot
#'
#' @description Draws the Kaplan-Meier survival estimates for two treatment
#' arms on one set of axes, marking censored observations with crosses.
#'
#' @param x1 Nx2 data matrix; column 1 holds each subject's survival time,
#' column 2 the censoring flag (0 if not censored, 1 if censored)
#' @param x2 Nx2 data matrix; column 1 holds each subject's survival time,
#' column 2 the censoring flag (0 if not censored, 1 if censored)
#' @param pos Legend position selector: 0 puts the legend at the top right,
#' 1 at the bottom left. Default is 0.
#'
#' @importFrom graphics plot
#' @importFrom graphics points
#' @importFrom graphics lines
#' @importFrom graphics title
#' @importFrom graphics legend
#'
#' @return
#' A combined Kaplan Meier curve
#'
#' @export
#'
#' @examples
#' t1 <- c(2,3,4,5.5,7,10,12,15)
#' c1 <- c(0,0,1,0,0,1,0,0)
#' t2 <- c(1,3,5,4,8,10,9,11)
#' c2 <- c(0,0,0,0,1,0,0,0)
#' x1<-cbind(t1,c1)
#' x2<-cbind(t2,c2)
#' km_combine(x1,x2)
#' km_combine(x1,x2,pos=1)
km_combine <- function(x1, x2, pos = 0){
  # Kaplan-Meier estimates for the two arms.
  km_a <- kmvalue(x1)
  km_b <- kmvalue(x2)
  table1 <- km_a$table1
  table12 <- km_a$table12
  # Step coordinates (time, survival) and censoring marks per arm.
  time_a <- km_a$t1
  surv_a <- km_a$T1
  cens_x_a <- km_a$xcg
  cens_y_a <- km_a$ycg
  time_b <- km_b$t1
  surv_b <- km_b$T1
  cens_x_b <- km_b$xcg
  cens_y_b <- km_b$ycg
  # Draw arm 1 as the base plot, then overlay arm 2 and the censor marks.
  plot(time_a, surv_a, type = "s", xlab = "Time",
       ylab = "Estimated survival functions", lty = 2, col = "blue",
       ylim = c(0, 1))
  points(cens_x_a, cens_y_a, cex = .8, pch = 3, col = "black")
  lines(time_b, surv_b, type = "s", xlab = "Time",
        ylab = "Estimated survival functions", lty = 2, col = "red",
        ylim = c(0, 1))
  points(cens_x_b, cens_y_b, cex = .8, pch = 3, col = "black")
  title('Kaplan-Meier estimate of survival functions')
  # Legend placement; any other pos value leaves `position` undefined,
  # matching the original behavior.
  if (pos == 0)
    position <- "topright"
  if (pos == 1)
    position <- "bottomleft"
  legend(position, legend = c("Treatment 1", "Treatment 2", "Censored"),
         col = c("blue", "red", "black"), lty = c(2, 2, NA),
         pch = c(NA, NA, 3), cex = 0.8)
}
|
library(ggplot2)
library(dplyr)
library(tidyr)
# Output directory for saved figures/tables (used by ggsave/here below).
# Fixed: top-level assignment used `=`; `<-` per the style guide.
out.dir <- "output"
# NOTE(review): `data.dir` is read below but never defined in this script --
# presumably supplied by the calling environment (gam_calcs_strata.R is
# referenced elsewhere in this file); confirm before running stand-alone.
#Mean GOM zooplankton abundance anomalies from Ryan Morse (GOM_mean_seasonal_anomalies.csv)
GOMseasonZooAbund <- readr::read_csv(here::here(data.dir, "GOM_mean_seasonal_anomalies.csv"))
# Per-year total anomaly across the four copepod taxa.
# NOTE(review): the object is named "summer" but the filter keeps Spring.
GOMsummerZoop <- GOMseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Spring') %>%
  dplyr::group_by(year) %>%
  dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, mlucens_100m3, pseudo_100m3)) %>%
  dplyr::select(year, SumZoop)
## --- Gulf of Maine, spring zooplankton: regime-shift analysis -------------
## Fit a regression tree of the summed anomaly on year (rpart), prune to the
## complexity parameter with minimum cross-validated error, and plot the
## series with candidate regime-shift years as red vertical lines.
#Regime analysis:
GOMZoopRegime <- GOMsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GOMZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
saveRDS(Regime[["cptable"]],file = here::here("output", "GOMspringZoop_Regimes_2021.RDS"))
# Fixed: printcp() was called unqualified although rpart is only used via
# rpart:: and never attached with library(); qualify it so the call resolves.
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# NOTE(review): if the pruned tree has fewer than 5 splits the trailing
# SppSplit* values are NA and the corresponding geom_vline()s draw nothing.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change year to continuous numeric for plotting function below:
GOMZoopRegime$year <- as.numeric(as.character(GOMZoopRegime$year))
#Line plot of the anomaly series with regime-shift years marked:
p2 <- ggplot(GOMZoopRegime, aes(x = year, y = SumZoop)) +
  geom_line()+
  geom_point() +
  labs(title= "GOM Spring Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GOMspringZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
## --- Gulf of Maine, summer zooplankton: regime-shift analysis -------------
GOMseasonZooAbund <- readr::read_csv(here::here(data.dir, "GOM_mean_seasonal_anomalies.csv"))
GOMsummerZoop <- GOMseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Summer') %>%
  dplyr::group_by(year) %>%
  dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, mlucens_100m3, pseudo_100m3)) %>%
  dplyr::select(year, SumZoop)
#Regime analysis (same tree-fit/prune procedure as the spring section):
GOMZoopRegime <- GOMsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GOMZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "GOMsummerZoopAnom_Regimes_2021.RDS"))
# Fixed: qualify printcp() -- rpart is never attached with library().
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years (NA when the pruned tree has fewer than 5 splits):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change year to continuous numeric for plotting below:
GOMZoopRegime$year <- as.numeric(as.character(GOMZoopRegime$year))
#Line plot with regime-shift years as red vertical lines:
p2 <- ggplot(GOMZoopRegime, aes(x = year, y = SumZoop)) +
  geom_line()+
  geom_point() +
  labs(title= "GOM Summer Zooplankton Abundance Anomalies", y = "Zooplankton Anomaly") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GOMsummerZoopAnom_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
## --- Georges Bank, summer zooplankton: regime-shift analysis --------------
#Mean GB zooplankton abundance anomalies from Ryan Morse (GBK_mean_seasonal_anomalies.csv)
GBseasonZooAbund <- readr::read_csv(here::here(data.dir, "GBK_mean_seasonal_anomalies.csv"))
GBsummerZoop <- GBseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Summer') %>%
  dplyr::group_by(year) %>%
  dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, chaeto_100m3, cham_100m3, para_100m3, pseudo_100m3)) %>%
  dplyr::select(year, SumZoop)
#Regime analysis (same tree-fit/prune procedure as the GOM sections):
GBZoopRegime <- GBsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GBZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "GBsummerZoop_Regimes_2021.RDS"))
# Fixed: qualify printcp() -- rpart is never attached with library().
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years (NA when the pruned tree has fewer than 5 splits):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change year to continuous numeric for plotting below:
GBZoopRegime$year <- as.numeric(as.character(GBZoopRegime$year))
#Line plot with regime-shift years as red vertical lines:
p2 <- ggplot(GBZoopRegime, aes(x = year, y = SumZoop)) +
  geom_line()+
  geom_point() +
  labs(title= "GB Summer Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GBsummerZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
## --- Mid-Atlantic Bight, spring zooplankton: regime-shift analysis --------
#Mean MAB zooplankton abundance anomalies from Ryan Morse (MAB_mean_seasonal_anomalies.csv)
MABseasonZooAbund <- readr::read_csv(here::here(data.dir, "MAB_mean_seasonal_anomalies.csv"))
# NOTE(review): the object is named "summer" but the filter keeps Spring.
MABsummerZoop <- MABseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Spring') %>%
  dplyr::group_by(year) %>%
  dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, tlong_100m3, pseudo_100m3)) %>%
  dplyr::select(year, SumZoop)
#Regime analysis (same tree-fit/prune procedure as the sections above):
MABZoopRegime <- MABsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=MABZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "MABspringZoop_Regimes_2021.RDS"))
# Fixed: qualify printcp() -- rpart is never attached with library().
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years (NA when the pruned tree has fewer than 5 splits):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change year to continuous numeric for plotting below:
MABZoopRegime$year <- as.numeric(as.character(MABZoopRegime$year))
#Line plot with regime-shift years as red vertical lines:
p2 <- ggplot(MABZoopRegime, aes(x = year, y = SumZoop)) +
  geom_line()+
  geom_point() +
  labs(title= "MAB Spring Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"MABspringZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
## --- Mid-Atlantic Bight, fall zooplankton: regime-shift analysis ----------
#MAB fall Zooplankton anomaly from Ryan Morse:
MABseasonZooAbund <- readr::read_csv(here::here(data.dir, "MAB_mean_seasonal_anomalies.csv"))
MABfallZoop <- MABseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Fall') %>%
  dplyr::group_by(year) %>%
  dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, tlong_100m3, pseudo_100m3)) %>%
  dplyr::select(year, SumZoop)
#Regime analysis (same tree-fit/prune procedure as the sections above):
MABZoopRegime <- MABfallZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=MABZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "MABfallZoopAnom_Regimes_2021.RDS"))
# Fixed: qualify printcp() -- rpart is never attached with library().
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years (NA when the pruned tree has fewer than 5 splits):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change year to continuous numeric for plotting below:
MABZoopRegime$year <- as.numeric(as.character(MABZoopRegime$year))
#Line plot with regime-shift years as red vertical lines:
p2 <- ggplot(MABZoopRegime, aes(x = year, y = SumZoop)) +
  geom_line()+
  geom_point() +
  labs(title= "MAB Fall Zooplankton Abundance Anomalies", y = "Total Zooplankton Anomaly") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"MABfallZoopAnom_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Automate zooplankton regime shifts from Harvey's data: EcoMon_ZooplanktonData2021_BTSMeanAbundance.csv from gam_calcs_strata.R
#Dataset of zooplankton for regime shift
# Keep the three index columns per YEAR/EPU/SEASONS, dropping rows with a
# missing EPU or season. (ZoopDataSeasonEPU is created elsewhere --
# presumably gam_calcs_strata.R; confirm before running stand-alone.)
ZoopSeasonEPU <- ZoopDataSeasonEPU %>% dplyr::select(YEAR, EPU, SEASONS,
                                                     CopepodSmLgEPU,TotCopEPU,ZoopAbundEPU) %>%
  dplyr::filter(!is.na(EPU)) %>%
  dplyr::filter(!is.na(SEASONS)) %>%
  dplyr::distinct()
# Reshape to long form: one row per (YEAR, EPU, SEASONS, zooplankton index).
# Fixed: tidyr::gather() is superseded; pivot_longer() is the supported
# equivalent and yields the same columns in the same order.
Zoop1 <- ZoopSeasonEPU %>%
  tidyr::pivot_longer(cols = -c(YEAR, EPU, SEASONS),
                      names_to = "ZoopName", values_to = "ZoopData")
# Composite key "<index>_<EPU>" identifying each series.
Zoop <- Zoop1 %>% dplyr::select(YEAR, EPU, SEASONS, ZoopName, ZoopData) %>%
  dplyr::mutate(ZoopIndex=paste(ZoopName, EPU, sep="_"))
####Automating regime shift plots for zooplankton data by EPU:
# Character vector of series names. (The redundant unique() before
# distinct() was dropped; distinct() already de-duplicates.)
ZoopList <- Zoop %>%
  dplyr::distinct(ZoopIndex) %>%
  dplyr::pull()
#Only select Species with names that have sufficient data:
# numZoop <- length(ZoopList)
# for (i in numZoop:1) {
# if (!is.na(as.numeric(ZoopList[i]))) {
# ZoopList <-ZoopList[-i]
# }
# }
#
# #loop over zooplankton index:
# for (aZoop in ZoopList) {
# print(aZoop)
#
#
# #Test for regime shifts in each zooplankton index (same method as in Perretti et al. 2017, although Perretti uses MRT, gives error when method="mrt"):
# ZoopRegDat <- Zoop %>% dplyr::filter(ZoopIndex == aZoop) %>% dplyr::select(ZoopData, ZoopIndex, YEAR, SEASONS)
# Regime <- rpart::rpart(ZoopData~YEAR, data=ZoopRegDat)
# #Selecting best fit (gives optimal CP value associated with the minimum error)::
# # Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
#
# ZoopRegimePlot <- rpart.plot::rpart.plot(Regime)
#
# # Prettier plot of pruned tree (not currently working):
# # library(rpart.plot)
# # library(RColorBrewer)
#
#
#
# # ptree<- prune(Regime,
# # + cp= Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"])
# # fancyRpartPlot(ptree, uniform=TRUE,
# # + main="Pruned Classification Tree")
# #plotcp
#
#
# #Outputs pruning tree table:
# saveRDS(Regime[["cptable"]],file = here::here("output","RegimeShifts", paste0(aZoop,"_Zooplankton_Regimes.RDS")))
# # printcp(Regime)
#
# #Select best pruned tree (outputs the row of the cptable that has the number of splits with the lowest error (xerror)
# # Used rpart::prune
# optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
# optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
# Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
# Regime <- Regime_pruned
#
#
# #Pull regime shift years into new data frame to add to plot (use the simplest tree
# #within one standard error (xstd) of the best tree (lowest xerror)):
# Results <- as.data.frame(Regime[["splits"]])
# SppSplit1 <- Results$index[1]
# SppSplit2 <- Results$index[2]
# SppSplit3 <- Results$index[3]
# SppSplit4 <- Results$index[4]
# SppSplit5 <- Results$index[5]
#
#
# ZoopDat <- Zoop
#
# #change YEAR to continuous numeric for plotting function below:
# ZoopDat$YEAR <- as.numeric(as.character(ZoopDat$YEAR))
#
# ZoopIndexRegime <- ZoopDat %>% dplyr::filter(YEAR >= 1992) %>%
# dplyr::select(YEAR, SEASONS, ZoopData) %>% group_by(SEASONS)
#
#
# #Line plot of condition
# p2 <- ggplot(ZoopDat, aes(x = YEAR, y = ZoopData)) +
# geom_line(aes(color = SEASONS)) +
# scale_color_manual(values = c("red", "blue", "green", "orange")) +
# # geom_errorbar(width=.1, aes(ymin=lower.ci.cond, ymax=upper.ci.cond), colour="black") +
# # geom_errorbar(width=.1, aes(ymin=lower.ci.cond, ymax=upper.ci.cond)) +
# geom_point(aes(color = SEASONS)) +
# labs(title= paste0(aZoop, " Regime Shifts by Season"), y = "aZoop") +
# geom_vline(xintercept=SppSplit1, color='red')+
# geom_vline(xintercept=SppSplit2, color='red')+
# geom_vline(xintercept=SppSplit3, color='red')+
# geom_vline(xintercept=SppSplit4, color='red')+
# geom_vline(xintercept=SppSplit5, color='red')+
# # ylim(0.85, 1.21)+
# xlim(1992, 2022)
#
# # ggsave(path= here::here("output","RegimeShifts"),paste0(aZoop, "_RegimeShifts.jpg"), width = 8, height = 3.75, units = "in", dpi = 300)
# }
#readRDS(file = here::here("output","RegimeShifts", paste0("American plaice_RelCondition_Regimes_Fall.RDS")))
##End automated regime shift plots by species
#FEMALE butterfish condition and regime shift:
# annualCondition <- FemButtCondPlot
#
# #change YEAR to continuous numeric for plotting function below:
# annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
#
# speciesNames <- annualCondition
#
# #Line plot of condition
# p2 <- ggplot(speciesNames, aes(x = YEAR, y = MeanCond)) +
# geom_line()+
# geom_point() +
# labs(title="Female Butterfish Relative Condition", y = "Relative Condition") +
# geom_vline(xintercept=FemButtSplit1, color='red')+
# geom_vline(xintercept=FemButtSplit2, color='red')
#
# ggsave(path= here::here(out.dir),"Female_Butterfish_ShelfCondition_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#
# #MALE butterfish condition and regime shift:
# annualCondition <- MaleButtCondPlot
#
# #change YEAR to continuous numeric for plotting function below:
# annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
#
# speciesNames <- annualCondition
#
# #Line plot of condition
# p2 <- ggplot(speciesNames, aes(x = YEAR, y = MeanCond)) +
# geom_line()+
# geom_point() +
# labs(title="Male Butterfish Relative Condition", y = "Relative Condition") +
# geom_vline(xintercept=MaleButtSplit1, color='red')+
# geom_vline(xintercept=MaleButtSplit2, color='red')
#
# ggsave(path= here::here(out.dir),"Male_Butterfish_ShelfCondition_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
# Seasonal average-bottom-temperature plots by EPU. The AvgTemp*Format data
# frames and the *Split1 regime-shift years are created elsewhere
# (presumably gam_calcs_strata.R -- confirm before running stand-alone).
# Each section overwrites p2; the ggsave() calls are all commented out.
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgSummerTemp <- AvgTempSummerFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempSummer) %>% group_by(EPU)
#Line plot of summer temp:
p2 <- ggplot(AvgSummerTemp, aes(x = YEAR, y = AvgTempSummer)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Summer Bottom Temperature by EPU", y = "Average Summer Bottom Temp") +
  geom_vline(xintercept=SummerSplit1, color='red')
#+
 # geom_vline(xintercept=SummerSplit2, color='red') +
 # geom_vline(xintercept=SummerSplit3, color='red') +
 # geom_vline(xintercept=SummerSplit4, color='red') +
 # geom_vline(xintercept=SummerSplit5, color='red')
#ggsave(path= here::here(out.dir),"AverageSummerBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgSpringTemp <- AvgTempSpringFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempSpring) %>% group_by(EPU)
#Line plot of spring temp:
p2 <- ggplot(AvgSpringTemp, aes(x = YEAR, y = AvgTempSpring)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Spring Bottom Temperature by EPU", y = "Average Spring Bottom Temp") +
  geom_vline(xintercept=SpringSplit1, color='red')
#+
 # geom_vline(xintercept=SpringSplit2, color='red') +
 # geom_vline(xintercept=SpringSplit3, color='red') +
 # geom_vline(xintercept=SpringSplit4, color='red')
#ggsave(path= here::here(out.dir),"AverageSpringBottomTempEPU2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgFallTemp <- AvgTempFallFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempFall) %>% group_by(EPU)
#Line plot of fall temp:
p2 <- ggplot(AvgFallTemp, aes(x = YEAR, y = AvgTempFall)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Fall Bottom Temperature by EPU", y = "Average Fall Bottom Temp") +
  geom_vline(xintercept=FallSplit1, color='red')
#ggsave(path= here::here(out.dir),"AverageFallBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgWinterTemp <- AvgTempWinterFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempWinter) %>% group_by(EPU)
#Line plot of winter temp:
p2 <- ggplot(AvgWinterTemp, aes(x = YEAR, y = AvgTempWinter)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Winter Bottom Temperature by EPU", y = "Average Winter Bottom Temp") +
  geom_vline(xintercept=WinterSplit1, color='red')
#ggsave(path= here::here(out.dir),"AverageWinterBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
# Copepod size-index, phytoplankton-bloom, and total-copepod plots.
# CalfinFormat, Fallbloom, TotalCopepods and the *Split* regime years are
# created elsewhere (presumably gam_calcs_strata.R -- confirm). Note the
# first two ggsave() calls below are ACTIVE and write into out.dir.
#Copepod small to large ratio data by EPU (from gam_calcs_strata.R):
CopepodEPUdata <- CalfinFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, CopepodSmallLarge) %>% group_by(EPU)
#Line plot of copepod small to large index:
p2 <- ggplot(CopepodEPUdata, aes(x = YEAR, y = CopepodSmallLarge)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Copepod Size Index by EPU", y = "Copepod Size Index") +
  geom_vline(xintercept=CopepodEPUSplit1, color='red') +
  geom_vline(xintercept=CopepodEPUSplit2, color='red')
# +
#   geom_vline(xintercept=CopepodEPUSplit3, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit4, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit5, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit6, color='red')
ggsave(path= here::here(out.dir),"CopepodSmLgEPU_regime2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#shelf-wide Copepod small to large ratio data by Shelf (from gam_calcs_strata.R):
CopepodShelfdata <- CalfinFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, CopepodSmallLarge)
#Line plot of copepod small to large index:
p2 <- ggplot(CopepodShelfdata, aes(x = YEAR, y = CopepodSmallLarge)) +
  geom_line() +
  scale_color_manual(values = "red") +
  geom_point() +
  labs(title="Shelf-wide Copepod Size Index", y = "Copepod Size Index") +
  geom_vline(xintercept=CopepodShelfSplit1, color='red') +
  geom_vline(xintercept=CopepodShelfSplit2, color='red')
# +
#   geom_vline(xintercept=CopepodShelfSplit3, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit4, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit5, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit6, color='red')
ggsave(path= here::here(out.dir),"CopepodSmLgShelf_regime2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall phytoplankton bloom magnitude data (from gam_calcs_strata.R):
FallBloomdata <- Fallbloom %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, RangeMagnitude)
#Line plot of fall phytoplankton mag:
p2 <- ggplot(FallBloomdata, aes(x = YEAR, y = RangeMagnitude)) +
  geom_line(color = "blue") +
  geom_point(color = "blue") +
  labs(title="Fall Phytoplankton Bloom Magnitude", y = "Fall Bloom Magnitude")
#ggsave(path= here::here(out.dir),"FallBloom_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall phytoplankton bloom duration data (from gam_calcs_strata.R):
FallBloomdata <- Fallbloom %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, RangeDuration)
#Line plot of fall phytoplankton duration:
p2 <- ggplot(FallBloomdata, aes(x = YEAR, y = RangeDuration)) +
  geom_line(color = "blue") +
  geom_point(color = "blue") +
  labs(title="Fall Phytoplankton Bloom Duration", y = "Fall Bloom Duration")
#ggsave(path= here::here(out.dir),"FallBloom_duration_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#"TotalCopepods","ZooplanktonBiomass","StomachFullness"
#Total Copepods data (from NEFSCZooplankton_v3_6b_v2018.xls from gam_calcs_strata.R):
TotCopData <- TotalCopepods %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, TotalCopepodsMillions) %>% group_by(EPU)
#Line plot of total copepods:
p2 <- ggplot(TotCopData, aes(x = YEAR, y = TotalCopepodsMillions)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green")) +
  geom_point(aes(color = EPU)) +
  labs(title="Total Copepods", y = "Total Copepods Millions") +
  geom_vline(xintercept=TotCopepodsSplit1, color='red') +
  geom_vline(xintercept=TotCopepodsSplit2, color='red')
#ggsave(path= here::here(out.dir),"TotalCopepods_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
## --- Spring total copepods: regime-shift analysis -------------------------
#Spring Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
#******Need to run these regime shifts separately by EPU:
# Per-year total copepod abundance summed over strata. (ZooSeason is
# created elsewhere -- confirm before running stand-alone.)
TotalCopSpr <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Spring') %>%
  dplyr::select(YEAR, TotalCopepodStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
  dplyr::select(YEAR, SumTotCop) %>%
  dplyr::distinct()
#Regime analysis (same tree-fit/prune procedure as the sections above):
CopRegime <- TotalCopSpr %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_spr2022.RDS"))
# Fixed: qualify printcp() -- rpart is never attached with library().
rpart::printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years (NA when the pruned tree has fewer than 5 splits):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot with regime-shift years as red vertical lines:
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Copepods Spring", y = "Total Copepods (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_spring.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Summer Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
TotalCopSummer <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Summer') %>%
dplyr::select(YEAR, TotalCopepodStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
dplyr::select(YEAR, SumTotCop) %>%
dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopSummer %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Summer2022.RDS"))
printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
geom_line()+
geom_point() +
labs(title= "Total Copepods Summer", y = "Total Copepods (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Summer.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
TotalCopFall <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Fall') %>%
dplyr::select(YEAR, TotalCopepodStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
dplyr::select(YEAR, SumTotCop) %>%
dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopFall %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Fall2022.RDS"))
#printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
geom_line()+
geom_point() +
labs(title= "Total Copepods Fall", y = "Total Copepods (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Fall.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Winter Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
TotalCopWinter <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Winter') %>%
dplyr::select(YEAR, TotalCopepodStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
dplyr::select(YEAR, SumTotCop) %>%
dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopWinter %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Winter2022.RDS"))
#printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
geom_line()+
geom_point() +
labs(title= "Total Copepods Winter", y = "Total Copepods (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Winter.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Spring Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# Collapse strata-level Spring zooplankton abundance to one sum per year.
# FIX: na.rm = TRUE was previously passed to mutate() (where it merely created
# a throwaway column named "na.rm", dropped by the following select()) instead
# of to sum(), so any year containing an NA stratum summed to NA. It is now
# applied inside sum() as intended.
TotalZoopSpr <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Spring') %>%
dplyr::select(YEAR, ZooplAbundStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotalZooSpr = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
dplyr::select(YEAR, SumTotalZooSpr) %>%
dplyr::distinct()
#Regime analysis: regression tree of annual abundance on YEAR; the split
#points are candidate regime-shift years.
ZooRegime <- TotalZoopSpr %>% dplyr::select(SumTotalZooSpr, YEAR)
Regime <- rpart::rpart(SumTotalZooSpr~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Spring2022.RDS"))
#printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# NOTE(review): the code above selects the minimum-xerror CP, not the 1-SE
# tree this comment describes -- confirm which pruning rule is intended.
# Splits beyond those present in the pruned tree are NA and draw no line below.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooSpr)) +
geom_line()+
geom_point() +
labs(title= "Total Zooplankton Abundance Spring", y = "Total Zooplankton Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Spring.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Summer Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# Same pipeline as the Spring zooplankton block above, for Summer.
# FIX: same na.rm fix as in the Spring block -- na.rm now goes to sum().
TotalZoopSummer <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Summer') %>%
dplyr::select(YEAR, ZooplAbundStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotalZooSummer = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
dplyr::select(YEAR, SumTotalZooSummer) %>%
dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopSummer %>% dplyr::select(SumTotalZooSummer, YEAR)
Regime <- rpart::rpart(SumTotalZooSummer~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Summer2022.RDS"))
#printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooSummer)) +
geom_line()+
geom_point() +
labs(title= "Total Zooplankton Abundance Summer", y = "Total Zooplankton Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Summer.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# Same pipeline as the Spring zooplankton block above, for Fall.
# FIX: same na.rm fix as in the Spring block -- na.rm now goes to sum().
TotalZoopFall <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Fall') %>%
dplyr::select(YEAR, ZooplAbundStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotalZooFall = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
dplyr::select(YEAR, SumTotalZooFall) %>%
dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopFall %>% dplyr::select(SumTotalZooFall, YEAR)
Regime <- rpart::rpart(SumTotalZooFall~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Fall2022.RDS"))
#printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooFall)) +
geom_line()+
geom_point() +
labs(title= "Total Zooplankton Abundance Fall", y = "Total Zooplankton Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Fall.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Winter Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# Same pipeline as the Spring zooplankton block above, for Winter.
# FIX: same na.rm fix as in the Spring block -- na.rm now goes to sum().
TotalZoopWinter <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Winter') %>%
dplyr::select(YEAR, ZooplAbundStrata) %>%
dplyr::group_by(YEAR) %>%
dplyr::mutate(SumTotalZooWinter = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
dplyr::select(YEAR, SumTotalZooWinter) %>%
dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopWinter %>% dplyr::select(SumTotalZooWinter, YEAR)
Regime <- rpart::rpart(SumTotalZooWinter~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Winter2022.RDS"))
#printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooWinter)) +
geom_line()+
geom_point() +
labs(title= "Total Zooplankton Abundance Winter", y = "Total Zooplankton Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Winter.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Regime shifts in surface temp:
# Annual mean surface temperature from the condition data (cond.epu).
# NOTE(review): no season filter is applied here, yet the title and output
# names below say "Spring" -- presumably cond.epu holds spring-survey
# records only; confirm against the upstream script.
Surfdata <- cond.epu %>% dplyr::filter(YEAR >= 1992) %>%
dplyr::select(YEAR, SURFTEMP) %>%
dplyr::filter(!is.na(SURFTEMP)) %>%
dplyr::group_by(YEAR) %>%
dplyr::summarize(AvgSurfTemp = mean(SURFTEMP))
#Surface data for mackerel ESP:
#readr::write_csv(Surfdata, here::here(out.dir,"NEFSC_SpringSurfaceTemp2022.csv"))
#Regime analysis: regression tree of mean temperature on YEAR; split points
#are candidate regime-shift years.
SurfRegime <- Surfdata %>% dplyr::select(AvgSurfTemp, YEAR)
Regime <- rpart::rpart(AvgSurfTemp~YEAR, data=SurfRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "SurfaceTemp_Spring.RDS"))
#printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# NOTE(review): the code above selects the minimum-xerror CP, not the 1-SE
# tree this comment describes -- confirm which pruning rule is intended.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#annualZoopl <- SurfRegime
#change YEAR to continuous numeric for plotting function below:
SurfRegime$YEAR <- as.numeric(as.character(SurfRegime$YEAR))
AnnualSurfRegime <- SurfRegime
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(AnnualSurfRegime, aes(x = YEAR, y = AvgSurfTemp)) +
geom_line()+
geom_point() +
labs(title= "Average Spring Surface Temperature", y = "Average Surface Temperature") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"SurfaceTemp_Spring_Regimes2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
|
/R/RegimeShifts_EnvirVar.R
|
no_license
|
Laurels1/Condition
|
R
| false
| false
| 43,494
|
r
|
library(ggplot2)
library(dplyr)
library(tidyr)
out.dir="output"
#Mean GOM zooplankton abundance anomalies from Ryan Morse (GOM_mean_seasonal_anomalies.csv)
# NOTE(review): data.dir is assumed to be defined earlier in this script;
# confirm before running this section standalone.
GOMseasonZooAbund <- readr::read_csv(here::here(data.dir, "GOM_mean_seasonal_anomalies.csv"))
# Per-year sum of the four taxon anomaly columns (Spring).
# NOTE(review): this object is named "summer" but filters season == 'Spring';
# the titles/output names below do say Spring -- consider renaming.
# NOTE(review): unlike the strata-based sections, no distinct() is applied,
# so each year keeps one duplicated row per input record -- this weights the
# rpart fit by record count; confirm whether that is intended.
GOMsummerZoop <- GOMseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Spring') %>%
dplyr::group_by(year) %>%
dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, mlucens_100m3, pseudo_100m3)) %>%
dplyr::select(year, SumZoop)
#Regime analysis: regression tree of summed anomalies on year; split points
#are candidate regime-shift years.
GOMZoopRegime <- GOMsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GOMZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
saveRDS(Regime[["cptable"]],file = here::here("output", "GOMspringZoop_Regimes_2021.RDS"))
printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# NOTE(review): the code above selects the minimum-xerror CP, not the 1-SE
# tree this comment describes -- confirm which pruning rule is intended.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change YEAR to continuous numeric for plotting function below:
GOMZoopRegime$year <- as.numeric(as.character(GOMZoopRegime$year))
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(GOMZoopRegime, aes(x = year, y = SumZoop)) +
geom_line()+
geom_point() +
labs(title= "GOM Spring Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GOMspringZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
# Same pipeline as above, for the GOM Summer anomalies.
GOMseasonZooAbund <- readr::read_csv(here::here(data.dir, "GOM_mean_seasonal_anomalies.csv"))
GOMsummerZoop <- GOMseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Summer') %>%
dplyr::group_by(year) %>%
dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, mlucens_100m3, pseudo_100m3)) %>%
dplyr::select(year, SumZoop)
#Regime analysis:
GOMZoopRegime <- GOMsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GOMZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "GOMsummerZoopAnom_Regimes_2021.RDS"))
printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change YEAR to continuous numeric for plotting function below:
GOMZoopRegime$year <- as.numeric(as.character(GOMZoopRegime$year))
#Line plot of condition
p2 <- ggplot(GOMZoopRegime, aes(x = year, y = SumZoop)) +
geom_line()+
geom_point() +
labs(title= "GOM Summer Zooplankton Abundance Anomalies", y = "Zooplankton Anomaly") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GOMsummerZoopAnom_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Mean GB zooplankton abundance anomalies from Ryan Morse (GBK_mean_seasonal_anomalies.csv)
# Same regime-shift pipeline as the GOM sections, for Georges Bank (Summer),
# summing six taxon anomaly columns per year.
GBseasonZooAbund <- readr::read_csv(here::here(data.dir, "GBK_mean_seasonal_anomalies.csv"))
GBsummerZoop <- GBseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Summer') %>%
dplyr::group_by(year) %>%
dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, chaeto_100m3, cham_100m3, para_100m3, pseudo_100m3)) %>%
dplyr::select(year, SumZoop)
#Regime analysis: regression tree of summed anomalies on year.
GBZoopRegime <- GBsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=GBZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "GBsummerZoop_Regimes_2021.RDS"))
printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change YEAR to continuous numeric for plotting function below:
GBZoopRegime$year <- as.numeric(as.character(GBZoopRegime$year))
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(GBZoopRegime, aes(x = year, y = SumZoop)) +
geom_line()+
geom_point() +
labs(title= "GB Summer Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"GBsummerZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Mean MAB zooplankton abundance anomalies from Ryan Morse (MAB_mean_seasonal_anomalies.csv)
# Same regime-shift pipeline as the GOM sections, for the Mid-Atlantic Bight.
# NOTE(review): this object is named "summer" but filters season == 'Spring';
# the titles/output names below do say Spring -- consider renaming.
MABseasonZooAbund <- readr::read_csv(here::here(data.dir, "MAB_mean_seasonal_anomalies.csv"))
MABsummerZoop <- MABseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Spring') %>%
dplyr::group_by(year) %>%
dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, tlong_100m3, pseudo_100m3)) %>%
dplyr::select(year, SumZoop)
#Regime analysis: regression tree of summed anomalies on year.
MABZoopRegime <- MABsummerZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=MABZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "MABspringZoop_Regimes_2021.RDS"))
printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change YEAR to continuous numeric for plotting function below:
MABZoopRegime$year <- as.numeric(as.character(MABZoopRegime$year))
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(MABZoopRegime, aes(x = year, y = SumZoop)) +
geom_line()+
geom_point() +
labs(title= "MAB Spring Zooplankton Abundance Anomalies", y = "Total Abundance (millions)") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"MABspringZoop_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#MAB fall Zooplankton anomaly from Ryan Morse:
# Same regime-shift pipeline as the MAB Spring section above, for Fall.
MABseasonZooAbund <- readr::read_csv(here::here(data.dir, "MAB_mean_seasonal_anomalies.csv"))
MABfallZoop <- MABseasonZooAbund %>% dplyr::filter(year >= 1992, season == 'Fall') %>%
dplyr::group_by(year) %>%
dplyr::mutate(SumZoop = sum(ctyp_100m3, calfin_100m3, tlong_100m3, pseudo_100m3)) %>%
dplyr::select(year, SumZoop)
#Regime analysis: regression tree of summed anomalies on year.
MABZoopRegime <- MABfallZoop %>% dplyr::select(SumZoop, year)
Regime <- rpart::rpart(SumZoop~year, data=MABZoopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "MABfallZoopAnom_Regimes_2021.RDS"))
printcp(Regime)
# Prune at the CP with the minimum cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#change YEAR to continuous numeric for plotting function below:
MABZoopRegime$year <- as.numeric(as.character(MABZoopRegime$year))
#Line plot of condition; red vertical lines mark regime-shift years.
p2 <- ggplot(MABZoopRegime, aes(x = year, y = SumZoop)) +
geom_line()+
geom_point() +
labs(title= "MAB Fall Zooplankton Abundance Anomalies", y = "Total Zooplankton Anomaly") +
geom_vline(xintercept=SppSplit1, color='red')+
geom_vline(xintercept=SppSplit2, color='red')+
geom_vline(xintercept=SppSplit3, color='red')+
geom_vline(xintercept=SppSplit4, color='red')+
geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"MABfallZoopAnom_Regimes_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Automate zooplankton regime shifts from Harvey's data: EcoMon_ZooplanktonData2021_BTSMeanAbundance.csv from gam_calcs_strata.R
#Dataset of zooplankton for regime shift
# Keep one row per YEAR x EPU x SEASONS combination with the three indices.
ZoopSeasonEPU <- ZoopDataSeasonEPU %>% dplyr::select(YEAR, EPU, SEASONS,
CopepodSmLgEPU,TotCopEPU,ZoopAbundEPU) %>%
dplyr::filter(!is.na(EPU)) %>%
dplyr::filter(!is.na(SEASONS)) %>%
dplyr::distinct()
# Reshape wide -> long: one row per (YEAR, EPU, SEASONS, index name, value).
# NOTE(review): tidyr::gather() is superseded; pivot_longer() is the modern
# equivalent, but swapping it would change row ordering -- left as is.
Zoop1 <- ZoopSeasonEPU %>%
# key=zoopIndex <- c(CopepodSmLgSpringEPU,CopepodSmLgSummmerEPU,CopepodSmLgFallEPU,
# CopepodSmLgWinterEPU,TotCopSpringEPU,TotCopSummerEPU,
# TotCopFallEPU,TotCopWinterEPU,ZoopAbundSpringEPU,
# ZoopAbundSummerEPU,ZoopAbundFallEPU,ZoopAbundWinterEPU)
tidyr::gather(key= 'ZoopName', value = 'ZoopData', -YEAR, -EPU, -SEASONS)
# Build a combined "index_EPU" key used to drive the (commented-out) loop below.
Zoop <- Zoop1 %>% dplyr::select(YEAR, EPU, SEASONS, ZoopName, ZoopData) %>%
dplyr::mutate(ZoopIndex=paste(ZoopName, EPU, sep="_"))
####Automating regime shift plots for zooplankton data by EPU:
# create a character vector of species names
# NOTE(review): unique() on the data frame is redundant here -- distinct(ZoopIndex)
# already reduces to the unique index names before pull().
ZoopList <- unique(Zoop) %>%
dplyr::distinct(ZoopIndex) %>%
dplyr::pull()
#Only select Species with names that have sufficient data:
# numZoop <- length(ZoopList)
# for (i in numZoop:1) {
# if (!is.na(as.numeric(ZoopList[i]))) {
# ZoopList <-ZoopList[-i]
# }
# }
#
# #loop over zooplankton index:
# for (aZoop in ZoopList) {
# print(aZoop)
#
#
# #Test for regime shifts in each zooplankton index (same method as in Perretti et al. 2017, although Perretti uses MRT, gives error when method="mrt"):
# ZoopRegDat <- Zoop %>% dplyr::filter(ZoopIndex == aZoop) %>% dplyr::select(ZoopData, ZoopIndex, YEAR, SEASONS)
# Regime <- rpart::rpart(ZoopData~YEAR, data=ZoopRegDat)
# #Selecting best fit (gives optimal CP value associated with the minimum error)::
# # Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
#
# ZoopRegimePlot <- rpart.plot::rpart.plot(Regime)
#
# # Prettier plot of pruned tree (not currently working):
# # library(rpart.plot)
# # library(RColorBrewer)
#
#
#
# # ptree<- prune(Regime,
# # + cp= Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"])
# # fancyRpartPlot(ptree, uniform=TRUE,
# # + main="Pruned Classification Tree")
# #plotcp
#
#
# #Outputs pruning tree table:
# saveRDS(Regime[["cptable"]],file = here::here("output","RegimeShifts", paste0(aZoop,"_Zooplankton_Regimes.RDS")))
# # printcp(Regime)
#
# #Select best pruned tree (outputs the row of the cptable that has the number of splits with the lowest error (xerror)
# # Used rpart::prune
# optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
# optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
# Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
# Regime <- Regime_pruned
#
#
# #Pull regime shift years into new data frame to add to plot (use the simplest tree
# #within one standard error (xstd) of the best tree (lowest xerror)):
# Results <- as.data.frame(Regime[["splits"]])
# SppSplit1 <- Results$index[1]
# SppSplit2 <- Results$index[2]
# SppSplit3 <- Results$index[3]
# SppSplit4 <- Results$index[4]
# SppSplit5 <- Results$index[5]
#
#
# ZoopDat <- Zoop
#
# #change YEAR to continuous numeric for plotting function below:
# ZoopDat$YEAR <- as.numeric(as.character(ZoopDat$YEAR))
#
# ZoopIndexRegime <- ZoopDat %>% dplyr::filter(YEAR >= 1992) %>%
# dplyr::select(YEAR, SEASONS, ZoopData) %>% group_by(SEASONS)
#
#
# #Line plot of condition
# p2 <- ggplot(ZoopDat, aes(x = YEAR, y = ZoopData)) +
# geom_line(aes(color = SEASONS)) +
# scale_color_manual(values = c("red", "blue", "green", "orange")) +
# # geom_errorbar(width=.1, aes(ymin=lower.ci.cond, ymax=upper.ci.cond), colour="black") +
# # geom_errorbar(width=.1, aes(ymin=lower.ci.cond, ymax=upper.ci.cond)) +
# geom_point(aes(color = SEASONS)) +
# labs(title= paste0(aZoop, " Regime Shifts by Season"), y = "aZoop") +
# geom_vline(xintercept=SppSplit1, color='red')+
# geom_vline(xintercept=SppSplit2, color='red')+
# geom_vline(xintercept=SppSplit3, color='red')+
# geom_vline(xintercept=SppSplit4, color='red')+
# geom_vline(xintercept=SppSplit5, color='red')+
# # ylim(0.85, 1.21)+
# xlim(1992, 2022)
#
# # ggsave(path= here::here("output","RegimeShifts"),paste0(aZoop, "_RegimeShifts.jpg"), width = 8, height = 3.75, units = "in", dpi = 300)
# }
#readRDS(file = here::here("output","RegimeShifts", paste0("American plaice_RelCondition_Regimes_Fall.RDS")))
##End automated regime shift plots by species
#FEMALE butterfish condition and regime shift:
# annualCondition <- FemButtCondPlot
#
# #change YEAR to continuous numeric for plotting function below:
# annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
#
# speciesNames <- annualCondition
#
# #Line plot of condition
# p2 <- ggplot(speciesNames, aes(x = YEAR, y = MeanCond)) +
# geom_line()+
# geom_point() +
# labs(title="Female Butterfish Relative Condition", y = "Relative Condition") +
# geom_vline(xintercept=FemButtSplit1, color='red')+
# geom_vline(xintercept=FemButtSplit2, color='red')
#
# ggsave(path= here::here(out.dir),"Female_Butterfish_ShelfCondition_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#
# #MALE butterfish condition and regime shift:
# annualCondition <- MaleButtCondPlot
#
# #change YEAR to continuous numeric for plotting function below:
# annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
#
# speciesNames <- annualCondition
#
# #Line plot of condition
# p2 <- ggplot(speciesNames, aes(x = YEAR, y = MeanCond)) +
# geom_line()+
# geom_point() +
# labs(title="Male Butterfish Relative Condition", y = "Relative Condition") +
# geom_vline(xintercept=MaleButtSplit1, color='red')+
# geom_vline(xintercept=MaleButtSplit2, color='red')
#
# ggsave(path= here::here(out.dir),"Male_Butterfish_ShelfCondition_2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
# NOTE(review): this chunk assumes AvgTempSummerFormat (and the other seasonal
# *Format data frames below) plus the *Split1 regime-shift years already exist
# in the workspace -- they are computed earlier in the file; confirm before
# running this section standalone. `p2` is overwritten by each plot and only
# saved where the ggsave() call is uncommented.
AvgSummerTemp <- AvgTempSummerFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempSummer) %>% group_by(EPU)
#Line plot of summer temp (one colored line per EPU, red vline = regime shift):
p2 <- ggplot(AvgSummerTemp, aes(x = YEAR, y = AvgTempSummer)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Summer Bottom Temperature by EPU", y = "Average Summer Bottom Temp") +
  geom_vline(xintercept=SummerSplit1, color='red')
#+
 # geom_vline(xintercept=SummerSplit2, color='red') +
 # geom_vline(xintercept=SummerSplit3, color='red') +
 # geom_vline(xintercept=SummerSplit4, color='red') +
 # geom_vline(xintercept=SummerSplit5, color='red')
#ggsave(path= here::here(out.dir),"AverageSummerBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgSpringTemp <- AvgTempSpringFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempSpring) %>% group_by(EPU)
#Line plot of spring temp:
p2 <- ggplot(AvgSpringTemp, aes(x = YEAR, y = AvgTempSpring)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Spring Bottom Temperature by EPU", y = "Average Spring Bottom Temp") +
  geom_vline(xintercept=SpringSplit1, color='red')
#+
 # geom_vline(xintercept=SpringSplit2, color='red') +
 # geom_vline(xintercept=SpringSplit3, color='red') +
 # geom_vline(xintercept=SpringSplit4, color='red')
#ggsave(path= here::here(out.dir),"AverageSpringBottomTempEPU2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgFallTemp <- AvgTempFallFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempFall) %>% group_by(EPU)
#Line plot of fall temp:
p2 <- ggplot(AvgFallTemp, aes(x = YEAR, y = AvgTempFall)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Fall Bottom Temperature by EPU", y = "Average Fall Bottom Temp") +
  geom_vline(xintercept=FallSplit1, color='red')
#ggsave(path= here::here(out.dir),"AverageFallBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Average bottom temp data by EPU and season (from gam_calcs_strata.R):
AvgWinterTemp <- AvgTempWinterFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, AvgTempWinter) %>% group_by(EPU)
#Line plot of winter temp:
p2 <- ggplot(AvgWinterTemp, aes(x = YEAR, y = AvgTempWinter)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Average Winter Bottom Temperature by EPU", y = "Average Winter Bottom Temp") +
  geom_vline(xintercept=WinterSplit1, color='red')
#ggsave(path= here::here(out.dir),"AverageWinterBottomTempEPU2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Copepod small to large ratio data by EPU (from gam_calcs_strata.R):
# NOTE(review): assumes CalfinFormat, out.dir and the CopepodEPUSplit*/
# CopepodShelfSplit* regime-shift years are defined earlier in the file.
# Unlike most plots in this script, the two ggsave() calls here are ACTIVE
# (not commented out) -- confirm that is intentional.
CopepodEPUdata <- CalfinFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, CopepodSmallLarge) %>% group_by(EPU)
#Line plot of coppepod small to large index:
p2 <- ggplot(CopepodEPUdata, aes(x = YEAR, y = CopepodSmallLarge)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green", "orange")) +
  geom_point(aes(color = EPU)) +
  labs(title="Copepod Size Index by EPU", y = "Copepod Size Index") +
  geom_vline(xintercept=CopepodEPUSplit1, color='red') +
  geom_vline(xintercept=CopepodEPUSplit2, color='red')
# +
#   geom_vline(xintercept=CopepodEPUSplit3, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit4, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit5, color='red') +
#   geom_vline(xintercept=CopepodEPUSplit6, color='red')
ggsave(path= here::here(out.dir),"CopepodSmLgEPU_regime2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#shelf-wide Copepod small to large ratio data by Shelf (from gam_calcs_strata.R):
CopepodShelfdata <- CalfinFormat %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, CopepodSmallLarge)
#Line plot of coppepod small to large index (shelf-wide, single series):
p2 <- ggplot(CopepodShelfdata, aes(x = YEAR, y = CopepodSmallLarge)) +
  geom_line() +
  scale_color_manual(values = "red") +
  geom_point() +
  labs(title="Shelf-wide Copepod Size Index", y = "Copepod Size Index") +
  geom_vline(xintercept=CopepodShelfSplit1, color='red') +
  geom_vline(xintercept=CopepodShelfSplit2, color='red')
# +
#   geom_vline(xintercept=CopepodShelfSplit3, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit4, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit5, color='red') +
#   geom_vline(xintercept=CopepodShelfSplit6, color='red')
ggsave(path= here::here(out.dir),"CopepodSmLgShelf_regime2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall phyoplanktion bloom magnitude data (from gam_calcs_strata.R):
# NOTE(review): assumes Fallbloom, TotalCopepods and the TotCopepodsSplit*
# regime-shift years exist in the workspace (computed earlier in the file).
FallBloomdata <- Fallbloom %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, RangeMagnitude)
#Line plot of fall phytoplankton mag:
p2 <- ggplot(FallBloomdata, aes(x = YEAR, y = RangeMagnitude)) +
  geom_line(color = "blue") +
  geom_point(color = "blue") +
  labs(title="Fall Phytoplankton Bloom Magnitude", y = "Fall Bloom Magnitude")
#ggsave(path= here::here(out.dir),"FallBloom_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall phytoplankton bloom duration data (from gam_calcs_strata.R):
# FallBloomdata is reused (overwritten) here for the duration series.
FallBloomdata <- Fallbloom %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, RangeDuration)
#Line plot of fall phytoplankton duration:
p2 <- ggplot(FallBloomdata, aes(x = YEAR, y = RangeDuration)) +
  geom_line(color = "blue") +
  geom_point(color = "blue") +
  labs(title="Fall Phytoplankton Bloom Duration", y = "Fall Bloom Duration")
#ggsave(path= here::here(out.dir),"FallBloom_duration_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#"TotalCopepods","ZooplanktonBiomass","StomachFullness"
#Total Copopods data (from NEFSCZooplankton_v3_6b_v2018.xls from gam_calcs_strata.R):
TotCopData <- TotalCopepods %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, EPU, TotalCopepodsMillions) %>% group_by(EPU)
#Line plot of total copepods (three EPUs; red vlines mark regime shifts):
p2 <- ggplot(TotCopData, aes(x = YEAR, y = TotalCopepodsMillions)) +
  geom_line(aes(color = EPU)) +
  scale_color_manual(values = c("red", "blue", "green")) +
  geom_point(aes(color = EPU)) +
  labs(title="Total Copepods", y = "Total Copepods Millions") +
  geom_vline(xintercept=TotCopepodsSplit1, color='red') +
  geom_vline(xintercept=TotCopepodsSplit2, color='red')
#ggsave(path= here::here(out.dir),"TotalCopepods_regime2022.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Spring Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
#******Need to run these regime shifts separately by EPU:
# Sum strata-level spring copepod abundance to one annual total per year.
# NOTE(review): sum() is called without na.rm = TRUE, so any NA stratum makes
# that year's total NA -- confirm whether the input can contain NAs.
TotalCopSpr <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Spring') %>%
  dplyr::select(YEAR, TotalCopepodStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
  dplyr::select(YEAR, SumTotCop) %>%
  dplyr::distinct()
#Regime analysis: regression tree of the annual total on YEAR -- each split
#year is a candidate regime shift.
CopRegime <- TotalCopSpr %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_spr2022.RDS"))
printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# Fewer than 5 splits leaves the trailing SppSplit* values NA; ggplot drops
# those vlines with a warning.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Copepods Spring", y = "Total Copepods (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_spring.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Summer Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
# Same pattern as the Spring chunk above: annual summer total -> rpart tree on
# YEAR -> prune at min-xerror CP -> plot with split years as red vlines.
TotalCopSummer <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Summer') %>%
  dplyr::select(YEAR, TotalCopepodStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
  dplyr::select(YEAR, SumTotCop) %>%
  dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopSummer %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Summer2022.RDS"))
printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Copepods Summer", y = "Total Copepods (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Summer.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
# Same pattern as the Spring chunk above, for Fall.
TotalCopFall <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Fall') %>%
  dplyr::select(YEAR, TotalCopepodStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
  dplyr::select(YEAR, SumTotCop) %>%
  dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopFall %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Fall2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Copepods Fall", y = "Total Copepods (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Fall.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Winter Total copepods from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZoopData from gam_calcs_strata.R:
# Same pattern as the Spring chunk above, for Winter.
TotalCopWinter <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Winter') %>%
  dplyr::select(YEAR, TotalCopepodStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotCop = sum(TotalCopepodStrata)) %>%
  dplyr::select(YEAR, SumTotCop) %>%
  dplyr::distinct()
#Regime analysis:
CopRegime <- TotalCopWinter %>% dplyr::select(SumTotCop, YEAR)
Regime <- rpart::rpart(SumTotCop~YEAR, data=CopRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalCopepds_Regimes_Winter2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualCopepods <- CopRegime
#change YEAR to continuous numeric for plotting function below:
annualCopepods$YEAR <- as.numeric(as.character(annualCopepods$YEAR))
TotCopRegime <- annualCopepods
#Line plot of condition
p2 <- ggplot(TotCopRegime, aes(x = YEAR, y = SumTotCop)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Copepods Winter", y = "Total Copepods (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalCopepods_Regimes_Winter.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Spring Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# Sum strata-level abundance to one annual Spring total per year.
# BUG FIX: the original wrote mutate(SumTotalZooSpr = sum(ZooplAbundStrata), na.rm = TRUE),
# which passed na.rm to mutate() as a literal new column instead of to sum(),
# so any NA stratum silently made the whole annual sum NA. na.rm = TRUE now
# goes inside sum(), where it was clearly intended.
TotalZoopSpr <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Spring') %>%
  dplyr::select(YEAR, ZooplAbundStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotalZooSpr = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
  dplyr::select(YEAR, SumTotalZooSpr) %>%
  dplyr::distinct()
#Regime analysis: regression tree of the annual total on YEAR -- each split
#year is a candidate regime shift.
ZooRegime <- TotalZoopSpr %>% dplyr::select(SumTotalZooSpr, YEAR)
Regime <- rpart::rpart(SumTotalZooSpr~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Spring2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
# Fewer than 5 splits leaves trailing SppSplit* values NA; ggplot drops those
# vlines with a warning.
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooSpr)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Zooplankton Abundance Spring", y = "Total Zooplankton Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Spring.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Summer Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# BUG FIX: na.rm = TRUE was passed to mutate() (creating a spurious "na.rm"
# column) instead of to sum(), so NA strata poisoned the annual totals.
TotalZoopSummer <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Summer') %>%
  dplyr::select(YEAR, ZooplAbundStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotalZooSummer = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
  dplyr::select(YEAR, SumTotalZooSummer) %>%
  dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopSummer %>% dplyr::select(SumTotalZooSummer, YEAR)
Regime <- rpart::rpart(SumTotalZooSummer~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Summer2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooSummer)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Zooplankton Abundance Summer", y = "Total Zooplankton Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Summer.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Fall Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# BUG FIX: na.rm = TRUE was passed to mutate() (creating a spurious "na.rm"
# column) instead of to sum(), so NA strata poisoned the annual totals.
TotalZoopFall <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Fall') %>%
  dplyr::select(YEAR, ZooplAbundStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotalZooFall = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
  dplyr::select(YEAR, SumTotalZooFall) %>%
  dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopFall %>% dplyr::select(SumTotalZooFall, YEAR)
Regime <- rpart::rpart(SumTotalZooFall~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Fall2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooFall)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Zooplankton Abundance Fall", y = "Total Zooplankton Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Fall.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Winter Total zooplankton abundance from Harvey: EcoMon_ZooplanktonData_BTSMeanAbundance.csv, as ZooSeason from gam_calcs_strata.R:
# BUG FIX: na.rm = TRUE was passed to mutate() (creating a spurious "na.rm"
# column) instead of to sum(), so NA strata poisoned the annual totals.
TotalZoopWinter <- ZooSeason %>% dplyr::filter(YEAR >= 1992, season1 == 'Winter') %>%
  dplyr::select(YEAR, ZooplAbundStrata) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::mutate(SumTotalZooWinter = sum(ZooplAbundStrata, na.rm = TRUE)) %>%
  dplyr::select(YEAR, SumTotalZooWinter) %>%
  dplyr::distinct()
#Regime analysis:
ZooRegime <- TotalZoopWinter %>% dplyr::select(SumTotalZooWinter, YEAR)
Regime <- rpart::rpart(SumTotalZooWinter~YEAR, data=ZooRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "TotalZoopl_Regimes_Winter2022.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
annualZoopl <- ZooRegime
#change YEAR to continuous numeric for plotting function below:
annualZoopl$YEAR <- as.numeric(as.character(annualZoopl$YEAR))
TotZooRegime <- annualZoopl
#Line plot of condition
p2 <- ggplot(TotZooRegime, aes(x = YEAR, y = SumTotalZooWinter)) +
  geom_line()+
  geom_point() +
  labs(title= "Total Zooplankton Abundance Winter", y = "Total Zooplankton Abundance (millions)") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"TotalZoopl_Regimes_Winter.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
#Regime shifts in surface temp:
# Annual mean spring surface temperature (NA surface temps dropped explicitly
# before averaging), then the same rpart regime-shift pattern as above.
# NOTE(review): assumes cond.epu exists in the workspace -- TODO confirm it is
# built earlier in this file.
Surfdata <- cond.epu %>% dplyr::filter(YEAR >= 1992) %>%
  dplyr::select(YEAR, SURFTEMP) %>%
  dplyr::filter(!is.na(SURFTEMP)) %>%
  dplyr::group_by(YEAR) %>%
  dplyr::summarize(AvgSurfTemp = mean(SURFTEMP))
#Surface data for mackerel ESP:
#readr::write_csv(Surfdata, here::here(out.dir,"NEFSC_SpringSurfaceTemp2022.csv"))
#Regime analysis:
SurfRegime <- Surfdata %>% dplyr::select(AvgSurfTemp, YEAR)
Regime <- rpart::rpart(AvgSurfTemp~YEAR, data=SurfRegime)
#Selecting best fit (gives optimal CP value associated with the minimum error)::
# Regime$cptable[which.min(Regime$cptable[,"xerror"]),"CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
#Outputs pruning tree table:
#saveRDS(Regime[["cptable"]],file = here::here("output", "SurfaceTemp_Spring.RDS"))
#printcp(Regime)
# Prune to the subtree with the lowest cross-validated error (xerror).
optimal_cp_index <- as.numeric(which.min(Regime$cptable[,"xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index,"CP"]
Regime_pruned <- rpart::prune(Regime, cp = optimal_cp)
Regime <- Regime_pruned
#Pull regime shift years into new data frame to add to plot (use the simplest tree
#within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
#annualZoopl <- SurfRegime
#change YEAR to continuous numeric for plotting function below:
SurfRegime$YEAR <- as.numeric(as.character(SurfRegime$YEAR))
AnnualSurfRegime <- SurfRegime
#Line plot of condition
p2 <- ggplot(AnnualSurfRegime, aes(x = YEAR, y = AvgSurfTemp)) +
  geom_line()+
  geom_point() +
  labs(title= "Average Spring Surface Temperature", y = "Average Surface Temperature") +
  geom_vline(xintercept=SppSplit1, color='red')+
  geom_vline(xintercept=SppSplit2, color='red')+
  geom_vline(xintercept=SppSplit3, color='red')+
  geom_vline(xintercept=SppSplit4, color='red')+
  geom_vline(xintercept=SppSplit5, color='red')
#ggsave(path= here::here("output"),"SurfaceTemp_Spring_Regimes2021.jpg", width = 8, height = 3.75, units = "in", dpi = 300)
|
# Set2_Fittness: fitness score for a kernel-bandwidth vector BW used in a
# trip-distance prediction model.
#
# BW: numeric bandwidth vector of length 2 * length(Dist_Lev.Train); the first
#     half provides bandwidths for the pause-duration densities, the second
#     half for the departure-minute densities (see indexing below).
#
# For every evaluation trip (1..nEva) it scores each trained distance level by
# combining three per-level quantities: the density of the pause since the last
# trip of that distance, the empirical share of trips of that distance on the
# same departure weekday, and the density of the departure minute. The product
# is normalised into a probability over distance levels, from which an expected
# distance and its relative error are computed.
#
# Returns: fraction of evaluation trips whose absolute relative error is below
# 10% (value of the final assignment expression).
#
# NOTE(review): relies on objects from the calling/global environment --
# nEva, Dist_Lev.Train, Act_Veh.Train, Act_Veh.Eva, and pre-allocated vectors
# PDist, PWD_Dep, PMin, Eva.Est_Dist_Exp, Eva.Err_Dist_Exp_Abs,
# Eva.Err_Dist_Exp_Rel (modified only as function-local copies). TODO confirm
# these are initialised before calling.
Set2_Fittness <- function(BW)
{
  for(EvaNr in seq(1,nEva))
  {
    for(St in Dist_Lev.Train) #Loop distance level
    {
      # Pause Density: kernel density (interpolated) of the pause durations
      # observed before trips of this distance, using the level's bandwidth.
      dfPause <- approxfun(density(Act_Veh.Train$Dist_Pause[which(Act_Veh.Train$Distance == St)],BW[which(Dist_Lev.Train == St)]))
      # Pause to the last such drive (with the same distance)
      LastDriveNr <- max(which(Act_Veh.Train$Distance == St))
      # Time since that last drive, converted from days to minutes.
      TDiff <- (Act_Veh.Eva$time_day[EvaNr] - Act_Veh.Train$time_day[LastDriveNr]) *24 *60
      # Density/Probability, how possible is it to drive such a distance now
      PDist[which(Dist_Lev.Train == St)] <- dfPause(TDiff)
      # Share of training trips of this distance that departed on the same
      # weekday as the evaluation trip.
      PWD_Dep[which(Dist_Lev.Train == St)] <- length(which((Act_Veh.Train$WD_Dep == Act_Veh.Eva$WD_Dep[EvaNr]) & (Act_Veh.Train$Distance == St) ))/length(which(Act_Veh.Train$Distance == St))
      # possible departure time, in Min (bandwidths live in the second half of BW)
      dfMin <- approxfun(density(Act_Veh.Train$Min[which(Act_Veh.Train$Distance == St)],BW[length(Dist_Lev.Train)+which(Dist_Lev.Train == St)]))
      PMin[which(Dist_Lev.Train == St)] <- dfMin(Act_Veh.Eva$Min[EvaNr])
      #following sentence is still needed, because some Distance are very little in the TrainData,
      # (approxfun returns NA outside the density's support; treat as zero probability)
      if(is.na(PDist[which(Dist_Lev.Train == St)])) { PDist[which(Dist_Lev.Train == St)] <- 0}
      if(is.na(PMin[which(Dist_Lev.Train == St)])) { PMin[which(Dist_Lev.Train == St)] <- 0}
    }
    # Normalisation of PDist
    if(sum(PDist) != 0) PDist <- PDist/sum(PDist)
    # Normalisation of PWD_Dep
    if(sum(PWD_Dep) != 0) PWD_Dep <- PWD_Dep/sum(PWD_Dep)
    # Normalisation of PMin
    if(sum(PMin) != 0) PMin <- PMin/sum(PMin)
    # Probability: combine the three evidence sources and renormalise.
    PDist <- PDist * PWD_Dep* PMin
    if(sum(PDist) != 0) PDist <- PDist/sum(PDist)
    # Expected distance and its absolute/relative prediction error.
    Eva.Est_Dist_Exp[EvaNr] <- sum(PDist*Dist_Lev.Train)
    Eva.Err_Dist_Exp_Abs[EvaNr] <- Act_Veh.Eva$Distance[EvaNr] - Eva.Est_Dist_Exp[EvaNr]
    Eva.Err_Dist_Exp_Rel[EvaNr] <- Eva.Err_Dist_Exp_Abs[EvaNr]/Act_Veh.Eva$Distance[EvaNr]
  }
  # Fitness = share of evaluation trips predicted within 10% relative error.
  # (The value of this assignment is the function's return value.)
  Set2_Fittness <- length(Eva.Err_Dist_Exp_Rel[which(abs(Eva.Err_Dist_Exp_Rel)<0.1)])/length(Eva.Err_Dist_Exp_Rel)
}
|
/Set2_Fittness.R
|
no_license
|
ChenyangZhou/Trip-Prediction
|
R
| false
| false
| 2,114
|
r
|
# Set2_Fittness: fitness score for a kernel-bandwidth vector BW used in a
# trip-distance prediction model.
#
# BW: numeric bandwidth vector of length 2 * length(Dist_Lev.Train); the first
#     half provides bandwidths for the pause-duration densities, the second
#     half for the departure-minute densities (see indexing below).
#
# Returns: fraction of evaluation trips whose absolute relative error is below
# 10% (value of the final assignment expression).
#
# NOTE(review): relies on objects from the calling/global environment --
# nEva, Dist_Lev.Train, Act_Veh.Train, Act_Veh.Eva, and pre-allocated vectors
# PDist, PWD_Dep, PMin, Eva.Est_Dist_Exp, Eva.Err_Dist_Exp_Abs,
# Eva.Err_Dist_Exp_Rel (modified only as function-local copies). TODO confirm
# these are initialised before calling.
Set2_Fittness <- function(BW)
{
  for(EvaNr in seq(1,nEva))
  {
    for(St in Dist_Lev.Train) #Loop distance level
    {
      # Pause Density: kernel density (interpolated) of pauses before trips of
      # this distance, using the level's bandwidth.
      dfPause <- approxfun(density(Act_Veh.Train$Dist_Pause[which(Act_Veh.Train$Distance == St)],BW[which(Dist_Lev.Train == St)]))
      # Pause to the last such drive (with the same distance)
      LastDriveNr <- max(which(Act_Veh.Train$Distance == St))
      # Time since that last drive, converted from days to minutes.
      TDiff <- (Act_Veh.Eva$time_day[EvaNr] - Act_Veh.Train$time_day[LastDriveNr]) *24 *60
      # Density/Probability, how possible is it to drive such a distance now
      PDist[which(Dist_Lev.Train == St)] <- dfPause(TDiff)
      # Share of training trips of this distance departing on the same weekday.
      PWD_Dep[which(Dist_Lev.Train == St)] <- length(which((Act_Veh.Train$WD_Dep == Act_Veh.Eva$WD_Dep[EvaNr]) & (Act_Veh.Train$Distance == St) ))/length(which(Act_Veh.Train$Distance == St))
      # possible departure time, in Min (bandwidths from the second half of BW)
      dfMin <- approxfun(density(Act_Veh.Train$Min[which(Act_Veh.Train$Distance == St)],BW[length(Dist_Lev.Train)+which(Dist_Lev.Train == St)]))
      PMin[which(Dist_Lev.Train == St)] <- dfMin(Act_Veh.Eva$Min[EvaNr])
      #following sentence is still needed, because some Distance are very little in the TrainData,
      # (approxfun returns NA outside the density's support; treat as zero probability)
      if(is.na(PDist[which(Dist_Lev.Train == St)])) { PDist[which(Dist_Lev.Train == St)] <- 0}
      if(is.na(PMin[which(Dist_Lev.Train == St)])) { PMin[which(Dist_Lev.Train == St)] <- 0}
    }
    # Normalisation of PDist
    if(sum(PDist) != 0) PDist <- PDist/sum(PDist)
    # Normalisation of PWD_Dep
    if(sum(PWD_Dep) != 0) PWD_Dep <- PWD_Dep/sum(PWD_Dep)
    # Normalisation of PMin
    if(sum(PMin) != 0) PMin <- PMin/sum(PMin)
    # Probability: combine the three evidence sources and renormalise.
    PDist <- PDist * PWD_Dep* PMin
    if(sum(PDist) != 0) PDist <- PDist/sum(PDist)
    # Expected distance and its absolute/relative prediction error.
    Eva.Est_Dist_Exp[EvaNr] <- sum(PDist*Dist_Lev.Train)
    Eva.Err_Dist_Exp_Abs[EvaNr] <- Act_Veh.Eva$Distance[EvaNr] - Eva.Est_Dist_Exp[EvaNr]
    Eva.Err_Dist_Exp_Rel[EvaNr] <- Eva.Err_Dist_Exp_Abs[EvaNr]/Act_Veh.Eva$Distance[EvaNr]
  }
  # Fitness = share of evaluation trips predicted within 10% relative error.
  # (The value of this assignment is the function's return value.)
  Set2_Fittness <- length(Eva.Err_Dist_Exp_Rel[which(abs(Eva.Err_Dist_Exp_Rel)<0.1)])/length(Eva.Err_Dist_Exp_Rel)
}
|
# currently "outliers" is not used by the functions in the package
# but it may be useful to define the input of some functions,
# e.g. argument "mo" in functions "outliers.effects" and "outliers.regressors"
# Build a data frame describing a set of time-series outliers.
#
# type   : character vector of outlier types, each one of "IO", "AO", "LS",
#          "TC", "SLS" (stored as a factor with that fixed level order).
# ind    : vector of time indexes, parallel to `type`.
# weight : optional numeric coefficients; a single value is recycled, and the
#          default is 1 for every outlier.
#
# Returns a data.frame with columns `type`, `ind`, `coefhat`.
outliers <- function(type, ind, weight = NULL)
{
  if (is.null(weight))
    weight <- rep(1, length(type))
  # A scalar weight was previously recycled silently by data.frame(); keep
  # that behaviour explicit.
  if (length(weight) == 1L)
    weight <- rep(weight, length(type))
  # Fail early with a clear message instead of data.frame()'s silent
  # recycling (or obscure error) when inputs are ragged.
  if (length(ind) != length(type) || length(weight) != length(type))
    stop("'type', 'ind' and 'weight' must have the same length", call. = FALSE)
  m <- data.frame(type = factor(type, levels = c("IO", "AO", "LS", "TC", "SLS")),
      ind = ind, coefhat = weight)
  #structure(m, class = "outliers")
  m
}
|
/outliers-data-frame.R
|
no_license
|
stevejobsmyguru/TSOOutlier-Mini-R
|
R
| false
| false
| 478
|
r
|
# currently "outliers" is not used by the functions in the package
# but it may be useful to define the input of some functions,
# e.g. argument "mo" in functions "outliers.effects" and "outliers.regressors"
outliers <- function(type, ind, weight = NULL)
{
if (is.null(weight))
weight <- rep(1, length(type))
m <- data.frame(type = factor(type, levels = c("IO", "AO", "LS", "TC", "SLS")),
ind = ind, coefhat = weight)
#structure(m, class = "outliers")
m
}
|
#reference to professor's code on course website
# Hand-rolled Gaussian naive-Bayes classifier on the Pima Indians diabetes
# data: class-conditional Gaussian log-densities plus log class priors,
# evaluated over 10 random 80/20 train/test splits.
wdat<-read.csv('pima-indians-diabetes.csv', header=FALSE)
library(klaR)
library(caret)
# In this data set a 0 in columns 3, 4, 6 and 8 encodes a missing value,
# so recode those zeros as NA before estimating the class densities.
wdat[, 3][wdat[, 3] == 0] <- NA
wdat[, 4][wdat[, 4] == 0] <- NA
wdat[, 6][wdat[, 6] == 0] <- NA
wdat[, 8][wdat[, 8] == 0] <- NA
features<-wdat[,-c(9)]   # predictors (columns 1-8)
labels<-wdat[,9]         # 0/1 diabetes outcome (column 9)
trscore<-array(dim=10)   # per-split training accuracy
tescore<-array(dim=10)   # per-split test accuracy
for (wi in 1:10){
  # stratified 80/20 split of the row indices
  wtd<-createDataPartition(y=labels, p=.8, list=FALSE)
  nbx<-features
  ntrbx<-nbx[wtd, ]
  ntrby<-labels[wtd]
  trposflag<-ntrby>0
  ptregs<-ntrbx[trposflag, ]   # training rows of the positive class
  ntregs<-ntrbx[!trposflag,]   # training rows of the negative class
  ntebx<-nbx[-wtd, ]
  nteby<-labels[-wtd]
  # class-conditional Gaussian parameters, ignoring NAs per feature
  ptrmean<-sapply(ptregs, mean, na.rm=TRUE)
  ntrmean<-sapply(ntregs, mean, na.rm=TRUE)
  ptrsd<-sapply(ptregs, sd, na.rm=TRUE)
  ntrsd<-sapply(ntregs, sd, na.rm=TRUE)
  # log-likelihood of every training row under each class density
  # (sum of per-feature Gaussian log-densities, additive constants dropped)
  ptroffsets<-t(t(ntrbx)-ptrmean)
  ptrscales<-t(t(ptroffsets)/ptrsd)
  ptrlogs<--(1/2)*rowSums(apply(ptrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
  ntroffsets<-t(t(ntrbx)-ntrmean)
  ntrscales<-t(t(ntroffsets)/ntrsd)
  ntrlogs<--(1/2)*rowSums(apply(ntrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
  # class priors estimated from the training labels (labels are 0/1,
  # so sum(ntrby) counts positives and sum(!ntrby) counts negatives)
  ptr_rate<-sum(ntrby)/length(ntrby)
  ntr_rate<-sum(!ntrby)/length(ntrby)
  ptr_prior<-ptrlogs + log(ptr_rate)
  ntr_prior<-ntrlogs + log(ntr_rate)
  lvwtr<-ptr_prior>ntr_prior
  gotrighttr<-lvwtr==ntrby
  trscore[wi]<-sum(gotrighttr)/(sum(gotrighttr)+sum(!gotrighttr))
  # same computation for the held-out rows
  pteoffsets<-t(t(ntebx)-ptrmean)
  ptescales<-t(t(pteoffsets)/ptrsd)
  ptelogs<--(1/2)*rowSums(apply(ptescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
  nteoffsets<-t(t(ntebx)-ntrmean)
  ntescales<-t(t(nteoffsets)/ntrsd)
  ntelogs<--(1/2)*rowSums(apply(ntescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
  pte_prior<-ptelogs + log(ptr_rate)
  nte_prior<-ntelogs + log(ntr_rate)
  # BUG FIX: classify the test rows with the full posterior
  # (log-likelihood + log-prior), matching the training-set rule above;
  # previously pte_prior/nte_prior were computed but then ignored and the
  # bare log-likelihoods were compared instead.
  lvwte<-pte_prior>nte_prior
  gotright<-lvwte==nteby
  tescore[wi]<-sum(gotright)/(sum(gotright)+sum(!gotright))
}
print(c("Training score: ", mean(trscore)))
print(c("Testing score: ", mean(tescore)))
|
/hw1/hw1_1B.R
|
no_license
|
conniehuang98/CS498-Applied-Machine-Learning
|
R
| false
| false
| 2,093
|
r
|
#reference to professor's code on course website
wdat<-read.csv('pima-indians-diabetes.csv', header=FALSE)
library(klaR)
library(caret)
wdat[, 3][wdat[, 3] == 0] <- NA
wdat[, 4][wdat[, 4] == 0] <- NA
wdat[, 6][wdat[, 6] == 0] <- NA
wdat[, 8][wdat[, 8] == 0] <- NA
features<-wdat[,-c(9)]
labels<-wdat[,9]
trscore<-array(dim=10)
tescore<-array(dim=10)
for (wi in 1:10){
wtd<-createDataPartition(y=labels, p=.8, list=FALSE)
nbx<-features
ntrbx<-nbx[wtd, ]
ntrby<-labels[wtd]
trposflag<-ntrby>0
ptregs<-ntrbx[trposflag, ]
ntregs<-ntrbx[!trposflag,]
ntebx<-nbx[-wtd, ]
nteby<-labels[-wtd]
ptrmean<-sapply(ptregs, mean, na.rm=TRUE)
ntrmean<-sapply(ntregs, mean, na.rm=TRUE)
ptrsd<-sapply(ptregs, sd, na.rm=TRUE)
ntrsd<-sapply(ntregs, sd, na.rm=TRUE)
ptroffsets<-t(t(ntrbx)-ptrmean)
ptrscales<-t(t(ptroffsets)/ptrsd)
ptrlogs<--(1/2)*rowSums(apply(ptrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
ntroffsets<-t(t(ntrbx)-ntrmean)
ntrscales<-t(t(ntroffsets)/ntrsd)
ntrlogs<--(1/2)*rowSums(apply(ntrscales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
ptr_rate<-sum(ntrby)/length(ntrby)
ntr_rate<-sum(!ntrby)/length(ntrby)
ptr_prior<-ptrlogs + log(ptr_rate)
ntr_prior<-ntrlogs + log(ntr_rate)
lvwtr<-ptr_prior>ntr_prior
gotrighttr<-lvwtr==ntrby
trscore[wi]<-sum(gotrighttr)/(sum(gotrighttr)+sum(!gotrighttr))
pteoffsets<-t(t(ntebx)-ptrmean)
ptescales<-t(t(pteoffsets)/ptrsd)
ptelogs<--(1/2)*rowSums(apply(ptescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ptrsd))
nteoffsets<-t(t(ntebx)-ntrmean)
ntescales<-t(t(nteoffsets)/ntrsd)
ntelogs<--(1/2)*rowSums(apply(ntescales,c(1, 2), function(x)x^2), na.rm=TRUE)-sum(log(ntrsd))
pte_prior<-ptelogs + log(ptr_rate)
nte_prior<-ntelogs + log(ntr_rate)
lvwte<-ptelogs>ntelogs
gotright<-lvwte==nteby
tescore[wi]<-sum(gotright)/(sum(gotright)+sum(!gotright))
}
print(c("Training score: ", mean(trscore)))
print(c("Testing score: ", mean(tescore)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe_helpers.R
\name{iff}
\alias{iff}
\alias{iffn}
\title{Apply a function if and only if test is TRUE}
\usage{
iff(obj, test, fun, ...)
iffn(obj, test, fun, ...)
}
\arguments{
\item{obj}{object to apply test and fun to}
\item{test}{logical or function to apply to test}
\item{fun}{function to apply}
\item{...}{passed on to test}
}
\description{
otherwise return input value unchanged
}
\details{
\code{iffn} is the complement of \code{iff}: it applies \code{fun} if and only if \code{test} is FALSE.
}
|
/man/iff.Rd
|
no_license
|
cran/manifestoR
|
R
| false
| true
| 523
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe_helpers.R
\name{iff}
\alias{iff}
\alias{iffn}
\title{Apply a function if and only if test is TRUE}
\usage{
iff(obj, test, fun, ...)
iffn(obj, test, fun, ...)
}
\arguments{
\item{obj}{object to apply test and fun to}
\item{test}{logical or function to apply to test}
\item{fun}{function to apply}
\item{...}{passed on to test}
}
\description{
otherwise return input value unchanged
}
\details{
iffn is ... if and only if test is FALSE
}
|
# Auto-generated fuzzing harness (RcppDeepState/AFL style): replays a captured
# input list against the internal routine dexterMST:::is_person_booklet_sorted.
# NOTE(review): the duplicated NULL `id` entries and the differing lengths of
# booklet_id and person_id appear to be deliberate fuzzer inputs, not bugs.
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641785L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
# Print the structure of whatever the routine returned for this input.
str(result)
|
/dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939935-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 826
|
r
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641785L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
|
# Auto-generated fuzzing harness (RcppDeepState/AFL style): replays a captured
# pair of matrices (1x3 and 1x1, including a subnormal double) against the
# internal routine multivariance:::match_rows.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.8157142318935e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
# Print the structure of whatever the routine returned for this input.
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126058-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 225
|
r
|
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.8157142318935e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#' Add a new factor with a change between the values of two factors in a tidy table
#'
#' This function is used to calculate changes between values of categories
#' (dates, ages, countries, etc.) in a TIDY table, i.e. all values live in one
#' column and the remaining columns describe the value.
#'
#' All character inputs must be in quotes ("").
#'
#' @param df data frame in tidy layout containing the category column.
#' @param VarA character; the anchor category value all changes are computed
#'   against. Only the first element is used.
#' @param VarB character vector; the category values subtracted from
#'   \code{VarA} to form the change columns.
#' @param Var character; name of the column holding the category values.
#'   Must be a factor/character column, NOT numbers.
#' @param Value character; name of the column holding the numeric values.
#'
#' @return a data frame in the original tidy layout, with additional
#'   \code{Chng_<VarA>_<VarB>} categories holding the computed changes.
#'
#' @export
#'
addcht <- function(df, VarA, VarB, Var, Value){
  # --- tidy-eval symbols for the category and value columns
  Var1 <- rlang::sym(Var)
  Value1 <- rlang::sym(Value)
  # Distinct category values. BUG FIX: assigned locally with `<-`; the
  # previous `<<-` leaked VarTable into the caller's global environment.
  # The bare VarTable in gather() below is resolved from this function's
  # environment by tidyselect.
  VarTable <- c(as.character(unlist(df %>% select(!!Var1) %>% unique())))
  # --- spread the tidy data so each category value becomes its own column
  df1 <- df
  df2 <- spread(df1, key = !!Var1, value = !!Value1)
  df3 <- df2
  # --- create one change column per element of VarB
  for (i in seq_along(VarB)) {
    VarA11 <- rlang::sym(VarA[1])
    VarB11 <- rlang::sym(VarB[i])
    # BUG FIX: use VarA[1] (the anchor actually subtracted above) when
    # building the new column name; paste0() on the whole VarA vector made
    # rlang::sym() fail whenever length(VarA) > 1.
    Chng <- rlang::sym(paste0("Chng_", VarA[1], "_", VarB[i]))
    df3 <- df3 %>%
      dplyr::mutate(!!Chng := !!VarA11 - !!VarB11)
  }
  # --- gather back to the original tidy layout (original values + changes)
  df3 <- df3 %>% gather(key = !!enquo(Var), value = Value, VarTable, contains("Chng_"))
  df3
}
|
/R/addcht.R
|
no_license
|
aalvaradob/utildf
|
R
| false
| false
| 1,640
|
r
|
#' Add a new factor with a change between the values of two factors in a tidy table
#'
#'This function is used to calculate changes between values of categories (dates, ages, countries, etc).
#'
#'The data may come as TIDY, meaning that all values are in 1 column and other columns show information related to the value, meaning categories.
#'
#' All inputs must be in quotes ("").
#'
#'\strong{Inputs:}
#'\enumerate{
#' \item df = is the database containing a column with Factors.
#' \item Var1 = ARE the "VariableS" valueS that will be used to calculate the differences vs Var0.
#' \item Var0 = IS the Variable value anchor for all changes calculation.
#'}
#'Var1, Var0 and Var must be factors with leverls or characters NOT NUMBERS
#'
#'
#'@param... numeric
#'
#'@return a dataframe
#'
#'@examples
#'
#'@export
#'
addcht<-function(df,VarA,VarB,Var,Value){
{
# === aux variables
Var1<-rlang::sym(Var)
Value1<-rlang::sym(Value)
VarTable<<-c(as.character(unlist(df%>%select(!!Var1)%>%unique())))
# === Spread the data to calculate changes
df1<-df
df2<-spread(df1,key=!!Var1,value=!!Value1)
df3<-df2
} # spread data to calc changes
{# === create change columns
for (i in 1:length(VarB)){
VarA11<-rlang::sym(VarA[1])
VarB11<-rlang::sym(VarB[i])
Chng<-rlang::sym(paste0("Chng_",VarA,"_",VarB[i]))
df3<-df3 %>%
dplyr::mutate(!!Chng:=!!VarA11-!!VarB11)
}
} # calculate changes
{ # === gather the data to original layout
df3<-df3%>%gather(key=!!enquo(Var),value=Value,VarTable,contains("Chng_"))
} # gather the data to original layout
df3
}
|
####### --Ignore if already load the data-- ########################################
####################################################################################
### Read the household power consumption data in data.table format
library(data.table)
library(dplyr)
power_consumption <- fread("household_power_consumption.txt")
### Subset to the two required dates (2007-02-01 and 2007-02-02), building a
### proper POSIXct timestamp from the Date and Time columns first
subconsumption <- power_consumption %>%
  mutate(Date = as.Date(Date, "%d/%m/%Y")) %>%
  mutate(Time = as.POSIXct(strptime(paste(Date, " ", Time), "%Y-%m-%d %H:%M:%S"))) %>%
  filter(Time >= strftime("2007-02-01 00:00:00"), Time < strftime("2007-02-03 00:00:00"))
### Remove the original (large) dataset to free memory
rm(power_consumption)
####################################################################################
### Convert the measurement columns to numeric
subconsumption <- mutate(subconsumption, Sub_metering_1 = as.numeric(Sub_metering_1),
                         Sub_metering_2 = as.numeric(Sub_metering_2),
                         Voltage = as.numeric(Voltage),
                         Global_active_power = as.numeric(Global_active_power),
                         Global_reactive_power = as.numeric(Global_reactive_power))
### Shared limits for the time axis of all four panels
timestart <- as.POSIXct(strftime("2007-02-01 00:00:00"))
timeend <- as.POSIXct(strftime("2007-02-03 00:00:00"))
### 2x2 grid of panels (NOTE(review): draws to the active device; open
### png("plot4.png") first if a file is required)
par(mfrow=c(2,2))
### Top-left: global active power over time (same as plot 2)
with(subconsumption, plot(Time, Global_active_power,
                          xlim = c(timestart,timeend), xaxt="n",
                          type="l", xlab="", ylab = "Global Active Power"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
### Top-right: voltage over time
with(subconsumption, plot(Time, Voltage,
                          xlim = c(timestart,timeend), xaxt="n",
                          type="l", xlab="datetime", ylab = "Voltage"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
### Bottom-left: the three sub-metering series (same as plot 3)
with(subconsumption, plot(Time, Sub_metering_1,
                          xlim = c(timestart,timeend), xaxt="n",
                          type="l", xlab="", ylab = "Energy sub metering"))
with(subconsumption, lines(Time,Sub_metering_2, col="red"))
with(subconsumption, lines(Time,Sub_metering_3, col="blue"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
legend("topright", legend= c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),
       col = c("black","red", "blue"), lty = c(1,1,1))
### Bottom-right: global reactive power over time
### BUG FIX: y-axis label was previously misspelled "Global_rective_power"
with(subconsumption, plot(Time, Global_reactive_power,
                          xlim = c(timestart,timeend), xaxt="n",
                          type="l", xlab="datetime", ylab = "Global_reactive_power"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
|
/plot4.R
|
no_license
|
QSFTW/ExData_Plotting1
|
R
| false
| false
| 2,797
|
r
|
####### --Ignore if already load the data-- ########################################
####################################################################################
### Read the data data.table format
library(data.table)
library(dplyr)
power_consumption <- fread("household_power_consumption.txt")
### subset to required dates
subconsumption <- power_consumption %>%
mutate(Date = as.Date(Date, "%d/%m/%Y")) %>%
mutate(Time = as.POSIXct(strptime(paste(Date, " ", Time), "%Y-%m-%d %H:%M:%S"))) %>%
filter(Time >= strftime("2007-02-01 00:00:00"), Time <strftime("2007-02-03 00:00:00"))
### remove the original dataset
rm(power_consumption)
####################################################################################
### convert the variables to numeric
subconsumption <- mutate(subconsumption, Sub_metering_1 = as.numeric(Sub_metering_1),
Sub_metering_2 = as.numeric(Sub_metering_2),
Voltage = as.numeric(Voltage),
Global_active_power = as.numeric(Global_active_power),
Global_reactive_power = as.numeric(Global_reactive_power))
### set the limit for the Time axis
timestart <- as.POSIXct(strftime("2007-02-01 00:00:00"))
timeend <- as.POSIXct(strftime("2007-02-03 00:00:00"))
par(mfrow=c(2,2))
### Plot first graph, same as plot 2
with(subconsumption, plot(Time, Global_active_power,
xlim = c(timestart,timeend), xaxt="n",
type="l", xlab="", ylab = "Global Active Power"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
### Plot second graph
with(subconsumption, plot(Time, Voltage,
xlim = c(timestart,timeend), xaxt="n",
type="l", xlab="datetime", ylab = "Voltage"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
### Plot third graph, same as plot 3
with(subconsumption, plot(Time, Sub_metering_1,
xlim = c(timestart,timeend), xaxt="n",
type="l", xlab="", ylab = "Energy sub metering"))
with(subconsumption, lines(Time,Sub_metering_2, col="red"))
with(subconsumption, lines(Time,Sub_metering_3, col="blue"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
legend("topright", legend= c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),
col = c("black","red", "blue"), lty = c(1,1,1))
### Plot fourth graph
with(subconsumption, plot(Time, Global_reactive_power,
xlim = c(timestart,timeend), xaxt="n",
type="l", xlab="datetime", ylab = "Global_rective_power"))
axis.POSIXct(1, at= seq(timestart, timeend, by="day"), format="%a")
|
source("gini Function.R")
source("utils.R")
RpartModel <- function(data, cp) {
    # Fit a classification tree for DefFlag with rpart, plot it, score the
    # training data and print the resulting Gini value.
    #
    # Args:
    #   data: data frame with a DefFlag outcome column plus predictors.
    #   cp:   rpart complexity parameter controlling how the tree is pruned.
    #
    # NOTE(review): assumes rpart/rpart.plot are attached by the caller and
    # that Gini_value() comes from the sourced "gini Function.R".
    dataset <- data
    # Grow a deliberately deep tree (minsplit/minbucket at their minimum),
    # pruned only by cp.
    tree_model <- rpart(DefFlag ~ .,
                        data=dataset,
                        control=rpart.control(minsplit=2,
                                              minbucket=1,
                                              cp=cp),
                        parms = list(split="Gini"))
    rpart.plot(tree_model)
    # Score the same rows the tree was trained on.
    new <- data.frame(dataset)
    dataset$Score <- predict(tree_model, newdata=new)
    # check unique prediction values
    print(unique(dataset$Score))
    # check importance of each variable
    # tree_model$variable.importance
    # check cross validation results
    # printcp(tree_model)
    # Use `<-` for assignment (idiomatic R; `=` was used before).
    result <- Gini_value(dataset$Score, dataset$DefFlag)
    print(paste(c("Gini result for model #1:", result), collapse = " "))
}
CaretModel <- function(data) {
    # Fit a classification tree via caret::train (method 'rpart'), score the
    # same data it was trained on, and print the resulting Gini value.
    # NOTE(review): assumes caret is attached by the caller and that
    # Gini_value() comes from the sourced "gini Function.R".
    dataset <- data
    dataset$DefFlag <- as.factor(dataset$DefFlag)
    # Candidate complexity-parameter grid; only takes effect if the
    # tuneGrid argument below is uncommented, otherwise caret chooses its
    # own cp candidates.
    Grid <- expand.grid(cp=seq(0.1, 0.2, 0.1))
    rpart_model <- train(factor(DefFlag)~.,
                         data=dataset,
                         method='rpart',
                         metric = "Accuracy"
                         # tuneGrid = Grid,
                         # na.action = na.omit,
                         # parms=list(split='Gini')
    )
    # Score on the training data (no held-out evaluation here).
    dataset$Score <- predict(rpart_model, dataset)
    print("Gini result for model 2")
    print(Gini_value(dataset$Score, dataset$DefFlag))
}
|
/models.R
|
no_license
|
adamsqi/srd
|
R
| false
| false
| 1,503
|
r
|
source("gini Function.R")
source("utils.R")
RpartModel <- function(data, cp) {
dataset <- data
# create a model
tree_model <- rpart(DefFlag ~ .,
data=dataset,
control=rpart.control(minsplit=2,
minbucket=1,
cp=cp),
parms = list(split="Gini"))
rpart.plot(tree_model)
# create some predictions
new <- data.frame(dataset)
dataset$Score <- predict(tree_model, newdata=new)
# check unique prediction values
print(unique(dataset$Score))
# check importance of each variable
# tree_model$variable.importance
# check cross validation results
# printcp(tree_model)
result = Gini_value(dataset$Score, dataset$DefFlag)
print(paste(c("Gini result for model #1:", result), collapse = " "))
}
CaretModel <- function(data) {
dataset <- data
dataset$DefFlag <- as.factor(dataset$DefFlag)
Grid <- expand.grid(cp=seq(0.1, 0.2, 0.1))
rpart_model <- train(factor(DefFlag)~.,
data=dataset,
method='rpart',
metric = "Accuracy"
# tuneGrid = Grid,
# na.action = na.omit,
# parms=list(split='Gini')
)
dataset$Score <- predict(rpart_model, dataset)
print("Gini result for model 2")
print(Gini_value(dataset$Score, dataset$DefFlag))
}
|
## Question 1 ---------------------------------------------------------------
## mtcars: regress mpg on number of cylinders (as a factor) adjusting for
## weight; the factor(cyl)8 coefficient is the adjusted estimate of the
## expected change in mpg comparing 8 cylinders to 4.
fit <- lm(mpg ~ factor(cyl) + wt, data = mtcars)
summary(fit)

## Question 2 ---------------------------------------------------------------
## Same outcome and cylinder factor but without the weight adjustment.
## Comparing this with the adjusted model from Question 1 shows what
## adjusting for weight does to the estimated 8-vs-4 cylinder effect.
fit2 <- lm(mpg ~ factor(cyl), data = mtcars)
summary(fit2)

## Question 3 ---------------------------------------------------------------
## Likelihood-ratio style comparison (via anova) of the additive model
## against the model with a cylinder-by-weight interaction; the resulting
## P-value is judged at the 0.05 significance benchmark.
fit <- lm(mpg ~ factor(cyl) + wt, data = mtcars)
fit2 <- lm(mpg ~ factor(cyl) * wt, data = mtcars)
anova(fit, fit2)

## Question 4 ---------------------------------------------------------------
## Weight entered as I(wt * 0.5). The wt coefficient is then the estimated
## expected change in mpg per one TON increase in weight for a specific
## number of cylinders (4, 6, 8).
lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)

## Question 5 ---------------------------------------------------------------
## Hat diagonal (leverage) for the most influential point of this data set.
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
hatvalues(fit)   # same values as influence(fit)$hat

## Question 6 ---------------------------------------------------------------
## Slope dfbeta for the point with the highest hat value.
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
dfbetas(fit)

## Question 7 ---------------------------------------------------------------
## Comparing the Y~X regression coefficient with and without adjustment for
## a third variable Z: the coefficient can reverse sign after adjustment.
## For example, it can be strongly significant and positive before
## adjustment and strongly significant and negative after adjustment.
|
/regression_models/reg_quiz3.R
|
no_license
|
alambike123/coursera
|
R
| false
| false
| 2,753
|
r
|
##1
# Consider the mtcars data set. Fit a model with mpg as the outcome that
# includes number of cylinders as a factor variable and weight as confounder.
# Give the adjusted estimate for the expected change in mpg comparing 8
# cylinders to 4.
fit <- lm(mpg ~ factor(cyl) + wt, data = mtcars)
summary(fit)
##2
# Consider the mtcars data set. Fit a model with mpg as the outcome that
# includes number of cylinders as a factor variable and weight as a possible
# confounding variable. Compare the effect of 8 versus 4 cylinders on mpg for
# the adjusted and unadjusted by weight models. Here, adjusted means including
# the weight variable as a term in the regression model and unadjusted means
# the model without weight included. What can be said about the effect
# comparing 8 and 4 cylinders after looking at models with and without weight
# included?.
fit2 <- lm(mpg ~ factor(cyl), data = mtcars)
summary(fit2)
##3
# Consider the mtcars data set. Fit a model with mpg as the outcome that
# considers number of cylinders as a factor variable and weight as confounder.
# Now fit a second model with mpg as the outcome model that considers the
# interaction between number of cylinders (as a factor variable) and weight.
# Give the P-value for the likelihood ratio test comparing the two models and
# suggest a model using 0.05 as a type I error rate significance benchmark.
fit <- lm(mpg ~ factor(cyl) + wt, data = mtcars)
fit2 <- lm(mpg ~ factor(cyl)*wt, data = mtcars)
anova(fit,fit2)
##4
# Consider the mtcars data set. Fit a model with mpg as the outcome that
# includes number of cylinders as a factor variable and weight inlcuded in
# the model as
lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
# How is the wt coefficient interpretted?
# The estimated expected change in MPG per one ton increase in weight for a
# specific number of cylinders (4, 6, 8).
##5
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
# Give the hat diagonal for the most influential point
fit <- lm(y ~ x)
influence(fit)$hat
##6
# Consider the following data set
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
# Give the slope dfbeta for the point with the highest hat value.
fit <- lm(y~x)
dfbetas(fit)
##7
# Consider a regression relationship between Y and X with and without
# adjustment for a third variable Z. Which of the following is true about
# comparing the regression coefficient between Y and X with and without
# adjustment for Z.
# It is possible for the coefficient to reverse sign after adjustment. For
# example, it can be strongly significant and positive before adjustment and
# strongly significant and negative after adjustment.
|
# load packages, get data
# Getting & Cleaning Data course project: download the UCI HAR data set,
# keep only mean()/std() measurements, merge the train and test partitions,
# label activities and subjects, and write the per-subject/per-activity
# averages to "tidydata.txt".
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; this script assumes data.table and reshape2 are installed.
packages <- c("data.table", "reshape2")
sapply(packages, require, character.only=TRUE, quietly=TRUE)
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
# load labels & feats
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
                        , col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
                  , col.names = c("index", "featureNames"))
# keep only features whose name contains mean() or std()
features_wanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[features_wanted, featureNames]
# strip the literal parentheses so the names are valid column names
measurements <- gsub('[()]', '', measurements)
# train data: measurements, then prepend subject and activity columns
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, features_wanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
                         , col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
                       , col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)
# test data: same treatment as the training partition
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, features_wanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
                        , col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
                      , col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)
# merge data: stack train+test, then map activity codes to readable labels
combined <- rbind(train, test)
combined[["Activity"]] <- factor(combined[, Activity]
                                 , levels = activityLabels[["classLabels"]]
                                 , labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
# melt to long form, then cast back taking the MEAN of every measurement
# per subject/activity pair — the required tidy summary
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidydata.txt", quote = FALSE)
|
/run_analysis.R
|
no_license
|
wdang4/get_clean_data_proj
|
R
| false
| false
| 2,334
|
r
|
# load packages, get data
packages <- c("data.table", "reshape2")
sapply(packages, require, character.only=TRUE, quietly=TRUE)
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
# load labels & feats
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
, col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
, col.names = c("index", "featureNames"))
features_wanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[features_wanted, featureNames]
measurements <- gsub('[()]', '', measurements)
# train data
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, features_wanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
, col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
, col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)
# test data
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, features_wanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
, col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
, col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)
# merge data
combined <- rbind(train, test)
combined[["Activity"]] <- factor(combined[, Activity]
, levels = activityLabels[["classLabels"]]
, labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidydata.txt", quote = FALSE)
|
library(data.table)
library(ggplot2)
library(ggrepel)
## DEPRECATED
# see format_coloc_v2.R
# Merges per-gene/tissue/trait FINEMAP results with COLOC results for the
# LocalAA vs GlobalAA ancestry-adjustment comparison, caching the merged
# table as an .RData file so the expensive merge runs only once.
setwd('/oak/stanford/groups/smontgom/nicolerg/LAVA/REVISED_COLOC')
if(!file.exists('all_merged-egenes_coloc_finemap-1e-04-20200415.RData')){
	load('master_coloc-1e-04-20200414.RData') # filtered down to loci of interest (eGenes at 1e-4; different lead SNPs)
	finemap=fread(cmd='zcat all_finemap_results.tsv.gz', sep='\t', header=T)
	# Derive tissue and method ("global"/"local") from the eQTL file name.
	finemap[, tissue := sapply(eqtl_file, function(x) unname(unlist(strsplit(x, '_filtered')))[1])]
	finemap[, method := sapply(eqtl_file, function(x) ifelse(grepl('global',x), 'global','local'))]
	# Quick sanity-check tabulations (printed, not stored).
	table(finemap[,tissue])
	table(finemap[,method])
	finemap[,eqtl_file := NULL]
	# Strip the ".formatted..." suffix to get the bare GWAS trait name.
	finemap[,gwas := sapply(gwas_trait, function(x) gsub('\\.formatted.*','',x))]
	length(unique(finemap[,gwas]))
	finemap[,c('base_gwas_file','gwas_trait'):=NULL]
	finemap = finemap[order(tissue, feature, gwas)]
	setnames(finemap, "gwas", "gwas_trait")
	setnames(finemap, "feature", "gene_id")
	# Split FINEMAP results by ancestry-adjustment method.
	g = finemap[method=='global']
	l = finemap[method=='local']
	l[,method:=NULL]
	g[,method:=NULL]
	# pick best per gene/tissue/trait combination
	# ("best" = the row with the minimum CLPP; its SNP and p-values are kept)
	l = l[,list(ref_snp = ref_snp[which.min(clpp)],
		n_snps = n_snps[which.min(clpp)],
		clpp = min(clpp, na.rm=T),
		gwas_pval = `-log_gwas_pval`[which.min(clpp)],
		eqtl_pval = `-log_eqtl_pval`[which.min(clpp)]),
		by=c('gene_id','tissue','gwas_trait')]
	g = g[,list(ref_snp = ref_snp[which.min(clpp)],
		n_snps = n_snps[which.min(clpp)],
		clpp = min(clpp, na.rm=T),
		gwas_pval = `-log_gwas_pval`[which.min(clpp)],
		eqtl_pval = `-log_eqtl_pval`[which.min(clpp)]),
		by=c('gene_id','tissue','gwas_trait')]
	# Outer-join global vs local FINEMAP summaries.
	f = merge(g, l, by=c('gene_id','tissue','gwas_trait'), suffixes = c('_global','_local'), all=T)
	# are there duplicate tests for FINEMAP?
	nrow(f)
	nrow(unique(f))
	nrow(unique(f[,.(gene_id, tissue, gwas_trait)]))
	# merge with COLOC
	# Rename so COLOC and FINEMAP probability columns are distinguishable
	# after the merge.
	setnames(master_coloc, "clpp_h4_global", "coloc_h4_global")
	setnames(master_coloc, "clpp_h4_local", "coloc_h4_local")
	setnames(f, "clpp_global", "finemap_clpp_global")
	setnames(f, "clpp_local", "finemap_clpp_local")
	all_merged = merge(master_coloc, f, by=c('gene_id','tissue','gwas_trait'), suffixes=c('_coloc','_finemap'), all.x=T)
	# Do local and global FINEMAP pick the same lead SNP? (printed check)
	table(all_merged[,ref_snp_local_finemap] == all_merged[,ref_snp_global_finemap])
	save(all_merged, file='all_merged-egenes_coloc_finemap-1e-04-20200415.RData')
}else{
	# Cached merge already exists: just load it.
	load('all_merged-egenes_coloc_finemap-1e-04-20200415.RData')
}
# we already know the correlation between COLOC and FINEMAP is going to be very low
# # look at correlation between COLOC and FINEMAP
# pdf('~/global_coloc_compare.pdf',width=12,height=8)
# ggplot(all_merged, aes(x=coloc_h4_global, y=-log10(finemap_clpp_global))) +
# geom_point(alpha=0.3, aes(colour=tissue)) +
# geom_abline(linetype='dashed') +
# theme_classic() +
# labs(x='COLOC pp4',y='FINEMAP CLPP (-log10)',title='GlobalAA') +
# geom_text_repel(data=all_merged[-log10(finemap_clpp_global) > 4.2],
# aes(label=gwas_trait),
# nudge_x=0.2,
# hjust=0)
# dev.off()
#
# all_merged[-log10(finemap_clpp_global) > 4.2]
#
# # wow that's awful
# # what are some of these loci that have high probabilities of colocalization with only one method?
# master_coloc = all_merged
# ggplot(master_coloc, aes(x=coloc_h4_global, y=coloc_h4_local, colour=tissue)) +
#
# geom_point() +
# geom_hline(yintercept = 0.5) +
# geom_vline(xintercept = 0.5) +
#
# labs(x='GlobalAA colocalization probability (PP4)', y='LocalAA colocalization probability (PP4)') +
# theme_classic() +
# #guides(colour = guide_legend(override.aes=list(alpha=0.7,shape=19,size=3),ncol=6),fill=FALSE,label=F) +
# theme(panel.grid=element_blank(),
# legend.position='top',
# legend.justification='left',
# legend.title=element_blank(),
# legend.margin=margin(b = -2, unit='mm'),
# legend.direction='horizontal',
# axis.title.x=element_text(hjust=0.15),
# legend.text=element_text(size=11)
# # axis.title=element_text(size=14)
# ) +
# annotate("text",x=0.75,y=0.025,hjust=0.5,label='Stronger GlobalAA coloc.') +
# annotate("text",x=0.25,y=0.975,hjust=0.5,label='Stronger LocalAA coloc.')
|
/colocalization/merge_coloc_finemap.R
|
no_license
|
nicolerg/gtex-admixture-la
|
R
| false
| false
| 4,422
|
r
|
library(data.table)
library(ggplot2)
library(ggrepel)
## DEPRECATED
# see format_coloc_v2.R
#
# Merges per-gene FINEMAP colocalization results (CLPP) with COLOC results
# (PP4) for global vs local ancestry-adjusted eQTL calling, then caches the
# merged table as an .RData file so the expensive merge runs only once.
setwd('/oak/stanford/groups/smontgom/nicolerg/LAVA/REVISED_COLOC')
if(!file.exists('all_merged-egenes_coloc_finemap-1e-04-20200415.RData')){
    # NOTE(review): this .RData is assumed to define `master_coloc`
    # (a data.table keyed by gene/tissue/trait) -- confirm upstream.
    load('master_coloc-1e-04-20200414.RData') # filtered down to loci of interest (eGenes at 1e-4; different lead SNPs)
    finemap=fread(cmd='zcat all_finemap_results.tsv.gz', sep='\t', header=T)
    # Tissue is the eQTL file-name prefix before '_filtered'; the
    # ancestry-adjustment method is inferred from 'global' in the file name.
    finemap[, tissue := sapply(eqtl_file, function(x) unname(unlist(strsplit(x, '_filtered')))[1])]
    finemap[, method := sapply(eqtl_file, function(x) ifelse(grepl('global',x), 'global','local'))]
    # Sanity-check the derived columns (printed for interactive inspection).
    table(finemap[,tissue])
    table(finemap[,method])
    finemap[,eqtl_file := NULL]
    # Strip the '.formatted...' suffix to get a clean GWAS trait name.
    finemap[,gwas := sapply(gwas_trait, function(x) gsub('\\.formatted.*','',x))]
    length(unique(finemap[,gwas]))
    finemap[,c('base_gwas_file','gwas_trait'):=NULL]
    finemap = finemap[order(tissue, feature, gwas)]
    # Rename to match the column names used by master_coloc for the merge.
    setnames(finemap, "gwas", "gwas_trait")
    setnames(finemap, "feature", "gene_id")
    # Split by ancestry-adjustment method so global/local can be merged wide.
    g = finemap[method=='global']
    l = finemap[method=='local']
    l[,method:=NULL]
    g[,method:=NULL]
    # pick best per gene/tissue/trait combination
    # (keep the row with the minimum CLPP; companion columns come from
    # that same row via which.min(clpp))
    l = l[,list(ref_snp = ref_snp[which.min(clpp)],
            n_snps = n_snps[which.min(clpp)],
            clpp = min(clpp, na.rm=T),
            gwas_pval = `-log_gwas_pval`[which.min(clpp)],
            eqtl_pval = `-log_eqtl_pval`[which.min(clpp)]),
        by=c('gene_id','tissue','gwas_trait')]
    g = g[,list(ref_snp = ref_snp[which.min(clpp)],
            n_snps = n_snps[which.min(clpp)],
            clpp = min(clpp, na.rm=T),
            gwas_pval = `-log_gwas_pval`[which.min(clpp)],
            eqtl_pval = `-log_eqtl_pval`[which.min(clpp)]),
        by=c('gene_id','tissue','gwas_trait')]
    # Full outer join: keep tests seen by either method.
    f = merge(g, l, by=c('gene_id','tissue','gwas_trait'), suffixes = c('_global','_local'), all=T)
    # are there duplicate tests for FINEMAP?
    nrow(f)
    nrow(unique(f))
    nrow(unique(f[,.(gene_id, tissue, gwas_trait)]))
    # merge with COLOC
    # Rename so the two methods' probability columns are distinguishable
    # after the merge (coloc_* = COLOC PP4, finemap_* = FINEMAP CLPP).
    setnames(master_coloc, "clpp_h4_global", "coloc_h4_global")
    setnames(master_coloc, "clpp_h4_local", "coloc_h4_local")
    setnames(f, "clpp_global", "finemap_clpp_global")
    setnames(f, "clpp_local", "finemap_clpp_local")
    # Left join: keep every COLOC row, attach FINEMAP results where present.
    all_merged = merge(master_coloc, f, by=c('gene_id','tissue','gwas_trait'), suffixes=c('_coloc','_finemap'), all.x=T)
    # How often do global and local FINEMAP pick the same reference SNP?
    table(all_merged[,ref_snp_local_finemap] == all_merged[,ref_snp_global_finemap])
    save(all_merged, file='all_merged-egenes_coloc_finemap-1e-04-20200415.RData')
}else{
    load('all_merged-egenes_coloc_finemap-1e-04-20200415.RData')
}
# we already know the correlation between COLOC and FINEMAP is going to be very low
# # look at correlation between COLOC and FINEMAP
# pdf('~/global_coloc_compare.pdf',width=12,height=8)
# ggplot(all_merged, aes(x=coloc_h4_global, y=-log10(finemap_clpp_global))) +
# geom_point(alpha=0.3, aes(colour=tissue)) +
# geom_abline(linetype='dashed') +
# theme_classic() +
# labs(x='COLOC pp4',y='FINEMAP CLPP (-log10)',title='GlobalAA') +
# geom_text_repel(data=all_merged[-log10(finemap_clpp_global) > 4.2],
# aes(label=gwas_trait),
# nudge_x=0.2,
# hjust=0)
# dev.off()
#
# all_merged[-log10(finemap_clpp_global) > 4.2]
#
# # wow that's awful
# # what are some of these loci that have high probabilities of colocalization with only one method?
# master_coloc = all_merged
# ggplot(master_coloc, aes(x=coloc_h4_global, y=coloc_h4_local, colour=tissue)) +
#
# geom_point() +
# geom_hline(yintercept = 0.5) +
# geom_vline(xintercept = 0.5) +
#
# labs(x='GlobalAA colocalization probability (PP4)', y='LocalAA colocalization probability (PP4)') +
# theme_classic() +
# #guides(colour = guide_legend(override.aes=list(alpha=0.7,shape=19,size=3),ncol=6),fill=FALSE,label=F) +
# theme(panel.grid=element_blank(),
# legend.position='top',
# legend.justification='left',
# legend.title=element_blank(),
# legend.margin=margin(b = -2, unit='mm'),
# legend.direction='horizontal',
# axis.title.x=element_text(hjust=0.15),
# legend.text=element_text(size=11)
# # axis.title=element_text(size=14)
# ) +
# annotate("text",x=0.75,y=0.025,hjust=0.5,label='Stronger GlobalAA coloc.') +
# annotate("text",x=0.25,y=0.975,hjust=0.5,label='Stronger LocalAA coloc.')
|
library(lhs)
### Name: improvedLHS
### Title: Improved Latin Hypercube Sample
### Aliases: improvedLHS
### Keywords: design
### ** Examples
# Draw an improved Latin hypercube sample: n = 4 design points in k = 3
# dimensions, with dup = 2 candidate points considered per iteration.
# Returns a 4 x 3 matrix of values in [0, 1]; the result is random unless
# a seed is set beforehand.
improvedLHS(4, 3, 2)
|
/data/genthat_extracted_code/lhs/examples/improvedLHS.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 170
|
r
|
library(lhs)
### Name: improvedLHS
### Title: Improved Latin Hypercube Sample
### Aliases: improvedLHS
### Keywords: design
### ** Examples
# Draw an improved Latin hypercube sample: n = 4 design points in k = 3
# dimensions, with dup = 2 candidate points considered per iteration.
# Returns a 4 x 3 matrix of values in [0, 1]; the result is random unless
# a seed is set beforehand.
improvedLHS(4, 3, 2)
|
#' Marginal Response for a Single Variable
#'
#' Calculates the average model response as a function of a single selected variable.
#' Use the 'type' parameter to select the type of marginal response to be calculated.
#' Currently we have Partial Dependency and Accumulated Local Effects implemented.
#' Current implementation uses the 'pdp' package (Brandon M. Greenwell (2017).
#' pdp: An R Package for Constructing Partial Dependence Plots. The R Journal, 9(1), 421--436.)
#' and 'ALEPlot' (Dan Apley (2017). ALEPlot: Accumulated Local Effects Plots and Partial Dependence Plots.)
#'
#'
#' @param explainer a model to be explained, preprocessed by the 'explain' function
#' @param variable character - name of a single variable
#' @param type character - type of the response to be calculated.
#' Currently following options are implemented: 'pdp' for Partial Dependency and 'ale' for Accumulated Local Effects
#' @param trans function - a transformation/link function that shall be applied to raw model predictions
#' @param ... other parameters
#'
#' @return An object of the class 'single_variable_explainer'.
#' It's a data frame with calculated average response.
#'
#' @export
#' @importFrom pdp partial
#' @importFrom ALEPlot ALEPlot
#'
#' @examples
#' library("randomForest")
#' library("breakDown")
#' logit <- function(x) exp(x)/(1+exp(x))
#'
#' HR_glm_model <- glm(left~., data = breakDown::HR_data, family = "binomial")
#' explainer_glm <- explain(HR_glm_model, data = HR_data)
#' expl_glm <- single_variable(explainer_glm, "satisfaction_level", "pdp", trans=logit)
#' expl_glm
#'
#' HR_rf_model <- randomForest(left~., data = breakDown::HR_data, ntree = 100)
#' explainer_rf <- explain(HR_rf_model, data = HR_data)
#' expl_rf <- single_variable(explainer_rf, variable = "satisfaction_level", type = "pdp")
#' expl_rf
#'
single_variable <- function(explainer, variable, type = "pdp", trans = I, ...) {
  # Validate inputs early: we need a proper explainer object and it must carry
  # the training data used to evaluate the marginal response.
  if (!inherits(explainer, "explainer")) stop("The single_variable() function requires an object created with explain() function.")
  if (is.null(explainer$data)) stop("The single_variable() function requires explainers created with specified 'data' parameter.")
  switch(type,
         pdp = {
           # Partial dependence via pdp::partial(); the first column of the
           # result holds the grid of `variable` values, `yhat` the averaged
           # model response, which is passed through the link function `trans`.
           part <- partial(explainer$model, pred.var = variable, train = explainer$data, ...)
           res <- data.frame(x = part[, 1], y = trans(part$yhat), var = variable, type = type, label = explainer$label)
           class(res) <- c("single_variable_explainer", "data.frame", "pdp")
           res
         },
         ale = {
           # ALEPlot() always draws a plot; redirect it to a throwaway PDF so
           # nothing is rendered. on.exit() guarantees the device is closed and
           # the temp file removed even if ALEPlot() errors part-way, so no
           # graphics device is leaked.
           tmpfn <- tempfile()
           pdf(tmpfn)
           on.exit({
             dev.off()
             unlink(tmpfn)
           }, add = TRUE)
           # NOTE(review): `yhat` is assumed to be a prediction wrapper
           # defined elsewhere in the package -- confirm.
           part <- ALEPlot(X = explainer$data, X.model = explainer$model, yhat, J = variable)
           res <- data.frame(x = part$x.values, y = trans(part$f.values), var = variable, type = type, label = explainer$label)
           class(res) <- c("single_variable_explainer", "data.frame", "ale")
           res
         },
         stop("Currently only 'pdp' and 'ale' methods are implemented"))
}
|
/R/single_variable.R
|
no_license
|
benzei/DALEX
|
R
| false
| false
| 3,148
|
r
|
#' Marginal Response for a Single Variable
#'
#' Calculates the average model response as a function of a single selected variable.
#' Use the 'type' parameter to select the type of marginal response to be calculated.
#' Currently we have Partial Dependency and Accumulated Local Effects implemented.
#' Current implementation uses the 'pdp' package (Brandon M. Greenwell (2017).
#' pdp: An R Package for Constructing Partial Dependence Plots. The R Journal, 9(1), 421--436.)
#' and 'ALEPlot' (Dan Apley (2017). ALEPlot: Accumulated Local Effects Plots and Partial Dependence Plots.)
#'
#'
#' @param explainer a model to be explained, preprocessed by the 'explain' function
#' @param variable character - name of a single variable
#' @param type character - type of the response to be calculated.
#' Currently following options are implemented: 'pdp' for Partial Dependency and 'ale' for Accumulated Local Effects
#' @param trans function - a transformation/link function that shall be applied to raw model predictions
#' @param ... other parameters
#'
#' @return An object of the class 'single_variable_explainer'.
#' It's a data frame with calculated average response.
#'
#' @export
#' @importFrom pdp partial
#' @importFrom ALEPlot ALEPlot
#'
#' @examples
#' library("randomForest")
#' library("breakDown")
#' logit <- function(x) exp(x)/(1+exp(x))
#'
#' HR_glm_model <- glm(left~., data = breakDown::HR_data, family = "binomial")
#' explainer_glm <- explain(HR_glm_model, data = HR_data)
#' expl_glm <- single_variable(explainer_glm, "satisfaction_level", "pdp", trans=logit)
#' expl_glm
#'
#' HR_rf_model <- randomForest(left~., data = breakDown::HR_data, ntree = 100)
#' explainer_rf <- explain(HR_rf_model, data = HR_data)
#' expl_rf <- single_variable(explainer_rf, variable = "satisfaction_level", type = "pdp")
#' expl_rf
#'
single_variable <- function(explainer, variable, type = "pdp", trans = I, ...) {
  # Validate inputs early: we need a proper explainer object and it must carry
  # the training data used to evaluate the marginal response.
  if (!inherits(explainer, "explainer")) stop("The single_variable() function requires an object created with explain() function.")
  if (is.null(explainer$data)) stop("The single_variable() function requires explainers created with specified 'data' parameter.")
  switch(type,
         pdp = {
           # Partial dependence via pdp::partial(); the first column of the
           # result holds the grid of `variable` values, `yhat` the averaged
           # model response, which is passed through the link function `trans`.
           part <- partial(explainer$model, pred.var = variable, train = explainer$data, ...)
           res <- data.frame(x = part[, 1], y = trans(part$yhat), var = variable, type = type, label = explainer$label)
           class(res) <- c("single_variable_explainer", "data.frame", "pdp")
           res
         },
         ale = {
           # ALEPlot() always draws a plot; redirect it to a throwaway PDF so
           # nothing is rendered. on.exit() guarantees the device is closed and
           # the temp file removed even if ALEPlot() errors part-way, so no
           # graphics device is leaked.
           tmpfn <- tempfile()
           pdf(tmpfn)
           on.exit({
             dev.off()
             unlink(tmpfn)
           }, add = TRUE)
           # NOTE(review): `yhat` is assumed to be a prediction wrapper
           # defined elsewhere in the package -- confirm.
           part <- ALEPlot(X = explainer$data, X.model = explainer$model, yhat, J = variable)
           res <- data.frame(x = part$x.values, y = trans(part$f.values), var = variable, type = type, label = explainer$label)
           class(res) <- c("single_variable_explainer", "data.frame", "ale")
           res
         },
         stop("Currently only 'pdp' and 'ale' methods are implemented"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pack.R
\name{pack}
\alias{pack}
\alias{unpack}
\title{Pack and unpack}
\usage{
pack(.data, ..., .names_sep = NULL, .error_call = current_env())
unpack(
data,
cols,
...,
names_sep = NULL,
names_repair = "check_unique",
error_call = current_env()
)
}
\arguments{
\item{...}{For \code{pack()}, <\code{\link[=tidyr_tidy_select]{tidy-select}}> columns to
pack, specified using name-variable pairs of the form
\code{new_col = c(col1, col2, col3)}. The right hand side can be any valid tidy
select expression.
For \code{unpack()}, these dots are for future extensions and must be empty.}
\item{data, .data}{A data frame.}
\item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to unpack.}
\item{names_sep, .names_sep}{If \code{NULL}, the default, the names will be left
as is. In \code{pack()}, inner names will come from the former outer names;
in \code{unpack()}, the new outer names will come from the inner names.
If a string, the inner and outer names will be used together. In
\code{unpack()}, the names of the new outer columns will be formed by pasting
together the outer and the inner column names, separated by \code{names_sep}. In
\code{pack()}, the new inner names will have the outer names + \code{names_sep}
automatically stripped. This makes \code{names_sep} roughly symmetric between
packing and unpacking.}
\item{names_repair}{Used to check that output data frame has valid
names. Must be one of the following options:
\itemize{
\item \verb{"minimal}": no name repair or checks, beyond basic existence,
\item \verb{"unique}": make sure names are unique and not empty,
\item \verb{"check_unique}": (the default), no name repair, but check they are unique,
\item \verb{"universal}": make the names unique and syntactic
\item a function: apply custom name repair.
\item \link{tidyr_legacy}: use the name repair from tidyr 0.8.
\item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}})
}
See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the
strategies used to enforce them.}
\item{error_call, .error_call}{The execution environment of a currently
running function, e.g. \code{caller_env()}. The function will be
mentioned in error messages as the source of the error. See the
\code{call} argument of \code{\link[rlang:abort]{abort()}} for more information.}
}
\description{
Packing and unpacking preserve the length of a data frame, changing its
width. \code{pack()} makes \code{df} narrow by collapsing a set of columns into a
single df-column. \code{unpack()} makes \code{data} wider by expanding df-columns
back out into individual columns.
}
\details{
Generally, unpacking is more useful than packing because it simplifies
a complex data structure. Currently, few functions work with df-cols,
and they are mostly a curiosity, but seem worth exploring further because
they mimic the nested column headers that are so popular in Excel.
}
\examples{
# Packing -------------------------------------------------------------------
# It's not currently clear why you would ever want to pack columns
# since few functions work with this sort of data.
df <- tibble(x1 = 1:3, x2 = 4:6, x3 = 7:9, y = 1:3)
df
df \%>\% pack(x = starts_with("x"))
df \%>\% pack(x = c(x1, x2, x3), y = y)
# .names_sep allows you to strip off common prefixes; this
# acts as a natural inverse to name_sep in unpack()
iris \%>\%
as_tibble() \%>\%
pack(
Sepal = starts_with("Sepal"),
Petal = starts_with("Petal"),
.names_sep = "."
)
# Unpacking -----------------------------------------------------------------
df <- tibble(
x = 1:3,
y = tibble(a = 1:3, b = 3:1),
z = tibble(X = c("a", "b", "c"), Y = runif(3), Z = c(TRUE, FALSE, NA))
)
df
df \%>\% unpack(y)
df \%>\% unpack(c(y, z))
df \%>\% unpack(c(y, z), names_sep = "_")
}
|
/man/pack.Rd
|
permissive
|
tidyverse/tidyr
|
R
| false
| true
| 3,941
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pack.R
\name{pack}
\alias{pack}
\alias{unpack}
\title{Pack and unpack}
\usage{
pack(.data, ..., .names_sep = NULL, .error_call = current_env())
unpack(
data,
cols,
...,
names_sep = NULL,
names_repair = "check_unique",
error_call = current_env()
)
}
\arguments{
\item{...}{For \code{pack()}, <\code{\link[=tidyr_tidy_select]{tidy-select}}> columns to
pack, specified using name-variable pairs of the form
\code{new_col = c(col1, col2, col3)}. The right hand side can be any valid tidy
select expression.
For \code{unpack()}, these dots are for future extensions and must be empty.}
\item{data, .data}{A data frame.}
\item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to unpack.}
\item{names_sep, .names_sep}{If \code{NULL}, the default, the names will be left
as is. In \code{pack()}, inner names will come from the former outer names;
in \code{unpack()}, the new outer names will come from the inner names.
If a string, the inner and outer names will be used together. In
\code{unpack()}, the names of the new outer columns will be formed by pasting
together the outer and the inner column names, separated by \code{names_sep}. In
\code{pack()}, the new inner names will have the outer names + \code{names_sep}
automatically stripped. This makes \code{names_sep} roughly symmetric between
packing and unpacking.}
\item{names_repair}{Used to check that output data frame has valid
names. Must be one of the following options:
\itemize{
\item \verb{"minimal}": no name repair or checks, beyond basic existence,
\item \verb{"unique}": make sure names are unique and not empty,
\item \verb{"check_unique}": (the default), no name repair, but check they are unique,
\item \verb{"universal}": make the names unique and syntactic
\item a function: apply custom name repair.
\item \link{tidyr_legacy}: use the name repair from tidyr 0.8.
\item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}})
}
See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the
strategies used to enforce them.}
\item{error_call, .error_call}{The execution environment of a currently
running function, e.g. \code{caller_env()}. The function will be
mentioned in error messages as the source of the error. See the
\code{call} argument of \code{\link[rlang:abort]{abort()}} for more information.}
}
\description{
Packing and unpacking preserve the length of a data frame, changing its
width. \code{pack()} makes \code{df} narrow by collapsing a set of columns into a
single df-column. \code{unpack()} makes \code{data} wider by expanding df-columns
back out into individual columns.
}
\details{
Generally, unpacking is more useful than packing because it simplifies
a complex data structure. Currently, few functions work with df-cols,
and they are mostly a curiosity, but seem worth exploring further because
they mimic the nested column headers that are so popular in Excel.
}
\examples{
# Packing -------------------------------------------------------------------
# It's not currently clear why you would ever want to pack columns
# since few functions work with this sort of data.
df <- tibble(x1 = 1:3, x2 = 4:6, x3 = 7:9, y = 1:3)
df
df \%>\% pack(x = starts_with("x"))
df \%>\% pack(x = c(x1, x2, x3), y = y)
# .names_sep allows you to strip off common prefixes; this
# acts as a natural inverse to name_sep in unpack()
iris \%>\%
as_tibble() \%>\%
pack(
Sepal = starts_with("Sepal"),
Petal = starts_with("Petal"),
.names_sep = "."
)
# Unpacking -----------------------------------------------------------------
df <- tibble(
x = 1:3,
y = tibble(a = 1:3, b = 3:1),
z = tibble(X = c("a", "b", "c"), Y = runif(3), Z = c(TRUE, FALSE, NA))
)
df
df \%>\% unpack(y)
df \%>\% unpack(c(y, z))
df \%>\% unpack(c(y, z), names_sep = "_")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unwrapLon.r
\name{unwrapLon}
\alias{unwrapLon}
\title{unwrapLon}
\usage{
unwrapLon(lon, lmin = -180)
}
\description{
unwrapLon
}
\details{
Internal function not normally called by user
}
|
/R/duckConfit/man/unwrapLon.Rd
|
no_license
|
SCAR/RAATD
|
R
| false
| true
| 265
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unwrapLon.r
\name{unwrapLon}
\alias{unwrapLon}
\title{unwrapLon}
\usage{
unwrapLon(lon, lmin = -180)
}
\description{
unwrapLon
}
\details{
Internal function not normally called by user
}
|
# FireCloud
#
# FireCloud API
#
# The version of the OpenAPI document: 0.1
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title CuratorStatus
#'
#' @description CuratorStatus Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field curator character
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Auto-generated (openapi-generator) model class wrapping a single field
# `curator`. Do not hand-edit the serialization format: toJSONString() embeds
# literal newlines/indentation via its sprintf template, and downstream
# consumers may depend on that exact layout.
CuratorStatus <- R6::R6Class(
  'CuratorStatus',
  public = list(
    # Value of the 'curator' field; documented as character, but the class
    # performs no type validation.
    `curator` = NULL,
    # Constructor. `curator` is optional (missing() check); extra named
    # arguments are collected into local.optional.var and ignored.
    initialize = function(
        `curator`, ...
    ) {
      local.optional.var <- list(...)
      if (!missing(`curator`)) {
        self$`curator` <- `curator`
      }
    },
    # Convert to a plain named list suitable for jsonlite::toJSON();
    # NULL fields are omitted.
    toJSON = function() {
      CuratorStatusObject <- list()
      if (!is.null(self$`curator`)) {
        CuratorStatusObject[['curator']] <-
          self$`curator`
      }
      CuratorStatusObject
    },
    # Populate this instance from a JSON string; returns self for chaining.
    fromJSON = function(CuratorStatusJson) {
      CuratorStatusObject <- jsonlite::fromJSON(CuratorStatusJson)
      if (!is.null(CuratorStatusObject$`curator`)) {
        self$`curator` <- CuratorStatusObject$`curator`
      }
      self
    },
    # Hand-rolled JSON serialization via sprintf; the template's embedded
    # newlines are intentional and must be preserved byte-for-byte.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`curator`)) {
          sprintf(
          '"curator":
            "%s"
                    ',
          self$`curator`
          )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Like fromJSON() but assigns unconditionally (no NULL guard);
    # returns self for chaining.
    fromJSONString = function(CuratorStatusJson) {
      CuratorStatusObject <- jsonlite::fromJSON(CuratorStatusJson)
      self$`curator` <- CuratorStatusObject$`curator`
      self
    }
  )
)
|
/R/curator_status.R
|
no_license
|
vjcitn/terraClientR
|
R
| false
| false
| 1,609
|
r
|
# FireCloud
#
# FireCloud API
#
# The version of the OpenAPI document: 0.1
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title CuratorStatus
#'
#' @description CuratorStatus Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field curator character
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Auto-generated (openapi-generator) model class wrapping a single field
# `curator`. Do not hand-edit the serialization format: toJSONString() embeds
# literal newlines/indentation via its sprintf template, and downstream
# consumers may depend on that exact layout.
CuratorStatus <- R6::R6Class(
  'CuratorStatus',
  public = list(
    # Value of the 'curator' field; documented as character, but the class
    # performs no type validation.
    `curator` = NULL,
    # Constructor. `curator` is optional (missing() check); extra named
    # arguments are collected into local.optional.var and ignored.
    initialize = function(
        `curator`, ...
    ) {
      local.optional.var <- list(...)
      if (!missing(`curator`)) {
        self$`curator` <- `curator`
      }
    },
    # Convert to a plain named list suitable for jsonlite::toJSON();
    # NULL fields are omitted.
    toJSON = function() {
      CuratorStatusObject <- list()
      if (!is.null(self$`curator`)) {
        CuratorStatusObject[['curator']] <-
          self$`curator`
      }
      CuratorStatusObject
    },
    # Populate this instance from a JSON string; returns self for chaining.
    fromJSON = function(CuratorStatusJson) {
      CuratorStatusObject <- jsonlite::fromJSON(CuratorStatusJson)
      if (!is.null(CuratorStatusObject$`curator`)) {
        self$`curator` <- CuratorStatusObject$`curator`
      }
      self
    },
    # Hand-rolled JSON serialization via sprintf; the template's embedded
    # newlines are intentional and must be preserved byte-for-byte.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`curator`)) {
          sprintf(
          '"curator":
            "%s"
                    ',
          self$`curator`
          )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Like fromJSON() but assigns unconditionally (no NULL guard);
    # returns self for chaining.
    fromJSONString = function(CuratorStatusJson) {
      CuratorStatusObject <- jsonlite::fromJSON(CuratorStatusJson)
      self$`curator` <- CuratorStatusObject$`curator`
      self
    }
  )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AlleleComp.R
\name{AlleleComp}
\alias{AlleleComp}
\title{Copynumber estimation}
\usage{
AlleleComp(data, AD, file.name, method, uniform.break)
}
\arguments{
\item{data}{A \code{vcfR} object of the sequencing calls.}
\item{AD}{a \code{character} denoting \emph{ID} for depth of the reference allele.
This is often separately present in the VCF file. Default is \code{NULL}.}
\item{file.name}{an optional \code{character} to define output file name. Default is \emph{tumor.sample}.}
\item{method}{Algorithm to be used for copy number calculations. Options include "apriori" which
uses \code{\link{CopySeg_sequenza}} and "naive" using \code{\link{CopySeg_falcon}}.}
\item{uniform.break}{A numeric value signifying fixed length of the genomic window. Each window
is considered as distinct chromosomal segment with edges being the break points for copy number
estimation. A good window length is 1Mb (i.e. 1e6)}
}
\value{
A transformed \code{dataframe} usable in \emph{CloneStrat} that represents data on all variants
in the .vcf file. It returns summaries on the variants with the column \emph{CN.profile} depicting
the estimated allelic compositions.
}
\description{
Allelic segmentations are estimated for one sample at a time with unfiltered sequencing calls.
}
\details{
The function writes a \emph{.txt} data in working directory with the name defined in \code{file.name} used by \emph{sequenza}.
The output file written can be used in conjunction with a post-variant-call sequence file. These can be merged and used for further analysis
with \code{\link{cluster.doc}} or \code{\link{seqn.scale}}
}
\examples{
\donttest{#AlleleComp(data = x, AD = "AD", method = "naive")}
}
\seealso{
\code{\link{segment.plot}}
}
|
/man/AlleleComp.Rd
|
no_license
|
Subhayan18/CRUST
|
R
| false
| true
| 1,795
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AlleleComp.R
\name{AlleleComp}
\alias{AlleleComp}
\title{Copynumber estimation}
\usage{
AlleleComp(data, AD, file.name, method, uniform.break)
}
\arguments{
\item{data}{A \code{vcfR} object of the sequencing calls.}
\item{AD}{a \code{character} denoting \emph{ID} for depth of the reference allele.
This is often separately present in the VCF file. Default is \code{NULL}.}
\item{file.name}{an optional \code{character} to define output file name. Default is \emph{tumor.sample}.}
\item{method}{Algorithm to be used for copy number calculations. Options include "apriori" which
uses \code{\link{CopySeg_sequenza}} and "naive" using \code{\link{CopySeg_falcon}}.}
\item{uniform.break}{A numeric value signifying fixed length of the genomic window. Each window
is considered as distinct chromosomal segment with edges being the break points for copy number
estimation. A good window length is 1Mb (i.e. 1e6)}
}
\value{
A transformed \code{dataframe} usable in \emph{CloneStrat} that represents data on all variants
in the .vcf file. It returns summaries on the variants with the column \emph{CN.profile} depicting
the estimated allelic compositions.
}
\description{
Allelic segmentations are estimated for one sample at a time with unfiltered sequencing calls.
}
\details{
The function writes a \emph{.txt} data in working directory with the name defined in \code{file.name} used by \emph{sequenza}.
The output file written can be used in conjunction with a post-variant-call sequence file. These can be merged and used for further analysis
with \code{\link{cluster.doc}} or \code{\link{seqn.scale}}
}
\examples{
\donttest{#AlleleComp(data = x, AD = "AD", method = "naive")}
}
\seealso{
\code{\link{segment.plot}}
}
|
# Brett W.
# LIS4761 - Data Mining
# Sentiment Analysis
#
# Scores the text of Dr. King's "I Have a Dream" speech against the Hu & Liu
# opinion lexicon: overall positive/negative word ratios, then the same
# ratios for four equal slices of the term list, plotted as a bar chart.

# Necessary libraries (library() errors immediately if a package is missing,
# unlike require(), which only returns FALSE).
library(readr)
library(tm)
library(XML)
library(ggplot2)

# Read in the lists of positive and negative opinion words (one per line).
pos <- "/home/brett/LIS4761_Data_Mining/data/opinion-lexicon-English/positive-words.txt"
neg <- "/home/brett/LIS4761_Data_Mining/data/opinion-lexicon-English/negative-words.txt"
p <- scan(pos, character(0), sep = "\n")
n <- scan(neg, character(0), sep = "\n")

# Load in the speech file: read and parse the HTML page.
doc.html <- htmlTreeParse('http://www.analytictech.com/mb021/mlk.htm', useInternal = TRUE)

# Extract all the paragraphs (HTML tag is p, starting at the root of the
# document). unlist() flattens the list into a character vector.
doc.text <- unlist(xpathApply(doc.html, '//p', xmlValue))

# Replace all \n and \r by spaces.
doc.text <- gsub('\\n', ' ', doc.text)
doc.text <- gsub('\\r', ' ', doc.text)

# Create the corpus.
words.vec <- VectorSource(doc.text)
words.corpus <- Corpus(words.vec)
words.corpus

# Create the term-document matrix and per-term occurrence counts.
tdm <- TermDocumentMatrix(words.corpus)
tdm
m <- as.matrix(tdm)
wordCounts <- rowSums(m)

# Determine what percentage of the speech was positive.
totalOccurrences <- sum(wordCounts)  # total word occurrences (for reference)
words <- names(wordCounts)
matched <- match(words, p, nomatch = 0)
mCounts <- wordCounts[which(matched != 0)]
length(mCounts)
mWords <- names(mCounts)
nPos <- sum(mCounts)
nPos

# Determine what percentage of the speech was negative.
matched <- match(words, n, nomatch = 0)
nCounts <- wordCounts[which(matched != 0)]
nNeg <- sum(nCounts)
nWords <- names(nCounts)
nNeg
length(nCounts)

# Final percentages, relative to the number of distinct terms.
totalWords <- length(words)
ratioPos <- nPos / totalWords
ratioPos
ratioNeg <- nNeg / totalWords
ratioNeg
totalRatio <- nPos / nNeg
totalRatio

# Visualization of positive and negative words by section of the speech.
# NOTE(review): the slice boundaries assume the TDM yields at least 332
# distinct terms -- confirm for other inputs. (R is 1-indexed; the original
# 0:83 slice silently behaved like 1:83 because x[0] selects nothing.)
firstPart <- wordCounts[1:83]
secondPart <- wordCounts[84:166]
thirdPart <- wordCounts[167:249]
fourthPart <- wordCounts[250:332]

# First section of the speech.
firstWords <- names(firstPart)
matched <- match(firstWords, p, nomatch = 0)
m1Counts <- firstPart[which(matched != 0)]
length(m1Counts)
m1Words <- names(m1Counts)
n1Pos <- sum(m1Counts)
n1Pos
matched <- match(firstWords, n, nomatch = 0)
# BUG FIX: the negative-word mask for section 1 was applied to secondPart;
# it must index firstPart, the section the mask was computed from.
n1Counts <- firstPart[which(matched != 0)]
n1Neg <- sum(n1Counts)
n1Words <- names(n1Counts)
n1Neg

# Second part of the speech.
secondWords <- names(secondPart)
matched <- match(secondWords, p, nomatch = 0)
m2Counts <- secondPart[which(matched != 0)]
length(m2Counts)
m2Words <- names(m2Counts)
n2Pos <- sum(m2Counts)
n2Pos
matched <- match(secondWords, n, nomatch = 0)
n2Counts <- secondPart[which(matched != 0)]
n2Neg <- sum(n2Counts)
n2Words <- names(n2Counts)
n2Neg

# Third part of the speech.
thirdWords <- names(thirdPart)
matched <- match(thirdWords, p, nomatch = 0)
m3Counts <- thirdPart[which(matched != 0)]
length(m3Counts)
m3Words <- names(m3Counts)
n3Pos <- sum(m3Counts)
n3Pos
matched <- match(thirdWords, n, nomatch = 0)
n3Counts <- thirdPart[which(matched != 0)]
n3Neg <- sum(n3Counts)
n3Words <- names(n3Counts)
n3Neg

# Fourth part of the speech.
fourthWords <- names(fourthPart)
matched <- match(fourthWords, p, nomatch = 0)
m4Counts <- fourthPart[which(matched != 0)]
length(m4Counts)
m4Words <- names(m4Counts)
n4Pos <- sum(m4Counts)
n4Pos
matched <- match(fourthWords, n, nomatch = 0)
n4Counts <- fourthPart[which(matched != 0)]
n4Neg <- sum(n4Counts)
n4Words <- names(n4Counts)
n4Neg

# Calculate positive-to-negative ratios for each section of the speech.
ratioPos1 <- n1Pos / totalWords
ratioNeg1 <- n1Neg / totalWords
ratio1 <- ratioPos1 / ratioNeg1
ratio1
ratioPos2 <- n2Pos / totalWords
ratioNeg2 <- n2Neg / totalWords
ratio2 <- ratioPos2 / ratioNeg2
ratioPos3 <- n3Pos / totalWords
ratioNeg3 <- n3Neg / totalWords
ratio3 <- ratioPos3 / ratioNeg3
ratioPos4 <- n4Pos / totalWords
ratioNeg4 <- n4Neg / totalWords
ratio4 <- ratioPos4 / ratioNeg4

# Vector of the four per-section ratios.
ratios <- c(ratio1, ratio2, ratio3, ratio4)

# Visualization of positive to negative sentiment across the speech.
# (Fixed label typo: "Negeative" -> "Negative".)
barplot(ratios, main = "Semantics of Dr. King Speech", sub = "based on speech from: http://www.analytictech.com/mb021/mlk.htm", xlab = "Section of the Speech", ylab = "Positive to Negative Ratio")
|
/sentimentAnalysis.R
|
no_license
|
WaughB/LIS4761_Data_Mining
|
R
| false
| false
| 4,286
|
r
|
# Brett W.
# LIS4761 - Data Mining
# Sentiment Analysis
#
# Scores the text of Dr. King's "I Have a Dream" speech against the Hu & Liu
# opinion lexicon: overall positive/negative word ratios, then the same
# ratios for four equal slices of the term list, plotted as a bar chart.

# Necessary libraries (library() errors immediately if a package is missing,
# unlike require(), which only returns FALSE).
library(readr)
library(tm)
library(XML)
library(ggplot2)

# Read in the lists of positive and negative opinion words (one per line).
pos <- "/home/brett/LIS4761_Data_Mining/data/opinion-lexicon-English/positive-words.txt"
neg <- "/home/brett/LIS4761_Data_Mining/data/opinion-lexicon-English/negative-words.txt"
p <- scan(pos, character(0), sep = "\n")
n <- scan(neg, character(0), sep = "\n")

# Load in the speech file: read and parse the HTML page.
doc.html <- htmlTreeParse('http://www.analytictech.com/mb021/mlk.htm', useInternal = TRUE)

# Extract all the paragraphs (HTML tag is p, starting at the root of the
# document). unlist() flattens the list into a character vector.
doc.text <- unlist(xpathApply(doc.html, '//p', xmlValue))

# Replace all \n and \r by spaces.
doc.text <- gsub('\\n', ' ', doc.text)
doc.text <- gsub('\\r', ' ', doc.text)

# Create the corpus.
words.vec <- VectorSource(doc.text)
words.corpus <- Corpus(words.vec)
words.corpus

# Create the term-document matrix and per-term occurrence counts.
tdm <- TermDocumentMatrix(words.corpus)
tdm
m <- as.matrix(tdm)
wordCounts <- rowSums(m)

# Determine what percentage of the speech was positive.
totalOccurrences <- sum(wordCounts)  # total word occurrences (for reference)
words <- names(wordCounts)
matched <- match(words, p, nomatch = 0)
mCounts <- wordCounts[which(matched != 0)]
length(mCounts)
mWords <- names(mCounts)
nPos <- sum(mCounts)
nPos

# Determine what percentage of the speech was negative.
matched <- match(words, n, nomatch = 0)
nCounts <- wordCounts[which(matched != 0)]
nNeg <- sum(nCounts)
nWords <- names(nCounts)
nNeg
length(nCounts)

# Final percentages, relative to the number of distinct terms.
totalWords <- length(words)
ratioPos <- nPos / totalWords
ratioPos
ratioNeg <- nNeg / totalWords
ratioNeg
totalRatio <- nPos / nNeg
totalRatio

# Visualization of positive and negative words by section of the speech.
# NOTE(review): the slice boundaries assume the TDM yields at least 332
# distinct terms -- confirm for other inputs. (R is 1-indexed; the original
# 0:83 slice silently behaved like 1:83 because x[0] selects nothing.)
firstPart <- wordCounts[1:83]
secondPart <- wordCounts[84:166]
thirdPart <- wordCounts[167:249]
fourthPart <- wordCounts[250:332]

# First section of the speech.
firstWords <- names(firstPart)
matched <- match(firstWords, p, nomatch = 0)
m1Counts <- firstPart[which(matched != 0)]
length(m1Counts)
m1Words <- names(m1Counts)
n1Pos <- sum(m1Counts)
n1Pos
matched <- match(firstWords, n, nomatch = 0)
# BUG FIX: the negative-word mask for section 1 was applied to secondPart;
# it must index firstPart, the section the mask was computed from.
n1Counts <- firstPart[which(matched != 0)]
n1Neg <- sum(n1Counts)
n1Words <- names(n1Counts)
n1Neg

# Second part of the speech.
secondWords <- names(secondPart)
matched <- match(secondWords, p, nomatch = 0)
m2Counts <- secondPart[which(matched != 0)]
length(m2Counts)
m2Words <- names(m2Counts)
n2Pos <- sum(m2Counts)
n2Pos
matched <- match(secondWords, n, nomatch = 0)
n2Counts <- secondPart[which(matched != 0)]
n2Neg <- sum(n2Counts)
n2Words <- names(n2Counts)
n2Neg

# Third part of the speech.
thirdWords <- names(thirdPart)
matched <- match(thirdWords, p, nomatch = 0)
m3Counts <- thirdPart[which(matched != 0)]
length(m3Counts)
m3Words <- names(m3Counts)
n3Pos <- sum(m3Counts)
n3Pos
matched <- match(thirdWords, n, nomatch = 0)
n3Counts <- thirdPart[which(matched != 0)]
n3Neg <- sum(n3Counts)
n3Words <- names(n3Counts)
n3Neg

# Fourth part of the speech.
fourthWords <- names(fourthPart)
matched <- match(fourthWords, p, nomatch = 0)
m4Counts <- fourthPart[which(matched != 0)]
length(m4Counts)
m4Words <- names(m4Counts)
n4Pos <- sum(m4Counts)
n4Pos
matched <- match(fourthWords, n, nomatch = 0)
n4Counts <- fourthPart[which(matched != 0)]
n4Neg <- sum(n4Counts)
n4Words <- names(n4Counts)
n4Neg

# Calculate positive-to-negative ratios for each section of the speech.
ratioPos1 <- n1Pos / totalWords
ratioNeg1 <- n1Neg / totalWords
ratio1 <- ratioPos1 / ratioNeg1
ratio1
ratioPos2 <- n2Pos / totalWords
ratioNeg2 <- n2Neg / totalWords
ratio2 <- ratioPos2 / ratioNeg2
ratioPos3 <- n3Pos / totalWords
ratioNeg3 <- n3Neg / totalWords
ratio3 <- ratioPos3 / ratioNeg3
ratioPos4 <- n4Pos / totalWords
ratioNeg4 <- n4Neg / totalWords
ratio4 <- ratioPos4 / ratioNeg4

# Vector of the four per-section ratios.
ratios <- c(ratio1, ratio2, ratio3, ratio4)

# Visualization of positive to negative sentiment across the speech.
# (Fixed label typo: "Negeative" -> "Negative".)
barplot(ratios, main = "Semantics of Dr. King Speech", sub = "based on speech from: http://www.analytictech.com/mb021/mlk.htm", xlab = "Section of the Speech", ylab = "Positive to Negative Ratio")
|
#9.9, SVM example
# Train a support vector machine on the bank marketing data and measure
# hold-out accuracy, first with all predictors and then with education only.
# NOTE(review): install.packages() at the top of a script re-installs the
# package on every run; normally run once interactively instead.
install.packages("kernlab")
library(kernlab)
# `datafile` must point at the bank marketing CSV before this script is run.
bank_data <- read.csv(datafile, sep = ",", header = TRUE)  # fix: was sep',' (syntax error)
str(bank_data)
summary(bank_data)
n_rows <- nrow(bank_data)  # fix: nrows() is not an R function
# Use 2/3 of the rows for training and the remaining 1/3 for testing.
cutpoint <- floor(n_rows/3*2)
cutpoint
rand <- sample(1:n_rows)  # random permutation of row indices
bd.train <- bank_data[rand[1:cutpoint],]            # fix: `bd` was undefined
bd.test <- bank_data[rand[(cutpoint+1):n_rows],]    # fix: was (1:cutpoint+1) -- off-by-one vector
#1/3 should be test and 2/3 should be train
str(bd.test)
str(bd.train)
model <- ksvm(y ~., data=bd.train)  # fix: was db.train (typo)
pred <- predict(model, bd.test)
results <- table(pred, bd.test$y)
#calculate the accuracy %
# The confusion table is 2x2; correct predictions sit on the diagonal.
totalCorrect <- results[1,1] + results[2,2]  # fix: results[3,1] was out of bounds
totalInTest <- nrow(bd.test)                 # fix: nrows() is not an R function
totalCorrect/totalInTest
#new model that predicts whether education will predict ontime on their load
model.1 <- ksvm(y ~ education, data=bd.train)  # fix: was db.train (typo)
#training error: 11%
model.1
pred <- predict(model.1, bd.test)
results <- table(pred, bd.test$y)
totalCorrect <- results[1,1] + results[2,2]
totalInTest <- nrow(bd.test)
totalCorrect/totalInTest
#89% correct, error
results
|
/ClassWork/Week 9_Data Mining /9.9 R SVM Example.R
|
no_license
|
josepicon/IST-687
|
R
| false
| false
| 986
|
r
|
#9.9, SVM example
# Train a support vector machine on the bank marketing data and measure
# hold-out accuracy, first with all predictors and then with education only.
# NOTE(review): install.packages() at the top of a script re-installs the
# package on every run; normally run once interactively instead.
install.packages("kernlab")
library(kernlab)
# `datafile` must point at the bank marketing CSV before this script is run.
bank_data <- read.csv(datafile, sep = ",", header = TRUE)  # fix: was sep',' (syntax error)
str(bank_data)
summary(bank_data)
n_rows <- nrow(bank_data)  # fix: nrows() is not an R function
# Use 2/3 of the rows for training and the remaining 1/3 for testing.
cutpoint <- floor(n_rows/3*2)
cutpoint
rand <- sample(1:n_rows)  # random permutation of row indices
bd.train <- bank_data[rand[1:cutpoint],]            # fix: `bd` was undefined
bd.test <- bank_data[rand[(cutpoint+1):n_rows],]    # fix: was (1:cutpoint+1) -- off-by-one vector
#1/3 should be test and 2/3 should be train
str(bd.test)
str(bd.train)
model <- ksvm(y ~., data=bd.train)  # fix: was db.train (typo)
pred <- predict(model, bd.test)
results <- table(pred, bd.test$y)
#calculate the accuracy %
# The confusion table is 2x2; correct predictions sit on the diagonal.
totalCorrect <- results[1,1] + results[2,2]  # fix: results[3,1] was out of bounds
totalInTest <- nrow(bd.test)                 # fix: nrows() is not an R function
totalCorrect/totalInTest
#new model that predicts whether education will predict ontime on their load
model.1 <- ksvm(y ~ education, data=bd.train)  # fix: was db.train (typo)
#training error: 11%
model.1
pred <- predict(model.1, bd.test)
results <- table(pred, bd.test$y)
totalCorrect <- results[1,1] + results[2,2]
totalInTest <- nrow(bd.test)
totalCorrect/totalInTest
#89% correct, error
results
|
# Quantifying the contribution of occupational
# segregation to racial disparities in health:
# A gap-closing perspective
# Ian Lundberg
# ilundberg@princeton.edu
# See run_all.R where this file is called.

# Start from an empty environment so this code can be run independently
rm(list = ls(all = TRUE))

# Divert all printed output to a log file for the record
sink("logs/sample_restrictions.txt")
print("Sample restrictions")

# Record the start time so total run time can be reported at the end
start_time <- Sys.time()
print("Date and time of code run:")
print(start_time)

# Load packages
library(tidyverse)

# Helper: read an intermediate .Rds file, keeping only years 2005 and later
# (replicate weights are only used for the main analyses from 2005 onward)
read_recent <- function(path) {
  readRDS(path) %>% filter(YEAR >= 2005)
}
full_population <- read_recent("intermediate/full_population.Rds")
linked <- read_recent("intermediate/linked.Rds")
d_onset <- read_recent("intermediate/d_onset.Rds")
d <- read_recent("intermediate/d.Rds")

# Helper: print a label followed by its value, matching the log format
report <- function(label, value) {
  print(label)
  print(value)
}

# Walk through each sample restriction and log the associated count
report("Full population size", nrow(full_population))
report("Oversamples", sum(full_population$CPSIDP == 0))
report("From March basic CPS", sum(full_population$CPSIDP != 0))
report("Not linked (person-years)", sum(full_population$CPSIDP != 0) - 2 * nrow(linked))
report("Linked (persons)", nrow(linked))
report("Risk of recovery", sum(linked$lag))
report("History of health limitations", sum(linked$QUITSICK & !linked$lag))
report("Not employed", sum(!linked$employed & !linked$lag & !linked$QUITSICK))
report("Risk of onset", nrow(d_onset))
report("Lack of common support", nrow(d_onset) - nrow(d))
report("Occupation analyses", nrow(d))
report("Occupations that remain after common support restriction:", length(unique(d$OCC2010)))
report("Time spent", difftime(Sys.time(), start_time))

# Close the text output file
sink()

# End with an empty environment so this code can be run independently
rm(list = ls(all = TRUE))
|
/occupational_segregation_health/sample_restrictions.R
|
no_license
|
ilundberg/replication
|
R
| false
| false
| 1,987
|
r
|
# Quantifying the contribution of occupational
# segregation to racial disparities in health:
# A gap-closing perspective
# Ian Lundberg
# ilundberg@princeton.edu
# See run_all.R where this file is called.

# Start from an empty environment so this code can be run independently
rm(list = ls(all = TRUE))

# Divert all printed output to a log file for the record
sink("logs/sample_restrictions.txt")
print("Sample restrictions")

# Record the start time so total run time can be reported at the end
start_time <- Sys.time()
print("Date and time of code run:")
print(start_time)

# Load packages
library(tidyverse)

# Helper: read an intermediate .Rds file, keeping only years 2005 and later
# (replicate weights are only used for the main analyses from 2005 onward)
read_recent <- function(path) {
  readRDS(path) %>% filter(YEAR >= 2005)
}
full_population <- read_recent("intermediate/full_population.Rds")
linked <- read_recent("intermediate/linked.Rds")
d_onset <- read_recent("intermediate/d_onset.Rds")
d <- read_recent("intermediate/d.Rds")

# Helper: print a label followed by its value, matching the log format
report <- function(label, value) {
  print(label)
  print(value)
}

# Walk through each sample restriction and log the associated count
report("Full population size", nrow(full_population))
report("Oversamples", sum(full_population$CPSIDP == 0))
report("From March basic CPS", sum(full_population$CPSIDP != 0))
report("Not linked (person-years)", sum(full_population$CPSIDP != 0) - 2 * nrow(linked))
report("Linked (persons)", nrow(linked))
report("Risk of recovery", sum(linked$lag))
report("History of health limitations", sum(linked$QUITSICK & !linked$lag))
report("Not employed", sum(!linked$employed & !linked$lag & !linked$QUITSICK))
report("Risk of onset", nrow(d_onset))
report("Lack of common support", nrow(d_onset) - nrow(d))
report("Occupation analyses", nrow(d))
report("Occupations that remain after common support restriction:", length(unique(d$OCC2010)))
report("Time spent", difftime(Sys.time(), start_time))

# Close the text output file
sink()

# End with an empty environment so this code can be run independently
rm(list = ls(all = TRUE))
|
# Example usage of the FSAdata RWhitefishIR dataset (extracted Rd example).
library(FSAdata)
### Name: RWhitefishIR
### Title: Ages and lengths of Round Whitefish.
### Aliases: RWhitefishIR
### Keywords: datasets
### ** Examples
# Load the bundled dataset into the workspace and inspect it
data(RWhitefishIR)
str(RWhitefishIR)
head(RWhitefishIR)
# Scatter plot of total length (tl) against age
plot(tl~age,data=RWhitefishIR)
|
/data/genthat_extracted_code/FSAdata/examples/RWhitefishIR.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 248
|
r
|
# Example usage of the FSAdata RWhitefishIR dataset (extracted Rd example).
library(FSAdata)
### Name: RWhitefishIR
### Title: Ages and lengths of Round Whitefish.
### Aliases: RWhitefishIR
### Keywords: datasets
### ** Examples
# Load the bundled dataset into the workspace and inspect it
data(RWhitefishIR)
str(RWhitefishIR)
head(RWhitefishIR)
# Scatter plot of total length (tl) against age
plot(tl~age,data=RWhitefishIR)
|
#
# Updates parent pids by querying .pid.update command files and passing them to processPIDCommandFile.
# Limits the number of processed command files per one rule run. By default, at most *MaxSuccess
# command files are processed.
#
# Required rulebases: eudat, catchError
#
# Configuration: edit the *Coll INPUT parameter to match the target zone name and the command file collection.
#
# Arguments:
# *Coll [INPUT] The path to the .pid.update files
# *Suffix [INPUT] The suffix of the update files
#
# Author: Jani Heikkinen, CSC
#
update_parent_pid {
# Pagination state: a positive *ContInxOld means more query pages may remain
*ContInxOld = 1;
*SuccessCount = 0;
*FailedCount = 0;
# Cap on successfully processed command files per rule run
*MaxSuccess = 100;
# General query: every data object in *Coll whose name matches *Suffix
*Condition="COLL_NAME = '*Coll' AND DATA_NAME like '*Suffix'";
msiMakeGenQuery("COLL_NAME,DATA_NAME",*Condition,*GenQInp);
msiExecGenQuery(*GenQInp, *GenQOut);
msiGetContInxFromGenQueryOut(*GenQOut,*ContInxNew);
# Page through the query results
while(*ContInxOld > 0) {
if(*ContInxNew == 0) { *ContInxOld = 0; }
foreach(*GenQOut) {
msiGetValByKey(*GenQOut, "COLL_NAME", *Cname);
msiGetValByKey(*GenQOut, "DATA_NAME", *Dname);
*CF="*Cname/*Dname";
# Only process command files that still exist; otherwise record the failure
if(errorcode(msiObjStat(*CF,*out)) >= 0) {
processPIDCommandFile(*CF);
*SuccessCount = *SuccessCount + 1;
} else {
logInfo("*CF does not exist");
EUDATProcessErrorUpdatePID(*CF);
*FailedCount = *FailedCount + 1;
}
}
*ContInxOld = *ContInxNew;
# Stop early once the per-run processing cap has been exceeded
if(*SuccessCount > *MaxSuccess) { *ContInxOld = 0; }
if(*ContInxOld > 0) {msiGetMoreRows(*GenQInp,*GenQOut,*ContInxNew);}
}
logInfo("Updated parent PIDs: *SuccessCount . Failed updates: *FailedCount");
}
INPUT *Coll = "/zone/replicate", *Suffix="%%.pid.update"
OUTPUT ruleExecOut
|
/rules/EUDATUpdateParentPids.r
|
permissive
|
NicolasLiampotis/B2SAFE-core
|
R
| false
| false
| 2,081
|
r
|
#
# Updates parent pids by querying .pid.update command files and passing them to processPIDCommandFile.
# Limits the number of processed command files per one rule run. By default, at most *MaxSuccess
# command files are processed.
#
# Required rulebases: eudat, catchError
#
# Configuration: edit the *Coll INPUT parameter to match the target zone name and the command file collection.
#
# Arguments:
# *Coll [INPUT] The path to the .pid.update files
# *Suffix [INPUT] The suffix of the update files
#
# Author: Jani Heikkinen, CSC
#
update_parent_pid {
# Pagination state: a positive *ContInxOld means more query pages may remain
*ContInxOld = 1;
*SuccessCount = 0;
*FailedCount = 0;
# Cap on successfully processed command files per rule run
*MaxSuccess = 100;
# General query: every data object in *Coll whose name matches *Suffix
*Condition="COLL_NAME = '*Coll' AND DATA_NAME like '*Suffix'";
msiMakeGenQuery("COLL_NAME,DATA_NAME",*Condition,*GenQInp);
msiExecGenQuery(*GenQInp, *GenQOut);
msiGetContInxFromGenQueryOut(*GenQOut,*ContInxNew);
# Page through the query results
while(*ContInxOld > 0) {
if(*ContInxNew == 0) { *ContInxOld = 0; }
foreach(*GenQOut) {
msiGetValByKey(*GenQOut, "COLL_NAME", *Cname);
msiGetValByKey(*GenQOut, "DATA_NAME", *Dname);
*CF="*Cname/*Dname";
# Only process command files that still exist; otherwise record the failure
if(errorcode(msiObjStat(*CF,*out)) >= 0) {
processPIDCommandFile(*CF);
*SuccessCount = *SuccessCount + 1;
} else {
logInfo("*CF does not exist");
EUDATProcessErrorUpdatePID(*CF);
*FailedCount = *FailedCount + 1;
}
}
*ContInxOld = *ContInxNew;
# Stop early once the per-run processing cap has been exceeded
if(*SuccessCount > *MaxSuccess) { *ContInxOld = 0; }
if(*ContInxOld > 0) {msiGetMoreRows(*GenQInp,*GenQOut,*ContInxNew);}
}
logInfo("Updated parent PIDs: *SuccessCount . Failed updates: *FailedCount");
}
INPUT *Coll = "/zone/replicate", *Suffix="%%.pid.update"
OUTPUT ruleExecOut
|
\name{class-bal.tab.subclass}
\alias{class-bal.tab.subclass}
\title{Using \code{bal.tab()} with Subclassified Data}
\description{
When using \code{bal.tab()} with subclassified data, i.e., data split into subclasses where balance may hold, the output will be different from the standard, non-subclassified case, and there is an additional option for controlling display. This page outlines the outputs and options in this case.
There are two main components of the output of \code{bal.tab()} with subclassified data: the balance within subclasses and the balance summary across subclasses. The within-subclass balance displays are essentially standard balance displays for each subclass, except that only "adjusted" values are available, because the subclassification itself is the adjustment.
The balance summary is, for each variable, like a weighted average of the balance statistics across subclasses. This is computed internally by assigning each individual a weight based on their subclass and treatment group membership and then computing weighted balance statistics as usual with these weights. This summary is the same one would get if subclasses were supplied to the \code{match.strata} argument rather than to \code{subclass}. Because the means and mean differences are additive, their computed values will be weighted averages of the subclass-specific values, but for other statistics, the computed values will not be.
}
\arguments{
There are two arguments for \code{bal.tab()} that relate to subclasses: \code{subclass} and \code{disp.subclass}.
\item{subclass}{For the data.frame and formula methods of \code{bal.tab}, a vector of subclass membership or the name of the variable in \code{data} containing subclass membership. When using subclassification with a function compatible with \pkg{cobalt}, such as \code{matchit()} in \pkg{MatchIt}, this argument can be omitted because the subclasses are stored in the output object.}
\item{disp.subclass}{This is a display option that does not affect computation. If \code{TRUE}, balance for each subclass will be displayed. This can be cumbersome with many subclasses or covariates. The default is \code{FALSE}.}
}
\value{
The output is a \code{bal.tab.subclass} object, which inherits from \code{bal.tab}. It has the following elements:
\item{Subclass.Balance}{A list of data frames containing balance information for each covariate in each subclass.}
\item{Balance.Across.Subclass}{A data frame containing balance statistics for each covariate aggregated across subclasses and for the original sample (i.e., unadjusted). See \code{\link{bal.tab}} for details on what this includes.}
}
\author{
Noah Greifer
}
\seealso{
\code{\link{bal.tab}}, \code{\link{bal.tab.data.frame}}, \code{\link{print.bal.tab.subclass}}
}
|
/man/class-bal.tab.subclass.Rd
|
no_license
|
Zoe187419/cobalt
|
R
| false
| false
| 2,779
|
rd
|
\name{class-bal.tab.subclass}
\alias{class-bal.tab.subclass}
\title{Using \code{bal.tab()} with Subclassified Data}
\description{
When using \code{bal.tab()} with subclassified data, i.e., data split into subclasses where balance may hold, the output will be different from the standard, non-subclassified case, and there is an additional option for controlling display. This page outlines the outputs and options in this case.
There are two main components of the output of \code{bal.tab()} with subclassified data: the balance within subclasses and the balance summary across subclasses. The within-subclass balance displays are essentially standard balance displays for each subclass, except that only "adjusted" values are available, because the subclassification itself is the adjustment.
The balance summary is, for each variable, like a weighted average of the balance statistics across subclasses. This is computed internally by assigning each individual a weight based on their subclass and treatment group membership and then computing weighted balance statistics as usual with these weights. This summary is the same one would get if subclasses were supplied to the \code{match.strata} argument rather than to \code{subclass}. Because the means and mean differences are additive, their computed values will be weighted averages of the subclass-specific values, but for other statistics, the computed values will not be.
}
\arguments{
There are two arguments for \code{bal.tab()} that relate to subclasses: \code{subclass} and \code{disp.subclass}.
\item{subclass}{For the data.frame and formula methods of \code{bal.tab}, a vector of subclass membership or the name of the variable in \code{data} containing subclass membership. When using subclassification with a function compatible with \pkg{cobalt}, such as \code{matchit()} in \pkg{MatchIt}, this argument can be omitted because the subclasses are stored in the output object.}
\item{disp.subclass}{This is a display option that does not affect computation. If \code{TRUE}, balance for each subclass will be displayed. This can be cumbersome with many subclasses or covariates. The default is \code{FALSE}.}
}
\value{
The output is a \code{bal.tab.subclass} object, which inherits from \code{bal.tab}. It has the following elements:
\item{Subclass.Balance}{A list of data frames containing balance information for each covariate in each subclass.}
\item{Balance.Across.Subclass}{A data frame containing balance statistics for each covariate aggregated across subclasses and for the original sample (i.e., unadjusted). See \code{\link{bal.tab}} for details on what this includes.}
}
\author{
Noah Greifer
}
\seealso{
\code{\link{bal.tab}}, \code{\link{bal.tab.data.frame}}, \code{\link{print.bal.tab.subclass}}
}
|
# Compare fluorescence (RFU) readings taken on two plate readers
# (synergy vs cytation) for the ChlamEE acclimated plates.
library(tidyverse)
library(cowplot)
library(readxl)
library(janitor)
library(lubridate)
# read in general data ----------------------------------------------------
plate_layout <- read_excel("data-general/chlamee-acclimated-plate-layout.xlsx")
plate_key <- read_excel("data-general/chlamee-acclimated-plate-key.xlsx")
# Join layout to key and build a zero-padded well ID (e.g. "A01") from the
# row letter and column number; `colum` is a misspelled column name in the
# source spreadsheet, renamed here.
plate_info <- left_join(plate_layout, plate_key, by = c("plate_key")) %>%
  rename(column = colum) %>%
  unite(row, column, col = "well", remove = FALSE, sep = "") %>%
  mutate(column = formatC(column, width = 2, flag = 0)) %>%
  mutate(column = str_replace(column, " ", "0")) %>%
  unite(col = well, row, column, sep = "") %>%
  mutate(population = ifelse(population == "anc 2", "Anc 2", population))
write_csv(plate_info, "data-processed/chlamee-acclimated-plate-info.csv")
# read in RFU data --------------------------------------------------------
RFU_files <- c(list.files("data-raw/synergy-cytation-comparisons/direct_comparisons", full.names = TRUE))
RFU_files <- RFU_files[grepl(".xls", RFU_files)]
# Name each path by itself with the extension stripped (used as .id below)
names(RFU_files) <- RFU_files %>%
  gsub(pattern = ".xls$", replacement = "")
# RFU_files[grepl("104", RFU_files)]
# RFU_files <- RFU_files[!grepl("acc", RFU_files)]
# Read the fixed plate range from every file, reshape to long format, build
# zero-padded well IDs, and parse instrument/run out of the file path.
# NOTE(review): the column name X__1 assumes readxl < 1.0 naming; newer
# readxl emits "...1" instead -- confirm package version.
all_plates <- map_df(RFU_files, read_excel, range = "B56:N64", .id = "file_name") %>%
  rename(row = X__1) %>%
  filter(!grepl("dilution", file_name)) %>%
  gather(key = column, value = RFU, 3:14) %>%
  unite(row, column, col = "well", remove = FALSE, sep = "") %>%
  mutate(column = formatC(column, width = 2, flag = 0)) %>%
  mutate(column = str_replace(column, " ", "0")) %>%
  unite(col = well, row, column, sep = "") %>%
  filter(!is.na(RFU)) %>%
  separate(file_name, into = c("path", "machine"), sep = "direct_comparisons/", remove = FALSE) %>%
  separate(machine, into = c("instrument", "run"), sep = "-", remove = FALSE)
# Scatter of synergy vs cytation readings for wells under 200 RFU, with the
# identity line and a linear fit for comparison.
all_plates %>%
  select(instrument, well, RFU) %>%
  filter(RFU < 200) %>%
  spread(key = instrument, value = RFU) %>%
  ggplot(aes(x = cytation, y = synergy)) + geom_point() +
  geom_abline(slope = 1, intercept = 0) +
  geom_smooth(method = "lm")
# Linear model summary of the same comparison
all_plates %>%
  select(instrument, well, RFU) %>%
  filter(RFU < 200) %>%
  spread(key = instrument, value = RFU) %>%
  lm(synergy ~ cytation, data =.) %>% summary()
all_plates$file_name[[1]]
|
/R-scripts/30_synergy_cytation_comparison.R
|
permissive
|
JoeyBernhardt/ChlamEE-TPC
|
R
| false
| false
| 2,282
|
r
|
# Compare fluorescence (RFU) readings taken on two plate readers
# (synergy vs cytation) for the ChlamEE acclimated plates.
library(tidyverse)
library(cowplot)
library(readxl)
library(janitor)
library(lubridate)
# read in general data ----------------------------------------------------
plate_layout <- read_excel("data-general/chlamee-acclimated-plate-layout.xlsx")
plate_key <- read_excel("data-general/chlamee-acclimated-plate-key.xlsx")
# Join layout to key and build a zero-padded well ID (e.g. "A01") from the
# row letter and column number; `colum` is a misspelled column name in the
# source spreadsheet, renamed here.
plate_info <- left_join(plate_layout, plate_key, by = c("plate_key")) %>%
  rename(column = colum) %>%
  unite(row, column, col = "well", remove = FALSE, sep = "") %>%
  mutate(column = formatC(column, width = 2, flag = 0)) %>%
  mutate(column = str_replace(column, " ", "0")) %>%
  unite(col = well, row, column, sep = "") %>%
  mutate(population = ifelse(population == "anc 2", "Anc 2", population))
write_csv(plate_info, "data-processed/chlamee-acclimated-plate-info.csv")
# read in RFU data --------------------------------------------------------
RFU_files <- c(list.files("data-raw/synergy-cytation-comparisons/direct_comparisons", full.names = TRUE))
RFU_files <- RFU_files[grepl(".xls", RFU_files)]
# Name each path by itself with the extension stripped (used as .id below)
names(RFU_files) <- RFU_files %>%
  gsub(pattern = ".xls$", replacement = "")
# RFU_files[grepl("104", RFU_files)]
# RFU_files <- RFU_files[!grepl("acc", RFU_files)]
# Read the fixed plate range from every file, reshape to long format, build
# zero-padded well IDs, and parse instrument/run out of the file path.
# NOTE(review): the column name X__1 assumes readxl < 1.0 naming; newer
# readxl emits "...1" instead -- confirm package version.
all_plates <- map_df(RFU_files, read_excel, range = "B56:N64", .id = "file_name") %>%
  rename(row = X__1) %>%
  filter(!grepl("dilution", file_name)) %>%
  gather(key = column, value = RFU, 3:14) %>%
  unite(row, column, col = "well", remove = FALSE, sep = "") %>%
  mutate(column = formatC(column, width = 2, flag = 0)) %>%
  mutate(column = str_replace(column, " ", "0")) %>%
  unite(col = well, row, column, sep = "") %>%
  filter(!is.na(RFU)) %>%
  separate(file_name, into = c("path", "machine"), sep = "direct_comparisons/", remove = FALSE) %>%
  separate(machine, into = c("instrument", "run"), sep = "-", remove = FALSE)
# Scatter of synergy vs cytation readings for wells under 200 RFU, with the
# identity line and a linear fit for comparison.
all_plates %>%
  select(instrument, well, RFU) %>%
  filter(RFU < 200) %>%
  spread(key = instrument, value = RFU) %>%
  ggplot(aes(x = cytation, y = synergy)) + geom_point() +
  geom_abline(slope = 1, intercept = 0) +
  geom_smooth(method = "lm")
# Linear model summary of the same comparison
all_plates %>%
  select(instrument, well, RFU) %>%
  filter(RFU < 200) %>%
  spread(key = instrument, value = RFU) %>%
  lm(synergy ~ cytation, data =.) %>% summary()
all_plates$file_name[[1]]
|
#
# maint-dev-ratio.R, 19 Apr 17
#
# Data from:
# An Investigation of the Factors Affecting the Lifecycle Costs of COTS-Based Systems
# Laurence Michael Dunn
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# What percentage of maint/dev ratio is greater than 1
# Bootstrap one sample of systems and, for each threshold in
# c(1, 2, 3, 4, 5, 6, 10), return the fraction of sampled systems whose
# NPV-weighted maintenance/development effort ratio exceeds it.
# Relies on globals: num_data, num_sys, dme, maint_weight.
perc_gt_1 <- function()
{
  # Bootstrap-sample system rows with replacement
  idx <- sample(num_data, size = num_sys, replace = TRUE)
  boot_sys <- dme[idx, ]
  ratio <- maint_weight * boot_sys$y_maint.effort / boot_sys$dev.effort
  n_ratio <- length(ratio)
  thresholds <- c(1, 2, 3, 4, 5, 6, 10)
  sapply(thresholds, function(th) length(which(ratio > th)) / n_ratio)
}
# Sample the systems and return mean maint/dev ratio
# Bootstrap one sample of systems and return the harmonic mean of the
# NPV-weighted maintenance/development effort ratio.
# Relies on globals: num_data, num_sys, dme, maint_weight.
sys_samp_mean <- function()
{
  idx <- sample(num_data, size = num_sys, replace = TRUE)
  boot_sys <- dme[idx, ]
  ratio <- maint_weight * boot_sys$y_maint.effort / boot_sys$dev.effort
  # A ratio, so use the harmonic mean
  1 / mean(1 / ratio)
}
# Expected fraction of systems still in service after `years` years, using
# coefficients from a quadratic Poisson fit to the system survival data.
# Alternative fits considered by the author are kept below for reference.
survival_rate <- function(years)
{
  # yr2_mod=glm(remaining ~ years+I(years^2), data=life, family=poisson)
  decay <- 0.0852127 * years + 0.0025576 * years^2
  exp(-decay)
  # Alternatives (not used):
  # yr1_mod=glm(remaining ~ years, data=life, family=poisson): exp(-0.140833*years)
  # q=nls(remaining ~ a*b^years, data=life, start=list(a=100, b=0.8)): 0.8829^years
}
# Colour palette set up for plotting (not used in this excerpt)
pal_col=rainbow(2)
# Development/maintenance effort data; ESEUR_dir comes from ESEUR_config.r
dme=read.csv(paste0(ESEUR_dir,"economics/dev-maint-effort.csv.xz"), as.is=TRUE)
# Dev-to-maintenance ratio per system, largest first
y=sort(dme$dev.effort/dme$maint.effort, decreasing=TRUE)
# Harmonic mean of single year maintenance, i.e., d/m
# 1/mean(1/(y*5))
# Longest assumed system lifetime, in years
max_years=25
# Yearly discount rate used in the Net Present Value calculations
NPV_rate=1.05
# Maintenance over five years
y5_rate=(1-NPV_rate^6)/(1-NPV_rate)-1
# Assume fixed amount per year
dme$y_maint.effort=dme$maint.effort/y5_rate
num_data=nrow(dme)
# Number of systems needed so that the expected 5-year survivors equal the
# number of systems actually observed
num_sys=trunc(0.5+num_data/survival_rate(5))
# Expected number of systems dying in each year of life
yearly_death=trunc(0.5-diff(num_sys*survival_rate(0:(max_years-1))))
# All remaining systems are replaced in the last year
yearly_death=c(yearly_death, num_sys-sum(yearly_death))
# Assume maintenance happens for a year, then a yes/no termination
# decision is made
# Calculate Net Present Value, yearly discount of NPV_rate
maint_per_year=(1-NPV_rate^(2:(max_years+1)))/(1-NPV_rate)-1
# One weight per sampled system: discounted maintenance for its lifetime
maint_weight=rep(maint_per_year, times=yearly_death)
# maint_weight=rep(1, num_sys)
# mean_rep=replicate(1000, sys_samp_mean())
# Harmonic mean
# 1/mean(1/mean_rep)
# Bootstrap 1000 samples and average the threshold-exceedance fractions
gt_1=replicate(1000, perc_gt_1())
rowMeans(gt_1)
|
/ecosystems/maint-dev-ratio.R
|
no_license
|
alanponce/ESEUR-code-data
|
R
| false
| false
| 2,340
|
r
|
#
# maint-dev-ratio.R, 19 Apr 17
#
# Data from:
# An Investigation of the Factors Affecting the Lifecycle Costs of COTS-Based Systems
# Laurence Michael Dunn
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# What percentage of maint/dev ratio is greater than 1
# Bootstrap one sample of systems and, for each threshold in
# c(1, 2, 3, 4, 5, 6, 10), return the fraction of sampled systems whose
# NPV-weighted maintenance/development effort ratio exceeds it.
# Relies on globals: num_data, num_sys, dme, maint_weight.
perc_gt_1 <- function()
{
  # Bootstrap-sample system rows with replacement
  idx <- sample(num_data, size = num_sys, replace = TRUE)
  boot_sys <- dme[idx, ]
  ratio <- maint_weight * boot_sys$y_maint.effort / boot_sys$dev.effort
  n_ratio <- length(ratio)
  thresholds <- c(1, 2, 3, 4, 5, 6, 10)
  sapply(thresholds, function(th) length(which(ratio > th)) / n_ratio)
}
# Sample the systems and return mean maint/dev ratio
# Bootstrap one sample of systems and return the harmonic mean of the
# NPV-weighted maintenance/development effort ratio.
# Relies on globals: num_data, num_sys, dme, maint_weight.
sys_samp_mean <- function()
{
  idx <- sample(num_data, size = num_sys, replace = TRUE)
  boot_sys <- dme[idx, ]
  ratio <- maint_weight * boot_sys$y_maint.effort / boot_sys$dev.effort
  # A ratio, so use the harmonic mean
  1 / mean(1 / ratio)
}
# Expected fraction of systems still in service after `years` years, using
# coefficients from a quadratic Poisson fit to the system survival data.
# Alternative fits considered by the author are kept below for reference.
survival_rate <- function(years)
{
  # yr2_mod=glm(remaining ~ years+I(years^2), data=life, family=poisson)
  decay <- 0.0852127 * years + 0.0025576 * years^2
  exp(-decay)
  # Alternatives (not used):
  # yr1_mod=glm(remaining ~ years, data=life, family=poisson): exp(-0.140833*years)
  # q=nls(remaining ~ a*b^years, data=life, start=list(a=100, b=0.8)): 0.8829^years
}
# Colour palette set up for plotting (not used in this excerpt)
pal_col=rainbow(2)
# Development/maintenance effort data; ESEUR_dir comes from ESEUR_config.r
dme=read.csv(paste0(ESEUR_dir,"economics/dev-maint-effort.csv.xz"), as.is=TRUE)
# Dev-to-maintenance ratio per system, largest first
y=sort(dme$dev.effort/dme$maint.effort, decreasing=TRUE)
# Harmonic mean of single year maintenance, i.e., d/m
# 1/mean(1/(y*5))
# Longest assumed system lifetime, in years
max_years=25
# Yearly discount rate used in the Net Present Value calculations
NPV_rate=1.05
# Maintenance over five years
y5_rate=(1-NPV_rate^6)/(1-NPV_rate)-1
# Assume fixed amount per year
dme$y_maint.effort=dme$maint.effort/y5_rate
num_data=nrow(dme)
# Number of systems needed so that the expected 5-year survivors equal the
# number of systems actually observed
num_sys=trunc(0.5+num_data/survival_rate(5))
# Expected number of systems dying in each year of life
yearly_death=trunc(0.5-diff(num_sys*survival_rate(0:(max_years-1))))
# All remaining systems are replaced in the last year
yearly_death=c(yearly_death, num_sys-sum(yearly_death))
# Assume maintenance happens for a year, then a yes/no termination
# decision is made
# Calculate Net Present Value, yearly discount of NPV_rate
maint_per_year=(1-NPV_rate^(2:(max_years+1)))/(1-NPV_rate)-1
# One weight per sampled system: discounted maintenance for its lifetime
maint_weight=rep(maint_per_year, times=yearly_death)
# maint_weight=rep(1, num_sys)
# mean_rep=replicate(1000, sys_samp_mean())
# Harmonic mean
# 1/mean(1/mean_rep)
# Bootstrap 1000 samples and average the threshold-exceedance fractions
gt_1=replicate(1000, perc_gt_1())
rowMeans(gt_1)
|
#Load libraries
library(dplyr)
library(magrittr)
library(brms)
library(parallel)
library(MASS)
##Read in data
# Dataset: growth measurements for the cold treatment
cold_DA <- read.csv("data/growth/processed/analysis/COLD_quangen_growth_DA.csv", stringsAsFactors = FALSE)
str(cold_DA)
dim(cold_DA)
#Format date and treatment
# Fix: the original mutate also created a misspelled duplicate column
# `treatent = as.factor(treatment)`; `treatment` is already converted on the
# first line, so the typo'd duplicate assignment has been dropped.
cold_DA %<>% mutate(treatment = as.factor(treatment),
                    liz_id = as.factor(liz_id),
                    dam_id = as.factor(dam_id))
#G matrix (relationship matrix passed to the model via data2/gr(cov = ...))
G_VCV <- read.csv("output/G/Ga_SNPready.csv", row.names = 1) %>% as.matrix()
# Set some priors (weakly informative)
priors <- c(prior(normal(0, 10), "Intercept"),
            prior(student_t(3, 0, 10), class = "sd"),
            prior(student_t(3, 0, 10), class = "sigma"))
# Reviewer 2 Suggestion to Drop M:
# random-regression model with genotype-specific intercept, linear and
# quadratic age effects, correlated via the G matrix.
brm_15_cold <- brm(lnMass ~ 1 +
                     (1 + z_days_since_hatch + z_days_since_hatch_I2 | gr(F1_Genotype, cov = G_VCV)),
                   family = gaussian(),
                   data2 = list(G_VCV = G_VCV),
                   prior = priors,
                   data = cold_DA,
                   chains = 4, cores = 4, iter = 6000, warmup = 1000, thin = 10,
                   control = list(adapt_delta = 0.98), save_pars = save_pars(all = TRUE))
saveRDS(brm_15_cold, "output/rds/brm_15_cold")
|
/Katana/brms_mod.15.cold.R
|
no_license
|
fontikar/ldeli_growth
|
R
| false
| false
| 1,260
|
r
|
#Load libraries
library(dplyr)
library(magrittr)
library(brms)
library(parallel)
library(MASS)
##Read in data
# Dataset: growth measurements for the cold treatment
cold_DA <- read.csv("data/growth/processed/analysis/COLD_quangen_growth_DA.csv", stringsAsFactors = FALSE)
str(cold_DA)
dim(cold_DA)
#Format date and treatment
# Fix: the original mutate also created a misspelled duplicate column
# `treatent = as.factor(treatment)`; `treatment` is already converted on the
# first line, so the typo'd duplicate assignment has been dropped.
cold_DA %<>% mutate(treatment = as.factor(treatment),
                    liz_id = as.factor(liz_id),
                    dam_id = as.factor(dam_id))
#G matrix (relationship matrix passed to the model via data2/gr(cov = ...))
G_VCV <- read.csv("output/G/Ga_SNPready.csv", row.names = 1) %>% as.matrix()
# Set some priors (weakly informative)
priors <- c(prior(normal(0, 10), "Intercept"),
            prior(student_t(3, 0, 10), class = "sd"),
            prior(student_t(3, 0, 10), class = "sigma"))
# Reviewer 2 Suggestion to Drop M:
# random-regression model with genotype-specific intercept, linear and
# quadratic age effects, correlated via the G matrix.
brm_15_cold <- brm(lnMass ~ 1 +
                     (1 + z_days_since_hatch + z_days_since_hatch_I2 | gr(F1_Genotype, cov = G_VCV)),
                   family = gaussian(),
                   data2 = list(G_VCV = G_VCV),
                   prior = priors,
                   data = cold_DA,
                   chains = 4, cores = 4, iter = 6000, warmup = 1000, thin = 10,
                   control = list(adapt_delta = 0.98), save_pars = save_pars(all = TRUE))
saveRDS(brm_15_cold, "output/rds/brm_15_cold")
|
# Simulation study of the bias of a drift estimator as a function of the
# drift bandwidth h_d, under a Black-Scholes model with microstructure noise.
library(ggplot2)
library(latex2exp)
# NOTE(review): setwd() on an environment variable makes the script
# non-portable; the "masters-thesis" env var must point at the project root.
setwd(Sys.getenv("masters-thesis"))
source("Simulation/Heston.R")
source("Simulation/BlackScholes.R")
source("Simulation/Bursts.R")
source("Simulation/Jumps.R")
source("Estimation/pre-average.R")
source("Estimation/estimates.R")
source("Estimation/estimates_reloaded.R")
# Fixed seed so the simulated bias curve is reproducible
set.seed(100)
#################### PARAMETERS THAT DON'T CHANGE ####################
# Microstructure-noise variance and standard deviation
omega2 <- 2.64*10^(-10)*25 #What Mathias wrote
omega <- sqrt(omega2)
K2 <- 0.5 #K2
# Number of observations per simulated path
n <- 23400 /7
mat <- 6.5/(24*7*52) /7 #I cheat a little because it takes too long otherwise. Since it is only evaluated in a single point, it doesn't matter as long as there are enough bandwidths of obs.
dt <- mat/n
Npaths <- 1000
# Spot variance and volatility of the simulated price process
sigma2 <- 0.0457 /25
sigma <- sqrt(sigma2)
#noise_ratio <- omega/(sigma*sqrt(dt))
#Because of lack of memory, it is done in loops
n_loops <- ceiling(Npaths/100)
#List to final values
# Candidate drift bandwidths (1 to 20 half-minutes, as fractions of a year)
hd_list <- seq(1,20,by = 1)/ (2*60*24*7*52)
# One row per memory batch, one column per bandwidth
var_bias_temp <- matrix(nrow = n_loops,ncol = length(hd_list))
for (memory in 1:n_loops) {
  #memory <- 1
  print(memory)
  temp_paths <- Npaths / n_loops
  # Simulate one batch of noisy Black-Scholes paths
  BS <- sim.BlackScholes(mean = 0, sd = sigma, omega = omega, Nsteps = n, Npath = temp_paths)
  #Create dy
  BS$Y <- t(diff(t(as.matrix(BS$Y))))
  #################### LOOP OVER H_D ####################
  #H_d from 1min to 15min
  for (hd in 1:length(hd_list)) {
    #hd <- 2
    desired_index <- n-1 #Takes last index, so K uses as many points as possible
    # Standardised drift estimate at the final time point
    mu_hat <- sqrt(hd_list[hd])/sqrt(K2*sigma^2)*est.mu.mat(data = BS, hd = hd_list[hd], t.index = desired_index)$mu
    var_bias_temp[memory,hd] <- mean(mu_hat^2) #We know it has mean zero, so E(mu^2) is more precise than Var(mu)
  }
}
#The total variance is just the mean of the variances (because the loops have same size)
var_bias <- numeric(length = length(hd_list))
for (hd in 1:length(hd_list)) {
  var_bias[hd] <- mean(var_bias_temp[,hd])
}
#Compute the bias from the last noise term
noise <- 1+omega^2 / (hd_list*K2*sigma2)
#4min, 33sek pr. kilometer.
#################### PLOT ####################
# Convert bandwidths back to minutes for the x-axis
hd_minutes <- hd_list*(60*24*7*52)
data <- data.frame(hd = hd_minutes, target = (1:length(hd_minutes)*0)+1, var_bias = var_bias, var_bias_noise = noise)
# NOTE(review): the last TeX label below appears to be missing a closing
# brace ("...K_2 \\sigma^2$" should presumably end "\\sigma^2}$");
# left unchanged here as it is a runtime string.
ggplot() +
  geom_line(data=data, aes(x=hd, y=var_bias, col = "Var_mu"), size = 1) +
  geom_line(data=data, aes(x=hd, y=var_bias_noise, col = "var-noise"), size = 1) +
  geom_line(data=data, aes(x=hd, y=target, col = "1"), size = 1) +
  xlab(TeX("Bandwidth in minutes")) + ylab("Value") +
  ggtitle('Bias of drift estimator') +
  theme(plot.title = element_text(hjust = 0.5, size = 14)) +
  scale_color_discrete(name = "Expression",
                       labels = unname(TeX(
                         c("1",'Var $\\left( \\frac{\\sqrt{h_n} \\mu_t^n}{\\sqrt{K_2 \\sigma^2}} \\right)$',
                           "$1+\\frac{\\omega^2}{h_n K_2 \\sigma^2$"))))
|
/Figures2/08_bias_of_mu2.R
|
no_license
|
SebastianGPedersen/masters-thesis
|
R
| false
| false
| 2,855
|
r
|
# Simulation study of the bias of a drift estimator as a function of the
# drift bandwidth h_d, under a Black-Scholes model with microstructure noise.
library(ggplot2)
library(latex2exp)
# NOTE(review): setwd() on an environment variable makes the script
# non-portable; the "masters-thesis" env var must point at the project root.
setwd(Sys.getenv("masters-thesis"))
source("Simulation/Heston.R")
source("Simulation/BlackScholes.R")
source("Simulation/Bursts.R")
source("Simulation/Jumps.R")
source("Estimation/pre-average.R")
source("Estimation/estimates.R")
source("Estimation/estimates_reloaded.R")
# Fixed seed so the simulated bias curve is reproducible
set.seed(100)
#################### PARAMETERS THAT DON'T CHANGE ####################
# Microstructure-noise variance and standard deviation
omega2 <- 2.64*10^(-10)*25 #What Mathias wrote
omega <- sqrt(omega2)
K2 <- 0.5 #K2
# Number of observations per simulated path
n <- 23400 /7
mat <- 6.5/(24*7*52) /7 #I cheat a little because it takes too long otherwise. Since it is only evaluated in a single point, it doesn't matter as long as there are enough bandwidths of obs.
dt <- mat/n
Npaths <- 1000
# Spot variance and volatility of the simulated price process
sigma2 <- 0.0457 /25
sigma <- sqrt(sigma2)
#noise_ratio <- omega/(sigma*sqrt(dt))
#Because of lack of memory, it is done in loops
n_loops <- ceiling(Npaths/100)
#List to final values
# Candidate drift bandwidths (1 to 20 half-minutes, as fractions of a year)
hd_list <- seq(1,20,by = 1)/ (2*60*24*7*52)
# One row per memory batch, one column per bandwidth
var_bias_temp <- matrix(nrow = n_loops,ncol = length(hd_list))
for (memory in 1:n_loops) {
  #memory <- 1
  print(memory)
  temp_paths <- Npaths / n_loops
  # Simulate one batch of noisy Black-Scholes paths
  BS <- sim.BlackScholes(mean = 0, sd = sigma, omega = omega, Nsteps = n, Npath = temp_paths)
  #Create dy
  BS$Y <- t(diff(t(as.matrix(BS$Y))))
  #################### LOOP OVER H_D ####################
  #H_d from 1min to 15min
  for (hd in 1:length(hd_list)) {
    #hd <- 2
    desired_index <- n-1 #Takes last index, so K uses as many points as possible
    # Standardised drift estimate at the final time point
    mu_hat <- sqrt(hd_list[hd])/sqrt(K2*sigma^2)*est.mu.mat(data = BS, hd = hd_list[hd], t.index = desired_index)$mu
    var_bias_temp[memory,hd] <- mean(mu_hat^2) #We know it has mean zero, so E(mu^2) is more precise than Var(mu)
  }
}
#The total variance is just the mean of the variances (because the loops have same size)
var_bias <- numeric(length = length(hd_list))
for (hd in 1:length(hd_list)) {
  var_bias[hd] <- mean(var_bias_temp[,hd])
}
#Compute the bias from the last noise term
noise <- 1+omega^2 / (hd_list*K2*sigma2)
#4min, 33sek pr. kilometer.
#################### PLOT ####################
# Convert bandwidths back to minutes for the x-axis
hd_minutes <- hd_list*(60*24*7*52)
data <- data.frame(hd = hd_minutes, target = (1:length(hd_minutes)*0)+1, var_bias = var_bias, var_bias_noise = noise)
# NOTE(review): the last TeX label below appears to be missing a closing
# brace ("...K_2 \\sigma^2$" should presumably end "\\sigma^2}$");
# left unchanged here as it is a runtime string.
ggplot() +
  geom_line(data=data, aes(x=hd, y=var_bias, col = "Var_mu"), size = 1) +
  geom_line(data=data, aes(x=hd, y=var_bias_noise, col = "var-noise"), size = 1) +
  geom_line(data=data, aes(x=hd, y=target, col = "1"), size = 1) +
  xlab(TeX("Bandwidth in minutes")) + ylab("Value") +
  ggtitle('Bias of drift estimator') +
  theme(plot.title = element_text(hjust = 0.5, size = 14)) +
  scale_color_discrete(name = "Expression",
                       labels = unname(TeX(
                         c("1",'Var $\\left( \\frac{\\sqrt{h_n} \\mu_t^n}{\\sqrt{K_2 \\sigma^2}} \\right)$',
                           "$1+\\frac{\\omega^2}{h_n K_2 \\sigma^2$"))))
|
# Exploratory Data Analysis, Project 2, Plot 3.
# Question: of the four source types (point, nonpoint, onroad, nonroad),
# which have seen decreases / increases in PM2.5 emissions from 1999-2008
# for Baltimore City (fips == "24510")?  Answered with a ggplot2 line plot.
library(ggplot2)
library(dplyr)

# Download and unpack the NEI data set.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url, destfile = "Data_for_peer_assessment.zip")
unzip("Data_for_peer_assessment.zip")

# PM2.5 emissions records and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Restrict to Baltimore City and total the emissions per source type and year.
emission_in_Baltimore <- NEI[NEI$fips == "24510", ]
ems_bal_type_year <- emission_in_Baltimore %>%
  group_by(year, type) %>%
  summarise(sum_ems = sum(Emissions))

# Write the plot to a PNG file.  print() is required so the ggplot object is
# actually rendered when this script is run non-interactively (e.g. via
# source() or Rscript); a bare top-level ggplot expression would otherwise
# leave plot3.png blank.
png("plot3.png", width = 480, height = 480)
print(
  ggplot(ems_bal_type_year, aes(year, sum_ems, group = type, colour = type)) +
    geom_line()
)
dev.off()
|
/Exploratory-Data-Analysis/Project-2/Plot 3.R
|
no_license
|
Huiying-Tan/datasciencecoursera
|
R
| false
| false
| 1,247
|
r
|
# Exploratory Data Analysis, Project 2, Plot 3.
# Question: of the four source types (point, nonpoint, onroad, nonroad),
# which have seen decreases / increases in PM2.5 emissions from 1999-2008
# for Baltimore City (fips == "24510")?  Answered with a ggplot2 line plot.
library(ggplot2)
library(dplyr)

# Download and unpack the NEI data set.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url, destfile = "Data_for_peer_assessment.zip")
unzip("Data_for_peer_assessment.zip")

# PM2.5 emissions records and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Restrict to Baltimore City and total the emissions per source type and year.
emission_in_Baltimore <- NEI[NEI$fips == "24510", ]
ems_bal_type_year <- emission_in_Baltimore %>%
  group_by(year, type) %>%
  summarise(sum_ems = sum(Emissions))

# Write the plot to a PNG file.  print() is required so the ggplot object is
# actually rendered when this script is run non-interactively (e.g. via
# source() or Rscript); a bare top-level ggplot expression would otherwise
# leave plot3.png blank.
png("plot3.png", width = 480, height = 480)
print(
  ggplot(ems_bal_type_year, aes(year, sum_ems, group = type, colour = type)) +
    geom_line()
)
dev.off()
|
# Justin Singh-Mohudpur
# 8/20/2020
# Daily Exercise 12
library(tidyverse)
library(USAboundaries)
library(sf)
# Map theme edited from:
# https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
# I quite like the default Ubuntu font, so I've used the showtext library to
# automatically download it for use in the theme.
#
# library(showtext)
# font_add_google("Ubuntu")
# A ggplot2 theme for thematic maps: starts from theme_minimal() and blanks
# out axes, grids, panel and plot backgrounds so only the map geometry, the
# centred title and the legend remain.  Additional theme settings passed via
# ... are merged in last, so callers can override any of these defaults.
# Adapted from:
# https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
theme_map <- function(...) {
  base <- theme_minimal()
  overrides <- theme(
    text = element_text(family = "Ubuntu", color = "#22211d"),
    axis.line = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    plot.background = element_blank(),
    plot.title = element_text(hjust = 0.5, color = "#4e4d47", size = 48),
    panel.background = element_blank(),
    legend.background = element_rect(fill = "#f5f5f2", color = NA),
    panel.border = element_blank(),
    ...
  )
  base + overrides
}
# Build the map: the contiguous US states (CONUS) in grey, Colorado and the
# states sharing its border highlighted in red, with state-name labels placed
# at polygon coordinates via stat = "sf_coordinates".
USAboundaries::us_states() %>%
filter(!(name %in% c("Puerto Rico", "Alaska", "Hawaii"))) ->
CONUS
CONUS %>%
filter(name == "Colorado") ->
colorado
# States whose boundary touches Colorado's (st_touches predicate).
st_filter(CONUS, colorado, .predicate = st_touches) -> touchesColorado
ggplot() +
geom_sf(data = CONUS, colour = "#c2c2c2") +
geom_sf(data = colorado, fill = '#ff7070', colour = "#c2c2c2") +
geom_sf(data = touchesColorado, fill = '#ff7070', colour = "#c2c2c2", alpha = 0.5) +
geom_label(data = colorado, aes(label = name, geometry = geometry), stat = "sf_coordinates", fill = NA, label.size = NA, size = 8) +
geom_label(data = touchesColorado, aes(label = name, geometry = geometry), stat = "sf_coordinates", fill = NA, label.size = NA, size = 8) +
labs(title = "States touching the border of Colorado") +
theme_map() ->
ggCO
ggsave(ggCO, file = "img/states-touching-colorado.png", height = 8, width = 8, dpi = 300)
|
/R/day-12.R
|
no_license
|
program--/geog176A-daily-exercises
|
R
| false
| false
| 2,371
|
r
|
# Justin Singh-Mohudpur
# 8/20/2020
# Daily Exercise 12
library(tidyverse)
library(USAboundaries)
library(sf)
# Map theme edited from:
# https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
# I quite like the default Ubuntu font, so I've used the showtext library to
# automatically download it for use in the theme.
#
# library(showtext)
# font_add_google("Ubuntu")
# A ggplot2 theme for thematic maps: starts from theme_minimal() and blanks
# out axes, grids, panel and plot backgrounds so only the map geometry, the
# centred title and the legend remain.  Additional theme settings passed via
# ... are merged in last, so callers can override any of these defaults.
# Adapted from:
# https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
theme_map <- function(...) {
  base <- theme_minimal()
  overrides <- theme(
    text = element_text(family = "Ubuntu", color = "#22211d"),
    axis.line = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    plot.background = element_blank(),
    plot.title = element_text(hjust = 0.5, color = "#4e4d47", size = 48),
    panel.background = element_blank(),
    legend.background = element_rect(fill = "#f5f5f2", color = NA),
    panel.border = element_blank(),
    ...
  )
  base + overrides
}
# Build the map: the contiguous US states (CONUS) in grey, Colorado and the
# states sharing its border highlighted in red, with state-name labels placed
# at polygon coordinates via stat = "sf_coordinates".
USAboundaries::us_states() %>%
filter(!(name %in% c("Puerto Rico", "Alaska", "Hawaii"))) ->
CONUS
CONUS %>%
filter(name == "Colorado") ->
colorado
# States whose boundary touches Colorado's (st_touches predicate).
st_filter(CONUS, colorado, .predicate = st_touches) -> touchesColorado
ggplot() +
geom_sf(data = CONUS, colour = "#c2c2c2") +
geom_sf(data = colorado, fill = '#ff7070', colour = "#c2c2c2") +
geom_sf(data = touchesColorado, fill = '#ff7070', colour = "#c2c2c2", alpha = 0.5) +
geom_label(data = colorado, aes(label = name, geometry = geometry), stat = "sf_coordinates", fill = NA, label.size = NA, size = 8) +
geom_label(data = touchesColorado, aes(label = name, geometry = geometry), stat = "sf_coordinates", fill = NA, label.size = NA, size = 8) +
labs(title = "States touching the border of Colorado") +
theme_map() ->
ggCO
ggsave(ggCO, file = "img/states-touching-colorado.png", height = 8, width = 8, dpi = 300)
|
# In this script we combine all phenotypes and put them in matrices to be analyzed with plink
try({setwd("/Users/davidhsu/Documents/ukbb")})
try({setwd("/Users/david/Desktop/ukbb/")})
try({setwd('/scratch/PI/euan/projects/ukbb/da_dh/')})
source("auxiliary_functions.R")
library(corrplot)
###############################################
###############################################
############# Helper functions ################
###############################################
###############################################
# Extract the phenotype score vector from a fitted-model object.
# The stored lm-object lists keep the residuals of score ~ covariates in
# their first element; plain numeric score vectors are returned unchanged.
#
# x: either a numeric vector of residual scores, or a list whose first
#    element is a fitted lm object (i.e. has a $residuals component).
# Returns a (named) numeric vector of residual scores.
get_score_from_object <- function(x) {
  # is.numeric() also covers integer vectors, which the original
  # class(x) != "numeric" comparison would have misrouted into the
  # list-extraction branch.
  if (!is.numeric(x)) {
    return(x[[1]]$residuals)
  }
  x
}
# get_binary_score_by_NAs<-function(x){
# v = rep(0,length(x))
# names(v) = names(x)
# v[!is.na(x)] = 1
# return(v)
# }
# Combine a named list of (possibly differently-named) score vectors into a
# single matrix: rows are the union of all sample names across the vectors,
# columns are the list elements.  Samples missing from a given score vector
# are left as NA in that column.
merge_scores_list_into_matrix <- function(l) {
  sample_ids <- unique(unlist(lapply(l, names)))
  out <- matrix(NA, nrow = length(sample_ids), ncol = length(l),
                dimnames = list(sample_ids, names(l)))
  for (j in seq_along(l)) {
    scores <- l[[j]]
    out[names(scores), j] <- scores
  }
  out
}
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
########## Physical fitness scores ############
###############################################
###############################################
# Conservative
file = "fitness_analysis_fitness_vs_covs_lm_objects.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "fitness_analysis_fitness_vs_covs_lm_objects_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="physical_fitness_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
########## Physical activity scores ###########
###############################################
###############################################
# Conservative
file = "accelereometry_analysis_score_vs_covs_residuals_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "accelereometry_analysis_score_vs_covs_residuals_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat[,1:20])
corrplot(corrs,order='hclust')
save(final_mat,file="physical_activity_scores_for_GWAS.RData")
# The categorical data
# Conservative
file = "disc_accl_residual_scores_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "disc_accl_residual_scores_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="discrete_physical_activity_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
############## Additional scores ##############
###############################################
###############################################
# Conservative
file = "additional_scores_vs_covs_lm_objects_vs_covs_lm_objects_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "additional_scores_vs_covs_lm_objects_vs_covs_lm_objects_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="additional_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
################ Some plots ###################
###############################################
###############################################
# Assumes that column names across all matrices are unique
# Merge a list of score matrices into one matrix.  Rows are the union of all
# row names (samples); columns are the concatenation of all column names,
# which are assumed to be unique across the input matrices.  Sample/score
# pairs not present in any input stay NA.
merge_scores_matriceslist_into_matrix <- function(l) {
  all_rows <- unique(unlist(lapply(l, rownames)))
  all_cols <- unlist(lapply(l, colnames))
  merged <- matrix(NA, nrow = length(all_rows), ncol = length(all_cols),
                   dimnames = list(all_rows, all_cols))
  for (block in l) {
    merged[rownames(block), colnames(block)] <- block
  }
  merged
}
library(corrplot)
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
# Merge matrices
l = list()
l[["additional"]] = get(load("additional_scores_for_GWAS.RData"))
l[["fitness"]] = get(load("physical_fitness_scores_for_GWAS.RData"))
all_pheno_mat = merge_scores_matriceslist_into_matrix(l)
dim(all_pheno_mat)
all_corrs = get_pairwise_corrs(all_pheno_mat,method="spearman")
all_sizes = get_pairwise_sizes(all_pheno_mat)
colnames(all_corrs) = gsub(colnames(all_corrs),pattern="conservative",replace="consv")
rownames(all_corrs) = gsub(rownames(all_corrs),pattern="conservative",replace="consv")
corrplot(all_corrs,order='hclust',diag=T,tl.cex=0.9,
mar=rep(2.5,4),tl.col="black",tl.srt=65,cl.ratio=0.1,bg="gray")
corrplot(all_corrs,order='hclust',type='upper',tl.pos="lt",diag=T,tl.cex=0.9,
mar=rep(2.5,4),tl.col="black",tl.srt=65,cl.ratio=0.1,bg="gray")
col2 <- colorRampPalette(c("green","darkgreen"))
corrplot(log(all_sizes,base=10),add=T,type='lower',method='number',is.corr=F,
tl.pos="n",diag=F,number.cex=0.65,number.digits = 1,cl.ratio=0.1,
bg="black",col=col2(20),cl.length=5,cl.lim=c(4.8,5.8))
save(all_pheno_mat,file="all_pheno_mat_for_GWAS.RData")
# Sanity checks
table(is.na(all_pheno_mat[,15]))
table(is.na(l[[2]][,5]))
x1 = all_pheno_mat[,15]
x2 = l[[2]][,5]
inter = intersect(names(x1),names(x2))
all(x1[inter]==x2[inter],na.rm=T)
all(is.na(x1[inter])==is.na(x2[inter]))
plot(x1[inter],x2[inter])
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
############## Correct PCs ####################
###############################################
###############################################
source("auxiliary_functions.R")
scores_matrix = get(load("physical_fitness_scores_for_GWAS.RData"))
# Load the genetic PCA data and print the tables for the GWAS
# TODO:
# 1. Discretize?
# 2. What to do with NAs?
genetic_pca_data_path = "plink/may16.eigenvec"
genetic_pca_data = read.delim(genetic_pca_data_path,sep=" ",header = F)
dim(genetic_pca_data)
gwas_data_samples = as.character(genetic_pca_data[,2])
gwas_data_pcs = genetic_pca_data[,3:4]
colnames(gwas_data_pcs) = paste("PC",1:ncol(gwas_data_pcs),sep="")
gwas_data_residuals = c()
# Correct for the PCs
for(nn in colnames(scores_matrix)){
curr_res = scores_matrix[,nn]
curr_res = curr_res[gwas_data_samples]
names(curr_res) = gwas_data_samples
NA_samples = gwas_data_samples[is.na(curr_res)]
gwas_data_residuals = cbind(gwas_data_residuals,curr_res)
}
colnames(gwas_data_residuals) = paste("Residuals_",colnames(scores_matrix),sep="")
colnames(gwas_data_residuals) = gsub(colnames(gwas_data_residuals),pattern=" ",replace="_")
gwas_data = cbind(gwas_data_residuals,as.matrix(gwas_data_pcs))
corrs = get_pairwise_corrs(gwas_data)
save(gwas_data,file="July19_2017_gwas_data_table.RData")
write.table(gwas_data,file = "July19_2017_fitness_scores_gwas_data_table.txt",sep="\t",quote=F)
library(corrplot)
corrplot(corrs,order='hclust')
|
/metadata_analysis/combine_phenotypes_for_plink.R
|
no_license
|
HongyuanWu/ukbb-1
|
R
| false
| false
| 9,726
|
r
|
# In this script we combine all phenotypes and put them in matrices to be analyzed with plink
try({setwd("/Users/davidhsu/Documents/ukbb")})
try({setwd("/Users/david/Desktop/ukbb/")})
try({setwd('/scratch/PI/euan/projects/ukbb/da_dh/')})
source("auxiliary_functions.R")
library(corrplot)
###############################################
###############################################
############# Helper functions ################
###############################################
###############################################
# Extract the phenotype score vector from a fitted-model object.
# The stored lm-object lists keep the residuals of score ~ covariates in
# their first element; plain numeric score vectors are returned unchanged.
#
# x: either a numeric vector of residual scores, or a list whose first
#    element is a fitted lm object (i.e. has a $residuals component).
# Returns a (named) numeric vector of residual scores.
get_score_from_object <- function(x) {
  # is.numeric() also covers integer vectors, which the original
  # class(x) != "numeric" comparison would have misrouted into the
  # list-extraction branch.
  if (!is.numeric(x)) {
    return(x[[1]]$residuals)
  }
  x
}
# get_binary_score_by_NAs<-function(x){
# v = rep(0,length(x))
# names(v) = names(x)
# v[!is.na(x)] = 1
# return(v)
# }
# Combine a named list of (possibly differently-named) score vectors into a
# single matrix: rows are the union of all sample names across the vectors,
# columns are the list elements.  Samples missing from a given score vector
# are left as NA in that column.
merge_scores_list_into_matrix <- function(l) {
  sample_ids <- unique(unlist(lapply(l, names)))
  out <- matrix(NA, nrow = length(sample_ids), ncol = length(l),
                dimnames = list(sample_ids, names(l)))
  for (j in seq_along(l)) {
    scores <- l[[j]]
    out[names(scores), j] <- scores
  }
  out
}
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
########## Physical fitness scores ############
###############################################
###############################################
# Conservative
file = "fitness_analysis_fitness_vs_covs_lm_objects.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "fitness_analysis_fitness_vs_covs_lm_objects_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="physical_fitness_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
########## Physical activity scores ###########
###############################################
###############################################
# Conservative
file = "accelereometry_analysis_score_vs_covs_residuals_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "accelereometry_analysis_score_vs_covs_residuals_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat[,1:20])
corrplot(corrs,order='hclust')
save(final_mat,file="physical_activity_scores_for_GWAS.RData")
# The categorical data
# Conservative
file = "disc_accl_residual_scores_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "disc_accl_residual_scores_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="discrete_physical_activity_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
############## Additional scores ##############
###############################################
###############################################
# Conservative
file = "additional_scores_vs_covs_lm_objects_vs_covs_lm_objects_conservative.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_consv = merge_scores_list_into_matrix(scores)
colnames(scores_mat_consv) = paste(colnames(scores_mat_consv),"_conservative",sep='')
# Simple
file = "additional_scores_vs_covs_lm_objects_vs_covs_lm_objects_simple.RData"
obj = get(load(file))
scores = lapply(obj,get_score_from_object)
scores_mat_simp = merge_scores_list_into_matrix(scores)
colnames(scores_mat_simp) = paste(colnames(scores_mat_simp),"_simple",sep='')
final_mat = cbind(scores_mat_simp,scores_mat_consv)
print(dim(final_mat))
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
save(final_mat,file="additional_scores_for_GWAS.RData")
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
################ Some plots ###################
###############################################
###############################################
# Assumes that column names across all matrices are unique
# Merge a list of score matrices into one matrix.  Rows are the union of all
# row names (samples); columns are the concatenation of all column names,
# which are assumed to be unique across the input matrices.  Sample/score
# pairs not present in any input stay NA.
merge_scores_matriceslist_into_matrix <- function(l) {
  all_rows <- unique(unlist(lapply(l, rownames)))
  all_cols <- unlist(lapply(l, colnames))
  merged <- matrix(NA, nrow = length(all_rows), ncol = length(all_cols),
                   dimnames = list(all_rows, all_cols))
  for (block in l) {
    merged[rownames(block), colnames(block)] <- block
  }
  merged
}
library(corrplot)
corrs = get_pairwise_corrs(final_mat)
corrplot(corrs,order='hclust')
# Merge matrices
l = list()
l[["additional"]] = get(load("additional_scores_for_GWAS.RData"))
l[["fitness"]] = get(load("physical_fitness_scores_for_GWAS.RData"))
all_pheno_mat = merge_scores_matriceslist_into_matrix(l)
dim(all_pheno_mat)
all_corrs = get_pairwise_corrs(all_pheno_mat,method="spearman")
all_sizes = get_pairwise_sizes(all_pheno_mat)
colnames(all_corrs) = gsub(colnames(all_corrs),pattern="conservative",replace="consv")
rownames(all_corrs) = gsub(rownames(all_corrs),pattern="conservative",replace="consv")
corrplot(all_corrs,order='hclust',diag=T,tl.cex=0.9,
mar=rep(2.5,4),tl.col="black",tl.srt=65,cl.ratio=0.1,bg="gray")
corrplot(all_corrs,order='hclust',type='upper',tl.pos="lt",diag=T,tl.cex=0.9,
mar=rep(2.5,4),tl.col="black",tl.srt=65,cl.ratio=0.1,bg="gray")
col2 <- colorRampPalette(c("green","darkgreen"))
corrplot(log(all_sizes,base=10),add=T,type='lower',method='number',is.corr=F,
tl.pos="n",diag=F,number.cex=0.65,number.digits = 1,cl.ratio=0.1,
bg="black",col=col2(20),cl.length=5,cl.lim=c(4.8,5.8))
save(all_pheno_mat,file="all_pheno_mat_for_GWAS.RData")
# Sanity checks
table(is.na(all_pheno_mat[,15]))
table(is.na(l[[2]][,5]))
x1 = all_pheno_mat[,15]
x2 = l[[2]][,5]
inter = intersect(names(x1),names(x2))
all(x1[inter]==x2[inter],na.rm=T)
all(is.na(x1[inter])==is.na(x2[inter]))
plot(x1[inter],x2[inter])
###############################################
###############################################
#################### End ######################
###############################################
###############################################
###############################################
###############################################
############## Correct PCs ####################
###############################################
###############################################
source("auxiliary_functions.R")
scores_matrix = get(load("physical_fitness_scores_for_GWAS.RData"))
# Load the genetic PCA data and print the tables for the GWAS
# TODO:
# 1. Discretize?
# 2. What to do with NAs?
genetic_pca_data_path = "plink/may16.eigenvec"
genetic_pca_data = read.delim(genetic_pca_data_path,sep=" ",header = F)
dim(genetic_pca_data)
gwas_data_samples = as.character(genetic_pca_data[,2])
gwas_data_pcs = genetic_pca_data[,3:4]
colnames(gwas_data_pcs) = paste("PC",1:ncol(gwas_data_pcs),sep="")
gwas_data_residuals = c()
# Correct for the PCs
for(nn in colnames(scores_matrix)){
curr_res = scores_matrix[,nn]
curr_res = curr_res[gwas_data_samples]
names(curr_res) = gwas_data_samples
NA_samples = gwas_data_samples[is.na(curr_res)]
gwas_data_residuals = cbind(gwas_data_residuals,curr_res)
}
colnames(gwas_data_residuals) = paste("Residuals_",colnames(scores_matrix),sep="")
colnames(gwas_data_residuals) = gsub(colnames(gwas_data_residuals),pattern=" ",replace="_")
gwas_data = cbind(gwas_data_residuals,as.matrix(gwas_data_pcs))
corrs = get_pairwise_corrs(gwas_data)
save(gwas_data,file="July19_2017_gwas_data_table.RData")
write.table(gwas_data,file = "July19_2017_fitness_scores_gwas_data_table.txt",sep="\t",quote=F)
library(corrplot)
corrplot(corrs,order='hclust')
|
# Compare the memory footprint of an sf polygon layer at several
# simplification tolerances, using Irish county boundaries (GADM) as the
# example.  pryr::object_size() reports sizes; sf::st_simplify() reduces
# vertex counts.
#
# Install pryr only when it is missing, so re-running the script does not
# trigger a needless reinstall every time.
if (!requireNamespace("pryr", quietly = TRUE)) {
  install.packages("pryr")
}
library(pryr)
library(ggplot2)
library(sf)

list.files()

# Fetch and unpack the GADM level-1 administrative boundaries for Ireland.
download.file("http://biogeo.ucdavis.edu/data/diva/adm/IRL_adm.zip",
              destfile = "counties.zip")
unzip("counties.zip")

counties <- sf::read_sf("IRL_adm1.shp")
# print() ensures the ggplot renders when the script runs non-interactively.
print(ggplot(counties) + geom_sf())
pryr::object_size(counties)

# Simplify with increasing tolerance (metres): larger dTolerance means fewer
# vertices, a smaller object, and a coarser outline.
counties_1k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 1000)
pryr::object_size(counties_1k)
plot(counties_1k)

counties_10k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 10000)
pryr::object_size(counties_10k)
plot(counties_10k)

counties_2k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 2000)
pryr::object_size(counties_2k)
plot(counties_2k)
|
/sf/import.r
|
no_license
|
DragonflyStats/GISwithR_archive
|
R
| false
| false
| 876
|
r
|
# Compare the memory footprint of an sf polygon layer at several
# simplification tolerances, using Irish county boundaries (GADM) as the
# example.  pryr::object_size() reports sizes; sf::st_simplify() reduces
# vertex counts.
#
# Install pryr only when it is missing, so re-running the script does not
# trigger a needless reinstall every time.
if (!requireNamespace("pryr", quietly = TRUE)) {
  install.packages("pryr")
}
library(pryr)
library(ggplot2)
library(sf)

list.files()

# Fetch and unpack the GADM level-1 administrative boundaries for Ireland.
download.file("http://biogeo.ucdavis.edu/data/diva/adm/IRL_adm.zip",
              destfile = "counties.zip")
unzip("counties.zip")

counties <- sf::read_sf("IRL_adm1.shp")
# print() ensures the ggplot renders when the script runs non-interactively.
print(ggplot(counties) + geom_sf())
pryr::object_size(counties)

# Simplify with increasing tolerance (metres): larger dTolerance means fewer
# vertices, a smaller object, and a coarser outline.
counties_1k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 1000)
pryr::object_size(counties_1k)
plot(counties_1k)

counties_10k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 10000)
pryr::object_size(counties_10k)
plot(counties_10k)

counties_2k <- sf::st_simplify(counties, preserveTopology = TRUE, dTolerance = 2000)
pryr::object_size(counties_2k)
plot(counties_2k)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stf_help_view.R
\name{stf_help_view}
\alias{stf_help_view}
\title{Function stf_help_view}
\usage{
stf_help_view()
}
\value{
This function has only side effect, it doesn't return anything.
}
\description{
This function displays STF's help page on Rstudio viewer pane.
}
\examples{
\dontrun{
stf_help_view()
}
}
\keyword{help}
\keyword{stf}
|
/man/stf_help_view.Rd
|
permissive
|
jjesusfilho/stfstj
|
R
| false
| true
| 418
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stf_help_view.R
\name{stf_help_view}
\alias{stf_help_view}
\title{Function stf_help_view}
\usage{
stf_help_view()
}
\value{
This function has only side effect, it doesn't return anything.
}
\description{
This function displays STF's help page on Rstudio viewer pane.
}
\examples{
\dontrun{
stf_help_view()
}
}
\keyword{help}
\keyword{stf}
|
library(pcalg)
## source("/u/kalischm/research/packages/LINGAM/R/lingamFuns.R")
##--> showProc.time(), assertError(), relErrV(), ...
R.home(); sessionInfo() # helping package maintainers to debug ...
.libPaths()
packageDescription("pcalg")
packageDescription("Matrix")
cat("doExtras:", (doExtras <- pcalg:::doExtras()), "\n")
##################################################
## Exp 1
##################################################
set.seed(123)
n <- 500
eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
eps2 <- runif(n) - 0.5 # ~ U[-1/2, 1/2]
X <- cbind(A = eps1 + 0.9*eps2,
B = eps2)
## x1 <- x2
## adjacency matrix:
## 0 0
## 1 0
(trueDAG <- rbind(c(0,0),
c(1,0)))
estDAG <- LINGAM(X, verbose = TRUE)
stopifnot(as.integer(estDAG$ Adj) == trueDAG,
all.equal (estDAG$ B, cbind(0, c(0.878188262685122, 0))))
if(doExtras) {
## using pcalg
n <- nrow(X)
V <- LETTERS[1:ncol(X)] # labels aka node names
## estimate CPDAG
pc.fit <- pc(suffStat = list(C = cor(X), n = n),
indepTest = gaussCItest, ## indep.test: partial correlations
alpha = 0.01, labels = colnames(X))
if (require(Rgraphviz)) {
plot(pc.fit, main = "Estimated CPDAG")
}
}
##################################################
## Exp 2
##################################################
set.seed(123)
n <- 500
eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
eps2 <- runif(n) - 0.5
eps3 <- sign(rnorm(n)) * abs(rnorm(n))^(1/3)
eps4 <- rnorm(n)^2
x1 <- eps1 + 0.9*eps2
x2 <- eps2
x3 <- 0.8*eps2 + eps3
x4 <- -0.9*x3 - x1 + eps4
X <- cbind(U = x1, V = x2, W = x3, Y = x4)
trueDAG <- cbind(c(0,1,0,0),c(0,0,0,0),c(0,1,0,0),c(1,0,1,0))
## x4 <- x3 <- x2 -> x1 -> x4
## adjacency matrix:
## 0 0 0 1
## 1 0 1 0
## 0 0 0 1
## 0 0 0 0
estDAG <- LINGAM(X, verbose = TRUE)
B.est <- rbind(c(0, 0.986119553, 0, 0),
c(0, 0, 0, 0),
c(0, 0.89198226, 0, 0),
c(-0.987301824, 0, -0.890961952, 0))
stopifnot(as.integer(estDAG$Adj) == trueDAG,
all.equal(estDAG$B, B.est, tol=1e-9))
if(doExtras) {
## using pcalg
n <- nrow(X)
V <- colnames(X) # labels aka node names
## estimate CPDAG
pc.fit <- pc(suffStat = list(C = cor(X), n = n),
indepTest = gaussCItest, ## indep.test: partial correlations
alpha=0.01, labels = V, verbose = FALSE)
if (require(Rgraphviz)) {
plot(pc.fit, main = "Estimated CPDAG")
}
}
## if(!doExtras && !interactive()) quit("no")
### More tests for higher dimensions
### p = 8 -------- Example 3 -----------
set.seed(127)
n <- 2000
x1 <- eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
x2 <- eps2 <- runif(n) - 0.5
x3 <- eps3 <- sign(rnorm(n)) * abs(rnorm(n))^(1/3)
x4 <- eps4 <- rnorm(n)^2
Z <- rnorm(n); eps5 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps6 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps7 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps8 <- sign(Z) * sqrt(abs(Z))
x5 <- 7/8*x1 - 3/4*x2 + 3/4*x3 + eps5
x6 <- 0.8*x4 + eps6
x7 <- 3/4*x5 - 7/8*x6 + eps7
x8 <- .9*x7 + eps8
X <- cbind(x1,x2,x3,x4,x5,x6,x7,x8, deparse.level = 2)
## (x1, x2, x3) -> x5 -> x7 <- x6 <- x4; x7 -> x8
## adjacency matrix:
## 1 2 3 4 5 6 7 8
## x1 . . . . 1 . . .
## x2 . . . . 1 . . .
## x3 . . . . 1 . . .
## x4 . . . . . 1 . .
## x5 . . . . . . 1 .
## x6 . . . . . . 1 .
## x7 . . . . . . . 1
## x8 . . . . . . . .
## true DAG :
. <- 0
trDAG3 <- rbind(
c(., ., ., ., 1, ., ., .),
c(., ., ., ., 1, ., ., .),
c(., ., ., ., 1, ., ., .),
c(., ., ., ., ., 1, ., .),
c(., ., ., ., ., ., 1, .),
c(., ., ., ., ., ., 1, .),
c(., ., ., ., ., ., ., 1),
c(., ., ., ., ., ., ., .))
estB.3 <- rbind(
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(.831899433, -.737954104, 0.725137273, ., ., ., ., .),
c(., ., ., 0.788185348, ., ., ., .),
c(., ., ., ., 0.774490692, -0.886143314, ., .),
c(., ., ., ., ., ., 0.900617843, .))
eDAG3 <- LINGAM(X, verbose = TRUE)
stopifnot(trDAG3 == eDAG3$Adj,
with(eDAG3, all(t(B != 0) == Adj)),
all.equal(eDAG3$B, estB.3, tol=1e-9))
### p = 10 -------- Example 4 -----------
### using same x1..,x4, and, eps5 .. eps8 as in Ex. 3
Z <- rnorm(n); eps9 <- sign(Z) * abs(Z)^(1/3)
Z <- rnorm(n); eps10 <- sign(Z) * abs(Z)^0.25
x5 <- 7/8*x1 - 3/4*x2 + eps5
x6 <- 0.8*x2 - 7/8*x3 + eps6
x7 <- -7/8*x4 + eps7
x8 <- 0.9*x2 - 0.8*x5 + eps8
x9 <- -3/4*x6 + 7/8*x7 + eps9
x10 <- 3/4*x6 + 0.5*x8 +0.9*x9 + eps10
X <- cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10, deparse.level = 2)
## true DAG :
. <- 0
trDAG4 <- rbind(
# 1 2 3 4 5 6 7 8 9 0
c(., ., ., ., 1, ., ., ., ., .), # 1
c(., ., ., ., 1, 1, ., 1, ., .), # 2
c(., ., ., ., ., 1, ., ., ., .), # 3
c(., ., ., ., ., ., 1, ., ., .), # 4
c(., ., ., ., ., ., ., 1, ., .), # 5
c(., ., ., ., ., ., ., ., 1, 1), # 6
c(., ., ., ., ., ., ., ., 1, .), # 7
c(., ., ., ., ., ., ., ., ., 1), # 8
c(., ., ., ., ., ., ., ., ., 1), # 9
c(., ., ., ., ., ., ., ., ., .)) # 10
estB.4 <- rbind(
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(0.831899433, -0.737954104, ., ., ., ., ., ., ., .),
c(., 0.687919607, -0.863361084, ., ., ., ., ., ., .),
c(., ., ., -0.878305407, ., ., ., ., ., .),
c(., 0.864092647, ., ., -0.77902333, ., ., ., ., .),
c(., ., ., ., ., -0.780116888, 0.929828083, ., ., .),
c(., ., ., ., ., 0.72436897, ., 0.502210828, 0.913644804, .))
eDAG4 <- LINGAM(X, verbose = TRUE)
stopifnot(trDAG4 == eDAG4$Adj,
with(eDAG4, all(t(B != 0) == Adj)),
all.equal(eDAG4$B, estB.4, tol=1e-9))
|
/pcalg/tests/test_LINGAM.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,127
|
r
|
library(pcalg)
## source("/u/kalischm/research/packages/LINGAM/R/lingamFuns.R")
##--> showProc.time(), assertError(), relErrV(), ...
R.home(); sessionInfo() # helping package maintainers to debug ...
.libPaths()
packageDescription("pcalg")
packageDescription("Matrix")
cat("doExtras:", (doExtras <- pcalg:::doExtras()), "\n")
##################################################
## Exp 1
##################################################
set.seed(123)
n <- 500
eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
eps2 <- runif(n) - 0.5 # ~ U[-1/2, 1/2]
X <- cbind(A = eps1 + 0.9*eps2,
B = eps2)
## x1 <- x2
## adjacency matrix:
## 0 0
## 1 0
(trueDAG <- rbind(c(0,0),
c(1,0)))
estDAG <- LINGAM(X, verbose = TRUE)
stopifnot(as.integer(estDAG$ Adj) == trueDAG,
all.equal (estDAG$ B, cbind(0, c(0.878188262685122, 0))))
if(doExtras) {
## using pcalg
n <- nrow(X)
V <- LETTERS[1:ncol(X)] # labels aka node names
## estimate CPDAG
pc.fit <- pc(suffStat = list(C = cor(X), n = n),
indepTest = gaussCItest, ## indep.test: partial correlations
alpha = 0.01, labels = colnames(X))
if (require(Rgraphviz)) {
plot(pc.fit, main = "Estimated CPDAG")
}
}
##################################################
## Exp 2
##################################################
set.seed(123)
n <- 500
eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
eps2 <- runif(n) - 0.5
eps3 <- sign(rnorm(n)) * abs(rnorm(n))^(1/3)
eps4 <- rnorm(n)^2
x1 <- eps1 + 0.9*eps2
x2 <- eps2
x3 <- 0.8*eps2 + eps3
x4 <- -0.9*x3 - x1 + eps4
X <- cbind(U = x1, V = x2, W = x3, Y = x4)
trueDAG <- cbind(c(0,1,0,0),c(0,0,0,0),c(0,1,0,0),c(1,0,1,0))
## x4 <- x3 <- x2 -> x1 -> x4
## adjacency matrix:
## 0 0 0 1
## 1 0 1 0
## 0 0 0 1
## 0 0 0 0
estDAG <- LINGAM(X, verbose = TRUE)
B.est <- rbind(c(0, 0.986119553, 0, 0),
c(0, 0, 0, 0),
c(0, 0.89198226, 0, 0),
c(-0.987301824, 0, -0.890961952, 0))
stopifnot(as.integer(estDAG$Adj) == trueDAG,
all.equal(estDAG$B, B.est, tol=1e-9))
if(doExtras) {
## using pcalg
n <- nrow(X)
V <- colnames(X) # labels aka node names
## estimate CPDAG
pc.fit <- pc(suffStat = list(C = cor(X), n = n),
indepTest = gaussCItest, ## indep.test: partial correlations
alpha=0.01, labels = V, verbose = FALSE)
if (require(Rgraphviz)) {
plot(pc.fit, main = "Estimated CPDAG")
}
}
## if(!doExtras && !interactive()) quit("no")
### More tests for higher dimensions
### p = 8 -------- Example 3 -----------
set.seed(127)
n <- 2000
x1 <- eps1 <- sign(rnorm(n)) * sqrt(abs(rnorm(n)))
x2 <- eps2 <- runif(n) - 0.5
x3 <- eps3 <- sign(rnorm(n)) * abs(rnorm(n))^(1/3)
x4 <- eps4 <- rnorm(n)^2
Z <- rnorm(n); eps5 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps6 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps7 <- sign(Z) * sqrt(abs(Z))
Z <- rnorm(n); eps8 <- sign(Z) * sqrt(abs(Z))
x5 <- 7/8*x1 - 3/4*x2 + 3/4*x3 + eps5
x6 <- 0.8*x4 + eps6
x7 <- 3/4*x5 - 7/8*x6 + eps7
x8 <- .9*x7 + eps8
X <- cbind(x1,x2,x3,x4,x5,x6,x7,x8, deparse.level = 2)
## (x1, x2, x3) -> x5 -> x7 <- x6 <- x4; x7 -> x8
## adjacency matrix:
## 1 2 3 4 5 6 7 8
## x1 . . . . 1 . . .
## x2 . . . . 1 . . .
## x3 . . . . 1 . . .
## x4 . . . . . 1 . .
## x5 . . . . . . 1 .
## x6 . . . . . . 1 .
## x7 . . . . . . . 1
## x8 . . . . . . . .
## true DAG :
. <- 0
trDAG3 <- rbind(
c(., ., ., ., 1, ., ., .),
c(., ., ., ., 1, ., ., .),
c(., ., ., ., 1, ., ., .),
c(., ., ., ., ., 1, ., .),
c(., ., ., ., ., ., 1, .),
c(., ., ., ., ., ., 1, .),
c(., ., ., ., ., ., ., 1),
c(., ., ., ., ., ., ., .))
estB.3 <- rbind(
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., .),
c(.831899433, -.737954104, 0.725137273, ., ., ., ., .),
c(., ., ., 0.788185348, ., ., ., .),
c(., ., ., ., 0.774490692, -0.886143314, ., .),
c(., ., ., ., ., ., 0.900617843, .))
eDAG3 <- LINGAM(X, verbose = TRUE)
stopifnot(trDAG3 == eDAG3$Adj,
with(eDAG3, all(t(B != 0) == Adj)),
all.equal(eDAG3$B, estB.3, tol=1e-9))
### p = 10 -------- Example 4 -----------
### using same x1..,x4, and, eps5 .. eps8 as in Ex. 3
Z <- rnorm(n); eps9 <- sign(Z) * abs(Z)^(1/3)
Z <- rnorm(n); eps10 <- sign(Z) * abs(Z)^0.25
x5 <- 7/8*x1 - 3/4*x2 + eps5
x6 <- 0.8*x2 - 7/8*x3 + eps6
x7 <- -7/8*x4 + eps7
x8 <- 0.9*x2 - 0.8*x5 + eps8
x9 <- -3/4*x6 + 7/8*x7 + eps9
x10 <- 3/4*x6 + 0.5*x8 +0.9*x9 + eps10
X <- cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10, deparse.level = 2)
## true DAG :
. <- 0
trDAG4 <- rbind(
# 1 2 3 4 5 6 7 8 9 0
c(., ., ., ., 1, ., ., ., ., .), # 1
c(., ., ., ., 1, 1, ., 1, ., .), # 2
c(., ., ., ., ., 1, ., ., ., .), # 3
c(., ., ., ., ., ., 1, ., ., .), # 4
c(., ., ., ., ., ., ., 1, ., .), # 5
c(., ., ., ., ., ., ., ., 1, 1), # 6
c(., ., ., ., ., ., ., ., 1, .), # 7
c(., ., ., ., ., ., ., ., ., 1), # 8
c(., ., ., ., ., ., ., ., ., 1), # 9
c(., ., ., ., ., ., ., ., ., .)) # 10
estB.4 <- rbind(
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(., ., ., ., ., ., ., ., ., .),
c(0.831899433, -0.737954104, ., ., ., ., ., ., ., .),
c(., 0.687919607, -0.863361084, ., ., ., ., ., ., .),
c(., ., ., -0.878305407, ., ., ., ., ., .),
c(., 0.864092647, ., ., -0.77902333, ., ., ., ., .),
c(., ., ., ., ., -0.780116888, 0.929828083, ., ., .),
c(., ., ., ., ., 0.72436897, ., 0.502210828, 0.913644804, .))
eDAG4 <- LINGAM(X, verbose = TRUE)
stopifnot(trDAG4 == eDAG4$Adj,
with(eDAG4, all(t(B != 0) == Adj)),
all.equal(eDAG4$B, estB.4, tol=1e-9))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/uibe.r
\name{postData}
\alias{postData}
\title{POST data from HTTP}
\usage{
postData(df, name = NULL, key = NULL)
}
\arguments{
\item{df}{data.frame}
\item{name}{character}
\item{key}{character}
}
\value{
list
}
\description{
POST data from HTTP
}
\examples{
\dontrun{
postData(stock1,name='abc',key='xxxx')
postData(industry0,name='industry0',key='xxxx')
}
}
\author{
Dan Zhang
}
|
/man/postData.Rd
|
no_license
|
Leo-Hzau/ruibe
|
R
| false
| false
| 470
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/uibe.r
\name{postData}
\alias{postData}
\title{POST data from HTTP}
\usage{
postData(df, name = NULL, key = NULL)
}
\arguments{
\item{df}{data.frame}
\item{name}{character}
\item{key}{character}
}
\value{
list
}
\description{
POST data from HTTP
}
\examples{
\dontrun{
postData(stock1,name='abc',key='xxxx')
postData(industry0,name='industry0',key='xxxx')
}
}
\author{
Dan Zhang
}
|
# Serializer that writes `val` as the JSON response body.
# NOTE(review): despite the name, `val` is not converted here -- it is set
# on the body as-is, so it is presumably already a JSON string; confirm
# that upstream code performs the actual serialization.
# On any error the request/response pair is handed to `errorHandler`.
jsonSerializer <- function(val, req, res, errorHandler){
  tryCatch({
    payload <- val
    res$setHeader("Content-Type", "application/json")
    res$body <- payload
    res$toResponse()
  }, error = function(e) {
    errorHandler(req, res, e)
  })
}
# Register under the "json" key in the global serializer table.
.globals$serializers[["json"]] <- jsonSerializer
|
/R/serializer-json.R
|
permissive
|
mpmenne/plumber
|
R
| false
| false
| 305
|
r
|
jsonSerializer <- function(val, req, res, errorHandler){
tryCatch({
json <- val
res$setHeader("Content-Type", "application/json")
res$body <- json
return(res$toResponse())
}, error=function(e){
errorHandler(req, res, e)
})
}
.globals$serializers[["json"]] <- jsonSerializer
|
## Map visualization
# Choropleth map (per-region statistics)
install.packages("mapproj")
install.packages("ggiraphExtra")
library(ggiraphExtra)
# List the data sets available in attached packages
data() # USArrests: violent-crime rates by US state
# Inspect the structure
str(USArrests) # UrbanPop = percent of population living in urban areas
head(USArrests)
library(dplyr)
library(tibble)
# Move the row names (state names) into a 'state' column,
# then lowercase them so they match the names in the map data.
crime <- rownames_to_column(USArrests, var = "state")
crime$state <- tolower(crime$state)
crime
# Map data containing US state latitude/longitude polygons
install.packages("maps")
library(ggplot2)
# Load the state outlines as a data frame
states_map <- map_data("state")
str(states_map)
# Static choropleth of murder rates by state
ggChoropleth(data = crime,
             aes(fill = Murder,
                 map_id = state),
             map = states_map)
# Interactive version.
# Fixed: use TRUE rather than the reassignable alias T.
ggChoropleth(data = crime,
             aes(fill = Murder,
                 map_id = state),
             map = states_map,
             interactive = TRUE)
|
/demo_11(choroplethmap).R
|
no_license
|
hhayoung/R-lang
|
R
| false
| false
| 1,038
|
r
|
## 지도 시각화
# 단계 구분도(지역별 통계치)
install.packages("mapproj")
install.packages("ggiraphExtra")
library(ggiraphExtra)
# 패키지안에 있는 데이터셋 목록
data() #USArrests : 주별 강력 범죄율 정보
# 구조 확인
str(USArrests) #UrbanPop(도시 인구)
head(USArrests)
library(dplyr)
library(tibble)
# 행의 이름을 state 변수로 바꿔서 새로운 데이터 프레임을 만듦
crime <- rownames_to_column(USArrests, var="state")
crime$state <- tolower(crime$state)
crime
# 미국의 위/경도 정보가 있는 지도 데이터터
install.packages("maps")
library(ggplot2)
# 데이터 프레임으로 불러옴
states_map <- map_data("state")
str(states_map)
ggChoropleth(data = crime,
aes(fill = Murder,
map_id = state),
map = states_map)
ggChoropleth(data = crime,
aes(fill = Murder,
map_id = state),
map = states_map,
interactive = T)
|
## Hong Meeker Plug in Method for tp CI, Page 172 Hong & Meeker ##
# First Get the Value of the Quantile for Fixed MLE estimated Parameters ##
#Weibull Function
# Weibull CDF in the log-location/scale parameterization used throughout
# this script: Weibull shape = 1/scale, Weibull scale = exp(location).
# Extra arguments (e.g. lower.tail) are forwarded to stats::pweibull().
my_pweibull <- function(x, location, scale, ...) {
  wb_shape <- 1 / scale
  wb_scale <- exp(location)
  pweibull(x, shape = wb_shape, scale = wb_scale, ...)
}
#Function to get time from p
# cdf of the two-component series system minus the target probability b;
# the root in t is the b-quantile.  Component 1 is weighted by `pi`
# (limited failure population), component 2 always contributes.
quant <- function(loc1, loc2, scl1, scl2, pi, b, t) {
  surv1 <- 1 - pi * my_pweibull(t, loc1, scl1)
  surv2 <- 1 - my_pweibull(t, loc2, scl2)
  (1 - surv1 * surv2) - b
}
#Function to Get Quantiles of Interest for MLE. Takes MLE output and Quantile.
# b-quantile of the fitted two-component model evaluated at the MLE.
# `mle$est` must carry named entries mu1, mu2, log_sigma1, log_sigma2, pi;
# `b` is the target probability (e.g. 0.10).  The root is searched on
# (0, 300000) -- assumed wide enough for all fitted models.
mletp <- function(mle, b) {
  est <- mle$est
  root_fn <- function(t) {
    quant(est["mu1"], est["mu2"],
          exp(est["log_sigma1"]), exp(est["log_sigma2"]),
          est["pi"], b, t)
  }
  uniroot(root_fn, c(0, 300000))$root
}
# SEV for standardized time
# CDF of the standard smallest-extreme-value (SEV) distribution.
# NOTE(review): `location` and `scale` are accepted but ignored -- `x` is
# assumed already standardized; confirm against callers.
psev <- function(x, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE) {
  upper <- exp(-exp(x))            # P(X > x)
  p <- if (lower.tail) 1 - upper else upper
  if (log.p) log(p) else p
}
#Derivative of SEV
# Density of the standard SEV distribution: exp(x - exp(x)).
# NOTE(review): `location` and `scale` are accepted but ignored, matching
# psev() above.
dsev <- function(x, location = 0, scale = 1) {
  log_pdf <- x - exp(x)
  exp(log_pdf)
}
#Get Implicit Derivative of GFLP
# Implicit derivatives of y = log(tp) with respect to the model parameters,
# obtained by implicitly differentiating the system cdf used in quant():
#   G(y) = 1 - (1 - pi*F1(y)) * (1 - F2(y))
# at the fixed quantile probability.  The result feeds the delta-method
# standard error in se_tp() (Hong & Meeker plug-in approach, see file header).
impli_deriv <- function(mle, tp){
  ## standardized log-times for each SEV/Weibull component
  z1 = (log(tp) - mle$est["mu1"]) / exp(mle$est["log_sigma1"])
  z2 = (log(tp) - mle$est["mu2"]) / exp(mle$est["log_sigma2"])
  ## partials of G with respect to F1 and F2
  gF1 = -psev(z2,lower.tail=FALSE)*mle$est["pi"]
  gF2 = -(1 - mle$est["pi"]*psev(z1,lower.tail=TRUE))
  ## partials of F1, F2 with respect to y = log(tp)
  F1yp = dsev(z1)*(1/exp(mle$est["log_sigma1"]))
  F2yp = dsev(z2)*(1/exp(mle$est["log_sigma2"]))
  ## partials of F1 with respect to mu1 and sigma1
  F1mu1 = -dsev(z1)*(1/exp(mle$est["log_sigma1"]))
  F1s1 = dsev(z1)*((-log(tp) + mle$est["mu1"])/((exp(mle$est["log_sigma1"]))^2))
  ## partials of F2 with respect to mu2 and sigma2
  F2mu2 = -dsev(z2)*(1/exp(mle$est["log_sigma2"]))
  F2s2 = dsev(z2)*((-log(tp) + mle$est["mu2"])/((exp(mle$est["log_sigma2"]))^2))
  ## implicit-function theorem: dy/dtheta = -(dG/dtheta) / (dG/dy)
  denom = gF1*F1yp + gF2*F2yp
  l1 = (-gF1*F1mu1)/denom
  l2 = (-gF2*F2mu2)/denom
  ls1 = (-gF1*F1s1)/denom
  ls2 = (-gF2*F2s2)/denom
  lp = 0 #note: p is not a parameter in the individual failure distributions
  ## 1x5 gradient row in the parameter order expected by se_tp():
  ## (mu1, mu2, sigma1, sigma2, pi)
  out <- cbind(l1,l2,ls1,ls2,lp)
  rownames(out) <- NULL
  return(out)
}
#Get Standard Error for y=log(tp)
# Delta-method standard error for y = log(tp): sqrt of the quadratic form
# impl %*% Cov %*% t(impl), where `impl` is the 1x5 gradient row from
# impli_deriv() and `mle$cov2` is the parameter covariance matrix.
se_tp <- function(impl, mle) {
  variance <- impl %*% mle$cov2 %*% t(impl)
  sqrt(variance)
}
#Get CI for tp using Delta Plug in Method
# (1 - alpha) Wald confidence interval for tp, computed on the log scale
# and back-transformed.  Returns a 1x3 matrix (lb, mle.est, ub).
# NOTE(review): the `mle` argument is accepted but never used -- confirm
# whether callers rely on it.
tp_bounds <- function(mle, tp, se, alpha) {
  z <- qnorm(1 - alpha / 2)
  lb <- exp(log(tp) - z * se)
  mle.est <- tp
  ub <- exp(log(tp) + z * se)
  cbind(lb, mle.est, ub)
}
## Run for All Models ##
# Results table: one row per model; columns = (model id, lower bound,
# MLE point estimate, upper bound) for the 0.10 quantile.
# NOTE(review): the model count 21 is hard-coded, and `get_mle_stan` /
# `inits` are defined elsewhere in the project -- confirm both when reusing.
tp_all <- matrix(NA, ncol=4, nrow=21)
for (i in 1:21){
  mod <- i
  mlefit <- get_mle_stan(mod, inits) #get MLE estimate
  tp_mle <- mletp(mlefit,.10) #use MLE point estimates for tp
  idev <- impli_deriv(mlefit, tp_mle) #get implicit derivative of GFLP at log(tp)
  se_ylog <- se_tp(idev, mlefit) #se for log(tp)
  tp.bounds <- tp_bounds(mlefit, tp_mle, se_ylog, .05) #get 95% tp CI
  tp_all[i,2:4] <- tp.bounds
  tp_all[i, 1] <- mod
}
|
/constrained_MLE/tp_mle.R
|
no_license
|
emittman/BB_data_analysis
|
R
| false
| false
| 2,942
|
r
|
## Hong Meeker Plug in Method for tp CI, Page 172 Hong & Meeker ##
# First Get the Value of the Quantile for Fixed MLE estimated Parameters ##
#Weibull Function
my_pweibull <- function(x, location, scale, ...){
pweibull(x, 1/scale, exp(location), ...)
}
#Function to get time from p
quant <- function(loc1, loc2, scl1, scl2, pi, b, t) {
1 - (1 - pi * my_pweibull(t, loc1, scl1)) * (1 - my_pweibull(t, loc2, scl2)) - b
}
#Function to Get Quantiles of Interest for MLE. Takes MLE output and Quantile.
mletp <- function(mle, b){
Qp = uniroot(function(t) quant(mle$est["mu1"],mle$est["mu2"],exp(mle$est["log_sigma1"]), exp(mle$est["log_sigma2"]), mle$est["pi"], b, t),c(0,300000))$root
return(Qp)
}
# SEV for standardized time
psev = function(x, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE){
upper <- exp(-exp(x))
if(lower.tail){
if(log.p)
return(log(1 - upper))
else
return(1-upper)
} else {
if(log.p)
return(log(upper))
else
return(upper)
}
}
#Derivative of SEV
dsev = function(x, location = 0, scale = 1){
pdf <- exp(x-exp(x))
return(pdf)
}
#Get Implicit Derivative of GFLP
impli_deriv <- function(mle, tp){
z1 = (log(tp) - mle$est["mu1"]) / exp(mle$est["log_sigma1"])
z2 = (log(tp) - mle$est["mu2"]) / exp(mle$est["log_sigma2"])
gF1 = -psev(z2,lower.tail=FALSE)*mle$est["pi"]
gF2 = -(1 - mle$est["pi"]*psev(z1,lower.tail=TRUE))
F1yp = dsev(z1)*(1/exp(mle$est["log_sigma1"]))
F2yp = dsev(z2)*(1/exp(mle$est["log_sigma2"]))
F1mu1 = -dsev(z1)*(1/exp(mle$est["log_sigma1"]))
F1s1 = dsev(z1)*((-log(tp) + mle$est["mu1"])/((exp(mle$est["log_sigma1"]))^2))
F2mu2 = -dsev(z2)*(1/exp(mle$est["log_sigma2"]))
F2s2 = dsev(z2)*((-log(tp) + mle$est["mu2"])/((exp(mle$est["log_sigma2"]))^2))
denom = gF1*F1yp + gF2*F2yp
l1 = (-gF1*F1mu1)/denom
l2 = (-gF2*F2mu2)/denom
ls1 = (-gF1*F1s1)/denom
ls2 = (-gF2*F2s2)/denom
lp = 0 #note: p is not a parameter in the individual failure distributions
out <- cbind(l1,l2,ls1,ls2,lp)
rownames(out) <- NULL
return(out)
}
#Get Standard Error for y=log(tp)
se_tp <- function(impl, mle){
out <- impl%*%mle$cov2%*%t(impl)
return(sqrt(out))
}
#Get CI for tp using Delta Plug in Method
tp_bounds <- function(mle, tp, se, alpha){
lb <- exp(log(tp) - qnorm(1-alpha/2)*se)
mle.est <- tp
ub <- exp(log(tp) + qnorm(1-alpha/2)*se)
out <- cbind(lb, mle.est, ub)
return(out)
}
## Run for All Models ##
tp_all <- matrix(NA, ncol=4, nrow=21)
for (i in 1:21){
mod <- i
mlefit <- get_mle_stan(mod, inits) #get MLE estimate
tp_mle <- mletp(mlefit,.10) #use MLE point estimates for tp
idev <- impli_deriv(mlefit, tp_mle) #get implicit derivative of GFLP at log(tp)
se_ylog <- se_tp(idev, mlefit) #se for log(tp)
tp.bounds <- tp_bounds(mlefit, tp_mle, se_ylog, .05) #get 95% tp CI
tp_all[i,2:4] <- tp.bounds
tp_all[i, 1] <- mod
}
|
# Calculate TMB for a given db_file using Strelka and Mutect data
#
# C. Savonen for ALSF - CCDL
#
# 2019
#
# Option descriptions
#
# --db_file : Path to sqlite database file made from 01-setup_db.py
# --metadata : Relative file path to MAF file to be analyzed. Can be .gz compressed.
# --coding_regions : File path that specifies the BED regions file that specifies
# coding regions that should be used for coding only TMB calculations.
# --overwrite : If specified, will overwrite any files of the same name. Default is FALSE.
# --nonsynfilter_maf: If TRUE, filter out synonymous mutations, keep non-synonymous mutations, based on maftools definition.
# --nonsynfilter_focr: If TRUE, filter out synonymous mutations, keep non-synonymous mutations, based on Friends of Cancer Research Definition.
# --tcga: If TRUE, will skip PBTA metadata specific steps
#
# Command line example:
#
# Rscript analyses/snv-callers/scripts/03-calculate_tmb.R \
# --db_file scratch/testing_snv_db.sqlite \
# --output analyses/snv-callers/results/consensus \
# --metadata data/pbta-histologies.tsv \
# --coding_regions scratch/gencode.v27.primary_assembly.annotation.bed \
# --nonsynfilter_maf
# --overwrite
################################ Initial Set Up ################################
# Establish base dir
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
analysis_dir <- file.path(root_dir, "analyses", "snv-callers")
# Import special functions
source(file.path(analysis_dir, "util", "tmb_functions.R"))
source(file.path(analysis_dir, "util", "split_mnv.R"))
# Magrittr pipe
`%>%` <- dplyr::`%>%`
# Load library:
library(optparse)
################################ Set up options ################################
# Set up optparse options
option_list <- list(
make_option(
opt_str = c("-d", "--db_file"), type = "character",
default = NULL, help = "Path to sqlite database file made from 01-setup_db.py",
metavar = "character"
),
make_option(
opt_str = c("-o", "--output"), type = "character",
default = NULL, help = "Path to folder where you would like the
output TMB file from this script to be stored.",
metavar = "character"
),
make_option(
opt_str = "--metadata", type = "character", default = "none",
help = "Relative file path (assuming from top directory of
'OpenPBTA-analysis') to MAF file to be analyzed. Can be .gz compressed.",
metavar = "character"
),
make_option(
opt_str = "--coding_regions", type = "character", default = "none",
help = "File path that specifies the BED regions file that specifies what
coding regions should be used for coding only TMB.",
metavar = "character"
),
make_option(
opt_str = "--overwrite", action = "store_true",
default = FALSE, help = "If TRUE, will overwrite any files of
the same name. Default is FALSE",
metavar = "character"
),
make_option(
opt_str = "--nonsynfilter_maf", action = "store_true",
default = FALSE, help = "If TRUE, filter out synonymous mutations, keep
non-synonymous mutations, according to maftools definition.",
metavar = "character"
),
make_option(
opt_str = "--nonsynfilter_focr", action = "store_true",
default = FALSE, help = "If TRUE, filter out synonymous mutations, keep
non-synonymous mutations, according to Friends of Cancer Research definition.",
metavar = "character"
),
make_option(
opt_str = "--tcga", action = "store_true",
default = FALSE, help = "If TRUE, will skip PBTA metadata specific steps",
metavar = "character"
)
)
# Parse options
opt <- parse_args(OptionParser(option_list = option_list))
########### Check that the files we need are in the paths specified ############
needed_files <- c(
opt$metadata, opt$db_file, opt$coding_regions
)
# Get list of which files were found
files_found <- file.exists(needed_files)
# Report error if any of them aren't found
if (!all(files_found)) {
stop(paste("\n Could not find needed file(s):",
needed_files[which(!files_found)],
"Check your options and set up.",
sep = "\n"
))
}
############################### Set Up Output #####################################
# Make output folder
if (!dir.exists(opt$output)) {
dir.create(opt$output, recursive = TRUE)
}
# Get data name
data_name <- ifelse(opt$tcga, "tcga", "pbta")
# Declare output file based on data_name
tmb_coding_file <- file.path(
opt$output,
paste0(data_name, "-snv-mutation-tmb-coding.tsv")
)
tmb_all_file <- file.path(
opt$output,
paste0(data_name, "-snv-mutation-tmb-all.tsv")
)
# Don't bother if both files exist already and overwrite is FALSE
if (all(file.exists(c(tmb_coding_file, tmb_all_file)), !opt$overwrite)) {
  # Fixed: paste0() ran both paths and the message together with no
  # separators, producing an unreadable error; paste() space-delimits them.
  stop(paste(
    tmb_coding_file, "and", tmb_all_file,
    "both exist and --overwrite is not being used.",
    "Use --overwrite if you would like to overwrite these files."
  ), call. = FALSE)
}
######################## Obtain Mutect Strelka mutations #######################
# Start up connection
con <- DBI::dbConnect(RSQLite::SQLite(), opt$db_file)
# Designate caller tables from SQL file
strelka <- dplyr::tbl(con, "strelka")
mutect <- dplyr::tbl(con, "mutect")
# Specify the columns to join by
join_cols <- c(
"Chromosome",
"Start_Position",
"Reference_Allele",
"Allele",
"Tumor_Sample_Barcode"
)
# Variant Classification with High/Moderate variant consequences from maftools
maf_nonsynonymous <- c(
"Missense_Mutation",
"Frame_Shift_Del",
"In_Frame_Ins",
"Frame_Shift_Ins",
"Splice_Site",
"Nonsense_Mutation",
"In_Frame_Del",
"Nonstop_Mutation",
"Translation_Start_Site"
)
focr_nonsynonymous <- c(
"Missense_Mutation",
"Frame_Shift_Del",
"In_Frame_Ins",
"Frame_Shift_Ins",
"Nonsense_Mutation",
"In_Frame_Del"
)
# Create the consensus for non-MNVs
strelka_mutect_maf_df <- strelka %>%
# We'll keep the Strelka2 columns and drop Mutect2 columns
dplyr::inner_join(mutect %>%
dplyr::select(join_cols),
by = join_cols,
copy = TRUE
) %>%
as.data.frame()
# Get Multi-nucleotide calls from mutect as SNVs
split_mutect_df <- split_mnv(mutect) %>%
dplyr::select(join_cols)
# join MNV calls with strelka
strelka_mutect_mnv <- strelka %>%
dplyr::inner_join(split_mutect_df,
by = join_cols,
copy = TRUE
) %>%
as.data.frame()
if (opt$tcga) {
strelka_mutect_mnv <- strelka_mutect_mnv %>%
# In the TCGA MAF files, the Tumor_Sample_Barcode has the biospecimen
# information but only the first 12 characters are needed to match the metadata
dplyr::mutate(Tumor_Sample_Barcode = substr(Tumor_Sample_Barcode, 0, 12))
}
# Merge in the MNVs.
# Fixed: dplyr::union() performs a row-wise set union of two compatible
# data frames and has no `by` argument -- the `by = join_cols` previously
# passed here was silently swallowed by `...` and had no effect.
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
  dplyr::union(strelka_mutect_mnv)
# If the maftools non-synonymous filter is on, filter out synonymous mutations
if (opt$nonsynfilter_maf) {
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
dplyr::filter(Variant_Classification %in% maf_nonsynonymous)
}
# If the FoCR non-synonymous filter is on, filter out synonymous mutations according to that definition
if (opt$nonsynfilter_focr) {
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
dplyr::filter(Variant_Classification %in% focr_nonsynonymous)
}
########################### Set up metadata columns ############################
# Print progress message
message("Setting up metadata...")
# Have to handle TCGA and PBTA metadata differently
if (opt$tcga) {
# Format two fields of metadata for use with functions
metadata <- readr::read_tsv(opt$metadata, guess_max = 10000) %>%
dplyr::mutate(
short_histology = Primary_diagnosis,
target_bed_path = file.path(root_dir, "data", BED_In_Use),
experimental_strategy = "WXS"
) %>%
dplyr::rename(
Tumor_Sample_Barcode = tumorID,
target_bed = BED_In_Use
) # This field is named differently
# Manifest files only have first 12 letters of the barcode so we gotta chop the end off
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
dplyr::mutate(Tumor_Sample_Barcode = substr(Tumor_Sample_Barcode, 0, 12))
} else { # pbta data
# Isolate metadata to only the samples that are in the datasets
metadata <- readr::read_tsv(opt$metadata, guess_max = 10000) %>%
dplyr::filter(Kids_First_Biospecimen_ID %in% strelka_mutect_maf_df$Tumor_Sample_Barcode) %>%
dplyr::distinct(Kids_First_Biospecimen_ID, .keep_all = TRUE) %>%
dplyr::rename(Tumor_Sample_Barcode = Kids_First_Biospecimen_ID) %>%
# Make a Target BED regions column
dplyr::mutate(
target_bed = dplyr::recode(experimental_strategy,
"WGS" = "scratch/snv-callers/intersect_strelka_mutect_WGS.bed",
"WXS" = "data/WXS.hg38.100bp_padded.bed"
#TODO: make a padded/unpadded script option
),
target_bed_path = file.path(root_dir, target_bed)
)
# Make sure that we have metadata for all these samples.
if (!all(unique(strelka_mutect_maf_df$Tumor_Sample_Barcode) %in% metadata$Tumor_Sample_Barcode)) {
stop("There are samples in this MAF file that are not in the metadata.")
}
}
# Add in metadata
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
dplyr::inner_join(metadata %>%
dplyr::select(
Tumor_Sample_Barcode,
experimental_strategy,
short_histology,
target_bed,
target_bed_path
),
by = "Tumor_Sample_Barcode"
) %>%
# Remove samples if they are not WGS or WXS
dplyr::filter(experimental_strategy %in% c("WGS", "WXS"))
############################# Set Up BED Files #################################
# Make a data.frame of the unique BED file paths and their names
bed_files_key_df <- strelka_mutect_maf_df %>%
dplyr::select(Tumor_Sample_Barcode, target_bed, target_bed_path) %>%
dplyr::distinct()
# Get the file paths for the bed files
bed_file_paths <- bed_files_key_df %>%
dplyr::distinct(target_bed, target_bed_path) %>%
tibble::deframe()
# Read in each unique BED file and turn into GenomicRanges object
bed_ranges_list <- lapply(bed_file_paths, function(bed_file) {
# Read in BED file as data.frame
bed_df <- readr::read_tsv(bed_file,
col_names = c("chr", "start", "end")
)
# Make into a GenomicRanges object
bed_ranges <- GenomicRanges::GRanges(
seqnames = bed_df$chr,
ranges = IRanges::IRanges(
start = bed_df$start,
end = bed_df$end
)
)
return(bed_ranges)
})
#################### Set up Coding Region version of BED ranges ################
# Read in the coding regions BED file
coding_regions_df <- readr::read_tsv(opt$coding_regions,
col_names = c("chr", "start", "end")
)
# Make into a GenomicRanges object
coding_ranges <- GenomicRanges::GRanges(
seqnames = coding_regions_df$chr,
ranges = IRanges::IRanges(
start = coding_regions_df$start,
end = coding_regions_df$end
)
)
# For each BED range, find the coding regions intersection
coding_bed_ranges_list <- lapply(bed_ranges_list, function(bed_range,
coding_grange = coding_ranges) {
# Find the intersection
coding_intersect_ranges <- GenomicRanges::intersect(bed_range, coding_grange)
# Return the reduce version of these ranges
return(GenomicRanges::reduce(coding_intersect_ranges))
})
########################### All mutations TMB file #############################
# If the file exists and --overwrite is not set, warn and skip the calculation
if (file.exists(tmb_all_file) && !opt$overwrite) {
  # Fixed: warning(cat(...)) printed via cat() and then raised an *empty*
  # warning, because cat() returns NULL; build the message string instead.
  warning(paste0(
    "The 'all mutations' Tumor Mutation Burden file already exists: \n",
    tmb_all_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ), call. = FALSE)
} else {
  # Print out warning if this file is going to be overwritten.
  # Fixed: this previously checked tmb_coding_file by mistake.
  if (file.exists(tmb_all_file)) {
    warning("Overwriting existing 'all mutations' TMB file.")
  }
  # Run TMB calculation on each tumor sample and its respective BED range
  tmb_all_df <- purrr::map2_df(
    bed_files_key_df$Tumor_Sample_Barcode,
    bed_files_key_df$target_bed,
    ~ calculate_tmb(
      tumor_sample_barcode = .x,
      maf_df = strelka_mutect_maf_df,
      bed_ranges = bed_ranges_list[[.y]]
    )
  )
  # Write to TSV file
  readr::write_tsv(tmb_all_df, tmb_all_file)
  # Print out completion message
  message(paste("TMB 'all' calculations saved to:", tmb_all_file))
}
############################# Coding TMB file ##################################
# If the file exists and --overwrite is not set, warn and skip the calculation
if (file.exists(tmb_coding_file) && !opt$overwrite) {
  # Fixed: warning(cat(...)) printed via cat() and then raised an *empty*
  # warning, because cat() returns NULL; build the message string instead.
  warning(paste0(
    "The 'coding only' Tumor Mutation Burden file already exists: \n",
    tmb_coding_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ), call. = FALSE)
} else {
  # Print out warning if this file is going to be overwritten
  if (file.exists(tmb_coding_file)) {
    warning("Overwriting existing 'coding only' TMB file.")
  }
  # Print out progress message
  message(paste("Calculating 'coding only' TMB..."))
  # Run coding TMB calculation on each tumor sample and its
  # respective coding BED range
  tmb_coding_df <- purrr::map2_df(
    bed_files_key_df$Tumor_Sample_Barcode,
    bed_files_key_df$target_bed,
    ~ calculate_tmb(
      tumor_sample_barcode = .x,
      maf_df = strelka_mutect_maf_df,
      bed_ranges = coding_bed_ranges_list[[.y]]
    )
  )
  # Write to TSV file
  readr::write_tsv(tmb_coding_df, tmb_coding_file)
  # Print out completion message
  message(paste("TMB 'coding only' calculations saved to:", tmb_coding_file))
}
|
/analyses/snv-callers/scripts/03-calculate_tmb.R
|
permissive
|
jaclyn-taroni/OpenPBTA-analysis
|
R
| false
| false
| 13,622
|
r
|
# Calculate TMB for a given db_file using Strelka and Mutect data
#
# C. Savonen for ALSF - CCDL
#
# 2019
#
# Option descriptions
#
# --db_file : Path to sqlite database file made from 01-setup_db.py
# --metadata : Relative file path to MAF file to be analyzed. Can be .gz compressed.
# --coding_regions : File path that specifies the BED regions file that specifies
# coding regions that should be used for coding only TMB calculations.
# --overwrite : If specified, will overwrite any files of the same name. Default is FALSE.
# --nonsynfilter_maf: If TRUE, filter out synonymous mutations, keep non-synonymous mutations, based on maftools definition.
# --nonsynfilter_focr: If TRUE, filter out synonymous mutations, keep non-synonymous mutations, based on Friends of Cancer Research Definition.
# --tcga: If TRUE, will skip PBTA metadata specific steps
#
# Command line example:
#
# Rscript analyses/snv-callers/scripts/03-calculate_tmb.R \
# --db_file scratch/testing_snv_db.sqlite \
# --output analyses/snv-callers/results/consensus \
# --metadata data/pbta-histologies.tsv \
# --coding_regions scratch/gencode.v27.primary_assembly.annotation.bed \
# --nonsynfilter_maf
# --overwrite
################################ Initial Set Up ################################
# Establish base dir
root_dir <- rprojroot::find_root(rprojroot::has_dir(".git"))
analysis_dir <- file.path(root_dir, "analyses", "snv-callers")
# Import special functions
source(file.path(analysis_dir, "util", "tmb_functions.R"))
source(file.path(analysis_dir, "util", "split_mnv.R"))
# Magrittr pipe
`%>%` <- dplyr::`%>%`
# Load library:
library(optparse)
################################ Set up options ################################
# Set up optparse options
# The first two options are required paths; the remaining ones are optional
# flags/paths with defaults. NOTE(review): metavar is irrelevant for
# store_true flags but harmless -- optparse ignores it there.
option_list <- list(
  make_option(
    opt_str = c("-d", "--db_file"), type = "character",
    default = NULL, help = "Path to sqlite database file made from 01-setup_db.py",
    metavar = "character"
  ),
  make_option(
    opt_str = c("-o", "--output"), type = "character",
    default = NULL, help = "Path to folder where you would like the
    output TMB file from this script to be stored.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--metadata", type = "character", default = "none",
    help = "Relative file path (assuming from top directory of
    'OpenPBTA-analysis') to MAF file to be analyzed. Can be .gz compressed.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--coding_regions", type = "character", default = "none",
    help = "File path that specifies the BED regions file that specifies what
    coding regions should be used for coding only TMB.",
    metavar = "character"
  ),
  # Boolean flags below default to FALSE and flip to TRUE when supplied.
  make_option(
    opt_str = "--overwrite", action = "store_true",
    default = FALSE, help = "If TRUE, will overwrite any files of
    the same name. Default is FALSE",
    metavar = "character"
  ),
  make_option(
    opt_str = "--nonsynfilter_maf", action = "store_true",
    default = FALSE, help = "If TRUE, filter out synonymous mutations, keep
    non-synonymous mutations, according to maftools definition.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--nonsynfilter_focr", action = "store_true",
    default = FALSE, help = "If TRUE, filter out synonymous mutations, keep
    non-synonymous mutations, according to Friends of Cancer Research definition.",
    metavar = "character"
  ),
  make_option(
    opt_str = "--tcga", action = "store_true",
    default = FALSE, help = "If TRUE, will skip PBTA metadata specific steps",
    metavar = "character"
  )
)
# Parse options
opt <- parse_args(OptionParser(option_list = option_list))
########### Check that the files we need are in the paths specified ############
# Input files that must exist before any work begins.
required_inputs <- c(
  opt$metadata, opt$db_file, opt$coding_regions
)
# Keep only the paths that are absent from disk (file.exists never returns NA).
missing_inputs <- required_inputs[!file.exists(required_inputs)]
# Abort with the same newline-separated message layout if anything is missing.
if (length(missing_inputs) > 0) {
  stop(paste("\n Could not find needed file(s):",
    missing_inputs,
    "Check your options and set up.",
    sep = "\n"
  ))
}
############################### Set Up Output #####################################
# Make output folder (recursive so nested paths work)
if (!dir.exists(opt$output)) {
  dir.create(opt$output, recursive = TRUE)
}
# Get data name prefix for the output files. Plain if/else is used because
# opt$tcga is a single logical flag (ifelse() is meant for vectors).
data_name <- if (opt$tcga) "tcga" else "pbta"
# Declare output file based on data_name
tmb_coding_file <- file.path(
  opt$output,
  paste0(data_name, "-snv-mutation-tmb-coding.tsv")
)
tmb_all_file <- file.path(
  opt$output,
  paste0(data_name, "-snv-mutation-tmb-all.tsv")
)
# Don't bother if both files exist already and overwrite is FALSE.
# Bug fix: the previous paste0() ran the two paths and the message together
# with no separators, producing an unreadable error; paste() with spaces
# keeps each path legible.
if (all(file.exists(c(tmb_coding_file, tmb_all_file)), !opt$overwrite)) {
  stop(paste(
    tmb_coding_file, "and", tmb_all_file,
    "both exist and --overwrite is not being used.",
    "Use --overwrite if you would like to overwrite these files."
  ))
}
######################## Obtain Mutect Strelka mutations #######################
# Start up connection to the SQLite database built by 01-setup_db.py
con <- DBI::dbConnect(RSQLite::SQLite(), opt$db_file)
# Designate caller tables from SQL file (lazy tbls; not pulled into memory yet)
strelka <- dplyr::tbl(con, "strelka")
mutect <- dplyr::tbl(con, "mutect")
# Specify the columns to join by -- a mutation is "consensus" when both
# callers agree on all five of these fields.
join_cols <- c(
  "Chromosome",
  "Start_Position",
  "Reference_Allele",
  "Allele",
  "Tumor_Sample_Barcode"
)
# Variant Classification with High/Moderate variant consequences from maftools
maf_nonsynonymous <- c(
  "Missense_Mutation",
  "Frame_Shift_Del",
  "In_Frame_Ins",
  "Frame_Shift_Ins",
  "Splice_Site",
  "Nonsense_Mutation",
  "In_Frame_Del",
  "Nonstop_Mutation",
  "Translation_Start_Site"
)
# Friends of Cancer Research definition (a stricter subset of the above)
focr_nonsynonymous <- c(
  "Missense_Mutation",
  "Frame_Shift_Del",
  "In_Frame_Ins",
  "Frame_Shift_Ins",
  "Nonsense_Mutation",
  "In_Frame_Del"
)
# Create the consensus for non-MNVs
strelka_mutect_maf_df <- strelka %>%
  # We'll keep the Strelka2 columns and drop Mutect2 columns
  dplyr::inner_join(mutect %>%
    dplyr::select(join_cols),
  by = join_cols,
  copy = TRUE
  ) %>%
  as.data.frame()
# Get Multi-nucleotide calls from mutect as SNVs
split_mutect_df <- split_mnv(mutect) %>%
  dplyr::select(join_cols)
# join MNV calls with strelka
strelka_mutect_mnv <- strelka %>%
  dplyr::inner_join(split_mutect_df,
    by = join_cols,
    copy = TRUE
  ) %>%
  as.data.frame()
if (opt$tcga) {
  strelka_mutect_mnv <- strelka_mutect_mnv %>%
    # In the TCGA MAF files, the Tumor_Sample_Barcode has the biospecimen
    # information but only the first 12 characters are needed to match the metadata
    dplyr::mutate(Tumor_Sample_Barcode = substr(Tumor_Sample_Barcode, 0, 12))
}
# Merge in the MNVs; union() also de-duplicates rows shared by both inputs.
# Fix: the previous `by = join_cols` argument was removed -- dplyr::union()
# has no `by` argument, so it was silently ignored and only misled readers.
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
  dplyr::union(strelka_mutect_mnv)
# If the maftools non-synonymous filter is on, filter out synonymous mutations
if (opt$nonsynfilter_maf) {
  strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
    dplyr::filter(Variant_Classification %in% maf_nonsynonymous)
}
# If the FoCR non-synonymous filter is on, filter out synonymous mutations according to that definition
if (opt$nonsynfilter_focr) {
  strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
    dplyr::filter(Variant_Classification %in% focr_nonsynonymous)
}
########################### Set up metadata columns ############################
# Print progress message
message("Setting up metadata...")
# Have to handle TCGA and PBTA metadata differently:
# TCGA manifests name their columns differently and are all WXS;
# PBTA metadata needs a BED path derived from the experimental strategy.
if (opt$tcga) {
  # Format two fields of metadata for use with functions
  metadata <- readr::read_tsv(opt$metadata, guess_max = 10000) %>%
    dplyr::mutate(
      short_histology = Primary_diagnosis,
      target_bed_path = file.path(root_dir, "data", BED_In_Use),
      experimental_strategy = "WXS"
    ) %>%
    dplyr::rename(
      Tumor_Sample_Barcode = tumorID,
      target_bed = BED_In_Use
    ) # This field is named differently
  # Manifest files only have first 12 letters of the barcode so we gotta chop the end off
  # (the MNV rows were already truncated earlier; re-truncating them here is a
  # no-op, this pass covers the non-MNV consensus rows)
  strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
    dplyr::mutate(Tumor_Sample_Barcode = substr(Tumor_Sample_Barcode, 0, 12))
} else { # pbta data
  # Isolate metadata to only the samples that are in the datasets
  metadata <- readr::read_tsv(opt$metadata, guess_max = 10000) %>%
    dplyr::filter(Kids_First_Biospecimen_ID %in% strelka_mutect_maf_df$Tumor_Sample_Barcode) %>%
    dplyr::distinct(Kids_First_Biospecimen_ID, .keep_all = TRUE) %>%
    dplyr::rename(Tumor_Sample_Barcode = Kids_First_Biospecimen_ID) %>%
    # Make a Target BED regions column: WGS samples use the caller-intersection
    # BED, WXS samples use the padded capture BED.
    dplyr::mutate(
      target_bed = dplyr::recode(experimental_strategy,
        "WGS" = "scratch/snv-callers/intersect_strelka_mutect_WGS.bed",
        "WXS" = "data/WXS.hg38.100bp_padded.bed"
        #TODO: make a padded/unpadded script option
      ),
      target_bed_path = file.path(root_dir, target_bed)
    )
  # Make sure that we have metadata for all these samples.
  if (!all(unique(strelka_mutect_maf_df$Tumor_Sample_Barcode) %in% metadata$Tumor_Sample_Barcode)) {
    stop("There are samples in this MAF file that are not in the metadata.")
  }
}
# Add in metadata
# Keep only the metadata columns that the downstream TMB steps need.
metadata_cols <- dplyr::select(
  metadata,
  Tumor_Sample_Barcode,
  experimental_strategy,
  short_histology,
  target_bed,
  target_bed_path
)
# Attach those columns to every mutation row, then restrict the data to the
# two sequencing strategies this analysis supports.
strelka_mutect_maf_df <- strelka_mutect_maf_df %>%
  dplyr::inner_join(metadata_cols, by = "Tumor_Sample_Barcode") %>%
  # Remove samples if they are not WGS or WXS
  dplyr::filter(experimental_strategy %in% c("WGS", "WXS"))
############################# Set Up BED Files #################################
# One row per tumor sample with the BED label and file path it maps to.
bed_files_key_df <- strelka_mutect_maf_df %>%
  dplyr::select(Tumor_Sample_Barcode, target_bed, target_bed_path) %>%
  dplyr::distinct()
# Named vector: names are BED labels, values are the file paths on disk.
bed_file_paths <- bed_files_key_df %>%
  dplyr::distinct(target_bed, target_bed_path) %>%
  tibble::deframe()
# Helper: read a 3-column BED file and convert it to a GRanges object.
read_bed_as_granges <- function(bed_file) {
  bed_df <- readr::read_tsv(bed_file,
    col_names = c("chr", "start", "end")
  )
  GenomicRanges::GRanges(
    seqnames = bed_df$chr,
    ranges = IRanges::IRanges(
      start = bed_df$start,
      end = bed_df$end
    )
  )
}
# Read each unique BED file once; lapply() keeps the names of bed_file_paths,
# so this list can be indexed by BED label later on.
bed_ranges_list <- lapply(bed_file_paths, read_bed_as_granges)
#################### Set up Coding Region version of BED ranges ################
# Read the genome-wide coding-region BED file once.
coding_regions_df <- readr::read_tsv(opt$coding_regions,
  col_names = c("chr", "start", "end")
)
# Represent the coding regions as a GRanges object for overlap operations.
coding_ranges <- GenomicRanges::GRanges(
  seqnames = coding_regions_df$chr,
  ranges = IRanges::IRanges(
    start = coding_regions_df$start,
    end = coding_regions_df$end
  )
)
# For every target BED range, keep only the portion overlapping coding
# regions; reduce() then collapses overlapping intervals into a minimal set.
# Names of bed_ranges_list are preserved, so the two lists stay parallel.
coding_bed_ranges_list <- lapply(bed_ranges_list, function(target_range) {
  overlap <- GenomicRanges::intersect(target_range, coding_ranges)
  GenomicRanges::reduce(overlap)
})
########################### All mutations TMB file #############################
# If the file exists and the overwrite option is not being used, skip the calculation
if (file.exists(tmb_all_file) && !opt$overwrite) {
  # Warn if this file exists and overwrite is set to FALSE
  # NOTE(review): warning(cat(...)) prints via cat() and raises an empty
  # warning, since cat() returns NULL -- kept as-is to preserve output.
  warning(cat(
    "The 'all mutations' Tumor Mutation Burden file already exists: \n",
    tmb_all_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ))
} else {
  # Print out warning if this file is going to be overwritten.
  # Bug fix: this check previously tested tmb_coding_file, so the warning
  # about overwriting the 'all mutations' file was keyed to the wrong output.
  if (file.exists(tmb_all_file)) {
    warning("Overwriting existing 'all mutations' TMB file.")
  }
  # Run TMB calculation on each tumor sample and its respective BED range
  tmb_all_df <- purrr::map2_df(
    bed_files_key_df$Tumor_Sample_Barcode,
    bed_files_key_df$target_bed,
    ~ calculate_tmb(
      tumor_sample_barcode = .x,
      maf_df = strelka_mutect_maf_df,
      bed_ranges = bed_ranges_list[[.y]]
    )
  )
  # Write to TSV file
  readr::write_tsv(tmb_all_df, tmb_all_file)
  # Print out completion message
  message(paste("TMB 'all' calculations saved to:", tmb_all_file))
}
############################# Coding TMB file ##################################
# If the file exists or the overwrite option is not being used, run TMB calculations
if (file.exists(tmb_coding_file) && !opt$overwrite) {
  # Stop if this file exists and overwrite is set to FALSE
  # NOTE(review): warning(cat(...)) prints via cat() and raises an empty
  # warning, since cat() returns NULL -- unusual but functional.
  warning(cat(
    "The 'coding only' Tumor Mutation Burden file already exists: \n",
    tmb_coding_file, "\n",
    "Use --overwrite if you want to overwrite it."
  ))
} else {
  # Print out warning if this file is going to be overwritten
  if (file.exists(tmb_coding_file)) {
    warning("Overwriting existing 'coding only' TMB file.")
  }
  # Print out progress message
  message(paste("Calculating 'coding only' TMB..."))
  # Run coding TMB calculation on each tumor sample and its
  # respective coding BED range (coding_bed_ranges_list shares names with
  # bed_ranges_list, so target_bed indexes it the same way)
  tmb_coding_df <- purrr::map2_df(
    bed_files_key_df$Tumor_Sample_Barcode,
    bed_files_key_df$target_bed,
    ~ calculate_tmb(
      tumor_sample_barcode = .x,
      maf_df = strelka_mutect_maf_df,
      bed_ranges = coding_bed_ranges_list[[.y]]
    )
  )
  # Write to TSV file
  readr::write_tsv(tmb_coding_df, tmb_coding_file)
  # Print out completion message
  message(paste("TMB 'coding only' calculations saved to:", tmb_coding_file))
}
|
# One-time environment setup: refresh all installed packages non-interactively,
# then install the text-mining / semantic-web packages used by this project.
# NOTE(review): the cran.cnr.berkeley.edu mirror has since been retired --
# confirm a currently active CRAN mirror before running.
update.packages(ask=FALSE, repos='http://cran.cnr.berkeley.edu')
install.packages(c(
'rrdf',
'SPARQL',
'textir',
'lda',
'OAIHarvester',
'topicmodels',
'stringkernels'),
repos='http://cran.cnr.berkeley.edu'
)
# Install the whole CRAN NLP task view via the ctv package.
library(ctv)
install.views('NaturalLanguageProcessing',
repos='http://cran.cnr.berkeley.edu'
)
|
/Desktop/Install-Scripts/NaturalLanguageProcessing/load-task-view.R
|
no_license
|
GunioRobot/Data-Journalism-Developer-Studio
|
R
| false
| false
| 322
|
r
|
update.packages(ask=FALSE, repos='http://cran.cnr.berkeley.edu')
install.packages(c(
'rrdf',
'SPARQL',
'textir',
'lda',
'OAIHarvester',
'topicmodels',
'stringkernels'),
repos='http://cran.cnr.berkeley.edu'
)
library(ctv)
install.views('NaturalLanguageProcessing',
repos='http://cran.cnr.berkeley.edu'
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/domain_insertion_functions.R
\name{read.DIP.Counts}
\alias{read.DIP.Counts}
\title{Read domain-insertion profiling count files into a dataframe}
\usage{
read.DIP.Counts(fnames, meta.file)
}
\arguments{
\item{fnames}{List of filenames for read count tables to be read}
\item{meta.file}{Filename for metadata file describing mapping between insertion
sites and amino acid sequence.}
}
\value{
Matrix with the first n columns corresponding to
the n files/samples provided by \code{fnames}, rows corresponding to
insertion sites and entries containing read counts. The last 6 columns
provide insertion site annotations from the metadata file.
}
\description{
This function provides a means to read the count files produced by the python
dipseq package (https://github.com/SavageLab/dipseq) into a single dataframe
for sort-seq analysis.
}
\examples{
\dontrun{
read.DIP.Counts(fnames = "example.csv", meta.file = "meta.txt")
}
}
|
/man/read.DIP.Counts.Rd
|
permissive
|
jnkoberstein/biosensor-sort-seq
|
R
| false
| true
| 1,007
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/domain_insertion_functions.R
\name{read.DIP.Counts}
\alias{read.DIP.Counts}
\title{Read domain-insertion profiling count files into a dataframe}
\usage{
read.DIP.Counts(fnames, meta.file)
}
\arguments{
\item{fnames}{List of filenames for read count tables to be read}
\item{meta.file}{Filename for metadata file describing mapping between insertion
sites and amino acid sequence.}
}
\value{
Matrix with the first n columns corresponding to
the n files/samples provided by \code{fnames}, rows corresponding to
insertion sites and entries containing read counts. The last 6 columns
provide insertion site annotations from the metadata file.
}
\description{
This function provides a means to read the count files produced by the python
dipseq package (https://github.com/SavageLab/dipseq) into a single dataframe
for sort-seq analysis.
}
\examples{
\dontrun{
read.DIP.Counts(fnames = "example.csv", meta.file = "meta.txt")
}
}
|
##Plots
# Exploratory stable-isotope (d13C vs d15N) biplots for rainbow trout and
# potential food sources (terrestrial, littoral, pelagic).
library(tidyverse)
library(cowplot)
# NOTE(review): Bunn is read but never used below -- confirm it is still needed.
Bunn <- read_csv("Bunn.csv")
fish <- read_csv("fishCN.csv")
ter <- read_csv("terCN.csv")
lit <- read_csv("litCN.csv")
pel <- read_csv("pelagCN.csv")
#fish only, basic plot
ggplot(data = fish, aes(x = d13C_VPDB, y = d15N_air)) +
geom_point() +
theme_minimal() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)")))
#learning to layer several sets of data onto one plot.
# Each group gets its own geom_point layer with a fixed colour and shape.
# NOTE(review): scale_fill_identity() orders legend entries by the colour
# codes, so confirm the labels vector lines up with the intended groups.
ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20, show.legend = TRUE) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17, show.legend = TRUE) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3, show.legend = TRUE) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8, show.legend = TRUE) +
theme_cowplot() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)"))) +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("rainbow trout","terrestrial","littoral","pelagic")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17,3,8),
color = c("#f4a582", "#008837","#92c5de","#0571b0")))) +
theme(legend.position=c(.70,.20))
# NOTE(review): ggsave() is called without a filename; supply a path or this
# call will fail.
ggsave()
##The legend is wonky. Can I change it? I will make a simpler plot with only two things to test:
ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
scale_colour_manual(name = "",
labels = c("fish", "terrestrial"),
values = c("#f4a582"="#f4a582", "#008837" = "#008837")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17), color = c("#f4a582", "#008837")))) +
theme_cowplot()
p1<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
theme_cowplot()
print(p1 + scale_shape_manual(values = c(20,17)))
pALLCN<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8) +
theme_cowplot() +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("terrestrial","pelagic","littoral","rainbow trout")) +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)")))
##right now I'm getting frustrated that I cannot plot from separate dataframes. It seems I first need to
## gather them into a single dataframe.
#So maybe I select just the columns I need and create a new single df?
#base r
# Base-graphics version of the same biplot: plot() the first group, then
# points() for each remaining group, finishing with a manual legend.
par(mai = c(1,1,0.5,.5))
plot(x = fish$d13C_VPDB, y = fish$d15N_air,
pch = 20,
col = "#a6611a",
ylim = c(-12,11),
xlim = c(-37,-17),
xlab = expression(paste(delta^{13}, "C (\u2030)")),
ylab = expression(paste(delta^{15}, "N (\u2030)")))
points(x = ter$d13C_VPDB, y = ter$d15N_air,
pch = 17,
col = "#238b45")
points(pel$d13C_VPDB, pel$d15N_air,
pch = 4,
col = "#526EFF")
points(x = lit$d13C_VPDB, y = lit$d15N_air,
pch = 8,
col = "#66c2a4")
legend(x = -23 , y=-5,
legend = c("fish", "terrestrial", "pelagic", "littoral") ,
pch = c(20,17,4,8),
col = c("#a6611a", "#238b45", "#526EFF", "#66c2a4"),
bty = "n")
## test bind_rows
# Scratch experiment: bind_rows() fills missing columns with NA when the
# inputs' columns differ.
df1 <-data.frame(b = c(1:5), a = c(6:10))
df2 <-data.frame(a = c(11:15), b = c(16:20), c = LETTERS[1:5])
df3 <-data.frame(c= letters[1:3], d = c(6:8))
bind_rows(df1,df2,df3)
binded <-bind_rows(fish,ter, pel)
binded$Identifier.1
binded$d13C_VPDB
##Working on the final version of a basic stable isotope biplot
CNbiplot.all<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#c487a9"), size = 2, shape = 20, show.legend = TRUE) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#4d7d53"), size = 2, shape = 17, show.legend = TRUE) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3, show.legend = TRUE) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8, show.legend = TRUE) +
theme_cowplot() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)"))) +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("rainbow trout","terrestrial","littoral","pelagic")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17,3,8),
color = c("#c487a9", "#4d7d53","#92c5de","#0571b0")))) +
theme(legend.position=c(.70,.20),
axis.title.y = element_text(angle = 0))
|
/Isotopebiplot.R
|
no_license
|
freshbeka/BGplots
|
R
| false
| false
| 5,611
|
r
|
##Plots
library(tidyverse)
library(cowplot)
Bunn <- read_csv("Bunn.csv")
fish <- read_csv("fishCN.csv")
ter <- read_csv("terCN.csv")
lit <- read_csv("litCN.csv")
pel <- read_csv("pelagCN.csv")
#fish only, basic plot
ggplot(data = fish, aes(x = d13C_VPDB, y = d15N_air)) +
geom_point() +
theme_minimal() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)")))
#learning to layer several sets of data onto one plot.
ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20, show.legend = TRUE) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17, show.legend = TRUE) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3, show.legend = TRUE) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8, show.legend = TRUE) +
theme_cowplot() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)"))) +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("rainbow trout","terrestrial","littoral","pelagic")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17,3,8),
color = c("#f4a582", "#008837","#92c5de","#0571b0")))) +
theme(legend.position=c(.70,.20))
ggsave()
##The legend is wonky. Can I change it? I will make a simpler plot with only two things to test:
ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
scale_colour_manual(name = "",
labels = c("fish", "terrestrial"),
values = c("#f4a582"="#f4a582", "#008837" = "#008837")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17), color = c("#f4a582", "#008837")))) +
theme_cowplot()
p1<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
theme_cowplot()
print(p1 + scale_shape_manual(values = c(20,17)))
pALLCN<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#f4a582"), size = 2, shape = 20) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#008837"), size = 2, shape = 17) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8) +
theme_cowplot() +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("terrestrial","pelagic","littoral","rainbow trout")) +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)")))
##right now I'm getting frustrated that I cannot plot from separate dataframes. It seems I first need to
## gather them into a single dataframe.
#So maybe I select just the columns I need and create a new single df?
#base r
par(mai = c(1,1,0.5,.5))
plot(x = fish$d13C_VPDB, y = fish$d15N_air,
pch = 20,
col = "#a6611a",
ylim = c(-12,11),
xlim = c(-37,-17),
xlab = expression(paste(delta^{13}, "C (\u2030)")),
ylab = expression(paste(delta^{15}, "N (\u2030)")))
points(x = ter$d13C_VPDB, y = ter$d15N_air,
pch = 17,
col = "#238b45")
points(pel$d13C_VPDB, pel$d15N_air,
pch = 4,
col = "#526EFF")
points(x = lit$d13C_VPDB, y = lit$d15N_air,
pch = 8,
col = "#66c2a4")
legend(x = -23 , y=-5,
legend = c("fish", "terrestrial", "pelagic", "littoral") ,
pch = c(20,17,4,8),
col = c("#a6611a", "#238b45", "#526EFF", "#66c2a4"),
bty = "n")
## test bind_rows
df1 <-data.frame(b = c(1:5), a = c(6:10))
df2 <-data.frame(a = c(11:15), b = c(16:20), c = LETTERS[1:5])
df3 <-data.frame(c= letters[1:3], d = c(6:8))
bind_rows(df1,df2,df3)
binded <-bind_rows(fish,ter, pel)
binded$Identifier.1
binded$d13C_VPDB
##Working on the final version of a basic stable isotope biplot
CNbiplot.all<-ggplot() +
geom_point(data = fish, aes(x = d13C_VPDB, y = d15N_air, color = "#c487a9"), size = 2, shape = 20, show.legend = TRUE) +
geom_point(data = ter, aes(x = d13C_VPDB, y = d15N_air, color = "#4d7d53"), size = 2, shape = 17, show.legend = TRUE) +
geom_point(data = lit, aes(x = d13C_VPDB, y = d15N_air, color = "#92c5de" ), size = 2, shape = 3, show.legend = TRUE) +
geom_point(data = pel, aes(x = d13C_VPDB, y = d15N_air, color = "#0571b0"), size = 2, shape = 8, show.legend = TRUE) +
theme_cowplot() +
labs(x = expression(paste(delta^{13}, "C (\u2030)")),
y = expression(paste(delta^{15}, "N (\u2030)"))) +
scale_fill_identity(name = "" ,
guide = "legend",
aesthetics = "color",
labels = c("rainbow trout","terrestrial","littoral","pelagic")) +
guides(colour = guide_legend(override.aes = list(pch = c(20, 17,3,8),
color = c("#c487a9", "#4d7d53","#92c5de","#0571b0")))) +
theme(legend.position=c(.70,.20),
axis.title.y = element_text(angle = 0))
|
/Retos/Reto 1/Código/Intersección.R
|
no_license
|
AngieTatianaP/Analisis_Numerico_AngieTatianaP
|
R
| false
| false
| 1,636
|
r
| ||
# Demo setup for get_cross_function() below (pMatrix needs the Matrix package).
require(Matrix)
# Example dimensions: a 10 x 6 matrix filled column-wise with 1..60.
n=10
m=6
Matrix_ini <- matrix(seq(n*m),nrow = n,ncol = m)
# funcio_a_aplicar <- get('*')
# NOTE(review): leftover debugging assignment; nothing below depends on it.
i=1
# Multiply a matrix elementwise with column-rotated copies of itself, apply a
# function to each product, and bind all results side by side.
#
# Args:
#   Matrix_ini: numeric matrix whose columns are crossed against each other.
#   function_to_aplly: name (string) of the function applied to each
#     elementwise product; defaults to the identity function "I".
#   ...: extra arguments forwarded to that function.
# Returns: the column-bound results for rotation offsets 0 .. floor(ncol/2).
get_cross_function <- function(Matrix_ini, function_to_aplly="I",...){
  apply_fun <- get(function_to_aplly)
  n_cols <- ncol(Matrix_ini)
  # Permutation matrix that rotates the columns by one position per application.
  rotate_by_one <- as(as.integer(c(n_cols, 1:(n_cols - 1))), "pMatrix")
  # Start from the identity permutation (offset 0 = the matrix times itself).
  current_perm <- as(as.integer(1:n_cols), "pMatrix")
  result <- NULL
  for (offset in seq_len(floor(n_cols / 2) + 1)) {
    crossed <- Matrix_ini * (Matrix_ini %*% current_perm)
    result <- cbind(result, apply_fun(crossed, ...))
    current_perm <- current_perm %*% rotate_by_one
  }
  result
}
# Example runs: identity, squared, and square-root of the cross products.
get_cross_function(Matrix_ini)
get_cross_function(Matrix_ini,'^',2)
get_cross_function(Matrix_ini,'^',1/2)
|
/cross function.R
|
no_license
|
jpuigde/PruebasPackages
|
R
| false
| false
| 798
|
r
|
require(Matrix)
n=10
m=6
Matrix_ini <- matrix(seq(n*m),nrow = n,ncol = m)
# funcio_a_aplicar <- get('*')
i=1
get_cross_function <- function(Matrix_ini, function_to_aplly="I",...){
function_to_aplly <- get(function_to_aplly)
for(i in 1:(floor(ncol(Matrix_ini)/2)+1) ){
if(i==1){
result <-NULL
permutation <- as(as.integer( c(ncol(Matrix_ini),1:(ncol(Matrix_ini)-1) ) ),"pMatrix")
permutation_matrix <- as(as.integer( c(1:ncol(Matrix_ini) ) ),"pMatrix")
}
result <- cbind(result, function_to_aplly( Matrix_ini * (Matrix_ini %*% permutation_matrix),...=...))
permutation_matrix <- permutation_matrix %*% permutation
}
return (result)
}
get_cross_function(Matrix_ini)
get_cross_function(Matrix_ini,'^',2)
get_cross_function(Matrix_ini,'^',1/2)
|
# Build a per-ring soil inorganic nitrogen pool table for the top 10 cm of soil.
#
# Args:
#   n_conc: data frame with Date, Ring and percent-N columns
#     (Total_PercN, Nitrate_PercN, Ammonium_PercN).
#   bk_density: data frame with ring, Depth and bulk_density_kg_m3 columns;
#     only the "0-10cm" rows are used.
# Returns: data frame with Date, Ring, nitrate_pool, ammonium_pool and
#   total_inorganic_pool columns, all pools expressed in g m-2.
make_soil_inorganic_n_pool <- function(n_conc,
                                       bk_density) {
    # only the 0-10 cm layer of the bulk density data is relevant here
    top_layer <- bk_density[which(bk_density$Depth == "0-10cm"), ]
    # copy each ring's bulk density onto the matching concentration rows
    # (rings are hard-coded as 1..6 at the EucFACE site)
    for (ring_id in 1:6) {
        n_conc[n_conc$Ring == ring_id, "bk_kg_m3"] <-
            top_layer[top_layer$ring == ring_id, "bulk_density_kg_m3"]
    }
    # percent N -> kg m-2 over the top 10 cm of soil (hence the * 0.1)
    n_conc$total_kg_m2 <- n_conc$Total_PercN * n_conc$bk_kg_m3 * 0.1 / 100
    n_conc$nitr_kg_m2 <- n_conc$Nitrate_PercN * n_conc$bk_kg_m3 * 0.1 / 100
    n_conc$ammo_kg_m2 <- n_conc$Ammonium_PercN * n_conc$bk_kg_m3 * 0.1 / 100
    # convert kg m-2 -> g m-2 for the returned pools
    n_conc$nitr_g_m2 <- n_conc$nitr_kg_m2 * 10^3
    n_conc$ammo_g_m2 <- n_conc$ammo_kg_m2 * 10^3
    n_conc$total_g_m2 <- n_conc$total_kg_m2 * 10^3
    # assemble the output with reader-friendly column names
    out_df <- n_conc[, c("Date", "Ring", "nitr_g_m2", "ammo_g_m2", "total_g_m2")]
    colnames(out_df) <- c("Date", "Ring", "nitrate_pool", "ammonium_pool", "total_inorganic_pool")
    out_df
}
|
/modules/n_pools_and_fluxes/soil_inorganic_n_pool/make_soil_inorganic_n_pool.R
|
no_license
|
SoilTSSM/EucFACE_P_synthesis
|
R
| false
| false
| 1,117
|
r
|
make_soil_inorganic_n_pool <- function(n_conc,
bk_density) {
# obtain ring averaged soil bulk density (0 - 10 cm only)
bk_density <- subset(bk_density, Depth == "0-10cm")
# assign bulk density onto each ring and each depth
for (i in 1:6) {
n_conc[n_conc$Ring == i, "bk_kg_m3"] <- bk_density[bk_density$ring == i, "bulk_density_kg_m3"]
}
# calculate total N in top 10cm of soil (hence the * 0.1), unit kg m-2
n_conc$total_kg_m2 <- n_conc$Total_PercN * n_conc$bk_kg_m3 * 0.1 / 100
n_conc$nitr_kg_m2 <- n_conc$Nitrate_PercN * n_conc$bk_kg_m3 * 0.1 / 100
n_conc$ammo_kg_m2 <- n_conc$Ammonium_PercN * n_conc$bk_kg_m3 * 0.1 / 100
# return in unit of g/m2
n_conc$nitr_g_m2 <-n_conc$nitr_kg_m2 * 10^3
n_conc$ammo_g_m2 <-n_conc$ammo_kg_m2 * 10^3
n_conc$total_g_m2 <-n_conc$total_kg_m2 * 10^3
myDF.out <- n_conc[,c("Date", "Ring", "nitr_g_m2", "ammo_g_m2", "total_g_m2")]
colnames(myDF.out) <- c("Date", "Ring", "nitrate_pool", "ammonium_pool", "total_inorganic_pool")
return(myDF.out)
}
|
# Auto-generated AFL fuzzing regression case for IntervalSurgeon's internal
# rcpp_depth routine; the argument list intentionally contains degenerate
# inputs (empty vector, NULLs, extreme integers) discovered by the fuzzer.
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 861686L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853834136L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
# Invoke the unexported C++ entry point with the fuzzed argument list.
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
# Print the result structure so output differences show up in test logs.
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609856742-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 713
|
r
|
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 861686L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853834136L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result)
|
---
title: "Data Wrangling"
author: "Kexin Fei"
date: "2/9/2018"
output: pdf_document
---
```{r setup, include=FALSE}
# Global knitr setup: show code in the rendered document.
knitr::opts_chunk$set(echo = TRUE)
library(dplyr)
library(knitr)
```
```{r}
# NOTE(review): setwd() makes this notebook non-portable; prefer
# project-relative paths (e.g. the here package).
setwd("~/Desktop/Data Challenge/Cyence/cyence_data_exercise_take_home")
df = read.csv("data_perm_take_home.csv")
dff = as.tibble(df)
# Clean the PERM case data: drop the identifier column, keep rows with a wage
# offer, restrict to two countries, parse dates, and derive the processing
# duration plus tidy wage-unit and job-level fields.
df1 = dff %>%
  select(-case_number) %>%
  filter(!is.na(wage_offer))%>%
  filter(country %in% c("Mexico", "Afghanistan")) %>%
  mutate(decision_date = as.character(decision_date))%>%
  mutate(decision_date = as.Date(decision_date, "%m/%d/%y"))%>%
  filter(!is.na(case_received_date))%>%
  mutate(duration = as.numeric(difftime(decision_date, case_received_date, units = c("days"))))%>%
  mutate(wage_unit = ifelse(as.character(wage_unit) == "", "Year", as.character(wage_unit)))%>%
  mutate(year_length = as.numeric(2018-employer_yr_established))%>%
  mutate(job_level = ifelse(is.na(job_level),"other",job_level))
df1[df1$job_education == "", "job_education"] = "NA"
# Annualize wages: hourly assumes a 40-hour week over 52 weeks.
df1[df1$wage_unit == "Hour",'wage_offer']=40*52*df1[df1$wage_unit == "Hour",'wage_offer']
df1[df1$wage_unit == "Week",'wage_offer']=52*df1[df1$wage_unit == "Week",'wage_offer']
df1[df1$wage_unit == "Month",'wage_offer']=12*df1[df1$wage_unit == "Month",'wage_offer']
```
The core dplyr verbs used throughout this document are `select()`, `filter()`,
`group_by()`, `summarize()`, and `mutate()`.

We can also sort the rows in a data frame according to the values in one or
more columns using the `arrange()` function. For instance,
`arrange(desc(year), continent)` organizes the rows by year (most recent
first), and then by continent.
```{r dplyr}
df %>%
group_by(id) %>%
mutate(cumsum = cumsum(value))
wrong = data_indeed %>%
select(advertiser_id, date_assignment_starts, date_assignment_ends, assign_days, revenue) %>%
slice(which(data_indeed$assign_days<0)) %>%
distinct()
wrong %>%
head() %>%
kable(caption = "Data Points with Wrong Dates")
```
```{r Date conversion, echo=FALSE}
df1 %>%
select(decision_date) %>%
mutate(decision_date = as.Date(decision_date, "%m/%d/%y"))%>%
mutate(week = week(decision_date)) %>%
mutate(month = month(decision_date)) %>%
mutate(day_of_week = weekdays(decision_date)) %>%
arrange(desc(month))
```
```{r summarize, echo=FALSE}
starwars %>%
group_by(species) %>%
summarise(n = n(),
mass = mean(mass, na.rm = TRUE)) %>%
filter(n > 1)
```
```{r as function, echo=FALSE}
#https://cran.r-project.org/web/packages/dplyr/vignettes/programming.html
library(tibble)
as.tibble(iris)
my_summarise <- function(df, expr1, expr2) {
expr1 <- enquo(expr1) #turns an argument into a string
expr2 <- enquo(expr2)
df %>%
group_by(!!expr1) %>%
summarise(
mean = mean(!!expr2),
sum = sum(!!expr2),
n = n()
)
}
x = my_summarise(iris, Species, Sepal.Width)
# To removing grouping, use ungroup
x %>%
ungroup() %>%
summarise(n = sum(n))
#Multiple variables in argument=================================================================
# Group `df` by any number of unquoted columns passed through `...` and
# report the mean of column `a` per group. quos() captures the dots as a
# list of quosures; `!!!` splices that list into group_by().
my_summarise <- function(df, ...) {
  grouping <- quos(...)
  summarise(group_by(df, !!!grouping), a = mean(a))
}
my_summarise(df, g1, g2)
```
```{r Consecutive/Rolling sums, echo=FALSE}
library(zoo)
x = c(1, 2, 3, 4, 5, 6)
rollapply(x, 3, sum)
```
|
/dplyr.R
|
no_license
|
kexinfei/DataWrangling
|
R
| false
| false
| 3,318
|
r
|
---
title: "Data Wrangling"
author: "Kexin Fei"
date: "2/9/2018"
output: pdf_document
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(dplyr)
library(knitr)
```
```{r}
setwd("~/Desktop/Data Challenge/Cyence/cyence_data_exercise_take_home")
df = read.csv("data_perm_take_home.csv")
dff = as.tibble(df)
df1 = dff %>%
select(-case_number) %>%
filter(!is.na(wage_offer))%>%
filter(country %in% c("Mexico", "Afghanistan")) %>%
mutate(decision_date = as.character(decision_date))%>%
mutate(decision_date = as.Date(decision_date, "%m/%d/%y"))%>%
filter(!is.na(case_received_date))%>%
mutate(duration = as.numeric(difftime(decision_date, case_received_date, units = c("days"))))%>%
mutate(wage_unit = ifelse(as.character(wage_unit) == "", "Year", as.character(wage_unit)))%>%
mutate(year_length = as.numeric(2018-employer_yr_established))%>%
mutate(job_level = ifelse(is.na(job_level),"other",job_level))
df1[df1$job_education == "", "job_education"] = "NA"
df1[df1$wage_unit == "Hour",'wage_offer']=40*52*df1[df1$wage_unit == "Hour",'wage_offer']
df1[df1$wage_unit == "Week",'wage_offer']=52*df1[df1$wage_unit == "Week",'wage_offer']
df1[df1$wage_unit == "Month",'wage_offer']=12*df1[df1$wage_unit == "Month",'wage_offer']
```
select()
filter()
group_by()
summarize()
mutate()
arrange(desc(year), continent)
#sort the rows in our data frame according to values in a certain column. We can use the arrange() function to do this. For instance, let's organize our rows by year (recent first), and then by continent.
```{r dplyr}
df %>%
group_by(id) %>%
mutate(cumsum = cumsum(value))
wrong = data_indeed %>%
select(advertiser_id, date_assignment_starts, date_assignment_ends, assign_days, revenue) %>%
slice(which(data_indeed$assign_days<0)) %>%
distinct()
wrong %>%
head() %>%
kable(caption = "Data Points with Wrong Dates")
```
```{r Date conversion, echo=FALSE}
df1 %>%
select(decision_date) %>%
mutate(decision_date = as.Date(decision_date, "%m/%d/%y"))%>%
mutate(week = week(decision_date)) %>%
mutate(month = month(decision_date)) %>%
mutate(day_of_week = weekdays(decision_date)) %>%
arrange(desc(month))
```
```{r summarize, echo=FALSE}
starwars %>%
group_by(species) %>%
summarise(n = n(),
mass = mean(mass, na.rm = TRUE)) %>%
filter(n > 1)
```
```{r as function, echo=FALSE}
#https://cran.r-project.org/web/packages/dplyr/vignettes/programming.html
library(tibble)
as.tibble(iris)
# Group `df` by the column named in `expr1` and summarise the column named
# in `expr2` with its mean, sum and group size. Both columns are passed
# unquoted: enquo() captures each argument as a quosure (the quoted
# expression together with its environment -- not a string), and `!!`
# splices it back into the data-masked dplyr verbs.
my_summarise <- function(df, expr1, expr2) {
  group_col <- enquo(expr1)
  value_col <- enquo(expr2)
  grouped <- group_by(df, !!group_col)
  summarise(grouped,
            mean = mean(!!value_col),
            sum = sum(!!value_col),
            n = n())
}
x = my_summarise(iris, Species, Sepal.Width)
# To removing grouping, use ungroup
x %>%
ungroup() %>%
summarise(n = sum(n))
#Multiple variables in argument=================================================================
# Group `df` by any number of unquoted columns passed through `...` and
# report the mean of column `a` per group. quos() captures the dots as a
# list of quosures; `!!!` splices that list into group_by().
my_summarise <- function(df, ...) {
  grouping <- quos(...)
  summarise(group_by(df, !!!grouping), a = mean(a))
}
my_summarise(df, g1, g2)
```
```{r Consecutive/Rolling sums, echo=FALSE}
library(zoo)
x = c(1, 2, 3, 4, 5, 6)
rollapply(x, 3, sum)
```
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lakeformation_operations.R
\name{lakeformation_list_permissions}
\alias{lakeformation_list_permissions}
\title{Returns a list of the principal permissions on the resource, filtered by
the permissions of the caller}
\usage{
lakeformation_list_permissions(CatalogId, Principal, ResourceType,
Resource, NextToken, MaxResults)
}
\arguments{
\item{CatalogId}{The identifier for the Data Catalog. By default, the account ID. The
Data Catalog is the persistent metadata store. It contains database
definitions, table definitions, and other control information to manage
your AWS Lake Formation environment.}
\item{Principal}{Specifies a principal to filter the permissions returned.}
\item{ResourceType}{Specifies a resource type to filter the permissions returned.}
\item{Resource}{A resource where you will get a list of the principal permissions.
This operation does not support getting privileges on a table with
columns. Instead, call this operation on the table, and the operation
returns the table and the table w columns.}
\item{NextToken}{A continuation token, if this is not the first call to retrieve this
list.}
\item{MaxResults}{The maximum number of results to return.}
}
\description{
Returns a list of the principal permissions on the resource, filtered by
the permissions of the caller. For example, if you are granted an ALTER
permission, you are able to see only the principal permissions for
ALTER.
}
\details{
This operation returns only those permissions that have been explicitly
granted.
For information about permissions, see Security and Access Control to
Metadata and Data.
}
\section{Request syntax}{
\preformatted{svc$list_permissions(
CatalogId = "string",
Principal = list(
DataLakePrincipalIdentifier = "string"
),
ResourceType = "CATALOG"|"DATABASE"|"TABLE"|"DATA_LOCATION",
Resource = list(
Catalog = list(),
Database = list(
CatalogId = "string",
Name = "string"
),
Table = list(
CatalogId = "string",
DatabaseName = "string",
Name = "string",
TableWildcard = list()
),
TableWithColumns = list(
CatalogId = "string",
DatabaseName = "string",
Name = "string",
ColumnNames = list(
"string"
),
ColumnWildcard = list(
ExcludedColumnNames = list(
"string"
)
)
),
DataLocation = list(
CatalogId = "string",
ResourceArn = "string"
)
),
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
/paws/man/lakeformation_list_permissions.Rd
|
permissive
|
jcheng5/paws
|
R
| false
| true
| 2,589
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lakeformation_operations.R
\name{lakeformation_list_permissions}
\alias{lakeformation_list_permissions}
\title{Returns a list of the principal permissions on the resource, filtered by
the permissions of the caller}
\usage{
lakeformation_list_permissions(CatalogId, Principal, ResourceType,
Resource, NextToken, MaxResults)
}
\arguments{
\item{CatalogId}{The identifier for the Data Catalog. By default, the account ID. The
Data Catalog is the persistent metadata store. It contains database
definitions, table definitions, and other control information to manage
your AWS Lake Formation environment.}
\item{Principal}{Specifies a principal to filter the permissions returned.}
\item{ResourceType}{Specifies a resource type to filter the permissions returned.}
\item{Resource}{A resource where you will get a list of the principal permissions.
This operation does not support getting privileges on a table with
columns. Instead, call this operation on the table, and the operation
returns the table and the table with columns.}
\item{NextToken}{A continuation token, if this is not the first call to retrieve this
list.}
\item{MaxResults}{The maximum number of results to return.}
}
\description{
Returns a list of the principal permissions on the resource, filtered by
the permissions of the caller. For example, if you are granted an ALTER
permission, you are able to see only the principal permissions for
ALTER.
}
\details{
This operation returns only those permissions that have been explicitly
granted.
For information about permissions, see Security and Access Control to
Metadata and Data.
}
\section{Request syntax}{
\preformatted{svc$list_permissions(
CatalogId = "string",
Principal = list(
DataLakePrincipalIdentifier = "string"
),
ResourceType = "CATALOG"|"DATABASE"|"TABLE"|"DATA_LOCATION",
Resource = list(
Catalog = list(),
Database = list(
CatalogId = "string",
Name = "string"
),
Table = list(
CatalogId = "string",
DatabaseName = "string",
Name = "string",
TableWildcard = list()
),
TableWithColumns = list(
CatalogId = "string",
DatabaseName = "string",
Name = "string",
ColumnNames = list(
"string"
),
ColumnWildcard = list(
ExcludedColumnNames = list(
"string"
)
)
),
DataLocation = list(
CatalogId = "string",
ResourceArn = "string"
)
),
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
# Step 0 - prep env -------------------------------------------------------
# Step 1 - make plot ------------------------------------------------------
weekly_price_to_share <-
coffee_clean %>%
group_by(relweek, shop_desc_clean) %>%
summarise(sales = sum(packs),
price = mean(price)) %>%
group_by(relweek) %>%
mutate(share = sales/sum(sales),
price = price,
shop_desc_clean = toproper(shop_desc_clean),
shop_desc_clean = ifelse(shop_desc_clean == "Aldi & lidl", "Aldi & Lidl",
shop_desc_clean)) %>%
ungroup() %>%
group_by(shop_desc_clean) %>%
mutate(relweek = row_number()) %>%
ggplot(aes(x = price, y = share)) +
geom_point(aes(colour = shop_desc_clean), size = 2.5, alpha = .75) +
geom_smooth(aes(colour = shop_desc_clean), method = "lm", formula = y ~ x + poly(x, 2)) +
facet_grid(. ~ shop_desc_clean, scales = "free_x") +
scale_colour_brewer(palette = "Dark2", type = "qual") +
scale_y_continuous(labels = scales::percent) +
xlab("Average price (£)") +
ylab("Market Share") +
guides(colour = "none") +
theme_jim
|
/retail/project/r/files/504_vis_weekly_price_to_share.R
|
no_license
|
Busyclover/icl
|
R
| false
| false
| 1,137
|
r
|
# Step 0 - prep env -------------------------------------------------------
# Step 1 - make plot ------------------------------------------------------
weekly_price_to_share <-
coffee_clean %>%
group_by(relweek, shop_desc_clean) %>%
summarise(sales = sum(packs),
price = mean(price)) %>%
group_by(relweek) %>%
mutate(share = sales/sum(sales),
price = price,
shop_desc_clean = toproper(shop_desc_clean),
shop_desc_clean = ifelse(shop_desc_clean == "Aldi & lidl", "Aldi & Lidl",
shop_desc_clean)) %>%
ungroup() %>%
group_by(shop_desc_clean) %>%
mutate(relweek = row_number()) %>%
ggplot(aes(x = price, y = share)) +
geom_point(aes(colour = shop_desc_clean), size = 2.5, alpha = .75) +
geom_smooth(aes(colour = shop_desc_clean), method = "lm", formula = y ~ x + poly(x, 2)) +
facet_grid(. ~ shop_desc_clean, scales = "free_x") +
scale_colour_brewer(palette = "Dark2", type = "qual") +
scale_y_continuous(labels = scales::percent) +
xlab("Average price (£)") +
ylab("Market Share") +
guides(colour = "none") +
theme_jim
|
library(RLLVMCompile)
# Test function for RLLVMCompile's nested-loop handling: counts every
# iteration of a triangular double loop, so the result equals
# n * (n + 1) / 2 (55 for n = 10, as asserted below).
# NOTE: `printf` is not defined in base R -- this body is meant to be
# compiled via compileFunction(), where printf resolves to C's printf.
f = function(n) {
  ctr = 0L
  # inner loop runs i times for each outer i; print when on the diagonal
  for(i in 1:n)
    for(j in 1:i) {
      if(i == j)
        printf("diagonal %d\n", i)
      ctr = ctr + 1L
    }
  ctr
}
fc = compileFunction(f, Int32Type, Int32Type)
stopifnot(.llvm(fc, 10) == 55L)
cat("okay\n")
|
/tests/nestedLoops2.R
|
no_license
|
duncantl/RLLVMCompile
|
R
| false
| false
| 286
|
r
|
library(RLLVMCompile)
# Test function for RLLVMCompile's nested-loop handling: counts every
# iteration of a triangular double loop, so the result equals
# n * (n + 1) / 2 (55 for n = 10, as asserted below).
# NOTE: `printf` is not defined in base R -- this body is meant to be
# compiled via compileFunction(), where printf resolves to C's printf.
f = function(n) {
  ctr = 0L
  # inner loop runs i times for each outer i; print when on the diagonal
  for(i in 1:n)
    for(j in 1:i) {
      if(i == j)
        printf("diagonal %d\n", i)
      ctr = ctr + 1L
    }
  ctr
}
fc = compileFunction(f, Int32Type, Int32Type)
stopifnot(.llvm(fc, 10) == 55L)
cat("okay\n")
|
## Figure 1 ----
library(tidyverse)
library(survival)
library(broom)
library(ggfortify)
library(ggbeeswarm)
library(scico)
library(scales)
library(ggpubr)
library(cowplot)
source(file.path("functions_dataset_1" , "clean_data.R"))
# load data
total <- clean_data()
## Statistics ----
# Summarize Lifetime data by experiment and specified additional column
# Per-experiment summary of branch lifetimes: within each culture and each
# level of `by_column` (an unquoted column name, embraced with {{ }}),
# average the reciprocal lifetime. Using 1/Lifetime compensates for the
# right-skewed raw lifetime distribution. Returns an ungrouped frame.
mean_lifetime_by_experiment <- function(total_data, by_column) {
  grouped <- group_by(total_data, Culture, {{by_column}})
  transformed <- mutate(grouped, Lifetime_trans = 1 / Lifetime)
  per_group <- summarise(transformed, Lifetime = mean(Lifetime_trans))
  ungroup(per_group)
}
## raw lifetimes not accounting for right-censoring of data - not included in paper anymore -
## A) Genotype --
# Mean lifetime per experiment
# genotype_exp <- mean_lifetime_by_experiment(total, Genotype)
#
#
# # Testing model assumptions
# m0_geno <- aov(Lifetime ~ Genotype, data = genotype_exp)
# autoplot(m0_geno, which = 1:4, ncol = 2, label.size = 3, colour = "Genotype")
# shapiro.test(m0_geno$residuals)
# car::leveneTest(m0_geno)
#
# # Testing with Welch's test (non-equal variances)
# m0_genotype <- compare_means(Lifetime ~ Genotype,
# data = genotype_exp, method = "t.test")
#
# not performed anymore
# ## E) Location --
# location_exp <- mean_lifetime_by_experiment(total, Location)
#
#
# # Testing model assumptions
# m0_loc <- aov(Lifetime ~ Location, data = location_exp)
# autoplot(m0_loc, which = 1:4, ncol = 2, label.size = 3, colour = "Location")
#
#
# # Testing with Welch's test (non-equal variances)
# m0_location <- compare_means(Lifetime ~ Location,
# data = filter(location_exp, Location != "Unclear"), method = "t.test")
#
#
#
# ## I) Branchtype --
# branchtype_exp <- mean_lifetime_by_experiment(total, Branchtype)
#
#
# # Testing model assumptions
# m0_btype <- aov(Lifetime ~ Branchtype, data = branchtype_exp)
# autoplot(m0_btype, which = 1:4, ncol = 2, label.size = 3, colour = "Branchtype")
#
#
# # Post Hoc comparisons: pairwise Welch's test (non-equal variances)
# m0_branchtype <- compare_means(Lifetime ~ Branchtype,
# data = branchtype_exp, method = "t.test",
# p.adjust.method = "holm")
#
#
### Survival analysis
## c) Genotype --
m_genotype <- coxph(Surv(Lifetime, CompleteData) ~ Genotype,
weights = Prob,
data = total)
# Diagnostics
diag_surv_genotype<- cox.zph(m_genotype)
survminer::ggcoxzph(diag_surv_genotype) # Schoenfeld test
survminer::ggcoxdiagnostics(m_genotype, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_genotype, exponentiate = T)
## G) Location --
m_location <- coxph(Surv(Lifetime, CompleteData) ~ Location ,
weights = Prob,
data = total)
# Diagnostics
diag_surv_location<- cox.zph(m_location)
survminer::ggcoxzph(diag_surv_location) # Schoenfeld test
survminer::ggcoxdiagnostics(m_location, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_location, exponentiate = T)
## K) Branchtype -- (now moved to Figure 2)
m_branchtype <- coxph(Surv(Lifetime, CompleteData) ~ Branchtype,
weights = Prob,
data = total)
# Diagnostics
diag_surv_branchtype<- cox.zph(m_branchtype)
survminer::ggcoxzph(diag_surv_branchtype) # Schoenfeld test
survminer::ggcoxdiagnostics(m_branchtype, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_branchtype, exponentiate = T)
## Plotting setup ----
Branchtheme <- theme_minimal() +
theme(plot.title = element_text(face = "bold"),
plot.title.position = "plot", plot.caption.position = "plot",
legend.position = "none", legend.justification = "top",
panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()
#panel.grid.major.x = element_blank(),
#text = element_text(family = "Source Sans Pro")
)
## Colors from scico-package --
# Crameri, F. (2018). Scientific colour maps. Zenodo.
# http://doi.org/10.5281/zenodo.1243862
# for Genotype & Location
sci_pal = "batlow"
# For Branchtypes
sci_pal2 <- "roma"
## custom plotting functions --
# Lifetime beeswarm superplot
# Lord, SL et al. (2020) SuperPlots: Communicating reproducibility and variability in cell biology.
# J Cell Biol 1 June 2020; doi: https://doi.org/10.1083/jcb.202001064)
#
### plot not included anymore
#
# lifetime_swarm <- function(per_exp, total, X_axis) {
# ggplot(per_exp, aes(x = {{X_axis}}, y = 1/Lifetime, color = {{X_axis}})) + # 1/Lifetime to reverse normalization for stats
# geom_point() +
#
# stat_summary(fun = mean, geom = "crossbar", color = "black", width = 0.6, size = 0.3) +
# stat_summary(fun.data = "mean_se", geom = "errorbar", color = "black", width = 0.4, size = 0.3) +
#
# geom_quasirandom(aes(y = Lifetime), data = {{total}}, alpha = 0.01, shape = 16) + # non-collapsing
# Branchtheme + theme(panel.grid.major.x = element_blank(),
# ) +
#
# scale_y_continuous(breaks = c(0, 6, 12, 18, 24),
# labels = c("0 h", "6 h", "12 h", "18 h", "24 h")) +
#
# labs(
# x = "",
# y = "Lifetime per branch"
# )
# }
#
# Scatterplot highlighting each indiviual branch event, colored by specified column
# Scatter of formation time vs. collapse time for every branch event,
# coloured by the unquoted grouping column `Group` (embraced with {{ }}
# inside aes(), which is data-masked). Relies on the globally defined
# `Branchtheme`.
#
# NOTE: the original passed the data as `{{total}}`; ggplot()'s data
# argument is not data-masked, so the embrace was a no-op and is dropped.
lifetime_scatter <- function(total, Group) {
  # shared axis setup: both axes span a 24 h window in 6 h steps
  hour_breaks <- c(0, 6, 12, 18, 24)
  hour_labels <- c("0 h", "6 h", "12 h", "18 h", "24 h")
  ggplot(total, aes(x = Collapse, y = Formation, color = {{Group}})) +
    geom_point(alpha = 0.1, shape = 16) +
    Branchtheme +
    labs(
      x = "Timepoint of Collapse",
      y = "Timepoint of Formation"
    ) +
    scale_x_continuous(breaks = hour_breaks, labels = hour_labels) +
    scale_y_continuous(breaks = hour_breaks, labels = hour_labels)
}
## Survival curve with CI
# Step-function survival curve with a shaded confidence band, one
# colour/fill per level of the `set` column. `data` is expected to be
# tidied survfit() output with columns time, estimate, conf.low,
# conf.high and set. Relies on the globally defined `Branchtheme`.
#
# NOTE: the original passed the data as `{{data}}`; ggplot()'s data
# argument is not data-masked, so the embrace was a no-op and is dropped.
survival_CI <- function(data) {
  # shared x-axis setup: follow-up shown over 24 h in 6 h steps
  hour_breaks <- c(0, 6, 12, 18, 24)
  hour_labels <- c("0 h", "6 h", "12 h", "18 h", "24 h")
  ggplot(data = data, aes(x = time, color = set, fill = set)) +
    geom_step(aes(y = estimate), size = 1) +
    # confidence ribbon drawn without an outline so only the step line shows
    geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = 0.3, color = NA) +
    Branchtheme +
    scale_y_continuous(labels = scales::percent,
                       limits = c(0, 1)) +
    scale_x_continuous(breaks = hour_breaks,
                       labels = hour_labels) +
    labs(x = "Follow up time",
         y = " Survival")  # NOTE(review): leading space in " Survival" looks accidental -- confirm
}
## A: WT vs. KO -----
# lifetime beeswarm -- not included anymore
#
# A1 <- lifetime_swarm(genotype_exp, total, Genotype) +
#
# stat_pvalue_manual(m0_genotype, label = "p.signif",
# y = 23, tip.length = 0) +
#
# scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7)
#
#
# total overview axon vs. dendrite by Branchtypes and Genotype
A2 <- lifetime_scatter(total, Genotype) +
scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7)
#A3c: CoxPH + Confidence intervals
df_Genotype <- data.frame(Genotype = factor(c("Plppr3 +/+", "Plppr3 -/-"), levels=levels(total$Genotype)))
CI_genotype <- tidy(survfit(m_genotype, newdata=df_Genotype)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$")
(A3c <- survival_CI(CI_genotype) +
scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7) +
scale_fill_scico_d(palette = sci_pal, begin = 0, end = 0.7)
)
## Neurite type ----
# # not included anymore
# (B1 <- lifetime_swarm(filter(location_exp, Location != "Unclear"),
# filter(total, Location != "Unclear"),
# Location) +
# stat_pvalue_manual(m0_location, label = "p.signif",
# y = 23, tip.length = 0) +
# scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5))
# total overview axon vs. dendrite by Branchtypes and Genotype
B2 <- lifetime_scatter(filter(total, Location != "Unclear"), Location) +
scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5)
#B3c: CoxPH + CIst
df_Location <- data.frame(Location = factor(c("Axon", "Neurite"), levels=levels(total$Location)))
CI_location <- tidy(survfit(m_location, newdata=df_Location)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$")
(B3c <- survival_CI(CI_location) +
scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5) +
scale_fill_scico_d(palette = sci_pal, begin = 0.25, end = 0.5)
)
## Branchtype ----
# not included anymore
#
# (C1 <- lifetime_swarm(branchtype_exp,
# total,
# Branchtype) +
# stat_pvalue_manual(m0_branchtype, label = "p.signif",
# y = c(21, 22.5, 24, 18, 19.5 ), tip.length = 0, hide.ns = T) +
#
# scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1) +
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
# )
# total overview axon vs. dendrite by Branchtypes (now Figure 2)
(C2 <- lifetime_scatter(total, Branchtype) +
scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1))
#B3c: CoxPH + CIst
df_type <- data.frame(Branchtype = factor(c("Filopodium", "Mixed", "Lamellipodium", "Splitting"),
levels=levels(total$Branchtype)),
set = as.character(1: length(levels(total$Branchtype))))
CI_type <- tidy(survfit(m_branchtype, newdata=df_type)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$") %>%
left_join(df_type)
(C3c <- survival_CI(CI_type) +
scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1) +
scale_fill_scico_d(palette = sci_pal2, begin = 0, end = 1))
# Load DAG .png files
A4 <- ggdraw() + draw_image(file.path("figures", "figure_1","PLPPR3.png"), scale = 0.9)
B4 <- ggdraw() + draw_image(file.path("figures", "figure_1","Location.png"), scale = 0.9)
C4 <- ggdraw() + draw_image(file.path("figures", "figure_1","Type.png"), scale = 0.9)
## Merge it in one figure ----
A <- plot_grid(A2, A3c, A4, scale = 0.9, labels = c("A", "B", "C"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
B <- plot_grid(B2, B3c, B4, scale = 0.9, labels = c("D", "E", "F"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
# now Figure 2
C <- plot_grid(C2, C3c, C4, scale = 0.9, labels = c("G", "H", "I"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
Fig_1 <- plot_grid(A, B, C, nrow = 3, rel_heights = c(1, 1, 1.2))
ggsave(file.path("figures", "figure_1", "Fig_1_raw.png"), Fig_1, device = "png", scale = 1, width = 210, height = 210, units = "mm" )
ggsave(file.path("figures", "figure_1", "Fig_1.pdf"), Fig_1, device = "pdf", scale = 1, width = 210, height = 210, units = "mm" )
|
/Figure_1.R
|
permissive
|
jo-fuchs/Branch-Lifetime-PRG2
|
R
| false
| false
| 11,174
|
r
|
## Figure 1 ----
library(tidyverse)
library(survival)
library(broom)
library(ggfortify)
library(ggbeeswarm)
library(scico)
library(scales)
library(ggpubr)
library(cowplot)
source(file.path("functions_dataset_1" , "clean_data.R"))
# load data
total <- clean_data()
## Statistics ----
# Summarize Lifetime data by experiment and specified additional column
# Per-experiment summary of branch lifetimes: within each culture and each
# level of `by_column` (an unquoted column name, embraced with {{ }}),
# average the reciprocal lifetime. Using 1/Lifetime compensates for the
# right-skewed raw lifetime distribution. Returns an ungrouped frame.
mean_lifetime_by_experiment <- function(total_data, by_column) {
  grouped <- group_by(total_data, Culture, {{by_column}})
  transformed <- mutate(grouped, Lifetime_trans = 1 / Lifetime)
  per_group <- summarise(transformed, Lifetime = mean(Lifetime_trans))
  ungroup(per_group)
}
## raw lifetimes not accounting for right-censoring of data - not included in paper anymore -
## A) Genotype --
# Mean lifetime per experiment
# genotype_exp <- mean_lifetime_by_experiment(total, Genotype)
#
#
# # Testing model assumptions
# m0_geno <- aov(Lifetime ~ Genotype, data = genotype_exp)
# autoplot(m0_geno, which = 1:4, ncol = 2, label.size = 3, colour = "Genotype")
# shapiro.test(m0_geno$residuals)
# car::leveneTest(m0_geno)
#
# # Testing with Welch's test (non-equal variances)
# m0_genotype <- compare_means(Lifetime ~ Genotype,
# data = genotype_exp, method = "t.test")
#
# not performed anymore
# ## E) Location --
# location_exp <- mean_lifetime_by_experiment(total, Location)
#
#
# # Testing model assumptions
# m0_loc <- aov(Lifetime ~ Location, data = location_exp)
# autoplot(m0_loc, which = 1:4, ncol = 2, label.size = 3, colour = "Location")
#
#
# # Testing with Welch's test (non-equal variances)
# m0_location <- compare_means(Lifetime ~ Location,
# data = filter(location_exp, Location != "Unclear"), method = "t.test")
#
#
#
# ## I) Branchtype --
# branchtype_exp <- mean_lifetime_by_experiment(total, Branchtype)
#
#
# # Testing model assumptions
# m0_btype <- aov(Lifetime ~ Branchtype, data = branchtype_exp)
# autoplot(m0_btype, which = 1:4, ncol = 2, label.size = 3, colour = "Branchtype")
#
#
# # Post Hoc comparisons: pairwise Welch's test (non-equal variances)
# m0_branchtype <- compare_means(Lifetime ~ Branchtype,
# data = branchtype_exp, method = "t.test",
# p.adjust.method = "holm")
#
#
### Survival analysis
## c) Genotype --
m_genotype <- coxph(Surv(Lifetime, CompleteData) ~ Genotype,
weights = Prob,
data = total)
# Diagnostics
diag_surv_genotype<- cox.zph(m_genotype)
survminer::ggcoxzph(diag_surv_genotype) # Schoenfeld test
survminer::ggcoxdiagnostics(m_genotype, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_genotype, exponentiate = T)
## G) Location --
m_location <- coxph(Surv(Lifetime, CompleteData) ~ Location ,
weights = Prob,
data = total)
# Diagnostics
diag_surv_location<- cox.zph(m_location)
survminer::ggcoxzph(diag_surv_location) # Schoenfeld test
survminer::ggcoxdiagnostics(m_location, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_location, exponentiate = T)
## K) Branchtype -- (now moved to Figure 2)
m_branchtype <- coxph(Surv(Lifetime, CompleteData) ~ Branchtype,
weights = Prob,
data = total)
# Diagnostics
diag_surv_branchtype<- cox.zph(m_branchtype)
survminer::ggcoxzph(diag_surv_branchtype) # Schoenfeld test
survminer::ggcoxdiagnostics(m_branchtype, type = "deviance", linear.predictions = FALSE) #
# Results table
tidy(m_branchtype, exponentiate = T)
## Plotting setup ----
Branchtheme <- theme_minimal() +
theme(plot.title = element_text(face = "bold"),
plot.title.position = "plot", plot.caption.position = "plot",
legend.position = "none", legend.justification = "top",
panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()
#panel.grid.major.x = element_blank(),
#text = element_text(family = "Source Sans Pro")
)
## Colors from scico-package --
# Crameri, F. (2018). Scientific colour maps. Zenodo.
# http://doi.org/10.5281/zenodo.1243862
# for Genotype & Location
sci_pal = "batlow"
# For Branchtypes
sci_pal2 <- "roma"
## custom plotting functions --
# Lifetime beeswarm superplot
# Lord, SL et al. (2020) SuperPlots: Communicating reproducibility and variability in cell biology.
# J Cell Biol 1 June 2020; doi: https://doi.org/10.1083/jcb.202001064)
#
### plot not included anymore
#
# lifetime_swarm <- function(per_exp, total, X_axis) {
# ggplot(per_exp, aes(x = {{X_axis}}, y = 1/Lifetime, color = {{X_axis}})) + # 1/Lifetime to reverse normalization for stats
# geom_point() +
#
# stat_summary(fun = mean, geom = "crossbar", color = "black", width = 0.6, size = 0.3) +
# stat_summary(fun.data = "mean_se", geom = "errorbar", color = "black", width = 0.4, size = 0.3) +
#
# geom_quasirandom(aes(y = Lifetime), data = {{total}}, alpha = 0.01, shape = 16) + # non-collapsing
# Branchtheme + theme(panel.grid.major.x = element_blank(),
# ) +
#
# scale_y_continuous(breaks = c(0, 6, 12, 18, 24),
# labels = c("0 h", "6 h", "12 h", "18 h", "24 h")) +
#
# labs(
# x = "",
# y = "Lifetime per branch"
# )
# }
#
# Scatterplot highlighting each indiviual branch event, colored by specified column
# Scatter of formation time vs. collapse time for every branch event,
# coloured by the unquoted grouping column `Group` (embraced with {{ }}
# inside aes(), which is data-masked). Relies on the globally defined
# `Branchtheme`.
#
# NOTE: the original passed the data as `{{total}}`; ggplot()'s data
# argument is not data-masked, so the embrace was a no-op and is dropped.
lifetime_scatter <- function(total, Group) {
  # shared axis setup: both axes span a 24 h window in 6 h steps
  hour_breaks <- c(0, 6, 12, 18, 24)
  hour_labels <- c("0 h", "6 h", "12 h", "18 h", "24 h")
  ggplot(total, aes(x = Collapse, y = Formation, color = {{Group}})) +
    geom_point(alpha = 0.1, shape = 16) +
    Branchtheme +
    labs(
      x = "Timepoint of Collapse",
      y = "Timepoint of Formation"
    ) +
    scale_x_continuous(breaks = hour_breaks, labels = hour_labels) +
    scale_y_continuous(breaks = hour_breaks, labels = hour_labels)
}
## Survival curve with CI
# Step-function survival curve with a shaded confidence band, one
# colour/fill per level of the `set` column. `data` is expected to be
# tidied survfit() output with columns time, estimate, conf.low,
# conf.high and set. Relies on the globally defined `Branchtheme`.
#
# NOTE: the original passed the data as `{{data}}`; ggplot()'s data
# argument is not data-masked, so the embrace was a no-op and is dropped.
survival_CI <- function(data) {
  # shared x-axis setup: follow-up shown over 24 h in 6 h steps
  hour_breaks <- c(0, 6, 12, 18, 24)
  hour_labels <- c("0 h", "6 h", "12 h", "18 h", "24 h")
  ggplot(data = data, aes(x = time, color = set, fill = set)) +
    geom_step(aes(y = estimate), size = 1) +
    # confidence ribbon drawn without an outline so only the step line shows
    geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = 0.3, color = NA) +
    Branchtheme +
    scale_y_continuous(labels = scales::percent,
                       limits = c(0, 1)) +
    scale_x_continuous(breaks = hour_breaks,
                       labels = hour_labels) +
    labs(x = "Follow up time",
         y = " Survival")  # NOTE(review): leading space in " Survival" looks accidental -- confirm
}
## A: WT vs. KO -----
# lifetime beeswarm -- not included anymore
#
# A1 <- lifetime_swarm(genotype_exp, total, Genotype) +
#
# stat_pvalue_manual(m0_genotype, label = "p.signif",
# y = 23, tip.length = 0) +
#
# scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7)
#
#
# total overview axon vs. dendrite by Branchtypes and Genotype
A2 <- lifetime_scatter(total, Genotype) +
scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7)
#A3c: CoxPH + Confidence intervals
# A3: genotype effect -- Cox PH survival predictions with confidence intervals.
# Predict survival for the two genotypes, then reshape the tidy survfit output
# so estimate/conf.high/conf.low become per-set value columns
# (.value + names_pattern split of "estimate.1", "conf.high.1", ...).
df_Genotype <- data.frame(Genotype = factor(c("Plppr3 +/+", "Plppr3 -/-"), levels=levels(total$Genotype)))
CI_genotype <- tidy(survfit(m_genotype, newdata=df_Genotype)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$")
(A3c <- survival_CI(CI_genotype) +
scale_color_scico_d(palette = sci_pal, begin = 0, end = 0.7) +
scale_fill_scico_d(palette = sci_pal, begin = 0, end = 0.7)
)
## Neurite type ----
# # not included anymore
# (B1 <- lifetime_swarm(filter(location_exp, Location != "Unclear"),
# filter(total, Location != "Unclear"),
# Location) +
# stat_pvalue_manual(m0_location, label = "p.signif",
# y = 23, tip.length = 0) +
# scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5))
# total overview axon vs. dendrite by Branchtypes and Genotype
B2 <- lifetime_scatter(filter(total, Location != "Unclear"), Location) +
scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5)
#B3c: CoxPH + CIs (same survfit/tidy/pivot_longer reshaping, by location)
df_Location <- data.frame(Location = factor(c("Axon", "Neurite"), levels=levels(total$Location)))
CI_location <- tidy(survfit(m_location, newdata=df_Location)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$")
(B3c <- survival_CI(CI_location) +
scale_color_scico_d(palette = sci_pal, begin = 0.25, end = 0.5) +
scale_fill_scico_d(palette = sci_pal, begin = 0.25, end = 0.5)
)
## Branchtype ----
# not included anymore
#
# (C1 <- lifetime_swarm(branchtype_exp,
# total,
# Branchtype) +
# stat_pvalue_manual(m0_branchtype, label = "p.signif",
# y = c(21, 22.5, 24, 18, 19.5 ), tip.length = 0, hide.ns = T) +
#
# scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1) +
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
# )
# total overview axon vs. dendrite by Branchtypes (now Figure 2)
(C2 <- lifetime_scatter(total, Branchtype) +
scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1))
#B3c: CoxPH + CIs; the extra 'set' column lets left_join() map the numeric
#prediction sets back to their branch-type labels.
df_type <- data.frame(Branchtype = factor(c("Filopodium", "Mixed", "Lamellipodium", "Splitting"),
levels=levels(total$Branchtype)),
set = as.character(1: length(levels(total$Branchtype))))
CI_type <- tidy(survfit(m_branchtype, newdata=df_type)) %>%
pivot_longer(!time:n.censor,
names_to = c(".value", "set"),
names_pattern = "(.+).(.+)$") %>%
left_join(df_type)
(C3c <- survival_CI(CI_type) +
scale_color_scico_d(palette = sci_pal2, begin = 0, end = 1) +
scale_fill_scico_d(palette = sci_pal2, begin = 0, end = 1))
# Load DAG .png files
A4 <- ggdraw() + draw_image(file.path("figures", "figure_1","PLPPR3.png"), scale = 0.9)
B4 <- ggdraw() + draw_image(file.path("figures", "figure_1","Location.png"), scale = 0.9)
C4 <- ggdraw() + draw_image(file.path("figures", "figure_1","Type.png"), scale = 0.9)
## Merge it in one figure ----
# One row per comparison: scatter panel, CI panel, DAG image (widths 2:2:1).
A <- plot_grid(A2, A3c, A4, scale = 0.9, labels = c("A", "B", "C"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
B <- plot_grid(B2, B3c, B4, scale = 0.9, labels = c("D", "E", "F"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
# now Figure 2
C <- plot_grid(C2, C3c, C4, scale = 0.9, labels = c("G", "H", "I"),
rel_widths = c(2, 2, 1), nrow = 1, align = "h", axis = "bt")
Fig_1 <- plot_grid(A, B, C, nrow = 3, rel_heights = c(1, 1, 1.2))
# Export both a raster and a vector version of the assembled figure.
ggsave(file.path("figures", "figure_1", "Fig_1_raw.png"), Fig_1, device = "png", scale = 1, width = 210, height = 210, units = "mm" )
ggsave(file.path("figures", "figure_1", "Fig_1.pdf"), Fig_1, device = "pdf", scale = 1, width = 210, height = 210, units = "mm" )
|
## Assignment: Caching the Inverse of a Matrix
## Reference mentioned in the course forum
## https://github.com/lgreski/datasciencectacontent/blob/master/markdown/rprog-breakingDownMakeVector.md
## The function makeCacheMatrix is based on the function "makeVector" in the assignment instruction
## This function creates a special "matrix" object that can cache its inverse.
## Create a cache-aware wrapper around a matrix.
##
## The matrix and its (lazily computed) inverse live in this function's
## environment; the returned list exposes four closures to read and write them.
## Replacing the matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stale inverse must be recomputed
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse

  list(
    set = set, get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The function cacheSolve is adapted from the function "cachemean" in the assignment instruction
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
##
## On a cache hit the stored inverse is returned (with a "getting cached data"
## message); otherwise the inverse is computed with solve(), stored back via
## x$setinverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
bearsly/ProgrammingAssignment2
|
R
| false
| false
| 1,171
|
r
|
## Assignment: Caching the Inverse of a Matrix
## Reference mentioned in the course forum
## https://github.com/lgreski/datasciencectacontent/blob/master/markdown/rprog-breakingDownMakeVector.md
## The function makeCacheMatrix is based on the function "makeVector" in the assignment instruction
## This function creates a special "matrix" object that can cache its inverse.
## Constructor for a matrix wrapper that can cache its inverse.
## The matrix 'x' and cached inverse 'i' live in this function's environment;
## the returned list of closures reads/writes them via <<-.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
# Replacing the matrix invalidates the cached inverse.
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
# Expose the four accessors as a named list (the "special matrix" object).
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The function cacheSolve is adapted from the function "cachemean" in the assignment instruction
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## Compute (or retrieve from cache) the inverse of the special "matrix"
## created by makeCacheMatrix. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
# Cache hit: return the stored inverse without recomputing.
if (!is.null(i)) {
message("getting cached data")
return(i)
}
# Cache miss: invert, memoize, return.
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
## Merge an intron-level and an exon-level result object into one
## InterestResult: for every intron, average the counts / scaled retention of
## its flanking exons (an exon ending at intron start - 1 and an exon starting
## at intron end + 1) and append them as extra "exon" sample columns.
##
## intObj, exObj  intron- and exon-level objects supporting counts(),
##                scaledRetention(), rowData(), colData()
## intExCol       rowData column(s) flagging rows "intron"/"exon"; one shared
##                name, or c(intron_col, exon_col)
## mean.na.rm     drop NAs when averaging flanking-exon values
## postExName     suffix appended to the new exon sample column names
interestResultIntEx<- function(intObj, exObj, intExCol=c(),
mean.na.rm=TRUE, postExName="ex_junc" ){
# Keep only intron rows in intObj and exon rows in exObj.
if(length(intExCol)==1){
intObj<- intObj[which(rowData(intObj)[,intExCol]=="intron"), ]
exObj<- exObj[which(rowData(exObj)[,intExCol]=="exon"), ]
}
if(length(intExCol)==2){
intObj<- intObj[which(rowData(intObj)[,intExCol[1]]=="intron"), ]
exObj<- exObj[which(rowData(exObj)[,intExCol[2]]=="exon"), ]
}
# 1-bp anchors just after each exon end and just before each exon begin.
exEndGr<- GenomicRanges::GRanges(rowData(exObj)$chr,
IRanges::IRanges(rowData(exObj)$end+1,
rowData(exObj)$end+1))
exBegGr<- GenomicRanges::GRanges(rowData(exObj)$chr,
IRanges::IRanges(rowData(exObj)$begin-1,
rowData(exObj)$begin-1))
intGr<- GenomicRanges::GRanges(rowData(intObj)$chr,
IRanges::IRanges(rowData(intObj)$begin,
rowData(intObj)$end))
# Introns whose start abuts an exon end (upstream exon) and whose end abuts
# an exon begin (downstream exon).
intExEnd<-GenomicRanges::findOverlaps(intGr, exEndGr, type="start")
intExBeg<-GenomicRanges::findOverlaps(intGr, exBegGr, type="end")
exCnt<- counts(exObj)
# Per sample: mean count of each intron's upstream and downstream flanking
# exons, then the mean of the two; trunc() keeps integer-like counts.
meanExVals<- lapply(1:ncol(exCnt), function(j){
endVals<- tapply(subjectHits(intExEnd), queryHits(intExEnd),
function(tmp) mean(exCnt[unique(tmp),j], na.rm=mean.na.rm))
begVals<- tapply(subjectHits(intExBeg), queryHits(intExBeg),
function(tmp) mean(exCnt[unique(tmp),j], na.rm=mean.na.rm))
return(trunc(apply(cbind(endVals, begVals), 1, mean,
na.rm=mean.na.rm)))
})
exCnt<- as.data.frame(meanExVals)
# Suffix the appended exon columns so they stay distinguishable.
if(all(colnames(counts(intObj)) %in% colnames(counts(exObj))))
colnames(exCnt)<- paste(colnames(counts(intObj)),postExName,sep="_")
# Same flanking-exon averaging for scaled retention (no trunc()).
exSr<- scaledRetention(exObj)
meanExSrVals<- lapply(1:ncol(exSr), function(j){
endVals<- tapply(subjectHits(intExEnd), queryHits(intExEnd),
function(tmp) mean(exSr[unique(tmp),j], na.rm=mean.na.rm))
begVals<- tapply(subjectHits(intExBeg), queryHits(intExBeg),
function(tmp) mean(exSr[unique(tmp),j], na.rm=mean.na.rm))
return(apply(cbind(endVals, begVals), 1, mean, na.rm=mean.na.rm))
})
exSr<- as.data.frame(meanExSrVals)
if(all(colnames(counts(intObj)) %in% colnames(counts(exObj))))
colnames(exSr)<- paste(colnames(counts(intObj)),postExName,sep="_")
# Duplicate the sample annotation and tag rows intron- vs exon-derived.
annoMat<- rbind(colData(intObj), colData(intObj))
annoMat<- cbind(as.data.frame(annoMat),
c(rep("intron", nrow(colData(intObj))),
rep("exon", nrow(colData(intObj)))))
rownames(annoMat)<- c(colnames(scaledRetention(intObj)), colnames(exCnt))
colnames(annoMat)[ncol(annoMat)]<- "intronExon"
# NOTE(review): 'scaleFragmentscaleLength' / 'scaleFragmentscaleFragment'
# look like accidentally concatenated slot names -- verify against the
# metadata names actually stored on intObj.
resObj<- InterestResult(
resultFiles=annoMat[,1],
counts=cbind(counts(intObj), exCnt),
scaledRetention=cbind(scaledRetention(intObj), exSr),
scaleLength=intObj@metadata$scaleFragmentscaleLength,
scaleFragment=intObj@metadata$scaleFragmentscaleFragment,
sampleAnnotation=annoMat[,-1],
rowData=rowData(intObj))
return(resObj)
}
|
/R/interestResultIntEx.R
|
no_license
|
gacatag/IntEREst
|
R
| false
| false
| 2,728
|
r
|
## Merge an intron-level and an exon-level result object into one
## InterestResult: for every intron, average the counts / scaled retention of
## its flanking exons (an exon ending at intron start - 1 and an exon starting
## at intron end + 1) and append them as extra "exon" sample columns.
##
## intObj, exObj  intron- and exon-level objects supporting counts(),
##                scaledRetention(), rowData(), colData()
## intExCol       rowData column(s) flagging rows "intron"/"exon"; one shared
##                name, or c(intron_col, exon_col)
## mean.na.rm     drop NAs when averaging flanking-exon values
## postExName     suffix appended to the new exon sample column names
interestResultIntEx<- function(intObj, exObj, intExCol=c(),
mean.na.rm=TRUE, postExName="ex_junc" ){
# Keep only intron rows in intObj and exon rows in exObj.
if(length(intExCol)==1){
intObj<- intObj[which(rowData(intObj)[,intExCol]=="intron"), ]
exObj<- exObj[which(rowData(exObj)[,intExCol]=="exon"), ]
}
if(length(intExCol)==2){
intObj<- intObj[which(rowData(intObj)[,intExCol[1]]=="intron"), ]
exObj<- exObj[which(rowData(exObj)[,intExCol[2]]=="exon"), ]
}
# 1-bp anchors just after each exon end and just before each exon begin.
exEndGr<- GenomicRanges::GRanges(rowData(exObj)$chr,
IRanges::IRanges(rowData(exObj)$end+1,
rowData(exObj)$end+1))
exBegGr<- GenomicRanges::GRanges(rowData(exObj)$chr,
IRanges::IRanges(rowData(exObj)$begin-1,
rowData(exObj)$begin-1))
intGr<- GenomicRanges::GRanges(rowData(intObj)$chr,
IRanges::IRanges(rowData(intObj)$begin,
rowData(intObj)$end))
# Introns whose start abuts an exon end (upstream exon) and whose end abuts
# an exon begin (downstream exon).
intExEnd<-GenomicRanges::findOverlaps(intGr, exEndGr, type="start")
intExBeg<-GenomicRanges::findOverlaps(intGr, exBegGr, type="end")
exCnt<- counts(exObj)
# Per sample: mean count of each intron's upstream and downstream flanking
# exons, then the mean of the two; trunc() keeps integer-like counts.
meanExVals<- lapply(1:ncol(exCnt), function(j){
endVals<- tapply(subjectHits(intExEnd), queryHits(intExEnd),
function(tmp) mean(exCnt[unique(tmp),j], na.rm=mean.na.rm))
begVals<- tapply(subjectHits(intExBeg), queryHits(intExBeg),
function(tmp) mean(exCnt[unique(tmp),j], na.rm=mean.na.rm))
return(trunc(apply(cbind(endVals, begVals), 1, mean,
na.rm=mean.na.rm)))
})
exCnt<- as.data.frame(meanExVals)
# Suffix the appended exon columns so they stay distinguishable.
if(all(colnames(counts(intObj)) %in% colnames(counts(exObj))))
colnames(exCnt)<- paste(colnames(counts(intObj)),postExName,sep="_")
# Same flanking-exon averaging for scaled retention (no trunc()).
exSr<- scaledRetention(exObj)
meanExSrVals<- lapply(1:ncol(exSr), function(j){
endVals<- tapply(subjectHits(intExEnd), queryHits(intExEnd),
function(tmp) mean(exSr[unique(tmp),j], na.rm=mean.na.rm))
begVals<- tapply(subjectHits(intExBeg), queryHits(intExBeg),
function(tmp) mean(exSr[unique(tmp),j], na.rm=mean.na.rm))
return(apply(cbind(endVals, begVals), 1, mean, na.rm=mean.na.rm))
})
exSr<- as.data.frame(meanExSrVals)
if(all(colnames(counts(intObj)) %in% colnames(counts(exObj))))
colnames(exSr)<- paste(colnames(counts(intObj)),postExName,sep="_")
# Duplicate the sample annotation and tag rows intron- vs exon-derived.
annoMat<- rbind(colData(intObj), colData(intObj))
annoMat<- cbind(as.data.frame(annoMat),
c(rep("intron", nrow(colData(intObj))),
rep("exon", nrow(colData(intObj)))))
rownames(annoMat)<- c(colnames(scaledRetention(intObj)), colnames(exCnt))
colnames(annoMat)[ncol(annoMat)]<- "intronExon"
# NOTE(review): 'scaleFragmentscaleLength' / 'scaleFragmentscaleFragment'
# look like accidentally concatenated slot names -- verify against the
# metadata names actually stored on intObj.
resObj<- InterestResult(
resultFiles=annoMat[,1],
counts=cbind(counts(intObj), exCnt),
scaledRetention=cbind(scaledRetention(intObj), exSr),
scaleLength=intObj@metadata$scaleFragmentscaleLength,
scaleFragment=intObj@metadata$scaleFragmentscaleFragment,
sampleAnnotation=annoMat[,-1],
rowData=rowData(intObj))
return(resObj)
}
|
# functions used for the sisesat class but not exported (available to the user)
# Draw a base map of Peru with coastline, harbor labels and degree-labelled
# axes. Internal helper (not exported); relies on plot.map() from the 'kali'
# package and on the 'linePeru' / 'harborPeru' data sets being available.
#
# xlim, ylim        longitude / latitude limits of the map window
# labelsxy          add "Longitud"/"Latitud" axis titles?
# axis4L            also draw labelled axes on the top and right sides?
# perfil            if TRUE, suppress the bottom axis (a profile panel sits there)
# land.col, sea.col fill colours for land and sea
# cex.Port          character expansion for harbor labels
# add               add to an existing plot instead of opening a new one
# ...               reserved for forwarding (unused in the body)
.mapa.peru <- function(xlim = c(-84, -70), ylim = c(-21, -3), labelsxy = TRUE,
                       axis4L = FALSE, perfil = FALSE,
                       land.col = "khaki1", sea.col = "white",
                       cex.Port = 0.65, add = FALSE, ...) {
  # library() errors early when the dependency is missing; require() only warns.
  library(kali)
  axis.Lon <- paste(abs(seq(xlim[1], xlim[2], by = 2)), "°W")
  axis.Lat <- paste(abs(seq(ylim[1], ylim[2], by = 2)), "°S")
  Encoding(axis.Lon) <- "UTF-8"
  Encoding(axis.Lat) <- "UTF-8"
  # FALSE spelled out: T/F are ordinary variables in R and can be reassigned.
  plot.map(axes = FALSE, col = "red", cex = 1, xlim = xlim, hires = TRUE,
           ylab = NULL, xlab = NULL, xaxs = "i", yaxs = "i",
           ylim = ylim, land.col = land.col, sea.col = sea.col,
           boundaries.col = NA, grid.col = "blue", main = "",
           grid = FALSE, add = add)
  lines(linePeru$lon, linePeru$lat, col = "gray40")
  if (isTRUE(labelsxy)) {
    mtext("Longitud", side = 1, line = 1.5, cex = 0.8)
    mtext("Latitud", side = 2, line = 1.8, cex = 0.8)
  }
  # Subset of principal harbors to label (row positions in 'harborPeru').
  principalP <- harborPeru[c(2, 4, 5, 7, 8, 10, 12, 14, 16, 17, 19), ]
  text(principalP$lon, principalP$lat, labels = principalP$puertos,
       cex = cex.Port, pos = 4, font = 1)
  # Left axis always; bottom axis unless a coastal profile panel replaces it.
  axis(2, seq(ylim[1], ylim[2], by = 2), axis.Lat, las = 1, cex.axis = 0.6,
       hadj = 0.5, tck = -0.010)
  if (!isTRUE(perfil)) {
    axis(1, seq(xlim[1], xlim[2], by = 2), tck = -0.01, labels = NA, hadj = 0.5)
    axis(1, seq(xlim[1], xlim[2], by = 2), labels = axis.Lon, cex.axis = 0.6,
         line = -0.8, lwd = 0)
  }
  if (isTRUE(axis4L)) {
    axis(3, seq(xlim[1], xlim[2], by = 2), tck = -0.01, labels = NA, hadj = 0.5)
    axis(3, seq(xlim[1], xlim[2], by = 2), labels = axis.Lon, cex.axis = 0.6,
         line = -0.5, lwd = 0)
    axis(4, seq(ylim[1], ylim[2], by = 2), axis.Lat, las = 1, cex.axis = 0.6,
         hadj = 0.5, tck = -0.010)
  }
  invisible(NULL)
}
# Simpler variant of .mapa.peru that draws the Peru map with maps::map()
# ("worldHires" database from mapdata) instead of kali::plot.map().
# Arguments mirror .mapa.peru; 'col'/'border' style the land polygons.
.mapa.peru.simple <- function(xlim = c(-84, -70), ylim = c(-21, -3),
                              labelsxy = TRUE, axis4L = FALSE, perfil = FALSE,
                              col = "khaki1", border = "khaki1",
                              sea.col = "white", cex.Port = 0.65, add = FALSE) {
  # library() errors early when a dependency is missing; require() only warns.
  library(maps)
  library(mapdata)
  axis.Lon <- paste(abs(seq(xlim[1], xlim[2], by = 2)), "°W")
  axis.Lat <- paste(abs(seq(ylim[1], ylim[2], by = 2)), "°S")
  Encoding(axis.Lon) <- "UTF-8"
  Encoding(axis.Lat) <- "UTF-8"
  # TRUE spelled out: T/F are ordinary variables in R and can be reassigned.
  map("worldHires", fill = TRUE, myborder = FALSE, col = col, border = border,
      xlim = xlim, ylim = ylim, add = add)
  lines(linePeru$lon, linePeru$lat, col = "gray40")
  if (isTRUE(labelsxy)) {
    mtext("Longitud", side = 1, line = 1.5, cex = 0.8)
    mtext("Latitud", side = 2, line = 1.8, cex = 0.8)
  }
  # Subset of principal harbors to label (row positions in 'harborPeru').
  principalP <- harborPeru[c(2, 4, 5, 7, 8, 10, 12, 14, 16, 17, 19), ]
  text(principalP$lon, principalP$lat, labels = principalP$puertos,
       cex = cex.Port, pos = 4, font = 1)
  # Left axis always; bottom axis unless a profile panel replaces it.
  axis(2, seq(ylim[1], ylim[2], by = 2), axis.Lat, las = 1, cex.axis = 0.6,
       hadj = 0.5, tck = -0.010)
  if (!isTRUE(perfil)) {
    axis(1, seq(xlim[1], xlim[2], by = 2), tck = -0.01, labels = NA, hadj = 0.5)
    axis(1, seq(xlim[1], xlim[2], by = 2), labels = axis.Lon, cex.axis = 0.6,
         line = -0.8, lwd = 0)
  }
  if (isTRUE(axis4L)) {
    axis(3, seq(xlim[1], xlim[2], by = 2), tck = -0.01, labels = NA, hadj = 0.5)
    axis(3, seq(xlim[1], xlim[2], by = 2), labels = axis.Lon, cex.axis = 0.6,
         line = -0.5, lwd = 0)
    axis(4, seq(ylim[1], ylim[2], by = 2), axis.Lat, las = 1, cex.axis = 0.6,
         hadj = 0.5, tck = -0.010)
  }
  box()
  invisible(NULL)
}
.
|
/R/sisesat-internal.R
|
no_license
|
imarpe/imarpe
|
R
| false
| false
| 3,236
|
r
|
# functions used for the sisesat class but not exported (available to the user)
# Draw the base map of Peru (coastline, harbor labels, degree-labelled axes).
# Internal helper; relies on kali::plot.map() and the linePeru / harborPeru
# data sets being available.
.mapa.peru <- function(xlim=c(-84,-70), ylim=c(-21, -3), labelsxy = TRUE, axis4L = FALSE, perfil = FALSE,
land.col="khaki1", sea.col="white", cex.Port = 0.65, add = FALSE, ...){
require(kali)
# Degree labels for the tick marks ("84 °W" ... / "21 °S" ...).
axis.Lon <- paste(abs(seq(xlim[1],xlim[2],by = 2)),"°W")
axis.Lat <- paste(abs(seq(ylim[1],ylim[2],by = 2)),"°S")
Encoding(axis.Lon) <- "UTF-8"
Encoding(axis.Lat) <- "UTF-8"
# Base map without axes; axes are drawn manually below.
plot.map(axes = F,col="red", cex=1, xlim=xlim, hires = TRUE, ylab = NULL, xlab = NULL, xaxs = "i", yaxs = "i",
ylim=ylim, land.col=land.col, sea.col=sea.col,
boundaries.col = NA, grid.col = "blue",main="",
grid = FALSE, add = add)
lines(linePeru$lon, linePeru$lat, col="gray40")
if(isTRUE(labelsxy)){
mtext("Longitud", side=1, line=1.5, cex=0.8)
mtext("Latitud", side=2, line=1.8, cex=0.8)
}
# Principal harbors to label (row positions in harborPeru).
principalP = harborPeru[c(2,4,5,7,8,10,12,14,16,17,19),]
text(principalP$lon, principalP$lat, labels = principalP$puertos, cex=cex.Port, pos=4, font=1)
# axis: left always; bottom unless 'perfil'; top/right only when 'axis4L'
axis(2,seq(ylim[1],ylim[2],by = 2), axis.Lat, las=1, cex.axis=0.6, hadj=0.5, tck=-0.010)
if(!isTRUE(perfil)){
axis(1,seq(xlim[1],xlim[2],by = 2), tck=-0.01, labels = NA, hadj=0.5)
axis(1,seq(xlim[1],xlim[2],by = 2), labels = axis.Lon, cex.axis=0.6, line = -0.8, lwd = 0)
}
if(axis4L == TRUE){
axis(3,seq(xlim[1],xlim[2],by = 2), tck=-0.01, labels = NA, hadj=0.5)
axis(3,seq(xlim[1],xlim[2],by = 2),labels = axis.Lon, cex.axis=0.6, line = -0.5, lwd = 0)
axis(4,seq(ylim[1],ylim[2],by = 2), axis.Lat, las=1, cex.axis=0.6, hadj=0.5, tck=-0.010)
}
#return(invisible)
}
# Simpler variant of .mapa.peru drawn with maps::map() ("worldHires" database
# from mapdata) instead of kali::plot.map(). 'col'/'border' style the land.
.mapa.peru.simple <- function(xlim=c(-84,-70), ylim=c(-21, -3), labelsxy = TRUE, axis4L = FALSE, perfil = FALSE,
col="khaki1", border = "khaki1", sea.col="white", cex.Port = 0.65, add = FALSE){
require(maps)
require(mapdata)
# Degree labels for the tick marks.
axis.Lon <- paste(abs(seq(xlim[1],xlim[2],by = 2)),"°W")
axis.Lat <- paste(abs(seq(ylim[1],ylim[2],by = 2)),"°S")
Encoding(axis.Lon) <- "UTF-8"
Encoding(axis.Lat) <- "UTF-8"
map("worldHires",fill=T, myborder = FALSE, col = col, border = border,
xlim = xlim, ylim = ylim, add = add)
lines(linePeru$lon, linePeru$lat, col="gray40")
if(isTRUE(labelsxy)){
mtext("Longitud", side=1, line=1.5, cex=0.8)
mtext("Latitud", side=2, line=1.8, cex=0.8)
}
# Principal harbors to label (row positions in harborPeru).
principalP = harborPeru[c(2,4,5,7,8,10,12,14,16,17,19),]
text(principalP$lon, principalP$lat, labels = principalP$puertos, cex=cex.Port, pos=4, font=1)
# axis: left always; bottom unless 'perfil'; top/right only when 'axis4L'
axis(2,seq(ylim[1],ylim[2],by = 2), axis.Lat, las=1, cex.axis=0.6, hadj=0.5, tck=-0.010)
if(!isTRUE(perfil)){
axis(1,seq(xlim[1],xlim[2],by = 2), tck=-0.01, labels = NA, hadj=0.5)
axis(1,seq(xlim[1],xlim[2],by = 2), labels = axis.Lon, cex.axis=0.6, line = -0.8, lwd = 0)
}
if(axis4L == TRUE){
axis(3,seq(xlim[1],xlim[2],by = 2), tck=-0.01, labels = NA, hadj=0.5)
axis(3,seq(xlim[1],xlim[2],by = 2),labels = axis.Lon, cex.axis=0.6, line = -0.5, lwd = 0)
axis(4,seq(ylim[1],ylim[2],by = 2), axis.Lat, las=1, cex.axis=0.6, hadj=0.5, tck=-0.010)
}
box()
#return(invisible)
}
.
|
# Prepare the BPRS and RATS longitudinal data sets (MABS course examples) and
# convert both from wide to long format.
library(dplyr)
library(tidyr)

# BPRS: brief psychiatric rating scale for 40 males randomly assigned to two
# treatment groups; symptoms (e.g. hallucinations, hostility) measured weekly
# over eight weeks to assess schizophrenia.
BPRS <- read.csv("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/BPRS.txt", sep = " ", header = TRUE)
names(BPRS)
str(BPRS)
summary(BPRS)
dim(BPRS)

# RATS: three groups of rats on different diets for ~9 weeks, weighed roughly
# once a week, to compare the growth curves between diets.
RATS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/rats.txt", header = TRUE, sep = "\t")
names(RATS)
str(RATS)
summary(RATS)
dim(RATS)

# Convert the categorical identifiers to factors.
BPRS$treatment <- factor(BPRS$treatment)
BPRS$subject <- factor(BPRS$subject)
RATS$ID <- factor(RATS$ID)
RATS$Group <- factor(RATS$Group)

# Wide -> long: one row per subject per time point. gather() is superseded by
# pivot_longer() but is kept for exact behavioural parity.
BPRSL <- BPRS %>% gather(key = weeks, value = bprs, -treatment, -subject)
# Extract the numeric week from column names like "week0".."week8".
BPRSL <- BPRSL %>% mutate(week = as.integer(substr(weeks, 5, 5)))
RATSL <- RATS %>%
  gather(key = "WD", value = "Weight", -ID, -Group) %>%
  # Extract the measurement day from column names like "WD1".."WD64".
  mutate(Time = as.integer(substr(WD, 3, 4)))

# Glimpse the data
glimpse(RATSL)
glimpse(BPRSL)
str(BPRSL)
names(BPRSL)
summary(BPRSL)
str(RATSL)
names(RATSL)
summary(RATSL)
# In the wide form the 40 men and the 16 rats are the observations and the
# time points are the variables; in the long form each time point of every
# rat and every man is its own observation.
|
/meet_and_repeat.R
|
no_license
|
madmintt/IODS-project
|
R
| false
| false
| 1,640
|
r
|
# Preparing the data sets (MABS course examples).
BPRS <- read.csv("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/BPRS.txt", sep =" ", header = T)
names(BPRS)
str(BPRS)
summary(BPRS)
dim(BPRS)
RATS <- read.table("https://raw.githubusercontent.com/KimmoVehkalahti/MABS/master/Examples/data/rats.txt", header = TRUE, sep = '\t')
names(RATS)
str(RATS)
summary(RATS)
dim(RATS)
# BPRS: brief psychiatric rating scale for 40 males randomly assigned to two treatment groups; symptoms (e.g. hallucinations, hostility) measured weekly over eight weeks to assess schizophrenia.
# RATS: three groups of rats on different diets for ~9 weeks, weighed roughly once a week, to compare growth curves between diets.
library(dplyr)
library(tidyr)
# Factoring the categorical variables
BPRS$treatment <- factor(BPRS$treatment)
BPRS$subject <- factor(BPRS$subject)
RATS$ID <- factor(RATS$ID)
RATS$Group <- factor(RATS$Group)
# Wide -> long: one row per subject per time point; extract the numeric
# week/day from column names like "week0".."week8" and "WD1".."WD64".
BPRSL <- BPRS %>% gather(key = weeks, value = bprs, -treatment, -subject)
BPRSL <- BPRSL %>% mutate(week = as.integer(substr(weeks,5,5)))
RATSL <- RATS %>%
  gather(key = "WD", value = "Weight", -ID, -Group) %>%
  mutate(Time = as.integer(substr(WD,3,4)))
# Glimpse the data
glimpse(RATSL)
glimpse(BPRSL)
str(BPRSL)
names(BPRSL)
summary(BPRSL)
str(RATSL)
names(RATSL)
summary(RATSL)
# In the wide form of the data, the 40 men and the 16 rats are the observations and the timepoins are the variables but in the long form of data each timepoint of every rat and every man are the observations.
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/assertions.r
\name{has_attr}
\alias{\%has_attr\%}
\alias{\%has_name\%}
\alias{has_attr}
\alias{has_name}
\title{Has attribute or name?}
\usage{
has_attr(x, which)
x \%has_attr\% which
has_name(x, which)
x \%has_name\% which
}
\arguments{
\item{x}{object to test}
\item{which}{name or attribute}
}
\description{
Has attribute or name?
}
\examples{
has_attr(has_attr, "fail")
x <- 10
x \%has_attr\% "a"
y <- list(a = 1, b = 2)
see_if(y \%has_name\% "c")
}
|
/man/has_attr.Rd
|
no_license
|
elsander/assertthat
|
R
| false
| false
| 546
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/assertions.r
\name{has_attr}
\alias{\%has_attr\%}
\alias{\%has_name\%}
\alias{has_attr}
\alias{has_name}
\title{Has attribute or name?}
\usage{
has_attr(x, which)
x \%has_attr\% which
has_name(x, which)
x \%has_name\% which
}
\arguments{
\item{x}{object to test}
\item{which}{name or attribute}
}
\description{
Has attribute or name?
}
\examples{
has_attr(has_attr, "fail")
x <- 10
x \%has_attr\% "a"
y <- list(a = 1, b = 2)
see_if(y \%has_name\% "c")
}
|
# From https://gis.stackexchange.com/questions/140504/extracting-intersection-areas-in-r
# Intersect soil-type polygons with field polygons and compute the shared
# area per field and soil type.
# NOTE(review): rgdal/rgeos/maptools are retired from CRAN; consider sf/terra.
library(sp)
library(raster)
library(rgdal)
library(rgeos)
library(maptools)
# Example data from raster package (Luxembourg communes)
p1 <- shapefile(system.file("external/lux.shp", package = "raster"))
# Remove attribute data; keep only the geometries
p1 <- as(p1, "SpatialPolygons")
# Add in some fake soil type data (one letter per polygon)
soil <- SpatialPolygonsDataFrame(p1, data.frame(soil = LETTERS[1:12]), match.ID = FALSE)
# Field polygons: two rectangles built from extents
p2 <- union(as(extent(6, 6.4, 49.75, 50), "SpatialPolygons"),
            as(extent(5.8, 6.2, 49.5, 49.7), "SpatialPolygons"))
field <- SpatialPolygonsDataFrame(p2, data.frame(field = c("x", "y")), match.ID = FALSE)
projection(field) <- projection(soil)
# raster::intersect keeps the attributes of both input layers
pi <- intersect(soil, field)
plot(soil, axes = TRUE); plot(field, add = TRUE); plot(pi, add = TRUE, col = "red")
# Extract areas from polygon objects then attach as attribute
# (divides by 1e6, i.e. assumes area() returns m^2 here -- TODO confirm)
pi$area <- area(pi) / 1000000
# For each field, get area per soil type
aggregate(area ~ field + soil, data = pi, FUN = sum)
|
/polygon_overlay_tests.R
|
no_license
|
jejoenje/sp_poly_overlap
|
R
| false
| false
| 1,013
|
r
|
# From https://gis.stackexchange.com/questions/140504/extracting-intersection-areas-in-r
# Intersect soil-type polygons with field polygons and sum the shared area
# per field and soil type.
library(sp)
library(raster)
library(rgdal)
library(rgeos)
library(maptools)
# Example data from raster package (Luxembourg communes)
p1 <- shapefile(system.file("external/lux.shp", package="raster"))
# Remove attribute data; keep only the geometries
p1 <- as(p1, 'SpatialPolygons')
# Add in some fake soil type data (one letter per polygon)
soil <- SpatialPolygonsDataFrame(p1, data.frame(soil=LETTERS[1:12]), match.ID=F)
# Field polygons: two rectangles built from extents
p2 <- union(as(extent(6, 6.4, 49.75, 50), 'SpatialPolygons'),
as(extent(5.8, 6.2, 49.5, 49.7), 'SpatialPolygons'))
field <- SpatialPolygonsDataFrame(p2, data.frame(field=c('x','y')), match.ID=F)
projection(field) <- projection(soil)
# intersect from raster package keeps attributes of both layers
pi <- intersect(soil, field)
plot(soil, axes=T); plot(field, add=T); plot(pi, add=T, col='red')
# Extract areas from polygon objects then attach as attribute
# (divides by 1e6, i.e. assumes area() returns m^2 here -- TODO confirm)
pi$area <- area(pi) / 1000000
# For each field, get area per soil type
aggregate(area~field + soil, data=pi, FUN=sum)
|
# date: 2020-10-09
# author: suke
# task: ggplot2 basics -- common chart types
#####
## Getting started
# Install the plotting / data packages (only needed once).
install.packages(c("ggplot2", "gcookbook"))
install.packages("xlsx")
install.packages("gdata")    # package needed for .xls files
install.packages("foreign")  # package needed for importing SPSS data
# Load the packages.
library(ggplot2)
library(gcookbook)
library(xlsx)    # fixed typo: was library(xslx)
library(foreign)
ls("package:foreign")  # list all functions; foreign provides read.spss/octave/systat/xport/dta
# Loading data
data = read.csv("datafile.csv", header = FALSE, sep = "\t", stringsAsFactors = FALSE)  # fixed: 'seq' -> 'sep'
data = read.xlsx("datafile.xlsx", sheetIndex = 2)  # or: sheetName = "Revenues" (fixed invalid 'sheetIndex = 2/sheetName = ...')
data = read.xls("datafile.xls", sheet = 2)
?read.csv()  # open the help page for a function
# Rename the data columns.
names(data) = c("C1", "C2", "C3")  # fixed: 'name' -> 'names'
#####
## Scatter plots
# plot(x, y): independent variable first, then the dependent variable.
# Four equivalent ways to draw a scatter plot: base graphics first, then ggplot2.
plot(mtcars$wt, mtcars$mpg)   # base graphics
qplot(mtcars$wt, mtcars$mpg)  # ggplot2
qplot(wt, mpg, data = mtcars)
ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point()
## Line charts
# way 1: base graphics
plot(pressure$temperature, pressure$pressure, type = "l")
points(pressure$temperature, pressure$pressure)
lines(pressure$temperature, pressure$pressure/2, col = "red")
points(pressure$temperature, pressure$pressure/2, col = "red")
# points()/lines() draw on top of the current base plot instead of opening a
# new one; they cannot add to a ggplot object.
# way 2: ggplot2
qplot(pressure$temperature, pressure$pressure, geom = "line")
qplot(temperature, pressure, data = pressure, geom = "line")         # same as the previous line
ggplot(pressure, aes(x = temperature, y = pressure)) + geom_line()   # same as the previous line
qplot(pressure$temperature, pressure$pressure, geom = c("line", "point"))
ggplot(pressure, aes(x = temperature, y = pressure)) + geom_line() + geom_point()
## Bar charts
# way 1: base graphics
# the first argument sets the bar heights, names.arg sets the labels
barplot(BOD$demand, names.arg = BOD$Time)
table(mtcars$cyl)  # frequency counts
barplot(table(mtcars$cyl))
# way 2: ggplot2
qplot(BOD$Time, BOD$demand, geom = "bar", stat = "identity")
ggplot(BOD, aes(x = Time, y = demand)) + geom_bar(stat = "identity")          # continuous x
ggplot(BOD, aes(x = factor(Time), y = demand)) + geom_bar(stat = "identity")  # discrete x
qplot(mtcars$cyl)          # automatically tabulates frequencies
qplot(factor(mtcars$cyl))
## Histograms
# way 1: base graphics
hist(mtcars$mpg)
# way 2: ggplot2
qplot(mpg, data = mtcars, binwidth = 5)                      # bin width of 5
ggplot(mtcars, aes(x = mpg)) + geom_histogram(binwidth = 5)  # same as the previous line
## Box plots
# way 1: base graphics
plot(ToothGrowth$supp, ToothGrowth$len)  # factor x + numeric y draws a box plot automatically
boxplot(len ~ supp, data = ToothGrowth)  # same as the previous line
boxplot(len ~ supp + dose, data = ToothGrowth)
# way 2: ggplot2
qplot(ToothGrowth$supp, ToothGrowth$len, geom = "boxplot")
qplot(supp, len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x = supp, y = len)) + geom_boxplot()
qplot(interaction(ToothGrowth$supp, ToothGrowth$dose), ToothGrowth$len, geom = "boxplot")
qplot(interaction(supp, dose), len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x = interaction(supp, dose), y = len)) + geom_boxplot()
## Function curves
# way 1: base graphics
curve(x^3 - 5*x, from = -4, to = 4)
myfun = function(xvar) { 1/(1 + exp(-xvar + 10)) }
curve(myfun(x), from = 0, to = 20)
curve(1 - myfun(x), add = TRUE, col = "red")
# way 2: ggplot2
ggplot(data.frame(x = c(0, 20)), aes(x = x)) + stat_function(fun = myfun, geom = "line")
|
/201009-03.r
|
no_license
|
sukezuo/R_learning
|
R
| false
| false
| 3,544
|
r
|
# date: 2020-10-09
# author: suke
# task: ggplot2 basics -- common chart types
#####
## Getting started
# Install the plotting / data packages (only needed once).
install.packages(c("ggplot2","gcookbook"))
install.packages("xlsx")
install.packages("gdata") # package needed for .xls files
install.packages("foreign") # package needed for importing SPSS data
# Load the packages.
library(ggplot2)
library(gcookbook)
# NOTE(review): 'xslx' is a typo for 'xlsx'; this call fails as written.
library(xslx)
library(foreign)
ls("package:foreign") # list all functions; foreign provides read.spss/octave/systat/xport/dta
# Loading data
# NOTE(review): 'seq' should be 'sep' in the read.csv call below.
data = read.csv("datafile.csv",header = FALSE,seq ="\t",stringsAsFactors = FALSE)
# NOTE(review): 'sheetIndex = 2/sheetName = "Revenues"' is not valid R syntax;
# use one of the two arguments, e.g. sheetIndex = 2 OR sheetName = "Revenues".
data = read.xlsx("datafile.xlsx",sheetIndex = 2/sheetName = "Revenues")
data = read.xls("datafile.xls",sheet = 2)
?read.csv() # open the help page for a function
# Rename the data columns.
# NOTE(review): 'name' should be 'names'.
name(data) = c("C1","C2","C3")
#####
## Scatter plots
# plot(x, y): independent variable first, then the dependent variable.
# Four equivalent ways to draw a scatter plot: base graphics first, then ggplot2.
plot(mtcars$wt,mtcars$mpg) # base graphics
qplot(mtcars$wt,mtcars$mpg) # ggplot2
qplot(wt,mpg,data=mtcars)
ggplot(mtcars,aes(x=wt,y=mpg))+geom_point()
## Line charts
# way 1: base graphics
plot(pressure$temperature,pressure$pressure,type = "l")
points(pressure$temperature,pressure$pressure)
lines(pressure$temperature,pressure$pressure/2,col = "red")
points(pressure$temperature,pressure$pressure/2,col = "red")
# points()/lines() draw on top of the current base plot instead of opening a
# new one; they cannot add to a ggplot object.
# way 2: ggplot2
qplot(pressure$temperature,pressure$pressure , geom = "line")
qplot(temperature,pressure,data = pressure , geom = "line") # same as the previous line
ggplot(pressure,aes(x=temperature,y=pressure))+geom_line() # same as the previous line
qplot(pressure$temperature,pressure$pressure , geom = c("line","point"))
ggplot(pressure,aes(x=temperature,y=pressure))+geom_line()+geom_point()
## Bar charts
# way 1: base graphics
# the first argument sets the bar heights, names.arg sets the labels
barplot(BOD$demand,names.arg = BOD$Time)
table(mtcars$cyl) # frequency counts
barplot(table(mtcars$cyl))
# way 2: ggplot2
qplot(BOD$Time,BOD$demand,geom = "bar",stat = "identity")
ggplot(BOD,aes(x=Time,y=demand))+geom_bar(stat = "identity") # continuous x
ggplot(BOD,aes(x=factor(Time),y=demand))+geom_bar(stat = "identity") # discrete x
qplot(mtcars$cyl) # automatically tabulates frequencies
qplot(factor(mtcars$cyl))
## Histograms
# way 1: base graphics
hist(mtcars$mpg)
# way 2: ggplot2
qplot(mpg,data=mtcars,binwidth=5) # bin width of 5
ggplot(mtcars,aes(x=mpg))+geom_histogram(binwidth = 5) # same as the previous line
## Box plots
# way 1: base graphics
plot(ToothGrowth$supp,ToothGrowth$len) # factor x + numeric y draws a box plot automatically
boxplot(len~supp,data = ToothGrowth) # same as the previous line
boxplot(len~supp+dose,data = ToothGrowth)
# way 2: ggplot2
qplot(ToothGrowth$supp,ToothGrowth$len,geom = "boxplot")
qplot(supp,len,data = ToothGrowth,geom = "boxplot")
ggplot(ToothGrowth,aes(x=supp,y=len))+geom_boxplot()
qplot(interaction(ToothGrowth$supp,ToothGrowth$dose),ToothGrowth$len,geom = "boxplot")
qplot(interaction(supp,dose),len,data = ToothGrowth,geom = "boxplot")
ggplot(ToothGrowth,aes(x=interaction(supp,dose),y=len))+geom_boxplot()
## Function curves
# way 1: base graphics
curve(x^3-5*x,from=-4,to=4)
myfun=function(xvar){ 1/(1+exp(-xvar+10))}
curve(myfun(x),from=0,to=20)
curve(1-myfun(x),add=TRUE,col = "red")
# way 2: ggplot2
ggplot(data.frame(x=c(0,20)),aes(x=x))+stat_function(fun=myfun,geom="line")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.servicecatalog_operations.R
\name{delete_product}
\alias{delete_product}
\title{Deletes the specified product}
\usage{
delete_product(AcceptLanguage = NULL, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The product identifier.}
}
\description{
Deletes the specified product.
}
\details{
You cannot delete a product if it was shared with you or is associated with a portfolio.
}
\section{Accepted Parameters}{
\preformatted{delete_product(
AcceptLanguage = "string",
Id = "string"
)
}
}
|
/service/paws.servicecatalog/man/delete_product.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 711
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.servicecatalog_operations.R
\name{delete_product}
\alias{delete_product}
\title{Deletes the specified product}
\usage{
delete_product(AcceptLanguage = NULL, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The product identifier.}
}
\description{
Deletes the specified product.
}
\details{
You cannot delete a product if it was shared with you or is associated with a portfolio.
}
\section{Accepted Parameters}{
\preformatted{delete_product(
AcceptLanguage = "string",
Id = "string"
)
}
}
|
# j = "Phylum"
## Compare each taxon's abundance in every treatment group against a control
## group (CK) and draw a signed, stacked horizontal bar chart: a group's bar
## segment points right when its abundance exceeds the control and left
## otherwise; "*" marks entries flagged "enriched"/"depleted" in the
## per-group "...level" columns.
##
## ps     phyloseq object supplying the sample -> Group mapping
## result data frame: one row per taxon, per-group abundance columns plus
##        per-group significance columns whose names end in "level"
## CK     name of the control group
## j      taxonomic rank label (appears unused in the body -- TODO confirm)
## abun   minimum mean abundance for a taxon to be plotted
Plot.CompareWithCK <- function(
ps = psdata,
result = result,
CK = "OE",
j = "Genus",
abun = 0.001){
map = sample_data(ps)
# TF[i] = 0 only when every "...level" entry of row i is "nosig".
TF = c()
for (i in 1: nrow(result)) {
a <- result %>%
select(ends_with("level")) %>%
filter(row_number() == i) %>%
as.matrix() %>%
as.vector() %>%
unique()
a
if (length(a) ==1) {
if (a == "nosig") {
TF[i] = 0
}else {TF[i] = 1}
} else {
TF[i] = 1
}
}
result$TF = TF
# Mean abundance of each taxon across all groups.
result$mean <- result %>%
# filter(TF == 1) %>%
select(one_of(unique(map$Group))) %>%
rowMeans() %>%
as.vector()
# Keep significant taxa above the abundance threshold.
Sresult <- result %>%
filter(TF == 1) %>%
filter(mean > abun)
Sresult$ID = row.names(Sresult)
rank_names(ps)
# Names of the per-group significance columns, carried through the melt.
b <- colnames(
result %>%
select(ends_with("level"))
)
# Long format (one row per taxon x group), control rows dropped.
longda <- reshape2::melt(Sresult,
id.vars = c("ID",b),# id columns kept as-is (not melted),
measure.vars = c(as.character(unique(map$Group))),# columns to melt,
variable.name='treat',
value.name='abundance') %>%
filter(treat != CK)
# Pick each row's own group's significance label from its "...level" column.
level = c()
for (i in 1:nrow(longda)) {
level[i] <- longda[i,] %>%
select(contains(as.character(longda$treat[i]))) %>%
as.matrix() %>%
as.vector()
}
longda$level = level
# Control-group abundances, joined back to each taxon by ID.
ck <- reshape2::melt(Sresult,
id.vars = c("ID",b),# id columns kept as-is (not melted),
measure.vars = c(as.character(unique(map$Group))),# columns to melt,
variable.name='treat',
value.name='abundance') %>%
filter(treat == CK) %>%
select("ID","abundance")
colnames(ck)[2] = paste("CK",colnames(ck)[2],sep = "_")
# level2: TRUE when the group's abundance exceeds the control; abundances
# below control are negated so their bars point left of the zero line.
plotda <- longda %>% left_join(ck) %>%
mutate(level2 = abundance - CK_abundance,.keep = "all") %>% arrange(ID)
plotda$level2 <- plotda$level2 > 0
plotda$abundance[plotda$level2 == F] = -plotda$abundance[plotda$level2 == F]
# Cumulative stacking positions per taxon; label_y is each segment's midpoint.
Taxonomies_x = plyr::ddply(plotda,"ID", summarize,
label_sd = cumsum(abundance),
label_y = cumsum(abundance) - 0.5*abundance)
plotdata <- cbind(plotda,Taxonomies_x[,-1])
head(plotdata)
# NOTE(review): the [4:1] reversal hard-codes four treatment groups.
plotdata$treat = factor(plotdata$treat,levels = as.character(unique(plotdata$treat)[4:1]))
# "*" for significant entries, empty string otherwise.
c = c()
for (i in 1:nrow(plotdata)) {
if (plotdata$level[i] %in% c("enriched","depleted") ) {
c[i] = "*"
}
if (plotdata$level[i] == "nosig") {
c[i] = ""
}
}
plotdata$level3 = c
# Reverse taxon order so the first taxon ends up at the top of the y axis.
plotdata$ID = factor(plotdata$ID,levels = unique(plotdata$ID)[length( unique(plotdata$ID)):1])
p <- ggplot(plotdata) +
geom_bar(aes(y = ID,x = abundance,group = treat,fill = treat),stat = "identity",color = "black",size = 0.5) +
geom_vline(aes(xintercept=0), colour="black") +
geom_text(aes(y = ID,x = label_y,label = level3),color = "white") +
labs(title = "Control",y = "ASV of microbiome",
x = "Abundance") + theme_bw()# + scale_fill_manual(values = brewer.pal(9,"Set1"))
p
return(p)
}
## Agglomerate a phyloseq object at the given taxonomic rank: sum OTU counts
## over all taxa sharing the same value of 'ranks' and collapse the taxonomy
## table to one row per rank value. Relies on vegan_otu()/vegan_tax() helpers
## defined elsewhere in the project.
tax_glom_wt <- function(ps = ps,ranks = "Phylum") {
otu <- as.data.frame(t(vegan_otu(ps)))
tax <- as.data.frame(vegan_tax(ps))
# building group: treat missing/empty rank labels as "Unknown"
tax[[ranks]][is.na(tax[[ranks]])] = "Unknown"
tax[[ranks]][tax[[ranks]] == ""] = "Unknown"
# NOTE(review): 'split' and 'apply' shadow the base R functions of the same name.
split <- split(otu,tax[[ranks]])
#calculate sum by group (column sums within each rank value)
apply <- lapply(split,function(x)colSums(x[]))
# reassemble: one summed count row per rank value
otucon <- do.call(rbind,apply)
# Keep taxonomy columns up to (and including) the requested rank, one row per
# distinct rank value.
taxcon <- tax[1:match(ranks,colnames(tax))]
taxcon <- taxcon[!duplicated(tax[[ranks]]),]
# NOTE(review): NAs were already replaced with "Unknown" above, so this
# lowercase "unknown" replacement looks like dead code -- confirm.
taxcon[[ranks]][is.na(taxcon[[ranks]])] = "unknown"
row.names(taxcon) <- taxcon[[ranks]]
# Rebuild a phyloseq object from the collapsed tables.
pscon <- phyloseq(
otu_table( as.matrix(otucon),taxa_are_rows = TRUE),
tax_table(as.matrix(taxcon)),
sample_data(ps)
)
return(pscon)
}
|
/Plot.CompareWithCK.R
|
no_license
|
wyy-yiyang/Plot_ComparedWithCK
|
R
| false
| false
| 4,157
|
r
|
# j = "Phylum"
# Plot.CompareWithCK: diverging stacked barplot of per-treatment taxon
# abundances relative to a control group (CK), with "*" marking taxa flagged
# enriched/depleted by an upstream differential-abundance result table.
#
# Arguments:
#   ps     - phyloseq object; only sample_data(ps)$Group is used here.
#   result - data frame, one row per taxon: one abundance column per group
#            plus "*level" columns holding "enriched"/"depleted"/"nosig"
#            flags (presumably produced by a prior comparison step -- TODO
#            confirm against the caller).
#   CK     - name of the control group; its abundance is joined as baseline.
#   j      - taxonomic rank label; not referenced in the body (unused).
#   abun   - minimum mean abundance for a taxon to be plotted.
# Returns: a ggplot object.
Plot.CompareWithCK <- function(
ps = psdata,
result = result,
CK = "OE",
j = "Genus",
abun = 0.001){
map = sample_data(ps)
# TF flags each taxon: 1 if any "*level" column is not "nosig", else 0.
TF = c()
for (i in 1: nrow(result)) {
a <- result %>%
select(ends_with("level")) %>%
filter(row_number() == i) %>%
as.matrix() %>%
as.vector() %>%
unique()
a
if (length(a) ==1) {
if (a == "nosig") {
TF[i] = 0
}else {TF[i] = 1}
} else {
TF[i] = 1
}
}
result$TF = TF
# Mean abundance across all groups; feeds the abundance filter below.
result$mean <- result %>%
# filter(TF == 1) %>%
select(one_of(unique(map$Group))) %>%
rowMeans() %>%
as.vector()
# Keep only significant taxa above the abundance cutoff.
Sresult <- result %>%
filter(TF == 1) %>%
filter(mean > abun)
Sresult$ID = row.names(Sresult)
rank_names(ps)
# b: names of the "*level" flag columns, carried through the melt as id vars.
b <- colnames(
result %>%
select(ends_with("level"))
)
# Long format: one row per taxon x treatment, excluding the control group.
longda <- reshape2::melt(Sresult,
id.vars = c("ID",b),# id columns kept out of the melt
measure.vars = c(as.character(unique(map$Group))),# abundance columns to melt
variable.name='treat',
value.name='abundance') %>%
filter(treat != CK)
# For each long-format row, pull the enrichment flag whose column name
# contains that row's treatment name.
level = c()
for (i in 1:nrow(longda)) {
level[i] <- longda[i,] %>%
select(contains(as.character(longda$treat[i]))) %>%
as.matrix() %>%
as.vector()
}
longda$level = level
# Control-group abundances, joined back as the CK_abundance baseline.
ck <- reshape2::melt(Sresult,
id.vars = c("ID",b),# id columns kept out of the melt
measure.vars = c(as.character(unique(map$Group))),# abundance columns to melt
variable.name='treat',
value.name='abundance') %>%
filter(treat == CK) %>%
select("ID","abundance")
colnames(ck)[2] = paste("CK",colnames(ck)[2],sep = "_")
plotda <- longda %>% left_join(ck) %>%
mutate(level2 = abundance - CK_abundance,.keep = "all") %>% arrange(ID)
# level2 is TRUE when above the control baseline; below-baseline abundances
# are negated so the bars diverge around zero.
plotda$level2 <- plotda$level2 > 0
plotda$abundance[plotda$level2 == F] = -plotda$abundance[plotda$level2 == F]
# Per-taxon running sums give the y positions for the significance labels.
Taxonomies_x = plyr::ddply(plotda,"ID", summarize,
label_sd = cumsum(abundance),
label_y = cumsum(abundance) - 0.5*abundance)
plotdata <- cbind(plotda,Taxonomies_x[,-1])
head(plotdata)
# NOTE(review): [4:1] hard-codes exactly four treatment levels in reversed
# order -- confirm this holds for data sets with a different group count.
plotdata$treat = factor(plotdata$treat,levels = as.character(unique(plotdata$treat)[4:1]))
# level3: "*" for enriched/depleted taxa, empty string for "nosig".
# NOTE(review): the local `c` shadows base::c inside this loop (still works
# because R resolves functions separately, but it is fragile).
c = c()
for (i in 1:nrow(plotdata)) {
if (plotdata$level[i] %in% c("enriched","depleted") ) {
c[i] = "*"
}
if (plotdata$level[i] == "nosig") {
c[i] = ""
}
}
plotdata$level3 = c
plotdata$ID = factor(plotdata$ID,levels = unique(plotdata$ID)[length( unique(plotdata$ID)):1])
# Diverging bars per taxon, vertical zero line, white "*" labels centred on
# each bar segment.
p <- ggplot(plotdata) +
geom_bar(aes(y = ID,x = abundance,group = treat,fill = treat),stat = "identity",color = "black",size = 0.5) +
geom_vline(aes(xintercept=0), colour="black") +
geom_text(aes(y = ID,x = label_y,label = level3),color = "white") +
labs(title = "Control",y = "ASV of microbiome",
x = "Abundance") + theme_bw()# + scale_fill_manual(values = brewer.pal(9,"Set1"))
p
return(p)
}
# Aggregate ("glom") a phyloseq object at the taxonomic rank `ranks` by
# summing OTU counts within each taxon.  Uses the project helpers
# vegan_otu()/vegan_tax() and the phyloseq constructors.
#
#   ps:    phyloseq object to aggregate.
#   ranks: name of the taxonomy column (e.g. "Phylum") to group by.
# Returns: a new phyloseq object with one taxon per unique `ranks` value.
tax_glom_wt <- function(ps = ps, ranks = "Phylum") {
  otu_tab <- as.data.frame(t(vegan_otu(ps)))
  tax_tab <- as.data.frame(vegan_tax(ps))
  # Pool missing or empty rank labels under "Unknown".
  tax_tab[[ranks]][is.na(tax_tab[[ranks]])] <- "Unknown"
  tax_tab[[ranks]][tax_tab[[ranks]] == ""] <- "Unknown"
  # Sum per-sample abundances within each taxon group.
  grouped <- split(otu_tab, tax_tab[[ranks]])
  summed <- lapply(grouped, function(block) colSums(block[]))
  otu_agg <- do.call(rbind, summed)
  # Keep taxonomy columns up to and including the chosen rank, one row per
  # unique taxon.
  tax_agg <- tax_tab[1:match(ranks, colnames(tax_tab))]
  tax_agg <- tax_agg[!duplicated(tax_tab[[ranks]]), ]
  # Defensive: NAs were already replaced above, so this is a no-op as written.
  tax_agg[[ranks]][is.na(tax_agg[[ranks]])] <- "unknown"
  row.names(tax_agg) <- tax_agg[[ranks]]
  phyloseq(
    otu_table(as.matrix(otu_agg), taxa_are_rows = TRUE),
    tax_table(as.matrix(tax_agg)),
    sample_data(ps)
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sym_regression.R
\name{sym.predict.symbolic_nnet_cm}
\alias{sym.predict.symbolic_nnet_cm}
\title{Predict nnet_cm model}
\usage{
\method{sym.predict}{symbolic_nnet_cm}(model, new.sym.data, ...)
}
\arguments{
\item{model}{model}
\item{new.sym.data}{new data}
\item{...}{optional parameters}
}
\description{
Predict nnet_cm model
}
\references{
Lima-Neto, E.A., De Carvalho, F.A.T., (2008). Centre and range method for fitting a linear regression model to symbolic interval data. Computational Statistics and Data Analysis 52, 1500-1515
Lima-Neto, E.A., De Carvalho, F.A.T., (2010). Constrained linear regression models for symbolic interval-valued variables. Computational Statistics and Data Analysis 54, 333-347
Lima Neto, E.d.A., de Carvalho, F.d.A.T. Nonlinear regression applied to interval-valued data. Pattern Anal Applic 20, 809–824 (2017). https://doi.org/10.1007/s10044-016-0538-y
Rodriguez, O. (2018). Shrinkage linear regression for symbolic interval-valued variables. Journal MODULAD 2018, vol. Modulad 45, pp. 19-38
}
|
/man/sym.predict.symbolic_nnet_cm.Rd
|
no_license
|
cran/RSDA
|
R
| false
| true
| 1,115
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sym_regression.R
\name{sym.predict.symbolic_nnet_cm}
\alias{sym.predict.symbolic_nnet_cm}
\title{Predict nnet_cm model}
\usage{
\method{sym.predict}{symbolic_nnet_cm}(model, new.sym.data, ...)
}
\arguments{
\item{model}{model}
\item{new.sym.data}{new data}
\item{...}{optional parameters}
}
\description{
Predict nnet_cm model
}
\references{
Lima-Neto, E.A., De Carvalho, F.A.T., (2008). Centre and range method for fitting a linear regression model to symbolic interval data. Computational Statistics and Data Analysis 52, 1500-1515
Lima-Neto, E.A., De Carvalho, F.A.T., (2010). Constrained linear regression models for symbolic interval-valued variables. Computational Statistics and Data Analysis 54, 333-347
Lima Neto, E.d.A., de Carvalho, F.d.A.T. Nonlinear regression applied to interval-valued data. Pattern Anal Applic 20, 809–824 (2017). https://doi.org/10.1007/s10044-016-0538-y
Rodriguez, O. (2018). Shrinkage linear regression for symbolic interval-valued variables. Journal MODULAD 2018, vol. Modulad 45, pp. 19-38
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/OutcomeModels.R
\name{plotKaplanMeier}
\alias{plotKaplanMeier}
\title{Plot the Kaplan-Meier curve}
\usage{
plotKaplanMeier(outcomeModel, censorMarks = FALSE,
confidenceIntervals = TRUE, includeZero = TRUE, dataCutoff = 0.99,
treatmentLabel = "Treated", comparatorLabel = "Comparator",
fileName = NULL)
}
\arguments{
\item{outcomeModel}{An object of type \code{outcomeModel} as generated using the
\code{fitOutcomeModel} function.}
\item{censorMarks}{Whether or not to include censor marks in the plot.}
\item{confidenceIntervals}{Plot 95 percent confidence intervals?}
\item{includeZero}{Should the y axis include zero, or only go down to the lowest observed
survival?}
\item{dataCutoff}{Fraction of the data (number censored) after which the graph will not
be shown.}
\item{treatmentLabel}{A label to use for the treated cohort.}
\item{comparatorLabel}{A label to use for the comparator cohort.}
\item{fileName}{Name of the file where the plot should be saved, for example
'plot.png'. See the function \code{ggsave} in the ggplot2 package for
supported file formats.}
}
\value{
A ggplot object. Use the \code{\link[ggplot2]{ggsave}} function to save to file in a different
format.
}
\description{
\code{plotKaplanMeier} creates the Kaplan-Meier survival plot
}
\examples{
# todo
}
|
/man/plotKaplanMeier.Rd
|
permissive
|
tdbennett/CohortMethod
|
R
| false
| false
| 1,438
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/OutcomeModels.R
\name{plotKaplanMeier}
\alias{plotKaplanMeier}
\title{Plot the Kaplan-Meier curve}
\usage{
plotKaplanMeier(outcomeModel, censorMarks = FALSE,
confidenceIntervals = TRUE, includeZero = TRUE, dataCutoff = 0.99,
treatmentLabel = "Treated", comparatorLabel = "Comparator",
fileName = NULL)
}
\arguments{
\item{outcomeModel}{An object of type \code{outcomeModel} as generated using the
\code{fitOutcomeModel} function.}
\item{censorMarks}{Whether or not to include censor marks in the plot.}
\item{confidenceIntervals}{Plot 95 percent confidence intervals?}
\item{includeZero}{Should the y axis include zero, or only go down to the lowest observed
survival?}
\item{dataCutoff}{Fraction of the data (number censored) after which the graph will not
be shown.}
\item{treatmentLabel}{A label to use for the treated cohort.}
\item{comparatorLabel}{A label to use for the comparator cohort.}
\item{fileName}{Name of the file where the plot should be saved, for example
'plot.png'. See the function \code{ggsave} in the ggplot2 package for
supported file formats.}
}
\value{
A ggplot object. Use the \code{\link[ggplot2]{ggsave}} function to save to file in a different
format.
}
\description{
\code{plotKaplanMeier} creates the Kaplan-Meier survival plot
}
\examples{
# todo
}
|
# Clean Santa Clara University bike-theft reports and export aggregate tables
# for a D3 visualization.
#
# Side effects: sets the working directory, reads bikes/bike_theft_rev.csv,
# and writes bike_theft_loc_occurrences.tsv and bike2.tsv.
setwd('~/git/csci183/assignments/inclass_d3_assignment/')
bike_theft <- read.csv('bikes/bike_theft_rev.csv', header = TRUE)

library('stringr')

# Map free-text LOCATION strings to a canonical building name; rows matching
# no pattern stay "OTHER".  Patterns are applied in order, so later matches
# (e.g. "Engineering" -> "Bannan") overwrite earlier ones.
bike_theft$loc <- "OTHER"
building_patterns <- c(
  # residence halls
  Swig = "Swig", Dunne = "Dunne", McLaughlin = "McLaughlin", Walsh = "Walsh",
  Casa = "Casa", Sobrato = "Sobrato", Campisi = "Campisi",
  Sanfilippo = "Sanfilippo", Nobili = "Nobili", Graham = "Graham",
  # non-resident halls
  Benson = "Benson", Bannan = "Bannan", Daly = "Daly", Kenna = "Kenna",
  "O'Connor" = "O'Connor", Malley = "Malley", Villa = "Villas",
  Bellarmine = "Bellarmine", Engineering = "Bannan"
)
for (pattern in names(building_patterns)) {
  hit <- str_detect(bike_theft$LOCATION, pattern)
  bike_theft$loc[hit] <- building_patterns[[pattern]]
}
loc <- table(bike_theft$loc)

library('zoo')
library('lubridate')

# Parse mm/dd/yyyy dates, then derive day-of-week and "YYYY-MM" buckets.
# (The original pre-initialized DOW to "N/A" and immediately overwrote it;
# that dead assignment is dropped.)
bike_theft$date <- mdy(bike_theft$DATE)
bike_theft$DOW <- wday(bike_theft$date)
bike_theft$yearmon <- strftime(bike_theft$date, '%Y-%m')

locations <- c("Swig", "Dunne", "McLaughlin", "Walsh", "Casa", "Sobrato", "Campisi",
               "Sanfilippo", "Nobili", "Graham", "Benson", "Bannan", "Daly", "Kenna",
               "O'Connor", "Malley", "Villas", "Bellarmine")

# Day-of-week theft counts, printed in decreasing order.
WorstDayOfWeek <- table(bike_theft$DOW)
sort(WorstDayOfWeek, decreasing = TRUE)

# Number of bike thefts per location per month.
allYearMon <- unique(bike_theft$yearmon)
df <- as.data.frame.matrix(table(bike_theft$loc, bike_theft$yearmon))

library('plyr')
# NOTE(review): the original called plyr::rename(df, c("row.names" = "name"))
# without assigning the result, and "row.names" is not a column of df, so the
# call was a no-op either way.  If a "name" column holding the row names was
# intended, use: df <- cbind(name = rownames(df), df)
write.table(df, file="bike_theft_loc_occurrences.tsv", sep='\t')

### PART 2 ###
# Guard the install so the script does not reinstall on every run.
if (!requireNamespace("choroplethr", quietly = TRUE)) {
  install.packages('choroplethr')
}
library(choroplethr)
library(acs)
# WARNING(review): hard-coded Census API key committed to source control --
# rotate the key and read it from an environment variable instead.
api.key.install('4f32526cc94e8dd53cf2f751eff6967a1069878a')
# The original call ended with a trailing comma ("column_idx = 41,"), which is
# a parse error in R; removed.
acs_data <- get_acs_data("B08101", "county", column_idx = 41)
acs_employment <- get_acs_data('B08101', 'county', column_idx = 1)
# Normalize county values to [0, 1] for the choropleth.
bikes2 <- data.frame(id = acs_data$df$region,
                     rate = acs_data$df$value / max(acs_data$df$value))
write.table(bikes2, file="bike2.tsv", sep='\t', row.names = FALSE)
|
/assignments/inclass_d3_assignment/bikes/bikes.R
|
no_license
|
ryanrishi/csci183
|
R
| false
| false
| 3,153
|
r
|
# Clean Santa Clara University bike-theft reports and export aggregate tables
# for a D3 visualization.  Side effects: sets the working directory, reads
# bikes/bike_theft_rev.csv, and writes two .tsv files.
setwd('~/git/csci183/assignments/inclass_d3_assignment/')
bike_theft <- read.csv('bikes/bike_theft_rev.csv', header = TRUE)
library('stringr')
# Map free-text LOCATION strings to a canonical building name; rows matching
# no pattern stay "OTHER".  Later matches overwrite earlier ones (note
# "Engineering" remaps to "Bannan" at the end).
bike_theft$loc <- "OTHER"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Swig")] <- "Swig"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Dunne")] <- "Dunne"
bike_theft$loc[str_detect(bike_theft$LOCATION, "McLaughlin")] <- "McLaughlin"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Walsh")] <- "Walsh"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Casa")] <- "Casa"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Sobrato")] <- "Sobrato"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Campisi")] <- "Campisi"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Sanfilippo")] <- "Sanfilippo"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Nobili")] <- "Nobili"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Graham")] <- "Graham"
# non-resident halls
bike_theft$loc[str_detect(bike_theft$LOCATION, "Benson")] <- "Benson"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Bannan")] <- "Bannan"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Daly")] <- "Daly"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Kenna")] <- "Kenna"
bike_theft$loc[str_detect(bike_theft$LOCATION, "O'Connor")] <- "O'Connor"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Malley")] <- "Malley"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Villa")] <- "Villas"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Bellarmine")] <- "Bellarmine"
bike_theft$loc[str_detect(bike_theft$LOCATION, "Engineering")] <- "Bannan"
loc <- table(bike_theft$loc)
library('zoo')
library('lubridate')
# Parse mm/dd/yyyy dates, then derive day-of-week and "YYYY-MM" buckets.
# NOTE(review): the "N/A" pre-initialization of DOW is immediately overwritten
# two lines below (dead assignment).
bike_theft$DOW <- "N/A"
bike_theft$date <- mdy(bike_theft$DATE)
bike_theft$DOW <- wday(bike_theft$date)
bike_theft$yearmon <- strftime(bike_theft$date, '%Y-%m')
locations <- c("Swig", "Dunne", "McLaughlin", "Walsh", "Casa", "Sobrato", "Campisi",
               "Sanfilippo", "Nobili", "Graham", "Benson", "Bannan", "Daly", "Kenna",
               "O'Connor", "Malley", "Villas", "Bellarmine")
#bike_theft$DATE <- as.Date(bike_theft$DATE, "%Y-%m-%d")
#bike_theft$DayOfWeek <- weekdays(as.Date(bike_theft$DATE))
#bike_theft$time <- as.Date(bike_theft$TIME..Hours., )
# Day-of-week theft counts, printed in decreasing order.
WorstDayOfWeek <- table(bike_theft$DOW)
sort(WorstDayOfWeek, decreasing = TRUE)
# calculate number of bike thefts per location per month
allYearMon <- unique(bike_theft$yearmon)
# remove NAs
# allYearMon <- apply(allYearMon, 1, function(x) unique(x[!is.na(x)]))
# allYearMon <- allYearMon[!is.na(allYearMon)]
#df <- data.frame("Location" = locations)
df <- as.data.frame.matrix(table(bike_theft$loc, bike_theft$yearmon))
library('plyr')
# NOTE(review): this rename() result is discarded and "row.names" is not a
# column of df, so the call is a no-op; assign the result (or use
# cbind(name = rownames(df), df)) if a "name" column was intended.
rename(df, c("row.names" = "name"))
write.table(df, file="bike_theft_loc_occurrences.tsv", sep='\t')
### PART 2 ###
# NOTE(review): unguarded install.packages() reinstalls on every run.
install.packages('choroplethr')
library(choroplethr)
library(acs)
# WARNING(review): hard-coded Census API key committed to source control --
# rotate the key and load it from an environment variable instead.
api.key.install('4f32526cc94e8dd53cf2f751eff6967a1069878a')
# NOTE(review): the trailing comma in "column_idx = 41," is a parse error in
# R ("argument is empty") -- remove it.
acs_data <- get_acs_data("B08101", "county",column_idx = 41,)
acs_employment <- get_acs_data('B08101', 'county', column_idx = 1)
# Normalize county values to [0, 1] for the choropleth.
bikes2 <- data.frame(id = acs_data$df$region, rate= acs_data$df$value / max(acs_data$df$value))
write.table(bikes2, file="bike2.tsv", sep='\t', row.names = F)
|
`ensemble.raster` <- function(
xn=NULL, ext=NULL,
models.list=NULL,
input.weights=models.list$output.weights,
thresholds=models.list$thresholds,
RASTER.species.name="Species001", RASTER.stack.name=xn@title,
RASTER.format="raster", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
RASTER.models.overwrite=TRUE,
KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
evaluate=FALSE, SINK=FALSE,
p=models.list$p, a=models.list$a,
pt=models.list$pt, at=models.list$at
)
{
.BiodiversityR <- new.env()
if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {stop("value for parameter xn is missing (RasterStack object)")}
if (is.null(models.list) == T) {stop("provide 'models.list' as models will not be recalibrated and retested")}
if (is.null(input.weights) == T) {input.weights <- models.list$output.weights}
if (is.null(thresholds) == T) {stop("provide 'thresholds' as models will not be recalibrated and retested")}
retest <- F
if (evaluate == T) {
if (is.null(p)==T || is.null(a)==T) {
cat(paste("\n", "NOTE: not possible to evaluate the models since locations p and a are not provided", "\n", sep = ""))
evaluate <- F
}
if (is.null(pt)==F && is.null(at)==F) {
if(identical(pt, p) == F || identical(at, a) == F) {retest <- T}
}
}
if (is.null(ext) == F) {
if(length(xn@title) == 0) {xn@title <- "stack1"}
title.old <- xn@title
xn <- raster::crop(xn, y=ext, snap="in")
xn@title <- title.old
}
# create output file
if (RASTER.species.name == "Species001") {
RASTER.species.name <- models.list$species.name
}
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", RASTER.species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.raster function)", "\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
# check if all variables are present
vars <- models.list$vars
vars.xn <- names(xn)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.xn==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack xn \n", sep = "")}
}
nv <- length(vars.xn)
for (i in 1:nv) {
if (any(vars==vars.xn[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.xn[i], "' was not calibrated as explanatory variable", "\n", sep = ""))
xn <- raster::dropLayer(xn, which(names(xn) %in% c(vars.xn[i]) ))
}
}
#
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
if(raster::projection(xn)=="NA") {
raster::projection(xn) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
}
# declare categorical layers for xn
factors <- models.list$factors
categories <- NULL
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
j <- which(names(xn) == factors[i])
xn[[j]] <- raster::as.factor(xn[[j]])
}
categories <- models.list$categories
}
dummy.vars <- models.list$dummy.vars
#
KML.blur <- trunc(KML.blur)
if (KML.blur < 1) {KML.blur <- 1}
if (is.null(input.weights)==F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL<- max(c(input.weights["MAHAL"], -1), na.rm=T)
}
MAXENT.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- GEODIST.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$GEODIST) == F) {GEODIST.OLD <- models.list$GEODIST}
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
}
#
if (MAXENT > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (GBM > 0) {
if (! require(gbm)) {stop("Please install the gbm package")}
}
if (GBMSTEP > 0) {
if (! require(gbm)) {stop("Please install the gbm package")}
}
if (RF > 0) {
if (! require(randomForest)) {stop("Please install the randomForest package")}
}
if (GLMSTEP > 0) {
if (! require(MASS)) {stop("Please install the MASS package")}
}
if (GAM > 0 || GAMSTEP > 0) {
cat(paste("\n"))
try(detach(package:mgcv), silent=T)
suppressMessages(require(gam))
if (! require(gam)) {stop("Please install the gam package")}
}
if (MGCV > 0 || MGCVFIX > 0) {
cat(paste("\n"))
try(detach(package:gam), silent=T)
cat(paste("\n"))
options(warn=-1)
if (! require(mgcv)) {stop("Please install the mgcv package")}
# get the probabilities from MGCV
predict.mgcv <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
options(warn=0)
}
if (EARTH > 0) {
if (! require(earth)) {stop("Please install the earth package")}
# get the probabilities from earth
predict.earth2 <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (RPART > 0) {
if (! require(rpart)) {stop("Please install the rpart package")}
}
if (NNET > 0) {
if (! require(nnet)) {stop("Please install the nnet package")}
# get the probabilities from nnet
predict.nnet2 <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (FDA > 0) {
if (! require(mda)) {stop("Please install the mda package")}
}
if (SVM > 0) {
if (! require(kernlab)) {stop("Please install the kernlab package")}
}
if (SVME > 0) {
if (! require(e1071)) {stop("Please install the e1071 package")}
# get the probabilities from svm
predict.svme <- function(model, newdata, probability=T) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
if (MAHAL > 0) {
MAHAL.shape <- models.list$formulae$MAHAL.shape
# get the probabilities from mahal
predict.mahal <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
#
ws <- input.weights
prediction.failures <- FALSE
#
# prepare for raster output
dir.create("models", showWarnings = F)
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/count", showWarnings = F)
dir.create("ensembles/presence", showWarnings = F)
stack.title <- RASTER.stack.name
# stack.title <- xn@title
if(KML.out == T) {
dir.create("kml", showWarnings = F)
dir.create("kml/count", showWarnings = F)
dir.create("kml/presence", showWarnings = F)
}
rasterfull <- paste("ensembles/", RASTER.species.name, "_", stack.title , sep="")
kmlfull <- paste("kml/", RASTER.species.name, "_", stack.title , sep="")
raster.title <- paste(RASTER.species.name, "_", stack.title , sep="")
rastercount <- paste("ensembles/count/", RASTER.species.name, "_", stack.title , sep="")
kmlcount <- paste("kml/count/", RASTER.species.name, "_", stack.title , sep="")
rasterpresence <- paste("ensembles/presence/", RASTER.species.name, "_", stack.title, sep="")
kmlpresence <- paste("kml/presence/", RASTER.species.name, "_", stack.title, sep="")
RASTER.species.orig <- RASTER.species.name
if (RASTER.models.overwrite==T) {
RASTER.species.name <- "working"
}else{
RASTER.species.name <- paste(RASTER.species.name, "_", stack.title, sep="")
}
#
#
cat(paste("\n", "Start of modelling for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions for RasterStack: ", stack.title, "\n", sep = ""))
ensemble.statistics <- NULL
cat(paste("ensemble raster layers will be saved in folder ", getwd(), "/ensembles", "\n\n", sep = ""))
statistics.names <- c("n.models", "ensemble.threshold", "ensemble.min", "ensemble.max", "count.min", "count.max")
ensemble.statistics <- numeric(6)
names(ensemble.statistics) <- statistics.names
#
# sometimes still error warnings for minimum and maximum values of the layers
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
#
# since raster layers are scaled 0 - 1000, multiply the thresholds by 1000
thresholds <- trunc(1000*thresholds)
#
# count models
mc <- 0
#
# start raster layer creations
if (ws["MAXENT"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: dismo)\n", sep=""))
# Put the file 'maxent.jar' in the 'java' folder of dismo
# the file 'maxent.jar' can be obtained from from http://www.cs.princeton.edu/~schapire/maxent/.
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
results <- MAXENT.OLD
pmaxent <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXENT", sep="")
tryCatch(pmaxent <- raster::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("MAXENT prediction failed"))},
silent=F)
if (is.null(pmaxent) == F) {
results2 <- MAXENT.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmaxent, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MAXENT"
pmaxent <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmaxent <- trunc(1000*pmaxent)
raster::writeRaster(x=pmaxent, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, p)/1000
abs1 <- raster::extract(pmaxent, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, pt)/1000
abs1 <- raster::extract(pmaxent, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXENT prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MAXENT"] <- -1
}
}
if (ws["GBM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized boosted regression modeling (package: gbm) \n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBM.OLD
pgbm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBM", sep="")
tryCatch(pgbm <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
n.trees=results$n.trees, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("GBM prediction failed"))},
silent=F)
if (is.null(pgbm) == F) {
results2 <- GBM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "GBM"
pgbm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pgbm <- trunc(1000*pgbm)
raster::writeRaster(x=pgbm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, p)/1000
abs1 <- raster::extract(pgbm, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, pt)/1000
abs1 <- raster::extract(pgbm, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["GBM"] <- -1
}
}
if (ws["GBMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". gbm step algorithm (package: dismo)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBMSTEP.OLD
pgbms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBMSTEP", sep="")
tryCatch(pgbms <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
n.trees=results$n.trees, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("stepwise GBM prediction failed"))},
silent=F)
if (is.null(pgbms) == F) {
results2 <- GBMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbms, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "GBMSTEP"
pgbms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pgbms <- trunc(1000*pgbms)
raster::writeRaster(x=pgbms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, p)/1000
abs1 <- raster::extract(pgbms, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, pt)/1000
abs1 <- raster::extract(pgbms, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["GBMSTEP"] <- -1
}
}
        # RF: predict suitability from the previously calibrated random forest model
        # over raster stack 'xn', optionally probit-transform, rescale to integer
        # 0-1000, write to models/ and evaluate; prf is used later by the ensemble step.
        if (ws["RF"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Random forest algorithm (package: randomForest)\n", sep=""))
            results <- RF.OLD
            # prf stays NULL when prediction fails (the error handler only prints)
            prf <- NULL
            fullname <- paste("models/", RASTER.species.name, "_RF", sep="")
            tryCatch(prf <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("random forest prediction failed"))},
                silent=F)
            if (is.null(prf) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- RF.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=prf, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "RF"
                    prf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                prf <- trunc(1000*prf)
                raster::writeRaster(x=prf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(prf, p)/1000
                    abs1 <- raster::extract(prf, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(prf, pt)/1000
                    abs1 <- raster::extract(prf, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["RF"] <- -1
            }
        }
        # GLM: predict suitability from the previously calibrated generalized linear
        # model, optionally probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["GLM"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Generalized Linear Model \n", sep=""))
            results <- GLM.OLD
            # pglm stays NULL when prediction fails (the error handler only prints)
            pglm <- NULL
            fullname <- paste("models/", RASTER.species.name, "_GLM", sep="")
            tryCatch(pglm <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("GLM prediction failed"))},
                silent=F)
            if (is.null(pglm) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- GLM.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pglm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "GLM"
                    pglm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                pglm <- trunc(1000*pglm)
                raster::writeRaster(x=pglm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pglm, p)/1000
                    abs1 <- raster::extract(pglm, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pglm, pt)/1000
                    abs1 <- raster::extract(pglm, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: GLM prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["GLM"] <- -1
            }
        }
        # GLMSTEP: predict suitability from the calibrated stepwise GLM, optionally
        # probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["GLMSTEP"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Stepwise Generalized Linear Model \n", sep=""))
            results <- GLMSTEP.OLD
            # pglms stays NULL when prediction fails (the error handler only prints)
            pglms <- NULL
            fullname <- paste("models/", RASTER.species.name, "_GLMSTEP", sep="")
            tryCatch(pglms <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("stepwise GLM prediction failed"))},
                silent=F)
            if (is.null(pglms) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- GLMSTEP.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pglms, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "GLMSTEP"
                    pglms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                pglms <- trunc(1000*pglms)
                raster::writeRaster(x=pglms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pglms, p)/1000
                    abs1 <- raster::extract(pglms, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pglms, pt)/1000
                    abs1 <- raster::extract(pglms, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: stepwise GLM prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["GLMSTEP"] <- -1
            }
        }
        # The 'gam' and 'mgcv' packages both define gam(); detach mgcv first so that
        # the gam-package methods are dispatched for the GAM/GAMSTEP predictions.
        # NOTE(review): the second require() call is redundant after the first.
        if (ws["GAM"] > 0 || ws["GAMSTEP"] > 0) {
            cat(paste("\n"))
            try(detach(package:mgcv), silent=T)
            suppressMessages(require(gam))
            require(gam, quietly=T)
        }
        # GAM: predict suitability from the calibrated generalized additive model
        # (package gam), optionally probit-transform, rescale to 0-1000 and evaluate.
        if (ws["GAM"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Generalized Additive Model (package: gam)\n", sep=""))
            results <- GAM.OLD
            # pgam stays NULL when prediction fails (the error handler only prints)
            pgam <- NULL
            fullname <- paste("models/", RASTER.species.name, "_GAM", sep="")
            tryCatch(pgam <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("GAM prediction (gam package) failed"))},
                silent=F)
            if (is.null(pgam) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- GAM.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pgam, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "GAM"
                    pgam <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                pgam <- trunc(1000*pgam)
                raster::writeRaster(x=pgam, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pgam, p)/1000
                    abs1 <- raster::extract(pgam, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pgam, pt)/1000
                    abs1 <- raster::extract(pgam, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: GAM prediction (gam package) failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["GAM"] <- -1
            }
        }
        # GAMSTEP: predict suitability from the calibrated stepwise GAM (package gam),
        # optionally probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["GAMSTEP"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Stepwise Generalized Additive Model (package: gam)\n", sep=""))
            results <- GAMSTEP.OLD
            # pgams stays NULL when prediction fails (the error handler only prints)
            pgams <- NULL
            fullname <- paste("models/", RASTER.species.name, "_GAMSTEP", sep="")
            tryCatch(pgams <- raster::predict(object=xn, model=results, type="response", na.rm=TRUE, factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("stepwise GAM prediction (gam package) failed"))},
                silent=F)
            if (is.null(pgams) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- GAMSTEP.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pgams, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "GAMSTEP"
                    pgams <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                pgams <- trunc(1000*pgams)
                raster::writeRaster(x=pgams, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pgams, p)/1000
                    abs1 <- raster::extract(pgams, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pgams, pt)/1000
                    abs1 <- raster::extract(pgams, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: stepwise GAM prediction (gam package) failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["GAMSTEP"] <- -1
            }
        }
        # Switch to the 'mgcv' implementation of gam(): detach the conflicting 'gam'
        # package first, then load mgcv with warnings temporarily silenced.
        # NOTE(review): warn is restored to 0, not to its previous value — TODO confirm acceptable.
        if (ws["MGCV"] > 0 || ws["MGCVFIX"] > 0) {
            cat(paste("\n"))
            try(detach(package:gam), silent=T)
            options(warn=-1)
            require(mgcv, quietly=T)
            options(warn=0)
        }
if (ws["MGCV"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCV.OLD
pmgcv <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCV", sep="")
tryCatch(pmgcv <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("GAM prediction (mgcv package) failed"))},
silent=F)
if (is.null(pmgcv) == F) {
results2 <- MGCV.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcv, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MGCV"
pmgcv <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmgcv <- trunc(1000*pmgcv)
raster::writeRaster(x=pmgcv, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, p)/1000
abs1 <- raster::extract(pmgcv, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, pt)/1000
abs1 <- raster::extract(pmgcv, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MGCV"] <- -1
}
}
if (ws["MGCVFIX"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GAM with fixed d.f. regression splines (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCVFIX.OLD
pmgcvf <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCVFIX", sep="")
tryCatch(pmgcvf <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("MGCVFIX prediction (mgcv package) failed"))},
silent=F)
if (is.null(pmgcvf) == F) {
results2 <- MGCVFIX.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcvf, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MGCVFIX"
pmgcvf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmgcvf <- trunc(1000*pmgcvf)
raster::writeRaster(x=pmgcvf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, p)/1000
abs1 <- raster::extract(pmgcvf, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, pt)/1000
abs1 <- raster::extract(pmgcvf, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MGCVFIX"] <- -1
}
}
        # EARTH: predict suitability from the calibrated MARS model (package earth)
        # via the wrapper predict.earth2 (defined elsewhere in this file), optionally
        # probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["EARTH"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Multivariate Adaptive Regression Splines (package: earth)\n", sep=""))
            if (!is.null(factors)) {
                cat(paste("\n", "NOTE: MARS (earth package) with factors may require explicit dummy variables", "\n", sep=""))
            }
            results <- EARTH.OLD
            # pearth stays NULL when prediction fails (the error handler only prints)
            pearth <- NULL
            fullname <- paste("models/", RASTER.species.name, "_EARTH", sep="")
            tryCatch(pearth <- raster::predict(object=xn, model=results, fun=predict.earth2, na.rm=TRUE, type="response", factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("MARS prediction (earth package) failed"))},
                silent=F)
            if (is.null(pearth) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- EARTH.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pearth, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "EARTH"
                    pearth <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                pearth <- trunc(1000*pearth)
                raster::writeRaster(x=pearth, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pearth, p)/1000
                    abs1 <- raster::extract(pearth, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pearth, pt)/1000
                    abs1 <- raster::extract(pearth, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: MARS prediction (earth package) failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["EARTH"] <- -1
            }
        }
if (ws["RPART"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Recursive Partitioning And Regression Trees (package: rpart)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- RPART.OLD
prpart <- NULL
fullname <- paste("models/", RASTER.species.name, "_RPART", sep="")
tryCatch(prpart <- raster::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("RPART prediction failed"))},
silent=F)
if (is.null(prpart) == F) {
results2 <- RPART.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=prpart, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "RPART"
prpart <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
prpart <- trunc(1000*prpart)
raster::writeRaster(x=prpart, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, p)/1000
abs1 <- raster::extract(prpart, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, pt)/1000
abs1 <- raster::extract(prpart, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: RPART prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["RPART"] <- -1
}
}
if (ws["NNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Artificial Neural Network (package: nnet)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- NNET.OLD
pnnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_NNET", sep="")
tryCatch(pnnet <- raster::predict(object=xn, model=results, fun=predict.nnet2, na.rm=TRUE, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("ANN prediction (nnet package) failed"))},
silent=F)
if (is.null(pnnet) == F) {
results2 <- NNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pnnet, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "NNET"
pnnet <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pnnet <- trunc(1000*pnnet)
raster::writeRaster(x=pnnet, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, p)/1000
abs1 <- raster::extract(pnnet, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, pt)/1000
abs1 <- raster::extract(pnnet, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: ANN prediction (nnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["NNET"] <- -1
}
}
if (ws["FDA"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Flexible Discriminant Analysis (package: mda)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- FDA.OLD
pfda <- NULL
fullname <- paste("models/", RASTER.species.name, "_FDA", sep="")
tryCatch(pfda <- raster::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("FDA prediction failed"))},
silent=F)
if (is.null(pfda) == F) {
results2 <- FDA.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pfda, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "FDA"
pfda <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pfda <- trunc(1000*pfda)
raster::writeRaster(x=pfda, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, p)/1000
abs1 <- raster::extract(pfda, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, pt)/1000
abs1 <- raster::extract(pfda, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: FDA prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["FDA"] <- -1
}
}
        # SVM: predict presence probability (column 2 of type="probabilities") from
        # the calibrated kernlab model; kernlab::predict is wrapped with as.function()
        # so raster::predict accepts it via 'fun'. Then optionally probit-transform,
        # rescale to integer 0-1000 and evaluate.
        if (ws["SVM"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Support Vector Machines (package: kernlab)\n", sep=""))
            if (!is.null(factors)) {
                cat(paste("\n", "NOTE: SVM model with factors may require explicit dummy variables", "\n", sep=""))
            }
            results <- SVM.OLD
            # psvm stays NULL when prediction fails (the error handler only prints)
            psvm <- NULL
            fullname <- paste("models/", RASTER.species.name, "_SVM", sep="")
            predict.svm2 <- as.function(kernlab::predict)
            tryCatch(psvm <- raster::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("SVM prediction (kernlab package) failed"))},
                silent=F)
            if (is.null(psvm) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- SVM.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=psvm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "SVM"
                    psvm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                psvm <- trunc(1000*psvm)
                raster::writeRaster(x=psvm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(psvm, p)/1000
                    abs1 <- raster::extract(psvm, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(psvm, pt)/1000
                    abs1 <- raster::extract(psvm, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: SVM prediction (kernlab package) failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["SVM"] <- -1
            }
        }
        # SVME: predict suitability from the calibrated e1071 SVM via the wrapper
        # predict.svme (defined elsewhere in this file); this block also treats
        # warnings as failures. Then optionally probit-transform, rescale to
        # integer 0-1000 and evaluate.
        if (ws["SVME"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Support Vector Machines (package: e1071)\n", sep=""))
            if (!is.null(factors)) {
                cat(paste("\n", "NOTE: SVME model with factors may require explicit dummy variables", "\n", sep=""))
            }
            results <- SVME.OLD
            # psvme stays NULL when prediction fails (handlers only print)
            psvme <- NULL
            fullname <- paste("models/", RASTER.species.name, "_SVME", sep="")
            tryCatch(psvme <- raster::predict(object=xn, model=results, fun=predict.svme, na.rm=TRUE, factors=categories,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("SVM prediction (e1071 package) failed"))},
                warning= function(war) {print(paste("SVM prediction (e1071 package) failed"))},
                silent=F)
            if (is.null(psvme) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- SVME.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=psvme, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "SVME"
                    psvme <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale probabilities to integer 0-1000 before writing the final raster
                psvme <- trunc(1000*psvme)
                raster::writeRaster(x=psvme, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(psvme, p)/1000
                    abs1 <- raster::extract(psvme, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(psvme, pt)/1000
                    abs1 <- raster::extract(psvme, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: SVM prediction (e1071 package) failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["SVME"] <- -1
            }
        }
        # The presence-only methods below (BIOCLIM, DOMAIN, MAHAL) only handle
        # numeric predictors: drop factor layers and dummy-variable layers from xn.
        # NOTE(review): this tests the input weights BIOCLIM/DOMAIN/MAHAL directly
        # rather than ws[...] — presumably equivalent here; confirm against caller.
        if (BIOCLIM > 0 || DOMAIN > 0 || MAHAL > 0) {
            if(is.null(factors) == F) {
                xn <- raster::dropLayer(xn, which(names(xn) %in% factors))
            }
            if(is.null(dummy.vars) == F) {
                xn <- raster::dropLayer(xn, which(names(xn) %in% dummy.vars))
            }
        }
        # BIOCLIM: predict suitability with dismo::predict on the calibrated
        # envelope model (DistModel objects use dismo's own predict), optionally
        # probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["BIOCLIM"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". BIOCLIM algorithm (package: dismo)\n", sep=""))
            results <- BIOCLIM.OLD
            # pbio stays NULL when prediction fails (the error handler only prints)
            pbio <- NULL
            fullname <- paste("models/", RASTER.species.name, "_BIOCLIM", sep="")
            tryCatch(pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("BIOCLIM prediction failed"))},
                silent=F)
            if (is.null(pbio) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- BIOCLIM.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pbio, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "BIOCLIM"
                    pbio <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale suitability values to integer 0-1000 before writing the final raster
                pbio <- trunc(1000*pbio)
                raster::writeRaster(x=pbio, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pbio, p)/1000
                    abs1 <- raster::extract(pbio, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pbio, pt)/1000
                    abs1 <- raster::extract(pbio, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: BIOCLIM prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["BIOCLIM"] <- -1
            }
        }
        # DOMAIN: predict suitability with dismo::predict on the calibrated DOMAIN
        # model, optionally probit-transform, rescale to integer 0-1000 and evaluate.
        if (ws["DOMAIN"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". DOMAIN algorithm (package: dismo)\n", sep=""))
            results <- DOMAIN.OLD
            # pdom stays NULL when prediction fails (the error handler only prints)
            pdom <- NULL
            fullname <- paste("models/", RASTER.species.name, "_DOMAIN", sep="")
            tryCatch(pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("DOMAIN prediction failed"))},
                silent=F)
            if (is.null(pdom) == F) {
                # optional probit transformation: re-predict from the raw output raster
                results2 <- DOMAIN.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pdom, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "DOMAIN"
                    pdom <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale suitability values to integer 0-1000 before writing the final raster
                pdom <- trunc(1000*pdom)
                raster::writeRaster(x=pdom, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pdom, p)/1000
                    abs1 <- raster::extract(pdom, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pdom, pt)/1000
                    abs1 <- raster::extract(pdom, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: DOMAIN prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["DOMAIN"] <- -1
            }
        }
        # MAHAL: predict Mahalanobis-distance suitability with dismo::predict, then
        # map the raw distances onto (0, 1] via the MAHAL.shape transformation
        # shape / |raw - 1 - shape|, optionally probit-transform, rescale to
        # integer 0-1000 and evaluate.
        if (ws["MAHAL"] > 0) {
            mc <- mc+1
            cat(paste("\n", mc, ". Mahalanobis algorithm (package: dismo)\n", sep=""))
            results <- MAHAL.OLD
            # pmahal stays NULL when prediction fails (the error handler only prints)
            pmahal <- NULL
            fullname <- paste("models/", RASTER.species.name, "_MAHAL", sep="")
            # not possible to use the predict.mahal function as raster::predict automatically reverts to dismo::predict for 'DistModel' objects
            tryCatch(pmahal <- dismo::predict(object=results, x=xn, na.rm=TRUE,
                                 filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
                error= function(err) {print(paste("Mahalanobis prediction failed"))},
                silent=F)
            if (is.null(pmahal) == F) {
                # shape transformation of the raw Mahalanobis output (see header comment)
                pmahal <- pmahal - 1 - MAHAL.shape
                pmahal <- abs(pmahal)
                pmahal <- MAHAL.shape / pmahal
                # optional probit transformation: re-predict from the transformed raster
                results2 <- MAHAL.PROBIT.OLD
                if (is.null(results2) == F) {
                    cat(paste("Probit transformation", "\n", sep=""))
                    fullname2 <- paste(fullname, "_step1", sep="")
                    raster::writeRaster(x=pmahal, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                    explan.stack <- stack(fullname2)
                    names(explan.stack) <- "MAHAL"
                    pmahal <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                        filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
                }
                # rescale suitability values to integer 0-1000 before writing the final raster
                pmahal <- trunc(1000*pmahal)
                raster::writeRaster(x=pmahal, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                if(evaluate == T) {
                    # evaluate at calibration locations (p = presence, a = absence)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                    pres1 <- raster::extract(pmahal, p)/1000
                    abs1 <- raster::extract(pmahal, a)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
                if(retest == T) {
                    # evaluate at independent test locations (pt, at)
                    eval1 <- pres1 <- abs1 <- NULL
                    cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                    pres1 <- raster::extract(pmahal, pt)/1000
                    abs1 <- raster::extract(pmahal, at)/1000
                    eval1 <- evaluate(p=pres1, a=abs1)
                    print(eval1)
                }
            }else{
                # flag the failure so the ensemble step excludes this model (weight -1 -> 0)
                cat(paste("\n", "WARNING: Mahalanobis prediction failed","\n\n", sep = ""))
                prediction.failures <- TRUE
                ws["MAHAL"] <- -1
            }
        }
    # Report any algorithms whose raster prediction failed earlier (those were
    # flagged by setting their weight in 'ws' to -1), then zero out the failed
    # weights so those models do not contribute to the ensemble sums below.
    if (prediction.failures == T) {
        cat(paste("\n", "WARNING: some predictions failed","\n", sep = ""))
        cat(paste("\n", "actual weights that were used were (-1 indicates failed predictions):","\n", sep = ""))
        print(ws)
        # reset failed-model weights from the -1 sentinel to 0 before weighting
        ws[which(ws==-1)] <- 0
    }
#
# create ensembles
mc <- mc+1
cat(paste("\n\n", mc, ". Ensemble algorithm\n", sep=""))
ensemble.statistics["n.models"] <- sum(as.numeric(ws > 0))
ensemble <- xn[[1]] == raster::NAvalue(xn[[1]])
raster::setMinMax(ensemble)
names(ensemble) <- raster.title
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
enscount <- ensemble
raster::setMinMax(enscount)
names(enscount) <- paste(raster.title, "_count", sep="")
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
enspresence <- ensemble
raster::setMinMax(enspresence)
names(enspresence) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
if (ws["MAXENT"] > 0) {
ensemble <- ensemble + ws["MAXENT"] * pmaxent
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxent <- pmaxent >= thresholds["MAXENT"]
enscount <- enscount + pmaxent
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GBM"] > 0) {
ensemble <- ensemble + ws["GBM"] * pgbm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbm <- pgbm >= thresholds["GBM"]
enscount <- enscount + pgbm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GBMSTEP"] > 0) {
ensemble <- ensemble + ws["GBMSTEP"] * pgbms
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbms <- pgbms >= thresholds["GBMSTEP"]
enscount <- enscount + pgbms
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["RF"] > 0) {
ensemble <- ensemble + ws["RF"] * prf
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prf <- prf >= thresholds["RF"]
enscount <- enscount + prf
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GLM"] > 0) {
ensemble <- ensemble + ws["GLM"] * pglm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglm <- pglm >= thresholds["GLM"]
enscount <- enscount + pglm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GLMSTEP"] > 0) {
ensemble <- ensemble + ws["GLMSTEP"] * pglms
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglms <- pglms >= thresholds["GLMSTEP"]
enscount <- enscount + pglms
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GAM"] > 0) {
ensemble <- ensemble + ws["GAM"] * pgam
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgam <- pgam >= thresholds["GAM"]
enscount <- enscount + pgam
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GAMSTEP"] > 0) {
ensemble <- ensemble + ws["GAMSTEP"] * pgams
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgams <- pgams >= thresholds["GAMSTEP"]
enscount <- enscount + pgams
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MGCV"] > 0) {
ensemble <- ensemble + ws["MGCV"] * pmgcv
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcv <- pmgcv >= thresholds["MGCV"]
enscount <- enscount + pmgcv
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MGCVFIX"] > 0) {
ensemble <- ensemble + ws["MGCVFIX"] * pmgcvf
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcvf <- pmgcvf >= thresholds["MGCVFIX"]
enscount <- enscount + pmgcvf
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["EARTH"] > 0) {
ensemble <- ensemble + ws["EARTH"] * pearth
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pearth <- pearth >= thresholds["EARTH"]
enscount <- enscount + pearth
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["RPART"] > 0) {
ensemble <- ensemble + ws["RPART"] * prpart
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prpart <- prpart >= thresholds["RPART"]
enscount <- enscount + prpart
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["NNET"] > 0) {
ensemble <- ensemble + ws["NNET"] * pnnet
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pnnet <- pnnet >= thresholds["NNET"]
enscount <- enscount + pnnet
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["FDA"] > 0) {
ensemble <- ensemble + ws["FDA"] * pfda
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pfda <- pfda >= thresholds["FDA"]
enscount <- enscount + pfda
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["SVM"] > 0) {
ensemble <- ensemble + ws["SVM"] * psvm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvm <- psvm >= thresholds["SVM"]
enscount <- enscount + psvm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["SVME"] > 0) {
ensemble <- ensemble + ws["SVME"] * psvme
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvme <- psvme >= thresholds["SVME"]
enscount <- enscount + psvme
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["BIOCLIM"] > 0) {
ensemble <- ensemble + ws["BIOCLIM"] * pbio
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbio <- pbio >= thresholds["BIOCLIM"]
enscount <- enscount + pbio
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["DOMAIN"] > 0) {
ensemble <- ensemble + ws["DOMAIN"] * pdom
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pdom <- pdom >= thresholds["DOMAIN"]
enscount <- enscount + pdom
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MAHAL"] > 0) {
ensemble <- ensemble + ws["MAHAL"] * pmahal
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal <- pmahal >= thresholds["MAHAL"]
enscount <- enscount + pmahal
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
ensemble <- trunc(ensemble)
raster::setMinMax(ensemble)
ensemble.statistics["ensemble.min"] <- raster::minValue(ensemble)
ensemble.statistics["ensemble.max"] <- raster::maxValue(ensemble)
# names(ensemble) <- raster.title
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(ensemble, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- raster.title
raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
if (KML.out == T) {
thresholdx <- thresholds["ENSEMBLE"]
seq1 <- seq(from = 0, to = thresholdx, length.out = 10)
seq2 <- seq(from = thresholdx, to = 1000, length.out = 11)
raster::KML(working.raster, filename=kmlfull, col = c(rainbow(n = 10, start = 0, end = 1/6), rainbow(n = 10, start = 3/6, end = 4/6)), colNA = 0,
blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T, breaks = c(seq1, seq2))
}
raster::setMinMax(enscount)
ensemble.statistics["count.min"] <- raster::minValue(enscount)
ensemble.statistics["count.max"] <- raster::maxValue(enscount)
# names(enscount) <- paste(raster.title, "_count", sep="")
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(enscount, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_count", sep="")
raster::writeRaster(working.raster, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
if (KML.out == T) {
nmax <- sum(as.numeric(ws > 0))
if (nmax > 3) {
raster::KML(working.raster, filename=kmlcount, col=c("grey", rainbow(n=(nmax-1), start=0, end=1/3), "blue"),
colNA=0, blur=10, overwrite=T, breaks=seq(from=-1, to=nmax, by=1))
}else{
raster::KML(working.raster, filename=kmlcount, col=c("grey", rainbow(n=nmax, start=0, end=1/3)),
colNA=0, blur=10, overwrite=TRUE, breaks=seq(from=-1, to=nmax, by=1))
}
}
ensemble.statistics["ensemble.threshold"] <- thresholds["ENSEMBLE"]
enspresence <- ensemble >= thresholds["ENSEMBLE"]
raster::setMinMax(enspresence)
# names(enspresence) <- paste(raster.title, "_presence", sep="")
# raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(working.raster, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
if (KML.out == T) {
raster::KML(working.raster, filename=kmlpresence, col=c("grey", "green"),
colNA=0, blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T)
}
    # Optional evaluation of the final weighted ensemble layer against the
    # calibration locations (p = presence, a = absence).  Extracted raster
    # values are divided by 1000 because the ensemble layers are stored on a
    # 0-1000 integer scale (see the threshold scaling earlier in the function).
    if(evaluate == T) {
        eval1 <- NULL
        cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations p and a", "\n\n", sep = ""))
        pres_consensus <- raster::extract(ensemble, p)/1000
        abs_consensus <- raster::extract(ensemble, a)/1000
        eval1 <- evaluate(p=pres_consensus, a=abs_consensus)
        print(eval1)
    }
    # Same evaluation against the independent test locations (pt / at); only
    # runs when 'retest' was set because pt/at differ from p/a.
    if(retest == T) {
        eval1 <- NULL
        cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations pt and at", "\n\n", sep = ""))
        pres_consensus <- raster::extract(ensemble, pt)/1000
        abs_consensus <- raster::extract(ensemble, at)/1000
        eval1 <- evaluate(p=pres_consensus, a=abs_consensus)
        print(eval1)
    }
cat(paste("\n", "End of modelling for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions were made for RasterStack: ", stack.title, "\n\n", sep = ""))
#
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(ensemble, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- raster.title
raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
raster::writeRaster(enscount, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_count", sep="")
raster::writeRaster(working.raster, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(working.raster, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
result <- list(ensemble.statistics=ensemble.statistics, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(result)
}
|
/R/ensemble.raster.R
|
no_license
|
BRozhkov/BiodiversityR
|
R
| false
| false
| 80,080
|
r
|
`ensemble.raster` <- function(
xn=NULL, ext=NULL,
models.list=NULL,
input.weights=models.list$output.weights,
thresholds=models.list$thresholds,
RASTER.species.name="Species001", RASTER.stack.name=xn@title,
RASTER.format="raster", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
RASTER.models.overwrite=TRUE,
KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
evaluate=FALSE, SINK=FALSE,
p=models.list$p, a=models.list$a,
pt=models.list$pt, at=models.list$at
)
{
.BiodiversityR <- new.env()
if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(xn) == T) {stop("value for parameter xn is missing (RasterStack object)")}
if (is.null(models.list) == T) {stop("provide 'models.list' as models will not be recalibrated and retested")}
if (is.null(input.weights) == T) {input.weights <- models.list$output.weights}
if (is.null(thresholds) == T) {stop("provide 'thresholds' as models will not be recalibrated and retested")}
    # Decide whether evaluation is possible at all (requires p and a), and
    # whether a second, independent evaluation should run: 'retest' is only
    # enabled when test locations pt/at are supplied AND differ from the
    # calibration locations p/a (otherwise re-testing would be redundant).
    retest <- F
    if (evaluate == T) {
        if (is.null(p)==T || is.null(a)==T) {
            cat(paste("\n", "NOTE: not possible to evaluate the models since locations p and a are not provided", "\n", sep = ""))
            evaluate <- F
        }
        if (is.null(pt)==F && is.null(at)==F) {
            if(identical(pt, p) == F || identical(at, a) == F) {retest <- T}
        }
    }
if (is.null(ext) == F) {
if(length(xn@title) == 0) {xn@title <- "stack1"}
title.old <- xn@title
xn <- raster::crop(xn, y=ext, snap="in")
xn@title <- title.old
}
# create output file
if (RASTER.species.name == "Species001") {
RASTER.species.name <- models.list$species.name
}
dir.create("outputs", showWarnings = F)
paste.file <- paste(getwd(), "/outputs/", RASTER.species.name, "_output.txt", sep="")
OLD.SINK <- TRUE
if (sink.number(type="output") == 0) {OLD.SINK <- F}
if (SINK==T && OLD.SINK==F) {
if (file.exists(paste.file) == F) {
cat(paste("\n", "NOTE: results captured in file: ", paste.file, "\n", sep = ""))
}else{
cat(paste("\n", "NOTE: results appended in file: ", paste.file, "\n", sep = ""))
}
cat(paste("\n\n", "RESULTS (ensemble.raster function)", "\n", sep=""), file=paste.file, append=T)
sink(file=paste.file, append=T)
cat(paste(date(), "\n", sep=""))
print(match.call())
}
#
# check if all variables are present
vars <- models.list$vars
vars.xn <- names(xn)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.xn==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack xn \n", sep = "")}
}
nv <- length(vars.xn)
for (i in 1:nv) {
if (any(vars==vars.xn[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.xn[i], "' was not calibrated as explanatory variable", "\n", sep = ""))
xn <- raster::dropLayer(xn, which(names(xn) %in% c(vars.xn[i]) ))
}
}
#
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
if(raster::projection(xn)=="NA") {
raster::projection(xn) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
}
# declare categorical layers for xn
factors <- models.list$factors
categories <- NULL
if(is.null(factors) == F) {
for (i in 1:length(factors)) {
j <- which(names(xn) == factors[i])
xn[[j]] <- raster::as.factor(xn[[j]])
}
categories <- models.list$categories
}
dummy.vars <- models.list$dummy.vars
#
KML.blur <- trunc(KML.blur)
if (KML.blur < 1) {KML.blur <- 1}
if (is.null(input.weights)==F) {
MAXENT <- max(c(input.weights["MAXENT"], -1), na.rm=T)
GBM <- max(c(input.weights["GBM"], -1), na.rm=T)
GBMSTEP <- max(c(input.weights["GBMSTEP"], -1), na.rm=T)
RF <- max(c(input.weights["RF"], -1), na.rm=T)
GLM <- max(c(input.weights["GLM"], -1), na.rm=T)
GLMSTEP <- max(c(input.weights["GLMSTEP"], -1), na.rm=T)
GAM <- max(c(input.weights["GAM"], -1), na.rm=T)
GAMSTEP <- max(c(input.weights["GAMSTEP"], -1), na.rm=T)
MGCV <- max(c(input.weights["MGCV"], -1), na.rm=T)
MGCVFIX <- max(c(input.weights["MGCVFIX"], -1), na.rm=T)
EARTH <- max(c(input.weights["EARTH"], -1), na.rm=T)
RPART <- max(c(input.weights["RPART"], -1), na.rm=T)
NNET <- max(c(input.weights["NNET"], -1), na.rm=T)
FDA <- max(c(input.weights["FDA"], -1), na.rm=T)
SVM <- max(c(input.weights["SVM"], -1), na.rm=T)
SVME <- max(c(input.weights["SVME"], -1), na.rm=T)
BIOCLIM <- max(c(input.weights["BIOCLIM"], -1), na.rm=T)
DOMAIN <- max(c(input.weights["DOMAIN"], -1), na.rm=T)
MAHAL<- max(c(input.weights["MAHAL"], -1), na.rm=T)
}
MAXENT.OLD <- GBM.OLD <- GBMSTEP.OLD <- RF.OLD <- GLM.OLD <- GLMSTEP.OLD <- GAM.OLD <- GAMSTEP.OLD <- MGCV.OLD <- NULL
MGCVFIX.OLD <- EARTH.OLD <- RPART.OLD <- NNET.OLD <- FDA.OLD <- SVM.OLD <- SVME.OLD <- BIOCLIM.OLD <- DOMAIN.OLD <- MAHAL.OLD <- GEODIST.OLD <- NULL
# probit models, NULL if no probit model fitted
MAXENT.PROBIT.OLD <- GBM.PROBIT.OLD <- GBMSTEP.PROBIT.OLD <- RF.PROBIT.OLD <- GLM.PROBIT.OLD <- GLMSTEP.PROBIT.OLD <- GAM.PROBIT.OLD <- GAMSTEP.PROBIT.OLD <- MGCV.PROBIT.OLD <- NULL
MGCVFIX.PROBIT.OLD <- EARTH.PROBIT.OLD <- RPART.PROBIT.OLD <- NNET.PROBIT.OLD <- FDA.PROBIT.OLD <- SVM.PROBIT.OLD <- SVME.PROBIT.OLD <- BIOCLIM.PROBIT.OLD <- DOMAIN.PROBIT.OLD <- MAHAL.PROBIT.OLD <- NULL
if (is.null(models.list) == F) {
if (is.null(models.list$MAXENT) == F) {MAXENT.OLD <- models.list$MAXENT}
if (is.null(models.list$GBM) == F) {GBM.OLD <- models.list$GBM}
if (is.null(models.list$GBMSTEP) == F) {GBMSTEP.OLD <- models.list$GBMSTEP}
if (is.null(models.list$RF) == F) {RF.OLD <- models.list$RF}
if (is.null(models.list$GLM) == F) {GLM.OLD <- models.list$GLM}
if (is.null(models.list$GLMSTEP) == F) {GLMSTEP.OLD <- models.list$GLMSTEP}
if (is.null(models.list$GAM) == F) {GAM.OLD <- models.list$GAM}
if (is.null(models.list$GAMSTEP) == F) {GAMSTEP.OLD <- models.list$GAMSTEP}
if (is.null(models.list$MGCV) == F) {MGCV.OLD <- models.list$MGCV}
if (is.null(models.list$MGCVFIX) == F) {MGCVFIX.OLD <- models.list$MGCVFIX}
if (is.null(models.list$EARTH) == F) {EARTH.OLD <- models.list$EARTH}
if (is.null(models.list$RPART) == F) {RPART.OLD <- models.list$RPART}
if (is.null(models.list$NNET) == F) {NNET.OLD <- models.list$NNET}
if (is.null(models.list$FDA) == F) {FDA.OLD <- models.list$FDA}
if (is.null(models.list$SVM) == F) {SVM.OLD <- models.list$SVM}
if (is.null(models.list$SVME) == F) {SVME.OLD <- models.list$SVME}
if (is.null(models.list$BIOCLIM) == F) {BIOCLIM.OLD <- models.list$BIOCLIM}
if (is.null(models.list$DOMAIN) == F) {DOMAIN.OLD <- models.list$DOMAIN}
if (is.null(models.list$MAHAL) == F) {MAHAL.OLD <- models.list$MAHAL}
if (is.null(models.list$GEODIST) == F) {GEODIST.OLD <- models.list$GEODIST}
# probit models
if (is.null(models.list$MAXENT.PROBIT) == F) {MAXENT.PROBIT.OLD <- models.list$MAXENT.PROBIT}
if (is.null(models.list$GBM.PROBIT) == F) {GBM.PROBIT.OLD <- models.list$GBM.PROBIT}
if (is.null(models.list$GBMSTEP.PROBIT) == F) {GBMSTEP.PROBIT.OLD <- models.list$GBMSTEP.PROBIT}
if (is.null(models.list$RF.PROBIT) == F) {RF.PROBIT.OLD <- models.list$RF.PROBIT}
if (is.null(models.list$GLM.PROBIT) == F) {GLM.PROBIT.OLD <- models.list$GLM.PROBIT}
if (is.null(models.list$GLMSTEP.PROBIT) == F) {GLMSTEP.PROBIT.OLD <- models.list$GLMSTEP.PROBIT}
if (is.null(models.list$GAM.PROBIT) == F) {GAM.PROBIT.OLD <- models.list$GAM.PROBIT}
if (is.null(models.list$GAMSTEP.PROBIT) == F) {GAMSTEP.PROBIT.OLD <- models.list$GAMSTEP.PROBIT}
if (is.null(models.list$MGCV.PROBIT) == F) {MGCV.PROBIT.OLD <- models.list$MGCV.PROBIT}
if (is.null(models.list$MGCVFIX.PROBIT) == F) {MGCVFIX.PROBIT.OLD <- models.list$MGCVFIX.PROBIT}
if (is.null(models.list$EARTH.PROBIT) == F) {EARTH.PROBIT.OLD <- models.list$EARTH.PROBIT}
if (is.null(models.list$RPART.PROBIT) == F) {RPART.PROBIT.OLD <- models.list$RPART.PROBIT}
if (is.null(models.list$NNET.PROBIT) == F) {NNET.PROBIT.OLD <- models.list$NNET.PROBIT}
if (is.null(models.list$FDA.PROBIT) == F) {FDA.PROBIT.OLD <- models.list$FDA.PROBIT}
if (is.null(models.list$SVM.PROBIT) == F) {SVM.PROBIT.OLD <- models.list$SVM.PROBIT}
if (is.null(models.list$SVME.PROBIT) == F) {SVME.PROBIT.OLD <- models.list$SVME.PROBIT}
if (is.null(models.list$BIOCLIM.PROBIT) == F) {BIOCLIM.PROBIT.OLD <- models.list$BIOCLIM.PROBIT}
if (is.null(models.list$DOMAIN.PROBIT) == F) {DOMAIN.PROBIT.OLD <- models.list$DOMAIN.PROBIT}
if (is.null(models.list$MAHAL.PROBIT) == F) {MAHAL.PROBIT.OLD <- models.list$MAHAL.PROBIT}
}
#
if (MAXENT > 0) {
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
if (!file.exists(jar)) {stop('maxent program is missing: ', jar, '\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')}
}
if (GBM > 0) {
if (! require(gbm)) {stop("Please install the gbm package")}
}
if (GBMSTEP > 0) {
if (! require(gbm)) {stop("Please install the gbm package")}
}
if (RF > 0) {
if (! require(randomForest)) {stop("Please install the randomForest package")}
}
if (GLMSTEP > 0) {
if (! require(MASS)) {stop("Please install the MASS package")}
}
if (GAM > 0 || GAMSTEP > 0) {
cat(paste("\n"))
try(detach(package:mgcv), silent=T)
suppressMessages(require(gam))
if (! require(gam)) {stop("Please install the gam package")}
}
if (MGCV > 0 || MGCVFIX > 0) {
cat(paste("\n"))
try(detach(package:gam), silent=T)
cat(paste("\n"))
options(warn=-1)
if (! require(mgcv)) {stop("Please install the mgcv package")}
# get the probabilities from MGCV
predict.mgcv <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
options(warn=0)
}
if (EARTH > 0) {
if (! require(earth)) {stop("Please install the earth package")}
# get the probabilities from earth
predict.earth2 <- function(object, newdata, type="response") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (RPART > 0) {
if (! require(rpart)) {stop("Please install the rpart package")}
}
if (NNET > 0) {
if (! require(nnet)) {stop("Please install the nnet package")}
# get the probabilities from nnet
predict.nnet2 <- function(object, newdata, type="raw") {
p <- predict(object=object, newdata=newdata, type=type)
return(as.numeric(p))
}
}
if (FDA > 0) {
if (! require(mda)) {stop("Please install the mda package")}
}
if (SVM > 0) {
if (! require(kernlab)) {stop("Please install the kernlab package")}
}
if (SVME > 0) {
if (! require(e1071)) {stop("Please install the e1071 package")}
# get the probabilities from svm
predict.svme <- function(model, newdata, probability=T) {
p <- predict(model, newdata, probability=T)
return(attr(p, "probabilities")[,1])
}
}
if (MAHAL > 0) {
MAHAL.shape <- models.list$formulae$MAHAL.shape
# get the probabilities from mahal
predict.mahal <- function(model, newdata, MAHAL.shape) {
p <- dismo::predict(object=model, x=newdata)
p <- p - 1 - MAHAL.shape
p <- abs(p)
p <- MAHAL.shape / p
return(p)
}
}
#
ws <- input.weights
prediction.failures <- FALSE
#
# prepare for raster output
dir.create("models", showWarnings = F)
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/count", showWarnings = F)
dir.create("ensembles/presence", showWarnings = F)
stack.title <- RASTER.stack.name
# stack.title <- xn@title
if(KML.out == T) {
dir.create("kml", showWarnings = F)
dir.create("kml/count", showWarnings = F)
dir.create("kml/presence", showWarnings = F)
}
rasterfull <- paste("ensembles/", RASTER.species.name, "_", stack.title , sep="")
kmlfull <- paste("kml/", RASTER.species.name, "_", stack.title , sep="")
raster.title <- paste(RASTER.species.name, "_", stack.title , sep="")
rastercount <- paste("ensembles/count/", RASTER.species.name, "_", stack.title , sep="")
kmlcount <- paste("kml/count/", RASTER.species.name, "_", stack.title , sep="")
rasterpresence <- paste("ensembles/presence/", RASTER.species.name, "_", stack.title, sep="")
kmlpresence <- paste("kml/presence/", RASTER.species.name, "_", stack.title, sep="")
RASTER.species.orig <- RASTER.species.name
if (RASTER.models.overwrite==T) {
RASTER.species.name <- "working"
}else{
RASTER.species.name <- paste(RASTER.species.name, "_", stack.title, sep="")
}
#
#
cat(paste("\n", "Start of modelling for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions for RasterStack: ", stack.title, "\n", sep = ""))
ensemble.statistics <- NULL
cat(paste("ensemble raster layers will be saved in folder ", getwd(), "/ensembles", "\n\n", sep = ""))
statistics.names <- c("n.models", "ensemble.threshold", "ensemble.min", "ensemble.max", "count.min", "count.max")
ensemble.statistics <- numeric(6)
names(ensemble.statistics) <- statistics.names
#
# sometimes still error warnings for minimum and maximum values of the layers
# set minimum and maximum values for xn
for (i in 1:raster::nlayers(xn)) {
xn[[i]] <- raster::setMinMax(xn[[i]])
}
#
# since raster layers are scaled 0 - 1000, multiply the thresholds by 1000
thresholds <- trunc(1000*thresholds)
#
# count models
mc <- 0
#
# start raster layer creations
if (ws["MAXENT"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Maximum entropy algorithm (package: dismo)\n", sep=""))
# Put the file 'maxent.jar' in the 'java' folder of dismo
# the file 'maxent.jar' can be obtained from from http://www.cs.princeton.edu/~schapire/maxent/.
jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
results <- MAXENT.OLD
pmaxent <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAXENT", sep="")
tryCatch(pmaxent <- raster::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("MAXENT prediction failed"))},
silent=F)
if (is.null(pmaxent) == F) {
results2 <- MAXENT.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmaxent, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MAXENT"
pmaxent <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmaxent <- trunc(1000*pmaxent)
raster::writeRaster(x=pmaxent, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, p)/1000
abs1 <- raster::extract(pmaxent, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmaxent, pt)/1000
abs1 <- raster::extract(pmaxent, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: MAXENT prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MAXENT"] <- -1
}
}
if (ws["GBM"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized boosted regression modeling (package: gbm) \n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBM.OLD
pgbm <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBM", sep="")
tryCatch(pgbm <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
n.trees=results$n.trees, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("GBM prediction failed"))},
silent=F)
if (is.null(pgbm) == F) {
results2 <- GBM.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "GBM"
pgbm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pgbm <- trunc(1000*pgbm)
raster::writeRaster(x=pgbm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, p)/1000
abs1 <- raster::extract(pgbm, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbm, pt)/1000
abs1 <- raster::extract(pgbm, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["GBM"] <- -1
}
}
if (ws["GBMSTEP"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". gbm step algorithm (package: dismo)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- GBMSTEP.OLD
pgbms <- NULL
fullname <- paste("models/", RASTER.species.name, "_GBMSTEP", sep="")
tryCatch(pgbms <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
n.trees=results$n.trees, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("stepwise GBM prediction failed"))},
silent=F)
if (is.null(pgbms) == F) {
results2 <- GBMSTEP.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pgbms, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "GBMSTEP"
pgbms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pgbms <- trunc(1000*pgbms)
raster::writeRaster(x=pgbms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, p)/1000
abs1 <- raster::extract(pgbms, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pgbms, pt)/1000
abs1 <- raster::extract(pgbms, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: stepwise GBM prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["GBMSTEP"] <- -1
}
}
    # RF: project the fitted random forest model onto xn, optionally
    # recalibrate with the fitted probit GLM, save the map as integers
    # 0-1000 and evaluate at calibration/test locations.
    if (ws["RF"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Random forest algorithm (package: randomForest)\n", sep=""))
        results <- RF.OLD
        prf <- NULL
        fullname <- paste("models/", RASTER.species.name, "_RF", sep="")
        # prf stays NULL if prediction fails (handler only prints a message)
        tryCatch(prf <- raster::predict(object=xn, model=results, na.rm=TRUE, factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("random forest prediction failed"))},
            silent=F)
        if (is.null(prf) == F) {
            results2 <- RF.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=prf, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "RF"
                prf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            prf <- trunc(1000*prf)
            raster::writeRaster(x=prf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(prf, p)/1000
                abs1 <- raster::extract(prf, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(prf, pt)/1000
                abs1 <- raster::extract(prf, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: random forest prediction failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["RF"] <- -1
        }
    }
    # GLM: project the fitted generalized linear model onto xn, optionally
    # recalibrate with the fitted probit GLM, save the map as integers
    # 0-1000 and evaluate at calibration/test locations.
    if (ws["GLM"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Generalized Linear Model \n", sep=""))
        results <- GLM.OLD
        pglm <- NULL
        fullname <- paste("models/", RASTER.species.name, "_GLM", sep="")
        # pglm stays NULL if prediction fails (handler only prints a message)
        tryCatch(pglm <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("GLM prediction failed"))},
            silent=F)
        if (is.null(pglm) == F) {
            results2 <- GLM.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pglm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "GLM"
                pglm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pglm <- trunc(1000*pglm)
            raster::writeRaster(x=pglm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pglm, p)/1000
                abs1 <- raster::extract(pglm, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pglm, pt)/1000
                abs1 <- raster::extract(pglm, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: GLM prediction failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["GLM"] <- -1
        }
    }
    # GLMSTEP: project the fitted stepwise GLM onto xn, optionally
    # recalibrate with the fitted probit GLM, save the map as integers
    # 0-1000 and evaluate at calibration/test locations.
    if (ws["GLMSTEP"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Stepwise Generalized Linear Model \n", sep=""))
        results <- GLMSTEP.OLD
        pglms <- NULL
        fullname <- paste("models/", RASTER.species.name, "_GLMSTEP", sep="")
        # pglms stays NULL if prediction fails (handler only prints a message)
        tryCatch(pglms <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("stepwise GLM prediction failed"))},
            silent=F)
        if (is.null(pglms) == F) {
            results2 <- GLMSTEP.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pglms, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "GLMSTEP"
                pglms <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pglms <- trunc(1000*pglms)
            raster::writeRaster(x=pglms, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pglms, p)/1000
                abs1 <- raster::extract(pglms, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pglms, pt)/1000
                abs1 <- raster::extract(pglms, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: stepwise GLM prediction failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["GLMSTEP"] <- -1
        }
    }
if (ws["GAM"] > 0 || ws["GAMSTEP"] > 0) {
cat(paste("\n"))
try(detach(package:mgcv), silent=T)
suppressMessages(require(gam))
require(gam, quietly=T)
}
    # GAM: project the fitted generalized additive model (gam package) onto
    # xn, optionally recalibrate with the fitted probit GLM, save the map as
    # integers 0-1000 and evaluate at calibration/test locations.
    if (ws["GAM"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Generalized Additive Model (package: gam)\n", sep=""))
        results <- GAM.OLD
        pgam <- NULL
        fullname <- paste("models/", RASTER.species.name, "_GAM", sep="")
        # pgam stays NULL if prediction fails (handler only prints a message)
        tryCatch(pgam <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("GAM prediction (gam package) failed"))},
            silent=F)
        if (is.null(pgam) == F) {
            results2 <- GAM.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pgam, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "GAM"
                pgam <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pgam <- trunc(1000*pgam)
            raster::writeRaster(x=pgam, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pgam, p)/1000
                abs1 <- raster::extract(pgam, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pgam, pt)/1000
                abs1 <- raster::extract(pgam, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: GAM prediction (gam package) failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["GAM"] <- -1
        }
    }
    # GAMSTEP: project the fitted stepwise GAM (gam package) onto xn,
    # optionally recalibrate with the fitted probit GLM, save the map as
    # integers 0-1000 and evaluate at calibration/test locations.
    if (ws["GAMSTEP"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Stepwise Generalized Additive Model (package: gam)\n", sep=""))
        results <- GAMSTEP.OLD
        pgams <- NULL
        fullname <- paste("models/", RASTER.species.name, "_GAMSTEP", sep="")
        # pgams stays NULL if prediction fails (handler only prints a message)
        tryCatch(pgams <- raster::predict(object=xn, model=results, type="response", na.rm=TRUE, factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("stepwise GAM prediction (gam package) failed"))},
            silent=F)
        if (is.null(pgams) == F) {
            results2 <- GAMSTEP.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pgams, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "GAMSTEP"
                pgams <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pgams <- trunc(1000*pgams)
            raster::writeRaster(x=pgams, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pgams, p)/1000
                abs1 <- raster::extract(pgams, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pgams, pt)/1000
                abs1 <- raster::extract(pgams, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: stepwise GAM prediction (gam package) failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["GAMSTEP"] <- -1
        }
    }
if (ws["MGCV"] > 0 || ws["MGCVFIX"] > 0) {
cat(paste("\n"))
try(detach(package:gam), silent=T)
options(warn=-1)
require(mgcv, quietly=T)
options(warn=0)
}
if (ws["MGCV"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Generalized Additive Model (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCV.OLD
pmgcv <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCV", sep="")
tryCatch(pmgcv <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("GAM prediction (mgcv package) failed"))},
silent=F)
if (is.null(pmgcv) == F) {
results2 <- MGCV.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcv, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MGCV"
pmgcv <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmgcv <- trunc(1000*pmgcv)
raster::writeRaster(x=pmgcv, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, p)/1000
abs1 <- raster::extract(pmgcv, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcv, pt)/1000
abs1 <- raster::extract(pmgcv, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MGCV"] <- -1
}
}
if (ws["MGCVFIX"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". GAM with fixed d.f. regression splines (package: mgcv)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- MGCVFIX.OLD
pmgcvf <- NULL
fullname <- paste("models/", RASTER.species.name, "_MGCVFIX", sep="")
tryCatch(pmgcvf <- raster::predict(object=xn, model=results, na.rm=TRUE, type="response", factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("MGCVFIX prediction (mgcv package) failed"))},
silent=F)
if (is.null(pmgcvf) == F) {
results2 <- MGCVFIX.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmgcvf, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MGCVFIX"
pmgcvf <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmgcvf <- trunc(1000*pmgcvf)
raster::writeRaster(x=pmgcvf, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, p)/1000
abs1 <- raster::extract(pmgcvf, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmgcvf, pt)/1000
abs1 <- raster::extract(pmgcvf, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: GAM prediction (mgcv package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MGCVFIX"] <- -1
}
}
    # EARTH: project the fitted MARS model (earth package) onto xn via the
    # predict.earth2 wrapper, optionally recalibrate with the fitted probit
    # GLM, save the map as integers 0-1000 and evaluate at calibration/test
    # locations.
    if (ws["EARTH"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Multivariate Adaptive Regression Splines (package: earth)\n", sep=""))
        if (!is.null(factors)) {
            cat(paste("\n", "NOTE: MARS (earth package) with factors may require explicit dummy variables", "\n", sep=""))
        }
        results <- EARTH.OLD
        pearth <- NULL
        fullname <- paste("models/", RASTER.species.name, "_EARTH", sep="")
        # pearth stays NULL if prediction fails (handler only prints a message)
        tryCatch(pearth <- raster::predict(object=xn, model=results, fun=predict.earth2, na.rm=TRUE, type="response", factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("MARS prediction (earth package) failed"))},
            silent=F)
        if (is.null(pearth) == F) {
            results2 <- EARTH.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pearth, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "EARTH"
                pearth <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pearth <- trunc(1000*pearth)
            raster::writeRaster(x=pearth, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pearth, p)/1000
                abs1 <- raster::extract(pearth, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pearth, pt)/1000
                abs1 <- raster::extract(pearth, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: MARS prediction (earth package) failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["EARTH"] <- -1
        }
    }
if (ws["RPART"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Recursive Partitioning And Regression Trees (package: rpart)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- RPART.OLD
prpart <- NULL
fullname <- paste("models/", RASTER.species.name, "_RPART", sep="")
tryCatch(prpart <- raster::predict(object=xn, model=results, na.rm=TRUE, type="prob", index=2, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("RPART prediction failed"))},
silent=F)
if (is.null(prpart) == F) {
results2 <- RPART.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=prpart, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "RPART"
prpart <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
prpart <- trunc(1000*prpart)
raster::writeRaster(x=prpart, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, p)/1000
abs1 <- raster::extract(prpart, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(prpart, pt)/1000
abs1 <- raster::extract(prpart, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: RPART prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["RPART"] <- -1
}
}
if (ws["NNET"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Artificial Neural Network (package: nnet)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- NNET.OLD
pnnet <- NULL
fullname <- paste("models/", RASTER.species.name, "_NNET", sep="")
tryCatch(pnnet <- raster::predict(object=xn, model=results, fun=predict.nnet2, na.rm=TRUE, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("ANN prediction (nnet package) failed"))},
silent=F)
if (is.null(pnnet) == F) {
results2 <- NNET.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pnnet, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "NNET"
pnnet <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pnnet <- trunc(1000*pnnet)
raster::writeRaster(x=pnnet, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, p)/1000
abs1 <- raster::extract(pnnet, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pnnet, pt)/1000
abs1 <- raster::extract(pnnet, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: ANN prediction (nnet package) failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["NNET"] <- -1
}
}
if (ws["FDA"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Flexible Discriminant Analysis (package: mda)\n", sep=""))
if (!is.null(factors)) {
cat(paste("\n", "WARRNING: not certain whether the correct factor levels will be used", "\n", sep=""))
}
results <- FDA.OLD
pfda <- NULL
fullname <- paste("models/", RASTER.species.name, "_FDA", sep="")
tryCatch(pfda <- raster::predict(object=xn, model=results, na.rm=TRUE, type="posterior", index=2, factors=categories,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("FDA prediction failed"))},
silent=F)
if (is.null(pfda) == F) {
results2 <- FDA.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pfda, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "FDA"
pfda <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pfda <- trunc(1000*pfda)
raster::writeRaster(x=pfda, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, p)/1000
abs1 <- raster::extract(pfda, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pfda, pt)/1000
abs1 <- raster::extract(pfda, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: FDA prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["FDA"] <- -1
}
}
    # SVM: project the fitted support vector machine (kernlab) onto xn
    # (class-2 probability via type="probabilities", index=2), optionally
    # recalibrate with the fitted probit GLM, save the map as integers
    # 0-1000 and evaluate at calibration/test locations.
    if (ws["SVM"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Support Vector Machines (package: kernlab)\n", sep=""))
        if (!is.null(factors)) {
            cat(paste("\n", "NOTE: SVM model with factors may require explicit dummy variables", "\n", sep=""))
        }
        results <- SVM.OLD
        psvm <- NULL
        fullname <- paste("models/", RASTER.species.name, "_SVM", sep="")
        # wrap kernlab's S4 predict so raster::predict can dispatch on it
        predict.svm2 <- as.function(kernlab::predict)
        # psvm stays NULL if prediction fails (handler only prints a message)
        tryCatch(psvm <- raster::predict(object=xn, model=results, fun=predict.svm2, na.rm=TRUE, type="probabilities", index=2, factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("SVM prediction (kernlab package) failed"))},
            silent=F)
        if (is.null(psvm) == F) {
            results2 <- SVM.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=psvm, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "SVM"
                psvm <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            psvm <- trunc(1000*psvm)
            raster::writeRaster(x=psvm, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(psvm, p)/1000
                abs1 <- raster::extract(psvm, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(psvm, pt)/1000
                abs1 <- raster::extract(psvm, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: SVM prediction (kernlab package) failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["SVM"] <- -1
        }
    }
    # SVME: project the fitted support vector machine (e1071) onto xn via
    # the predict.svme wrapper, optionally recalibrate with the fitted
    # probit GLM, save the map as integers 0-1000 and evaluate at
    # calibration/test locations. Note that warnings are also treated as
    # failures for this algorithm.
    if (ws["SVME"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". Support Vector Machines (package: e1071)\n", sep=""))
        if (!is.null(factors)) {
            cat(paste("\n", "NOTE: SVME model with factors may require explicit dummy variables", "\n", sep=""))
        }
        results <- SVME.OLD
        psvme <- NULL
        fullname <- paste("models/", RASTER.species.name, "_SVME", sep="")
        # psvme stays NULL if prediction fails (handlers only print a message)
        tryCatch(psvme <- raster::predict(object=xn, model=results, fun=predict.svme, na.rm=TRUE, factors=categories,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("SVM prediction (e1071 package) failed"))},
            warning= function(war) {print(paste("SVM prediction (e1071 package) failed"))},
            silent=F)
        if (is.null(psvme) == F) {
            results2 <- SVME.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=psvme, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "SVME"
                psvme <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            psvme <- trunc(1000*psvme)
            raster::writeRaster(x=psvme, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(psvme, p)/1000
                abs1 <- raster::extract(psvme, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(psvme, pt)/1000
                abs1 <- raster::extract(psvme, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: SVM prediction (e1071 package) failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["SVME"] <- -1
        }
    }
    # The distance/envelope algorithms (BIOCLIM, DOMAIN, MAHAL) cannot use
    # categorical or dummy predictors, so drop those layers from xn first.
    # NOTE(review): this tests the input weights BIOCLIM/DOMAIN/MAHAL rather
    # than ws["BIOCLIM"] etc. as used elsewhere in this section -- presumably
    # intentional (drop whenever these algorithms were requested); confirm.
    if (BIOCLIM > 0 || DOMAIN > 0 || MAHAL > 0) {
        if(is.null(factors) == F) {
            xn <- raster::dropLayer(xn, which(names(xn) %in% factors))
        }
        if(is.null(dummy.vars) == F) {
            xn <- raster::dropLayer(xn, which(names(xn) %in% dummy.vars))
        }
    }
    # BIOCLIM: project the fitted BIOCLIM envelope model (dismo::predict on
    # the DistModel object) onto xn, optionally recalibrate with the fitted
    # probit GLM, save the map as integers 0-1000 and evaluate at
    # calibration/test locations.
    if (ws["BIOCLIM"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". BIOCLIM algorithm (package: dismo)\n", sep=""))
        results <- BIOCLIM.OLD
        pbio <- NULL
        fullname <- paste("models/", RASTER.species.name, "_BIOCLIM", sep="")
        # pbio stays NULL if prediction fails (handler only prints a message)
        tryCatch(pbio <- dismo::predict(object=results, x=xn, na.rm=TRUE,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("BIOCLIM prediction failed"))},
            silent=F)
        if (is.null(pbio) == F) {
            results2 <- BIOCLIM.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pbio, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "BIOCLIM"
                pbio <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pbio <- trunc(1000*pbio)
            raster::writeRaster(x=pbio, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pbio, p)/1000
                abs1 <- raster::extract(pbio, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pbio, pt)/1000
                abs1 <- raster::extract(pbio, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: BIOCLIM prediction failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["BIOCLIM"] <- -1
        }
    }
    # DOMAIN: project the fitted DOMAIN model (dismo::predict on the
    # DistModel object) onto xn, optionally recalibrate with the fitted
    # probit GLM, save the map as integers 0-1000 and evaluate at
    # calibration/test locations.
    if (ws["DOMAIN"] > 0) {
        mc <- mc+1
        cat(paste("\n", mc, ". DOMAIN algorithm (package: dismo)\n", sep=""))
        results <- DOMAIN.OLD
        pdom <- NULL
        fullname <- paste("models/", RASTER.species.name, "_DOMAIN", sep="")
        # pdom stays NULL if prediction fails (handler only prints a message)
        tryCatch(pdom <- dismo::predict(object=results, x=xn, na.rm=TRUE,
            filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
            error= function(err) {print(paste("DOMAIN prediction failed"))},
            silent=F)
        if (is.null(pdom) == F) {
            results2 <- DOMAIN.PROBIT.OLD
            if (is.null(results2) == F) {
                # probit recalibration: write raw map, reload as stack, apply probit GLM
                cat(paste("Probit transformation", "\n", sep=""))
                fullname2 <- paste(fullname, "_step1", sep="")
                raster::writeRaster(x=pdom, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
                explan.stack <- stack(fullname2)
                names(explan.stack) <- "DOMAIN"
                pdom <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
                    filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
            }
            # store suitability as integers 0-1000
            pdom <- trunc(1000*pdom)
            raster::writeRaster(x=pdom, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
            if(evaluate == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
                pres1 <- raster::extract(pdom, p)/1000
                abs1 <- raster::extract(pdom, a)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
            if(retest == T) {
                eval1 <- pres1 <- abs1 <- NULL
                cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
                pres1 <- raster::extract(pdom, pt)/1000
                abs1 <- raster::extract(pdom, at)/1000
                eval1 <- evaluate(p=pres1, a=abs1)
                print(eval1)
            }
        }else{
            cat(paste("\n", "WARNING: DOMAIN prediction failed","\n\n", sep = ""))
            # flag failure; negative weight marks this model as unusable
            prediction.failures <- TRUE
            ws["DOMAIN"] <- -1
        }
    }
if (ws["MAHAL"] > 0) {
mc <- mc+1
cat(paste("\n", mc, ". Mahalanobis algorithm (package: dismo)\n", sep=""))
results <- MAHAL.OLD
pmahal <- NULL
fullname <- paste("models/", RASTER.species.name, "_MAHAL", sep="")
# not possible to use the predict.mahal function as raster::predict automatically reverts to dismo::predict for 'DistModel' objects
tryCatch(pmahal <- dismo::predict(object=results, x=xn, na.rm=TRUE,
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("Mahalanobis prediction failed"))},
silent=F)
if (is.null(pmahal) == F) {
pmahal <- pmahal - 1 - MAHAL.shape
pmahal <- abs(pmahal)
pmahal <- MAHAL.shape / pmahal
results2 <- MAHAL.PROBIT.OLD
if (is.null(results2) == F) {
cat(paste("Probit transformation", "\n", sep=""))
fullname2 <- paste(fullname, "_step1", sep="")
raster::writeRaster(x=pmahal, filename=fullname2, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
explan.stack <- stack(fullname2)
names(explan.stack) <- "MAHAL"
pmahal <- raster::predict(object=explan.stack, model=results2, na.rm=TRUE, type="response",
filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format)
}
pmahal <- trunc(1000*pmahal)
raster::writeRaster(x=pmahal, filename=fullname, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
if(evaluate == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations p and a", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal, p)/1000
abs1 <- raster::extract(pmahal, a)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
if(retest == T) {
eval1 <- pres1 <- abs1 <- NULL
cat(paste("\n", "Evaluation at locations pt and at", "\n\n", sep = ""))
pres1 <- raster::extract(pmahal, pt)/1000
abs1 <- raster::extract(pmahal, at)/1000
eval1 <- evaluate(p=pres1, a=abs1)
print(eval1)
}
}else{
cat(paste("\n", "WARNING: Mahalanobis prediction failed","\n\n", sep = ""))
prediction.failures <- TRUE
ws["MAHAL"] <- -1
}
}
if (prediction.failures == T) {
cat(paste("\n", "WARNING: some predictions failed","\n", sep = ""))
cat(paste("\n", "actual weights that were used were (-1 indicates failed predictions):","\n", sep = ""))
print(ws)
ws[which(ws==-1)] <- 0
}
#
# create ensembles
mc <- mc+1
cat(paste("\n\n", mc, ". Ensemble algorithm\n", sep=""))
ensemble.statistics["n.models"] <- sum(as.numeric(ws > 0))
ensemble <- xn[[1]] == raster::NAvalue(xn[[1]])
raster::setMinMax(ensemble)
names(ensemble) <- raster.title
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
enscount <- ensemble
raster::setMinMax(enscount)
names(enscount) <- paste(raster.title, "_count", sep="")
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
enspresence <- ensemble
raster::setMinMax(enspresence)
names(enspresence) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
if (ws["MAXENT"] > 0) {
ensemble <- ensemble + ws["MAXENT"] * pmaxent
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmaxent <- pmaxent >= thresholds["MAXENT"]
enscount <- enscount + pmaxent
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GBM"] > 0) {
ensemble <- ensemble + ws["GBM"] * pgbm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbm <- pgbm >= thresholds["GBM"]
enscount <- enscount + pgbm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GBMSTEP"] > 0) {
ensemble <- ensemble + ws["GBMSTEP"] * pgbms
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgbms <- pgbms >= thresholds["GBMSTEP"]
enscount <- enscount + pgbms
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["RF"] > 0) {
ensemble <- ensemble + ws["RF"] * prf
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prf <- prf >= thresholds["RF"]
enscount <- enscount + prf
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GLM"] > 0) {
ensemble <- ensemble + ws["GLM"] * pglm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglm <- pglm >= thresholds["GLM"]
enscount <- enscount + pglm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GLMSTEP"] > 0) {
ensemble <- ensemble + ws["GLMSTEP"] * pglms
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pglms <- pglms >= thresholds["GLMSTEP"]
enscount <- enscount + pglms
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GAM"] > 0) {
ensemble <- ensemble + ws["GAM"] * pgam
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgam <- pgam >= thresholds["GAM"]
enscount <- enscount + pgam
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["GAMSTEP"] > 0) {
ensemble <- ensemble + ws["GAMSTEP"] * pgams
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pgams <- pgams >= thresholds["GAMSTEP"]
enscount <- enscount + pgams
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MGCV"] > 0) {
ensemble <- ensemble + ws["MGCV"] * pmgcv
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcv <- pmgcv >= thresholds["MGCV"]
enscount <- enscount + pmgcv
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MGCVFIX"] > 0) {
ensemble <- ensemble + ws["MGCVFIX"] * pmgcvf
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmgcvf <- pmgcvf >= thresholds["MGCVFIX"]
enscount <- enscount + pmgcvf
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["EARTH"] > 0) {
ensemble <- ensemble + ws["EARTH"] * pearth
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pearth <- pearth >= thresholds["EARTH"]
enscount <- enscount + pearth
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["RPART"] > 0) {
ensemble <- ensemble + ws["RPART"] * prpart
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
prpart <- prpart >= thresholds["RPART"]
enscount <- enscount + prpart
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["NNET"] > 0) {
ensemble <- ensemble + ws["NNET"] * pnnet
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pnnet <- pnnet >= thresholds["NNET"]
enscount <- enscount + pnnet
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["FDA"] > 0) {
ensemble <- ensemble + ws["FDA"] * pfda
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pfda <- pfda >= thresholds["FDA"]
enscount <- enscount + pfda
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["SVM"] > 0) {
ensemble <- ensemble + ws["SVM"] * psvm
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvm <- psvm >= thresholds["SVM"]
enscount <- enscount + psvm
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["SVME"] > 0) {
ensemble <- ensemble + ws["SVME"] * psvme
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
psvme <- psvme >= thresholds["SVME"]
enscount <- enscount + psvme
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["BIOCLIM"] > 0) {
ensemble <- ensemble + ws["BIOCLIM"] * pbio
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pbio <- pbio >= thresholds["BIOCLIM"]
enscount <- enscount + pbio
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["DOMAIN"] > 0) {
ensemble <- ensemble + ws["DOMAIN"] * pdom
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pdom <- pdom >= thresholds["DOMAIN"]
enscount <- enscount + pdom
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
if (ws["MAHAL"] > 0) {
ensemble <- ensemble + ws["MAHAL"] * pmahal
raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
pmahal <- pmahal >= thresholds["MAHAL"]
enscount <- enscount + pmahal
raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
}
ensemble <- trunc(ensemble)
raster::setMinMax(ensemble)
ensemble.statistics["ensemble.min"] <- raster::minValue(ensemble)
ensemble.statistics["ensemble.max"] <- raster::maxValue(ensemble)
# names(ensemble) <- raster.title
# raster::writeRaster(x=ensemble, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(ensemble, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- raster.title
raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
if (KML.out == T) {
thresholdx <- thresholds["ENSEMBLE"]
seq1 <- seq(from = 0, to = thresholdx, length.out = 10)
seq2 <- seq(from = thresholdx, to = 1000, length.out = 11)
raster::KML(working.raster, filename=kmlfull, col = c(rainbow(n = 10, start = 0, end = 1/6), rainbow(n = 10, start = 3/6, end = 4/6)), colNA = 0,
blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T, breaks = c(seq1, seq2))
}
raster::setMinMax(enscount)
ensemble.statistics["count.min"] <- raster::minValue(enscount)
ensemble.statistics["count.max"] <- raster::maxValue(enscount)
# names(enscount) <- paste(raster.title, "_count", sep="")
# raster::writeRaster(x=enscount, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(enscount, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_count", sep="")
raster::writeRaster(working.raster, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
if (KML.out == T) {
nmax <- sum(as.numeric(ws > 0))
if (nmax > 3) {
raster::KML(working.raster, filename=kmlcount, col=c("grey", rainbow(n=(nmax-1), start=0, end=1/3), "blue"),
colNA=0, blur=10, overwrite=T, breaks=seq(from=-1, to=nmax, by=1))
}else{
raster::KML(working.raster, filename=kmlcount, col=c("grey", rainbow(n=nmax, start=0, end=1/3)),
colNA=0, blur=10, overwrite=TRUE, breaks=seq(from=-1, to=nmax, by=1))
}
}
ensemble.statistics["ensemble.threshold"] <- thresholds["ENSEMBLE"]
enspresence <- ensemble >= thresholds["ENSEMBLE"]
raster::setMinMax(enspresence)
# names(enspresence) <- paste(raster.title, "_presence", sep="")
# raster::writeRaster(x=enspresence, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(working.raster, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
if (KML.out == T) {
raster::KML(working.raster, filename=kmlpresence, col=c("grey", "green"),
colNA=0, blur=KML.blur, maxpixels=KML.maxpixels, overwrite=T)
}
if(evaluate == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations p and a", "\n\n", sep = ""))
pres_consensus <- raster::extract(ensemble, p)/1000
abs_consensus <- raster::extract(ensemble, a)/1000
eval1 <- evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
}
if(retest == T) {
eval1 <- NULL
cat(paste("\n", "Evaluation of created ensemble raster layer (", rasterfull, ") at locations pt and at", "\n\n", sep = ""))
pres_consensus <- raster::extract(ensemble, pt)/1000
abs_consensus <- raster::extract(ensemble, at)/1000
eval1 <- evaluate(p=pres_consensus, a=abs_consensus)
print(eval1)
}
cat(paste("\n", "End of modelling for organism: ", RASTER.species.orig, "\n", sep = ""))
cat(paste("Predictions were made for RasterStack: ", stack.title, "\n\n", sep = ""))
#
# avoid possible problems with saving of names of the raster layers
raster::writeRaster(ensemble, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- raster.title
raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
raster::writeRaster(enscount, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_count", sep="")
raster::writeRaster(working.raster, filename=rastercount, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
raster::writeRaster(enspresence, filename="working.grd", overwrite=T)
working.raster <- raster::raster("working.grd")
names(working.raster) <- paste(raster.title, "_presence", sep="")
raster::writeRaster(working.raster, filename=rasterpresence, progress='text', overwrite=TRUE, format=RASTER.format, datatype="INT1U", NAflag=255)
#
result <- list(ensemble.statistics=ensemble.statistics, call=match.call() )
if (SINK==T && OLD.SINK==F) {sink(file=NULL, append=T)}
return(result)
}
|
\name{orderedLasso}
\alias{orderedLasso}
\title{Fit an ordered lasso}
\usage{
orderedLasso(x, y, lambda, intercept = TRUE, b0 = NULL, beta_pos = NULL,
beta_neg = NULL, method = c("Solve.QP", "GG"), strongly.ordered = FALSE,
standardize = TRUE, trace = FALSE, niter = 500, iter.gg = 100,
epsilon = 1e-08)
}
\arguments{
\item{x}{A matrix of predictors, where the rows are the samples and the columns are the predictors}
\item{y}{A vector of observations, where length(y) equals nrow(x)}
\item{lambda}{Regularization parameter(>0)}
\item{intercept}{TRUE if there is an intercept in the model.}
\item{b0}{Initial value for the intercept.}
\item{beta_pos}{Optional vector of initialization of the positive part of the coefficients.}
\item{beta_neg}{Optional vector of initialization of the negative part of the coefficients.}
\item{method}{Two options available, Solve.QP and Generalized Gradient. Solve.QP uses the package quadprog to solve a quadratic programming
problem. GG stands for generalized gradient: it uses a proximal generalized gradient algorithm to solve the problem. Details can be found
in the paper referred to in the Description.}
\item{strongly.ordered}{An option which allows users to order the coefficients in absolute value. The coefficients returned by the orderedLasso
may not be monotone non-increasing in absolute value even though the positive parts and negative parts are monotone non-increasing.
Details can be seen in the paper referred to in the Description. The strongly.ordered option returns coefficients that are monotone non-increasing in absolute value.}
\item{standardize}{Standardize the data matrix x}
\item{trace}{Output option; trace = TRUE gives verbose output}
\item{niter}{Maximum number of iterations; default 500.}
\item{iter.gg}{Number of iterations of generalized gradient; default 100}
\item{epsilon}{Error tolerance parameter for convergence criterion; default 1e-08.}
}
\value{
\item{bp}{Estimated coefficients- positive part}
\item{bn}{Estimated coefficients- negative part}
\item{beta}{Estimated coefficients, which are equal to bp - bn}
\item{b0}{Estimated intercept, if there is one in the model. }
\item{fitted}{Fitted values of y}
\item{type}{Type of model fit, "gaussian"}
\item{call}{The call to orderedLasso}
}
\description{
One of the main functions. Ordered Lasso builds a regression model with an L1-constraint imposed on the coefficients.
The coefficients are re-written as negative and positive parts and the model requires positive and negative parts are
monotone non-increasing and positive.
}
\examples{
set.seed(3)
n = 50
b = c(7,3,1,0)
p = length(b)
x = matrix(rnorm(n*p),nrow = n)
sigma = 4
y = x \%*\% b + sigma * rnorm(n, 0, 1)
result1 = orderedLasso(x,y, lambda = 1, intercept =TRUE, standardize = TRUE,
method = "GG", strongly.ordered = TRUE)
result2 = orderedLasso(x,y, lambda = 1, intercept = TRUE, standardize =TRUE,
strongly.ordered = TRUE)
print(result1)
print(result2)
}
|
/man/orderedLasso.Rd
|
no_license
|
cran/orderedLasso
|
R
| false
| false
| 2,975
|
rd
|
\name{orderedLasso}
\alias{orderedLasso}
\title{Fit an ordered lasso}
\usage{
orderedLasso(x, y, lambda, intercept = TRUE, b0 = NULL, beta_pos = NULL,
beta_neg = NULL, method = c("Solve.QP", "GG"), strongly.ordered = FALSE,
standardize = TRUE, trace = FALSE, niter = 500, iter.gg = 100,
epsilon = 1e-08)
}
\arguments{
\item{x}{A matrix of predictors, where the rows are the samples and the columns are the predictors}
\item{y}{A vector of observations, where length(y) equals nrow(x)}
\item{lambda}{Regularization parameter(>0)}
\item{intercept}{TRUE if there is an intercept in the model.}
\item{b0}{Initial value for the intercept.}
\item{beta_pos}{Optional vector of initialization of the positive part of the coefficients.}
\item{beta_neg}{Optional vector of initialization of the negative part of the coefficients.}
\item{method}{Two options available, Solve.QP and Generalized Gradient. Solve.QP uses the package quadprog to solve a quadratic programming
problem. GG stands for generalized gradient: it uses a proximal generalized gradient algorithm to solve the problem. Details can be found
in the paper referred to in the Description.}
\item{strongly.ordered}{An option which allows users to order the coefficients in absolute value. The coefficients returned by the orderedLasso
may not be monotone non-increasing in absolute value even though the positive parts and negative parts are monotone non-increasing.
Details can be seen in the paper referred to in the Description. The strongly.ordered option returns coefficients that are monotone non-increasing in absolute value.}
\item{standardize}{Standardize the data matrix x}
\item{trace}{Output option; trace = TRUE gives verbose output}
\item{niter}{Maximum number of iterations; default 500.}
\item{iter.gg}{Number of iterations of generalized gradient; default 100}
\item{epsilon}{Error tolerance parameter for convergence criterion; default 1e-08.}
}
\value{
\item{bp}{Estimated coefficients- positive part}
\item{bn}{Estimated coefficients- negative part}
\item{beta}{Estimated coefficients, which are equal to bp - bn}
\item{b0}{Estimated intercept, if there is one in the model. }
\item{fitted}{Fitted values of y}
\item{type}{Type of model fit, "gaussian"}
\item{call}{The call to orderedLasso}
}
\description{
One of the main functions. Ordered Lasso builds a regression model with an L1-constraint imposed on the coefficients.
The coefficients are re-written as negative and positive parts and the model requires positive and negative parts are
monotone non-increasing and positive.
}
\examples{
set.seed(3)
n = 50
b = c(7,3,1,0)
p = length(b)
x = matrix(rnorm(n*p),nrow = n)
sigma = 4
y = x \%*\% b + sigma * rnorm(n, 0, 1)
result1 = orderedLasso(x,y, lambda = 1, intercept =TRUE, standardize = TRUE,
method = "GG", strongly.ordered = TRUE)
result2 = orderedLasso(x,y, lambda = 1, intercept = TRUE, standardize =TRUE,
strongly.ordered = TRUE)
print(result1)
print(result2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.mediaconvert_operations.R
\name{list_presets}
\alias{list_presets}
\title{Retrieve a JSON array of up to twenty of your presets}
\usage{
list_presets(Category = NULL, ListBy = NULL, MaxResults = NULL,
NextToken = NULL, Order = NULL)
}
\arguments{
\item{Category}{Optionally, specify a preset category to limit responses to only presets from that category.}
\item{ListBy}{}
\item{MaxResults}{Optional. Number of presets, up to twenty, that will be returned at one time}
\item{NextToken}{Use this string, provided with the response to a previous request, to request the next batch of presets.}
\item{Order}{}
}
\description{
Retrieve a JSON array of up to twenty of your presets. This will return the presets themselves, not just a list of them. To retrieve the next twenty presets, use the nextToken string returned with the array.
}
\section{Accepted Parameters}{
\preformatted{list_presets(
Category = "string",
ListBy = "NAME"|"CREATION_DATE"|"SYSTEM",
MaxResults = 123,
NextToken = "string",
Order = "ASCENDING"|"DESCENDING"
)
}
}
|
/service/paws.mediaconvert/man/list_presets.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,134
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.mediaconvert_operations.R
\name{list_presets}
\alias{list_presets}
\title{Retrieve a JSON array of up to twenty of your presets}
\usage{
list_presets(Category = NULL, ListBy = NULL, MaxResults = NULL,
NextToken = NULL, Order = NULL)
}
\arguments{
\item{Category}{Optionally, specify a preset category to limit responses to only presets from that category.}
\item{ListBy}{}
\item{MaxResults}{Optional. Number of presets, up to twenty, that will be returned at one time}
\item{NextToken}{Use this string, provided with the response to a previous request, to request the next batch of presets.}
\item{Order}{}
}
\description{
Retrieve a JSON array of up to twenty of your presets. This will return the presets themselves, not just a list of them. To retrieve the next twenty presets, use the nextToken string returned with the array.
}
\section{Accepted Parameters}{
\preformatted{list_presets(
Category = "string",
ListBy = "NAME"|"CREATION_DATE"|"SYSTEM",
MaxResults = 123,
NextToken = "string",
Order = "ASCENDING"|"DESCENDING"
)
}
}
|
# Auto-generated fuzz/regression input (valgrind test case): exercises the
# internal C++ routine CNull:::communities_individual_based_sampling_alpha
# with a degenerate argument list — m = NULL, a large integer repetition
# count, and a 3x1 matrix of extreme double values — then prints the
# structure of whatever the routine returns.
# NOTE(review): behavior depends entirely on the CNull package internals;
# presumably this case was minimized from a crash — verify against CNull.
testlist <- list(m = NULL, repetitions = 164769688L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615786718-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 253
|
r
|
# Auto-generated fuzz/regression input (valgrind test case): exercises the
# internal C++ routine CNull:::communities_individual_based_sampling_alpha
# with a degenerate argument list — m = NULL, a large integer repetition
# count, and a 3x1 matrix of extreme double values — then prints the
# structure of whatever the routine returns.
# NOTE(review): behavior depends entirely on the CNull package internals;
# presumably this case was minimized from a crash — verify against CNull.
testlist <- list(m = NULL, repetitions = 164769688L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
#' Return standard deviation of residuals of the best models (1 for Mu, 1 for Sigma) applied on observed data
#'
#' @param PAC_obs data.frame containing the observed acute and chronic weighted parameters
#' @param form_mu Formula (or character string coercible to a formula) of the best model selected to predict muC_act
#' @param form_sigma Formula (or character string coercible to a formula) of the best model selected to predict SigmaC_act
#' @return A one-row data.frame with columns \code{Mod_Mu} and \code{Mod_Sigma}:
#'   the residual standard deviations of the two best models fitted on the
#'   observed data, rounded to 3 significant digits.
#' @examples
#' library(ACTR)
#' data(cipr)
#' ciprKP <- subset_data(cipr, Class, 6)
#' PAC_obs <- est_PAC(ciprKP, Class)
#' sd_res <- sd_res_reg(PAC_obs, bm_mu, bm_sigma)
#' @export
sd_res_reg <- function(PAC_obs, form_mu, form_sigma) {
  # as.formula() accepts both character strings and formula objects.
  # The previous as.character() coercion broke genuine formula inputs:
  # as.character() on a formula returns its parts as a length-3 vector
  # ("~", "lhs", "rhs"), which lm() cannot use.
  form_mu <- stats::as.formula(form_mu)
  form_sigma <- stats::as.formula(form_sigma)
  mod_mu <- stats::lm(formula = form_mu, data = PAC_obs)
  mod_sigma <- stats::lm(formula = form_sigma, data = PAC_obs)
  # Residuals of a least-squares fit with an intercept have mean 0 by
  # construction, so only their standard deviation is reported.
  # sprintf("%4.2e") rounds to 3 significant digits before converting back.
  sd_resMu <- as.numeric(sprintf("%4.2e", stats::sd(stats::residuals(mod_mu))))
  sd_resSigma <- as.numeric(sprintf("%4.2e", stats::sd(stats::residuals(mod_sigma))))
  data.frame(Mod_Mu = sd_resMu, Mod_Sigma = sd_resSigma)
}
|
/R/sd_res_reg.R
|
no_license
|
cdv04/ACTR
|
R
| false
| false
| 1,390
|
r
|
#' Return standard deviation of residuals of the best models (1 for Mu, 1 for Sigma) applied on observed data
#'
#' @param PAC_obs data.frame containing the observed acute and chronic weighted parameters
#' @param form_mu Formula of the best model selected to predict muC_act
#' @param form_sigma Formula of the best model selected to predict SigmaC_act
#' @return A one-row data.frame (columns \code{Mod_Mu}, \code{Mod_Sigma})
#'   holding the residual standard deviations of the two best models
#'   applied on the observed data
#' @examples
#' library(ACTR)
#' data(cipr)
#' ciprKP <- subset_data(cipr, Class, 6)
#' PAC_obs <- est_PAC(ciprKP, Class)
#' sd_res <- sd_res_reg(PAC_obs, bm_mu, bm_sigma)
#' @export
sd_res_reg <- function (PAC_obs, form_mu, form_sigma)
{
  mu_formula <- as.character(form_mu)
  sigma_formula <- as.character(form_sigma)
  fit_mu <- lm(formula = mu_formula, data = PAC_obs)
  fit_sigma <- lm(formula = sigma_formula, data = PAC_obs)
  # lm residuals have mean zero by construction, so only the standard
  # deviation is reported; sprintf("%4.2e") keeps 3 significant digits.
  rounded_sd <- function(fit) as.numeric(sprintf("%4.2e", sd(residuals(fit))))
  out <- data.frame(Mu = rounded_sd(fit_mu), Sigma = rounded_sd(fit_sigma))
  names(out) <- c("Mod_Mu", "Mod_Sigma")
  out
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.